aws:
  # Credentials can be hardcoded or set in environment variables
  access_key_id: XXX
  secret_access_key: XXX
  s3:
    region: ap-southeast-1
    buckets:
      assets: s3://snowplow-hosted-assets
      jsonpath_assets:
      log: s3n://XXX-snwplw-etl/logs/
      raw:
        in:
          - s3n://XXX-snwplw-logs/
        processing: s3n://XXX-snwplw-etl/processing/
        archive: s3://XXX-snwplw-archive/raw
      enriched:
        good: s3://XXX-snwplw-data/enriched/good
        bad: s3://XXX-snwplw-data/enriched/bad
        errors: s3://XXX-snwplw-data/enriched/errors
        archive: s3://XXX-snwplw-data/enriched/archive
      shredded:
        good: s3://XXX-snwplw-data/shredded/good
        bad: s3://XXX-snwplw-data/shredded/bad
        archive: s3://XXX-snwplw-data/shredded/archive
        errors: s3://XXX-snwplw-data/shredded/errors
  emr:
    ami_version: 4.5.0
    region: ap-southeast-1 # Always set this
    jobflow_role: EMR_EC2_DefaultRole # Created using $ aws emr create-default-roles
    service_role: EMR_DefaultRole # Created using $ aws emr create-default-roles
    placement: ap-southeast-1a # Set this if not running in VPC. Leave blank otherwise
    ec2_subnet_id: #subnet-XXX # Set this if running in VPC. Leave blank otherwise
    ec2_key_name: XXX-production
    bootstrap: [] # Set this to specify custom bootstrap actions. Leave empty otherwise
    software:
      hbase: # Optional. To launch on cluster, provide version, "0.92.0", keep quotes. Leave empty otherwise.
      lingual: # Optional. To launch on cluster, provide version, "1.1", keep quotes. Leave empty otherwise.
    # Adjust your Hadoop cluster below
    jobflow:
      master_instance_type: m1.medium
      core_instance_count: 2
      core_instance_type: m1.medium
      task_instance_count: 0 # Increase to use spot instances
      task_instance_type: m1.medium
      task_instance_bid: 0.015 # In USD. Adjust bid, or leave blank for non-spot-priced (i.e. on-demand) task instances
    bootstrap_failure_tries: 3 # Number of times to attempt the job in the event of bootstrap failures
    additional_info: # Optional JSON string for selecting additional features
collectors:
  format: cloudfront # For example: 'clj-tomcat' for the Clojure Collector, 'thrift' for Thrift records, 'tsv/com.amazon.aws.cloudfront/wd_access_log' for Cloudfront access logs or 'ndjson/urbanairship.connect/v1' for UrbanAirship Connect events
enrich:
  job_name: Snowplow ETL # Give your job a name
  versions:
    hadoop_enrich: 1.7.0 # Version of the Hadoop Enrichment process
    hadoop_shred: 0.9.0 # Version of the Hadoop Shredding process
    hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process
  continue_on_unexpected_error: false # Set to 'true' (and set :out_errors: above) if you don't want any exceptions thrown from ETL
  output_compression: NONE # Compression only supported with Redshift, set to NONE if you have Postgres targets. Allowed formats: NONE, GZIP
storage:
  download:
    folder: /home/ec2-user/snowplow-data-download # Postgres-only config option. Where to store the downloaded files. Leave blank for Redshift
  targets:
    - name: "My PostgreSQL database"
      type: postgres
      host: XXX # Hostname of database server
      database: snowplow # Name of database
      port: 5432 # Default Postgres port
      ssl_mode: disable # One of disable (default), require, verify-ca or verify-full
      table: atomic.events
      username: XXX
      password: XXX
      es_nodes_wan_only:
      maxerror: # Not required for Postgres
      comprows: # Not required for Postgres
monitoring:
  tags: {} # Name-value pairs describing this job
  logging:
    level: DEBUG # You can optionally switch to INFO for production
  snowplow:
I am using this version of the runner: http://dl.bintray.com/snowplow/snowplow-generic/snowplow_emr_r77_great_auk.zip, and the command I run is './snowplow-emr-etl-runner --debug --config config.yml --resolver resolver.json'.
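For reference, the resolver.json passed via --resolver is an Iglu resolver configuration. A minimal sketch that points only at the public Iglu Central registry (an assumption on my part; you would add further repository entries if you host custom schemas for your own JSON Schemas/jsonpaths) looks like this:

{
  "schema": "iglu:com.snowplowanalytics.iglu/resolver-config/jsonschema/1-0-0",
  "data": {
    "cacheSize": 500,
    "repositories": [
      {
        "name": "Iglu Central",
        "priority": 0,
        "vendorPrefixes": [ "com.snowplowanalytics" ],
        "connection": {
          "http": {
            "uri": "http://iglucentral.com"
          }
        }
      }
    ]
  }
}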