EMR contract broken

I went through a few posts with similar errors, but none of them could fix my issue.

./emr-etl-runner -d -c emr.yml

Error:

Expected: {:logging=>{:level=>String}, :aws=>{:access_key_id=>String, :secret_access_key=>String}, :s3=>{:region=>String, :buckets=>{:assets=>String, :log=>String, :raw=>{:in=>String, :processing=>String}, :enriched=>{:good=>String, :bad=>String, :errors=>#<Contracts::Maybe:0x53dc0066 @vals=[String, nil]>}, :shredded=>{:good=>String, :bad=>String, :errors=>#<Contracts::Maybe:0x109e538d @vals=[String, nil]>}}}, :emr=>{:ami_version=>String, :region=>String, :placement=>#<Contracts::Maybe:0xa7f5681 @vals=[String, nil]>, :ec2_subnet_id=>#<Contracts::Maybe:0x2e083489 @vals=[String, nil]>, :ec2_key_name=>String, :software=>{:hbase=>#<Contracts::Maybe:0x1cb125fb @vals=[String, nil]>, :lingual=>#<Contracts::Maybe:0x32349074 @vals=[String, nil]>}, :jobflow=>{:master_instance_type=>String, :core_instance_count=>Contracts::Num, :core_instance_type=>String, :task_instance_count=>Contracts::Num, :task_instance_type=>String, :task_instance_bid=>#<Contracts::Maybe:0x18f7c96c @vals=[Contracts::Num, nil]>}}, :etl=>{:job_name=>String, :versions=>{:hadoop_enrich=>String, :hadoop_shred=>String}, :collector_format=>String, :continue_on_unexpected_error=>Contracts::Bool}, :iglu=>{:schema=>String, :data=>{:cache_size=>Contracts::Num, :repositories=>#<Contracts::ArrayOf:0x33c8de42 @contract={:name=>String, :priority=>Contracts::Num, :vendor_prefixes=>#<Contracts::ArrayOf:0x362e1cd @contract=String>, :connection=>{:http=>{:uri=>String}}}>}}},
    Actual: {"aws"=>{"access_key_id"=>"****", "secret_access_key"=>"****", "s3"=>{"region"=>"us-east-1", "buckets"=>{"assets"=>"s3n://snowplow-hosted-assets", "jsonpath_assets"=>nil, "log"=>"s3n://snowplowLog/logs", "raw"=>{"in"=>["s3n://elasticbeanstalk-us-east-1-823104196891/resources/environments/logs/publish/e-a2csmvtisj/i-0c21bbb0742cba24c/"], "processing"=>"s3://snowplow-raw/processing", "archive"=>"s3://snowplow-raw/archive"}, "enriched"=>{"good"=>"s3://enriched/good", "bad"=>"s3://enriched/bad", "errors"=>nil, "archive"=>"s3://enriched/archive"}, "shredded"=>{"good"=>"s3://shredded/good", "bad"=>"s3://shredded/bad", "errors"=>nil, "archive"=>"s3://shredded/archive"}}}, "emr"=>{"ami_version"=>"5.5.0", "region"=>"us-east-1", "jobflow_role"=>"EMR_EC2_DefaultRole", "service_role"=>"EMR_DefaultRole", "placement"=>nil, "ec2_subnet_id"=>"subnet-26afd30c", "ec2_key_name"=>"snowplow-ec2", "bootstrap"=>[], "software"=>{"hbase"=>nil, "lingual"=>nil}, "jobflow"=>{"job_name"=>"Snowplow ETL", "master_instance_type"=>"m1.medium", "core_instance_count"=>2, "core_instance_type"=>"m1.medium", "core_instance_ebs"=>{"volume_size"=>100, "volume_type"=>"gp2", "volume_iops"=>400, "ebs_optimized"=>false}, "task_instance_count"=>0, "task_instance_type"=>"m1.medium", "task_instance_bid"=>0.015}, "bootstrap_failure_tries"=>3, "configuration"=>{"yarn-site"=>{"yarn.resourcemanager.am.max-attempts"=>1}, "spark"=>{"maximizeResourceAllocation"=>true}}, "additional_info"=>nil}}, "collectors"=>{"format"=>"clj-tomcat"}, "enrich"=>{"versions"=>{"spark_enrich"=>"1.9.0"}, "continue_on_unexpected_error"=>false, "output_compression"=>"NONE"}, "storage"=>{"versions"=>{"rdb_loader"=>"0.12.0", "rdb_shredder"=>"0.12.0", "hadoop_elasticsearch"=>"0.1.0"}}, "monitoring"=>{"tags"=>{}, "logging"=>{"level"=>"DEBUG"}, "snowplow"=>{"method"=>"get", "app_id"=>"snowplow", "collector"=>"ec2-xxxxxx.compute-1.amazonaws.com"}}, "iglu"=>{"schema"=>"iglu:com.snowplowanalytics.iglu/resolver-config/jsonschema/1-0-0", "data"=>{"cache_size"=>500, "repositories"=>[{"name"=>"Iglu Central", "priority"=>0, "vendor_prefixes"=>["com.snowplowanalytics"], "connection"=>{"http"=>{"uri"=>"http://iglucentral.com"}}}]}}}

Config file

  aws:
# Credentials can be hardcoded or set in environment variables
access_key_id: ***
secret_access_key: ***
s3:
  region: us-east-1
  buckets:
    assets: s3n://snowplow-hosted-assets # DO NOT CHANGE unless you are hosting the jarfiles etc yourself in your own bucket
    jsonpath_assets: # If you have defined your own JSON Schemas, add the s3:// path to your own JSON Path files in your own bucket here
    log: s3n://snowplowLog/logs
    raw:
      in:                  # This is a YAML array of one or more in buckets - you MUST use hyphens before each entry in the array, as below
        - s3n://elasticbeanstalk-us-east-1-823104196891/resources/environments/logs/publish/e-a2csmvtisj/i-0c21bbb0742cba24c/         # e.g. s3://my-old-collector-bucket
      processing: s3://snowplow-raw/processing
      archive: s3://snowplow-raw/archive    # e.g. s3://my-archive-bucket/raw
    enriched:
      good: s3://enriched/good       # e.g. s3://my-out-bucket/enriched/good
      bad: s3://enriched/bad        # e.g. s3://my-out-bucket/enriched/bad
      errors:    # Leave blank unless :continue_on_unexpected_error: set to true below
      archive: s3://enriched/archive    # Where to archive enriched events to, e.g. s3://my-archive-bucket/enriched
    shredded:
      good: s3://shredded/good       # e.g. s3://my-out-bucket/shredded/good
      bad: s3://shredded/bad        # e.g. s3://my-out-bucket/shredded/bad
      errors:    # Leave blank unless :continue_on_unexpected_error: set to true below
      archive: s3://shredded/archive    # Where to archive shredded events to, e.g. s3://my-archive-bucket/shredded
emr:
  ami_version: 4.5.0
  region: us-east-1        # Always set this
  #jobflow_role: EMR_EC2_DefaultRole # Created using $ aws emr create-default-roles
  #service_role: EMR_DefaultRole     # Created using $ aws emr create-default-roles
  placement:     # Set this if not running in VPC. Leave blank otherwise
  ec2_subnet_id: subnet-****  # Set this if running in VPC. Leave blank otherwise
  ec2_key_name: snowplow-ec2
  bootstrap: []           # Set this to specify custom boostrap actions. Leave empty otherwise
  software:
    hbase:                # Optional. To launch on cluster, provide version, "0.92.0", keep quotes. Leave empty otherwise.
    lingual:              # Optional. To launch on cluster, provide version, "1.1", keep quotes. Leave empty otherwise.
  # Adjust your Hadoop cluster below
  jobflow:
    job_name: Snowplow ETL # Give your job a name
    master_instance_type: m1.medium
    core_instance_count: 2
    core_instance_type: m1.medium
    core_instance_ebs:    # Optional. Attach an EBS volume to each core instance.
      volume_size: 100   # Gigabytes
      volume_type: gp2
      volume_iops: 400    # Optional. Will only be used if volume_type is "io1"
      ebs_optimized: false # Optional. Will default to true
    task_instance_count: 0 # Increase to use spot instances
    task_instance_type: m1.medium
    task_instance_bid: 0.015 # In USD. Adjust bid, or leave blank for non-spot-priced (i.e. on-demand) task instances
  bootstrap_failure_tries: 3 # Number of times to attempt the job in the event of bootstrap failures
  configuration:
    yarn-site:
      yarn.resourcemanager.am.max-attempts: 1
    spark:
      maximizeResourceAllocation: true
  additional_info:        # Optional JSON string for selecting additional features
collectors:
   format: clj-tomcat # For example: 'clj-tomcat' for the Clojure Collector, 'thrift' for Thrift records, 'tsv/com.amazon.aws.cloudfront/wd_access_log' for Cloudfront access logs or 'ndjson/urbanairship.connect/v1' for UrbanAirship Connect events
enrich:
 versions:
  spark_enrich: 1.9.0 # Version of the Spark Enrichment process
continue_on_unexpected_error: false # Set to 'true' (and set :out_errors: above) if you don't want any exceptions thrown from ETL
output_compression: NONE # Compression only supported with Redshift, set to NONE if you have Postgres targets. Allowed formats: NONE, GZIP
storage:
versions:
  rdb_loader: 0.12.0
  rdb_shredder: 0.12.0        # Version of the Spark Shredding process
  hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process
monitoring:
tags: {} # Name-value pairs describing this job
logging:
  level: DEBUG # You can optionally switch to INFO for production
snowplow:
  method: get
  app_id: snowplow # e.g. snowplow
  collector: ec2-xxxxx.amazonaws.com # e.g. d3rkrsqld9gmqf.cloudfront.net
iglu:
  schema: iglu:com.snowplowanalytics.iglu/resolver-config/jsonschema/1-0-0
  data:
    cache_size: 500
    repositories:
      - name: "Iglu Central"
        priority: 0
        vendor_prefixes:
          - com.snowplowanalytics
        connection:
          http:
            uri: http://iglucentral.com

@shashankn91,

A quick look at your config shows the wrong indentation.

The actual (vs. expected) structure in the error gives you a hint:

"storage"=>{"versions"=>{"rdb_loader"=>"0.12.0", "rdb_shredder"=>"0.12.0", "hadoop_elasticsearch"=>"0.1.0"}}

That is, versions has to be indented as in

storage:
  versions:
    rdb_loader: 0.12.0
    rdb_shredder: 0.12.0        # Version of the Spark Shredding process
    hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process

Your config, on the other hand, has

storage:
versions:
  rdb_loader: 0.12.0
  rdb_shredder: 0.12.0        # Version of the Spark Shredding process
  hadoop_elasticsearch: 0.1.0 # Version of the Hadoop to Elasticsearch copying process

Indentation is significant in YAML. If in doubt, check out an example (I believe you use release R90): https://github.com/snowplow/snowplow/blob/r90-lascaux/3-enrich/emr-etl-runner/config/config.yml.sample. Do bear in mind that the structure of the configuration file differs between Snowplow versions.
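
To see why this matters, here is a minimal sketch using Ruby's built-in YAML library (the values are illustrative, not taken from your config). The mis-indented variant leaves versions as a top-level sibling of storage instead of nesting it underneath, which is the kind of structural mismatch the contract check reports:

require 'yaml'

correct = <<~YAML
  storage:
    versions:
      rdb_loader: 0.12.0
YAML

misindented = <<~YAML
  storage:
  versions:
    rdb_loader: 0.12.0
YAML

# Correctly nested: versions sits under storage
puts YAML.load(correct).inspect
# => {"storage"=>{"versions"=>{"rdb_loader"=>"0.12.0"}}}

# Mis-indented: storage is empty and versions becomes a sibling key
puts YAML.load(misindented).inspect
# => {"storage"=>nil, "versions"=>{"rdb_loader"=>"0.12.0"}}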

@ihor Thanks for the reply.

I found the issue:

yarn.resourcemanager.am.max-attempts: 1
maximizeResourceAllocation: true

should be

yarn.resourcemanager.am.max-attempts: "1"
maximizeResourceAllocation: "true"

@shashankn91,

The quotes are irrelevant in YAML here. I don’t believe they would cause any issue.
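
Here is a minimal sketch, using Ruby's YAML library rather than the runner itself, of the only thing the quotes change: the parser returns an Integer and a Boolean for the unquoted values and Strings for the quoted ones. Whether that difference matters depends on what the configuration contract accepts for that section.

require 'yaml'

unquoted_yaml = <<~YAML
  yarn.resourcemanager.am.max-attempts: 1
  maximizeResourceAllocation: true
YAML

quoted_yaml = <<~YAML
  yarn.resourcemanager.am.max-attempts: "1"
  maximizeResourceAllocation: "true"
YAML

# Unquoted scalars come back as an Integer and a Boolean
puts YAML.load(unquoted_yaml).inspect
# => {"yarn.resourcemanager.am.max-attempts"=>1, "maximizeResourceAllocation"=>true}

# Quoted scalars come back as Strings
puts YAML.load(quoted_yaml).inspect
# => {"yarn.resourcemanager.am.max-attempts"=>"1", "maximizeResourceAllocation"=>"true"}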