pax_global_header00006660000000000000000000000064143734466010014522gustar00rootroot0000000000000052 comment=b3877bfcedba06e461110e7e9410b708bef0739e fog-aws-3.18.0/000077500000000000000000000000001437344660100131565ustar00rootroot00000000000000fog-aws-3.18.0/.github/000077500000000000000000000000001437344660100145165ustar00rootroot00000000000000fog-aws-3.18.0/.github/dependabot.yml000066400000000000000000000003171437344660100173470ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "bundler" directory: "/" schedule: interval: "daily" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" fog-aws-3.18.0/.github/workflows/000077500000000000000000000000001437344660100165535ustar00rootroot00000000000000fog-aws-3.18.0/.github/workflows/codeql.yml000066400000000000000000000045261437344660100205540ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ master ] pull_request: # The branches below must be a subset of the branches above branches: [ master ] jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ 'ruby' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support steps: - name: Checkout repository uses: actions/checkout@v3 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v2 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 fog-aws-3.18.0/.github/workflows/dependency-review.yml000066400000000000000000000015641437344660100227210ustar00rootroot00000000000000# Dependency Review Action # # This Action will scan dependency manifest files that change as part of a Pull Reqest, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging. 
# # Source repository: https://github.com/actions/dependency-review-action # Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement name: 'Dependency Review' on: [pull_request] permissions: contents: read jobs: dependency-review: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' uses: actions/checkout@v3 - name: 'Dependency Review' uses: actions/dependency-review-action@v3 fog-aws-3.18.0/.github/workflows/ruby.yml000066400000000000000000000021151437344660100202560ustar00rootroot00000000000000# This workflow uses actions that are not certified by GitHub. # They are provided by a third-party and are governed by # separate terms of service, privacy policy, and support # documentation. # This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake # For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby name: Ruby on: push: branches: [ master ] pull_request: branches: [ master ] permissions: contents: read jobs: test: continue-on-error: ${{ matrix.ruby-version == 'truffleruby-head' }} env: BUNDLER_GEMFILE: gemfiles/Gemfile-edge runs-on: ubuntu-latest strategy: matrix: ruby-version: ['2.5', '2.6', '2.7', '3.0', '3.1', 'head', 'truffleruby-head'] steps: - uses: actions/checkout@v3 - name: Set up Ruby uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby-version }} bundler-cache: true # runs 'bundle install' and caches installed gems automatically - name: Run tests run: bundle exec rake fog-aws-3.18.0/.github/workflows/stale.yml000066400000000000000000000015471437344660100204150ustar00rootroot00000000000000name: Mark stale issues and pull requests on: schedule: - cron: "30 1 * * *" permissions: contents: read jobs: stale: permissions: issues: write # for actions/stale to close stale issues pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-latest steps: - uses: actions/stale@v6 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-stale: 60 days-before-close: 7 exempt-issue-labels: 'pinned,security' exempt-pr-labels: 'pinned,security' stale-issue-message: 'This issue has been marked inactive and will be closed if no further activity occurs.' stale-pr-message: 'This pr has been marked inactive and will be closed if no further activity occurs.' 
stale-issue-label: 'no-issue-activity' stale-pr-label: 'no-pr-activity' fog-aws-3.18.0/.gitignore000066400000000000000000000002421437344660100151440ustar00rootroot00000000000000/.bundle/ /.yardoc /Gemfile.lock /_yardoc/ /coverage/ /doc/ /pkg/ /spec/reports/ /tmp/ *.bundle *.so *.o *.a mkmf.log tests/.fog gemfiles/Gemfile-edge.lock .idea fog-aws-3.18.0/CHANGELOG.md000066400000000000000000002523461437344660100150030ustar00rootroot00000000000000# Changelog ## [v3.18.0](https://github.com/fog/fog-aws/tree/v3.18.0) (2023-02-16) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.17.0...v3.18.0) **Merged pull requests:** - Only compute SSE-C headers when needed in multipart upload [\#669](https://github.com/fog/fog-aws/pull/669) ([stanhu](https://github.com/stanhu)) ## [v3.17.0](https://github.com/fog/fog-aws/tree/v3.17.0) (2023-02-09) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.16.0...v3.17.0) **Merged pull requests:** - Support disabling of Content-MD5 for FIPS [\#668](https://github.com/fog/fog-aws/pull/668) ([stanhu](https://github.com/stanhu)) ## [v3.16.0](https://github.com/fog/fog-aws/tree/v3.16.0) (2023-01-26) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.15.0...v3.16.0) **Closed issues:** - README lists incorrect usage of IAM auth [\#663](https://github.com/fog/fog-aws/issues/663) - How can i config to use s3 in localstack [\#657](https://github.com/fog/fog-aws/issues/657) - Fog::Storage::AWS::Files#each always iterates over entire collection [\#232](https://github.com/fog/fog-aws/issues/232) - superclass mismatch for class AWS [\#655](https://github.com/fog/fog-aws/issues/655) - Lambda IAM Role Not Working [\#650](https://github.com/fog/fog-aws/issues/650) **Merged pull requests:** - adding missing region ap-southeast-4 [\#665](https://github.com/fog/fog-aws/pull/665) ([emptyhammond](https://github.com/emptyhammond)) o - adding missing region eu-south-2 [\#662](https://github.com/fog/fog-aws/pull/662) ([ivangool](https://github.com/ivangool)) - Bump actions/dependency-review-action from 2 to 3 [\#659](https://github.com/fog/fog-aws/pull/659) ([dependabot[bot]](https://github.com/apps/dependabot)) - Update aws.rb [\#658](https://github.com/fog/fog-aws/pull/658) ([ivangool](https://github.com/ivangool)) - Bump actions/stale from 5 to 6 [\#656](https://github.com/fog/fog-aws/pull/656) ([dependabot[bot]](https://github.com/apps/dependabot)) ## [v3.15.0](https://github.com/fog/fog-aws/tree/v3.15.0) (2022-09-12) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.14.0...v3.15.0) **Closed issues:** - URI.decode is obsolete \(and not available in Ruby 3.0\) [\#653](https://github.com/fog/fog-aws/issues/653) - S3: File copy not working outside us-east-1 region [\#645](https://github.com/fog/fog-aws/issues/645) - Unable to list, update and remove RDS tags in AWS GovCloud Account regions. 
[\#644](https://github.com/fog/fog-aws/issues/644) - Documentation links broken / 404 [\#642](https://github.com/fog/fog-aws/issues/642) **Merged pull requests:** - Replace URI.decode as obsolete, and removed in Ruby 3.0 [\#654](https://github.com/fog/fog-aws/pull/654) ([kuahyeow](https://github.com/kuahyeow)) - Fix typo in readme [\#652](https://github.com/fog/fog-aws/pull/652) ([geemus](https://github.com/geemus)) - change sync\_clock to plain GET [\#651](https://github.com/fog/fog-aws/pull/651) ([duckworth](https://github.com/duckworth)) - Update README file with download url example [\#649](https://github.com/fog/fog-aws/pull/649) ([lucasocon](https://github.com/lucasocon)) - Bump actions/dependency-review-action from 1 to 2 [\#648](https://github.com/fog/fog-aws/pull/648) ([dependabot[bot]](https://github.com/apps/dependabot)) - add x2gd and t4g instance flavours [\#647](https://github.com/fog/fog-aws/pull/647) ([mushyy](https://github.com/mushyy)) - Fix a typo in CHANGELOG [\#646](https://github.com/fog/fog-aws/pull/646) ([y-yagi](https://github.com/y-yagi)) ## [v3.14.0](https://github.com/fog/fog-aws/tree/v3.14.0) (2022-05-09) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.13.0...v3.14.0) **Closed issues:** - Add a special note to the documentation around the danger of using directory.get [\#633](https://github.com/fog/fog-aws/issues/633) **Merged pull requests:** - RDS tags issue in AWS GovCloud Account regions. [\#643](https://github.com/fog/fog-aws/pull/643) ([svavhal](https://github.com/svavhal)) - Create codeql.yml [\#641](https://github.com/fog/fog-aws/pull/641) ([naveensrinivasan](https://github.com/naveensrinivasan)) - chore\(deps\): Included dependency review [\#640](https://github.com/fog/fog-aws/pull/640) ([naveensrinivasan](https://github.com/naveensrinivasan)) - Bump actions/stale from 4 to 5 [\#639](https://github.com/fog/fog-aws/pull/639) ([dependabot[bot]](https://github.com/apps/dependabot)) - Set permissions for GitHub actions [\#638](https://github.com/fog/fog-aws/pull/638) ([naveensrinivasan](https://github.com/naveensrinivasan)) - Add option to control IAM credential refresh [\#637](https://github.com/fog/fog-aws/pull/637) ([gl-gh-hchouraria](https://github.com/gl-gh-hchouraria)) - Add warning messages around directories.get [\#636](https://github.com/fog/fog-aws/pull/636) ([orrin-naylor-instacart](https://github.com/orrin-naylor-instacart)) - Bump actions/checkout from 2.4.0 to 3 [\#632](https://github.com/fog/fog-aws/pull/632) ([dependabot[bot]](https://github.com/apps/dependabot)) - Add Ruby 3.1 to the CI matrix [\#631](https://github.com/fog/fog-aws/pull/631) ([petergoldstein](https://github.com/petergoldstein)) ## [v3.13.0](https://github.com/fog/fog-aws/tree/v3.13.0) (2022-02-13) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.12.0...v3.13.0) **Closed issues:** - Please include all files required for running tests in the gem [\#625](https://github.com/fog/fog-aws/issues/625) - Using Hitachi compatible S3 and still see the AWS S3 host [\#624](https://github.com/fog/fog-aws/issues/624) - Spawn compute node with Elastic Inference [\#617](https://github.com/fog/fog-aws/issues/617) **Merged pull requests:** - Drop ipaddress dependency in favor of built in ipaddr [\#630](https://github.com/fog/fog-aws/pull/630) ([ekohl](https://github.com/ekohl)) - Exclude test files from gem [\#629](https://github.com/fog/fog-aws/pull/629) ([ursm](https://github.com/ursm)) - Add Truffleruby head to CI [\#628](https://github.com/fog/fog-aws/pull/628) 
([gogainda](https://github.com/gogainda)) - Bump actions/checkout from 2.3.5 to 2.4.0 [\#627](https://github.com/fog/fog-aws/pull/627) ([dependabot[bot]](https://github.com/apps/dependabot)) - Update Fog::AWS::Storage::File\#body [\#626](https://github.com/fog/fog-aws/pull/626) ([10io](https://github.com/10io)) - Bump actions/checkout from 2.3.4 to 2.3.5 [\#623](https://github.com/fog/fog-aws/pull/623) ([dependabot[bot]](https://github.com/apps/dependabot)) - Add json files to the gem file [\#622](https://github.com/fog/fog-aws/pull/622) ([acant](https://github.com/acant)) ## [v3.12.0](https://github.com/fog/fog-aws/tree/v3.12.0) (2021-08-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.11.0...v3.12.0) **Merged pull requests:** - Add support for regional STS endpoints [\#620](https://github.com/fog/fog-aws/pull/620) ([stanhu](https://github.com/stanhu)) - Add IPv6 support for Ingress Security Groups [\#619](https://github.com/fog/fog-aws/pull/619) ([p8](https://github.com/p8)) - Separate CHANGELOG entry for 3.11.0 [\#618](https://github.com/fog/fog-aws/pull/618) ([sunny](https://github.com/sunny)) ## [v3.11.0](https://github.com/fog/fog-aws/tree/v3.11.0) (2021-08-05) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.10.0...v3.11.0) **Closed issues:** - Support for Regional STS Endpoints [\#604](https://github.com/fog/fog-aws/issues/604) - Pass tags when creating EBS ? [\#603](https://github.com/fog/fog-aws/issues/603) - S3 multiple delete api should handle delete for multiple versions for a single object [\#598](https://github.com/fog/fog-aws/issues/598) - Fog does not return error from s3 [\#586](https://github.com/fog/fog-aws/issues/586) - Add support for r6g and c6g instance types [\#580](https://github.com/fog/fog-aws/issues/580) - Using internal S3 provider ... and something funky is going on! [\#575](https://github.com/fog/fog-aws/issues/575) - How to upload File to S3 with with accelerate? [\#554](https://github.com/fog/fog-aws/issues/554) - How to use assume\_role\_with\_web\_identity auth for S3 storage? [\#543](https://github.com/fog/fog-aws/issues/543) - Fog::AWS::Glacier::TreeHash::add\_part works only sometimes [\#520](https://github.com/fog/fog-aws/issues/520) - fog-aws: Fog::AWS::Glacier::Job doesn't support RetrievalByteRange [\#519](https://github.com/fog/fog-aws/issues/519) - Path style is being deprecated [\#516](https://github.com/fog/fog-aws/issues/516) - Fog::DNS::AWS can't read zones [\#513](https://github.com/fog/fog-aws/issues/513) - Lambda Parser can't handle VPC config, containing array of hash values [\#509](https://github.com/fog/fog-aws/issues/509) - Excon::Error::Forbidden: Expected\(200\) \<=\> Actual\(403 Forbidden\) [\#508](https://github.com/fog/fog-aws/issues/508) - file.save does not work with STDIN [\#500](https://github.com/fog/fog-aws/issues/500) - ELBv2 Support ? [\#489](https://github.com/fog/fog-aws/issues/489) - S3 Select Support? [\#484](https://github.com/fog/fog-aws/issues/484) - nil:NilClass error [\#483](https://github.com/fog/fog-aws/issues/483) - Mocks for VM creation require access and secret key when using instance profiles [\#482](https://github.com/fog/fog-aws/issues/482) - Always use bucket virtual hostname? 
[\#464](https://github.com/fog/fog-aws/issues/464) - Spot instance creation : Support for BlockDurationMinutes [\#461](https://github.com/fog/fog-aws/issues/461) - How can I remove the `Content-Encoding` metadata properties if I upload from fog [\#447](https://github.com/fog/fog-aws/issues/447) - AWS::ECS with `use_iam_profile` errors out [\#441](https://github.com/fog/fog-aws/issues/441) - Option to turn off Warnings [\#426](https://github.com/fog/fog-aws/issues/426) - Accessing AWS S3 using EC2 Instance Profile [\#423](https://github.com/fog/fog-aws/issues/423) - Support step and target tracking auto scaling policies [\#422](https://github.com/fog/fog-aws/issues/422) - could not create ec2 volume with custom encryption key, volume only create with default 'aws/ebs' encryption key [\#420](https://github.com/fog/fog-aws/issues/420) - Download File with content\_disposition [\#418](https://github.com/fog/fog-aws/issues/418) - Fog::Compute::AWS::Error iamInstanceProfile.name is invalid [\#410](https://github.com/fog/fog-aws/issues/410) - Mocks for EC2 instance creation do not behave as expected [\#404](https://github.com/fog/fog-aws/issues/404) - Cannot copy an encrypted snapshot from one account to another account [\#398](https://github.com/fog/fog-aws/issues/398) - Fog::Compute::AWS::Image\#deregister ignores non-root snapshots. [\#380](https://github.com/fog/fog-aws/issues/380) - AWS S3 overwrites files with same name [\#378](https://github.com/fog/fog-aws/issues/378) - Support S3 object tagging [\#377](https://github.com/fog/fog-aws/issues/377) - Reqeust to support Aws::DynamoDBStreams [\#373](https://github.com/fog/fog-aws/issues/373) - Not all Rds versions and Instance Types are rendered [\#371](https://github.com/fog/fog-aws/issues/371) - Tag instances upon creation of new instance [\#359](https://github.com/fog/fog-aws/issues/359) - Creating instances in AWS fails with Socket Error [\#352](https://github.com/fog/fog-aws/issues/352) - `NameError: uninitialized constant Fog::ServicesMixin` when requiring `fog/storage` [\#345](https://github.com/fog/fog-aws/issues/345) - Add full support for target groups [\#328](https://github.com/fog/fog-aws/issues/328) - Fog transfer acceleration endpoints [\#303](https://github.com/fog/fog-aws/issues/303) - "Fog::DNS\[:aws\] | change\_resource\_record\_sets \(aws, dns\)" test suite flaky [\#301](https://github.com/fog/fog-aws/issues/301) - Cross account access using IAM role [\#294](https://github.com/fog/fog-aws/issues/294) - Write timeout trying to upload a large file to S3 [\#291](https://github.com/fog/fog-aws/issues/291) - Support Autoscaling lifecycle hooks [\#289](https://github.com/fog/fog-aws/issues/289) - directories ignore region option [\#287](https://github.com/fog/fog-aws/issues/287) - Feature: Access logs for ELB [\#271](https://github.com/fog/fog-aws/issues/271) - S3: retry on 500 internal server error [\#264](https://github.com/fog/fog-aws/issues/264) - Alias for server side encryption not working [\#260](https://github.com/fog/fog-aws/issues/260) - InvalidParameterCombination =\> You cannot move a DB instance with Single-Az enabled to a VPC \(Fog::AWS::RDS::Error\) [\#255](https://github.com/fog/fog-aws/issues/255) - Using STS [\#253](https://github.com/fog/fog-aws/issues/253) - Auto Scaling Group does not enable metrics [\#251](https://github.com/fog/fog-aws/issues/251) - aws has no storage service [\#248](https://github.com/fog/fog-aws/issues/248) - Timeouts on Compute\#describe\_volumes due to extreme numbers of volumes 
[\#244](https://github.com/fog/fog-aws/issues/244) - Support CreateReusableDelegationSet [\#243](https://github.com/fog/fog-aws/issues/243) - Tags server creation in Mock vs Real [\#239](https://github.com/fog/fog-aws/issues/239) - Excon::Errors::SocketError Broken pipe \(Errno::EPIPE\) when use Activeadmin upload image by nested form [\#237](https://github.com/fog/fog-aws/issues/237) - Fog Mock doesn't update [\#236](https://github.com/fog/fog-aws/issues/236) - ECS service\_update does not support "deploymentConfig" [\#234](https://github.com/fog/fog-aws/issues/234) - Fog::Storage::AWS::Files\#each always iterates over entire collection [\#232](https://github.com/fog/fog-aws/issues/232) - repeated bucket name in the URL on AWS and issue with :path\_style [\#228](https://github.com/fog/fog-aws/issues/228) - Already initialized constant warnings [\#212](https://github.com/fog/fog-aws/issues/212) - SQS API version is outdated [\#198](https://github.com/fog/fog-aws/issues/198) - Problem when using irb [\#195](https://github.com/fog/fog-aws/issues/195) - compute.servers \(via DescribeInstances\) does not include tags reliably [\#192](https://github.com/fog/fog-aws/issues/192) - EBS create volume io1 or gp2 [\#186](https://github.com/fog/fog-aws/issues/186) - Aws cloudformation stack-policy-body [\#179](https://github.com/fog/fog-aws/issues/179) - EXCON\_DEBUG and DEBUG env variables do not help debug -S key issues [\#177](https://github.com/fog/fog-aws/issues/177) - AWS4 SignatureDoesNotMatch if header contains two spaces [\#160](https://github.com/fog/fog-aws/issues/160) - Add support for elasticache redis replication groups [\#136](https://github.com/fog/fog-aws/issues/136) - Getting SignatureDoesNotMatch error with eu-central-1 [\#127](https://github.com/fog/fog-aws/issues/127) - Cannot saving auto scaling group [\#125](https://github.com/fog/fog-aws/issues/125) - fog-aws not working with dynamoDB Local [\#118](https://github.com/fog/fog-aws/issues/118) - Fog::Compute::AWS::Error InvalidParameterValue =\> secondary-addresses [\#115](https://github.com/fog/fog-aws/issues/115) - Is there an equivalent to describe-instance-status? 
[\#66](https://github.com/fog/fog-aws/issues/66) - No usage instructions in Readme [\#64](https://github.com/fog/fog-aws/issues/64) - AWS - distributionConfig.enabled' failed to satisfy constraint: Member must not be null [\#48](https://github.com/fog/fog-aws/issues/48) - Clarify versioning on README [\#42](https://github.com/fog/fog-aws/issues/42) - AWS SQS AddPermission API missing [\#26](https://github.com/fog/fog-aws/issues/26) - AWS China region [\#25](https://github.com/fog/fog-aws/issues/25) - AWS CloudFormation ListStacks options [\#24](https://github.com/fog/fog-aws/issues/24) - Setting region of AWS::Compute after initialization [\#23](https://github.com/fog/fog-aws/issues/23) - Support AWS Support API [\#22](https://github.com/fog/fog-aws/issues/22) - InvalidClientTokenId =\> The security token included in the request is invalid [\#21](https://github.com/fog/fog-aws/issues/21) - Change architecture attribute in AWS::Compute::Server model [\#20](https://github.com/fog/fog-aws/issues/20) - Add support for Amazon Kinesis [\#19](https://github.com/fog/fog-aws/issues/19) - Bring AWS CloudFront API Models/Requests up to date [\#17](https://github.com/fog/fog-aws/issues/17) - AWS security group tests have become unstable [\#16](https://github.com/fog/fog-aws/issues/16) - AWS auto scaling: availability zones are not a required parameter [\#15](https://github.com/fog/fog-aws/issues/15) - Is anyone going to add support for AWS ElasticTranscoder [\#14](https://github.com/fog/fog-aws/issues/14) - add missing attributes to aws describe\_reserved\_instances parser [\#13](https://github.com/fog/fog-aws/issues/13) - AWS AutoScaling group min\_size & max\_size getting set to 0 [\#12](https://github.com/fog/fog-aws/issues/12) - auto\_scaling\_group.instances does not return only instances for that group [\#11](https://github.com/fog/fog-aws/issues/11) - Why are the credential keys not generalized? 
[\#10](https://github.com/fog/fog-aws/issues/10) - Invalid XML Character in S3 Response [\#8](https://github.com/fog/fog-aws/issues/8) - reading s3 upload progress [\#7](https://github.com/fog/fog-aws/issues/7) - delete\_on\_termination=true attribute on new volume is not set on create [\#6](https://github.com/fog/fog-aws/issues/6) - user\_data is still base64 encoded in Real launch\_configurations [\#5](https://github.com/fog/fog-aws/issues/5) **Merged pull requests:** - Add storage option to configure multipart put/copy [\#616](https://github.com/fog/fog-aws/pull/616) ([slonopotamus](https://github.com/slonopotamus)) - Bump actions/stale from 3.0.19 to 4 [\#615](https://github.com/fog/fog-aws/pull/615) ([dependabot[bot]](https://github.com/apps/dependabot)) - Update file.rb [\#613](https://github.com/fog/fog-aws/pull/613) ([iqre8](https://github.com/iqre8)) - fix storage for ruby 3.0 [\#611](https://github.com/fog/fog-aws/pull/611) ([vincentjoseph](https://github.com/vincentjoseph)) - Implement AWS TagSpecifications \(closes \#603\) [\#610](https://github.com/fog/fog-aws/pull/610) ([eLvErDe](https://github.com/eLvErDe)) - Bump actions/stale from 3.0.18 to 3.0.19 [\#609](https://github.com/fog/fog-aws/pull/609) ([dependabot[bot]](https://github.com/apps/dependabot)) - Bump actions/stale from 3 to 3.0.18 [\#608](https://github.com/fog/fog-aws/pull/608) ([dependabot[bot]](https://github.com/apps/dependabot)) - Bump actions/checkout from 2 to 2.3.4 [\#607](https://github.com/fog/fog-aws/pull/607) ([dependabot[bot]](https://github.com/apps/dependabot)) - drop git in gemspec [\#602](https://github.com/fog/fog-aws/pull/602) ([abrahamparayil](https://github.com/abrahamparayil)) - Update rubyzip requirement from ~\> 1.3.0 to ~\> 2.3.0 [\#601](https://github.com/fog/fog-aws/pull/601) ([dependabot[bot]](https://github.com/apps/dependabot)) ## [v3.10.0](https://github.com/fog/fog-aws/tree/v3.10.0) (2021-03-22) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.9.0...v3.10.0) **Closed issues:** - Fog::AWS::Storage timeout against S3 [\#599](https://github.com/fog/fog-aws/issues/599) - Incorrect bucket redirection URL generation [\#594](https://github.com/fog/fog-aws/issues/594) - Fully Support SSE-C encryption [\#571](https://github.com/fog/fog-aws/issues/571) - Enable hibernation on creation of ec2 instance [\#566](https://github.com/fog/fog-aws/issues/566) - Tests broken with fog-core 2.1.0 [\#504](https://github.com/fog/fog-aws/issues/504) - changelog? [\#471](https://github.com/fog/fog-aws/issues/471) - How to use iam\_instance\_profile? [\#342](https://github.com/fog/fog-aws/issues/342) - how to support additional aws regions, e.g. 
cn-north-1 [\#164](https://github.com/fog/fog-aws/issues/164) - Still empty content-encoding when it is not set [\#130](https://github.com/fog/fog-aws/issues/130) **Merged pull requests:** - Handle multiple versions of objects in multiple delete request [\#600](https://github.com/fog/fog-aws/pull/600) ([shanu-kr](https://github.com/shanu-kr)) - Add Truffleruby head to CI [\#596](https://github.com/fog/fog-aws/pull/596) ([gogainda](https://github.com/gogainda)) - Fixes domain name duplication in url [\#593](https://github.com/fog/fog-aws/pull/593) ([midhunkrishna](https://github.com/midhunkrishna)) ## [v3.9.0](https://github.com/fog/fog-aws/tree/v3.9.0) (2021-03-02) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.8.0...v3.9.0) **Merged pull requests:** - Fix handling of lowercased HTTP headers [\#597](https://github.com/fog/fog-aws/pull/597) ([stanhu](https://github.com/stanhu)) - Generate a default session name if one is not provided [\#595](https://github.com/fog/fog-aws/pull/595) ([stanhu](https://github.com/stanhu)) - Enable test on Ruby 3.0 [\#591](https://github.com/fog/fog-aws/pull/591) ([voxik](https://github.com/voxik)) ## [v3.8.0](https://github.com/fog/fog-aws/tree/v3.8.0) (2021-01-13) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.7.0...v3.8.0) **Closed issues:** - Getting Auth failure Exception for non enabled AWS regions in AWS account. [\#585](https://github.com/fog/fog-aws/issues/585) **Merged pull requests:** - Filter unknown UploadPartCopy parameters [\#589](https://github.com/fog/fog-aws/pull/589) ([stanhu](https://github.com/stanhu)) - Fix NameError in multipart copy [\#588](https://github.com/fog/fog-aws/pull/588) ([stanhu](https://github.com/stanhu)) - Rewind pointer if file is eof on put\_object mock [\#587](https://github.com/fog/fog-aws/pull/587) ([ekulz](https://github.com/ekulz)) - Update .travis.yml [\#584](https://github.com/fog/fog-aws/pull/584) ([nageshlop](https://github.com/nageshlop)) ## [v3.7.0](https://github.com/fog/fog-aws/tree/v3.7.0) (2020-12-01) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.7...v3.7.0) **Closed issues:** - File\#copy does not support files above 5 GB [\#577](https://github.com/fog/fog-aws/issues/577) - fog-aws: AWS extended length resource ID issues \(8-\>18\) [\#517](https://github.com/fog/fog-aws/issues/517) **Merged pull requests:** - Add all m6gd, r6g, r6gd, c6g, and c6gd instance classes [\#582](https://github.com/fog/fog-aws/pull/582) ([calebwoofenden](https://github.com/calebwoofenden)) - Test Ruby v2.6.6 and v2.7.2 in CI [\#581](https://github.com/fog/fog-aws/pull/581) ([stanhu](https://github.com/stanhu)) - Add multi-threaded support for File\#copy [\#579](https://github.com/fog/fog-aws/pull/579) ([stanhu](https://github.com/stanhu)) - Add support for multipart Fog::AWS::Storage::File\#copy [\#578](https://github.com/fog/fog-aws/pull/578) ([stanhu](https://github.com/stanhu)) - Add AssumeRoleWithWebIdentity to fetch\_credentials [\#576](https://github.com/fog/fog-aws/pull/576) ([jpac-run](https://github.com/jpac-run)) ## [v3.6.7](https://github.com/fog/fog-aws/tree/v3.6.7) (2020-08-26) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.6...v3.6.7) **Merged pull requests:** - S3 dot Region endpoint structure applied [\#574](https://github.com/fog/fog-aws/pull/574) ([gharutyunyan-vineti](https://github.com/gharutyunyan-vineti)) ## [v3.6.6](https://github.com/fog/fog-aws/tree/v3.6.6) (2020-06-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.5...v3.6.6) **Closed 
issues:** - max\_keys param in storage.directories.get.... what am I missing? [\#568](https://github.com/fog/fog-aws/issues/568) - Fog Logs? [\#561](https://github.com/fog/fog-aws/issues/561) **Merged pull requests:** - added missing region EU South \(Milan\) [\#570](https://github.com/fog/fog-aws/pull/570) ([saldan](https://github.com/saldan)) - hibernation option to compute [\#569](https://github.com/fog/fog-aws/pull/569) ([taniahagan](https://github.com/taniahagan)) - Fix VPC model is\_default requires [\#567](https://github.com/fog/fog-aws/pull/567) ([biinari](https://github.com/biinari)) ## [v3.6.5](https://github.com/fog/fog-aws/tree/v3.6.5) (2020-05-22) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.4...v3.6.5) **Closed issues:** - Fog::Compute::AWS is deprecated, please use Fog::AWS::Compute warning [\#565](https://github.com/fog/fog-aws/issues/565) - Duplicate compute flavours [\#563](https://github.com/fog/fog-aws/issues/563) - 3.6.4 does not fetch iam credentials using IMDSv2 when running from inside containers with IMDSv2 Defaults [\#560](https://github.com/fog/fog-aws/issues/560) **Merged pull requests:** - Fix naming of various AWS compute flavors [\#564](https://github.com/fog/fog-aws/pull/564) ([abrom](https://github.com/abrom)) - Gracefully handle failure of IMDSv2 and allow fallback to IMDSv1 [\#562](https://github.com/fog/fog-aws/pull/562) ([atyndall](https://github.com/atyndall)) ## [v3.6.4](https://github.com/fog/fog-aws/tree/v3.6.4) (2020-05-14) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.3...v3.6.4) **Closed issues:** - Is fog-aws compatible with AWS Trust Services? [\#558](https://github.com/fog/fog-aws/issues/558) **Merged pull requests:** - Add support for IMDSv2 in CredentialFetcher [\#559](https://github.com/fog/fog-aws/pull/559) ([atyndall](https://github.com/atyndall)) - Don’t install development scripts [\#557](https://github.com/fog/fog-aws/pull/557) ([amarshall](https://github.com/amarshall)) ## [v3.6.3](https://github.com/fog/fog-aws/tree/v3.6.3) (2020-04-22) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.6.2...v3.6.3) **Merged pull requests:** - Add South Africa \(Cape Town\) Region [\#556](https://github.com/fog/fog-aws/pull/556) ([lvangool](https://github.com/lvangool)) - Adds Instance Type r5.16xlarge and r5.8xlarge [\#555](https://github.com/fog/fog-aws/pull/555) ([rupikakapoor](https://github.com/rupikakapoor)) - Update kinesis.rb [\#553](https://github.com/fog/fog-aws/pull/553) ([ioquatix](https://github.com/ioquatix)) ## [v3.6.2](https://github.com/fog/fog-aws/tree/v3.6.2) (2020-03-24) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.5.2...v3.6.2) **Closed issues:** - config.assets.prefix is being looked at as a bucket name [\#551](https://github.com/fog/fog-aws/issues/551) - Class name typo: AssumeRoleWithWithWebIdentity [\#548](https://github.com/fog/fog-aws/issues/548) - filename too long [\#544](https://github.com/fog/fog-aws/issues/544) **Merged pull requests:** - Adding two missing regions to Fog::AWS.regions [\#552](https://github.com/fog/fog-aws/pull/552) ([lvangool](https://github.com/lvangool)) - Adds missing param WebIdentityToken for the request to the AWS api [\#550](https://github.com/fog/fog-aws/pull/550) ([dgoradia](https://github.com/dgoradia)) - Fixes type in class name for STS assume\_role\_with\_web\_identity parser [\#549](https://github.com/fog/fog-aws/pull/549) ([dgoradia](https://github.com/dgoradia)) - Add missing AWS flavors 
[\#547](https://github.com/fog/fog-aws/pull/547) ([ybart](https://github.com/ybart)) - Update elasticache mocking regions [\#545](https://github.com/fog/fog-aws/pull/545) ([yads](https://github.com/yads)) - Feature/elbv2 creation endpoint [\#542](https://github.com/fog/fog-aws/pull/542) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Fix/sd 8581/retrieve provider snapshot status from provider [\#541](https://github.com/fog/fog-aws/pull/541) ([toubs13](https://github.com/toubs13)) - Fix/missing implementation in listener parser [\#540](https://github.com/fog/fog-aws/pull/540) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Enhance/elbv2 tag endpoints [\#539](https://github.com/fog/fog-aws/pull/539) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Improve documentation and development setup [\#538](https://github.com/fog/fog-aws/pull/538) ([gustavosobral](https://github.com/gustavosobral)) - Add object tagging [\#537](https://github.com/fog/fog-aws/pull/537) ([gustavosobral](https://github.com/gustavosobral)) - Fix load balancers parser to handle more than one availability zone with addresses [\#536](https://github.com/fog/fog-aws/pull/536) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Remove useless attribute location from directory model [\#535](https://github.com/fog/fog-aws/pull/535) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Create service ELBV2 to handle specificities of 2015-12-01 API version [\#534](https://github.com/fog/fog-aws/pull/534) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Add missing m5a flavors [\#533](https://github.com/fog/fog-aws/pull/533) ([ybart](https://github.com/ybart)) - Enhance/add some attributes to hosted zone parsers [\#531](https://github.com/fog/fog-aws/pull/531) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Fix VPC tenancy on creation [\#530](https://github.com/fog/fog-aws/pull/530) ([ramonpm](https://github.com/ramonpm)) - Fix subnet's parsings [\#529](https://github.com/fog/fog-aws/pull/529) ([KevinLoiseau](https://github.com/KevinLoiseau)) ## [v3.5.2](https://github.com/fog/fog-aws/tree/v3.5.2) (2019-07-16) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.5.1...v3.5.2) **Closed issues:** - Support for Hong Kong Region \(ap-east-1\)? [\#527](https://github.com/fog/fog-aws/issues/527) - Make S3 Signature v4 Streaming Optional [\#523](https://github.com/fog/fog-aws/issues/523) **Merged pull requests:** - Add ap-east-1 \(Hong Kong\) to Fog::AWS.regions [\#528](https://github.com/fog/fog-aws/pull/528) ([tisba](https://github.com/tisba)) - Update shared\_mock\_methods.rb [\#526](https://github.com/fog/fog-aws/pull/526) ([MiWieczo](https://github.com/MiWieczo)) - Make S3 Signature v4 streaming optional [\#525](https://github.com/fog/fog-aws/pull/525) ([stanhu](https://github.com/stanhu)) ## [v3.5.1](https://github.com/fog/fog-aws/tree/v3.5.1) (2019-06-10) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.5.0...v3.5.1) **Merged pull requests:** - Fixed issue with InvocationType header for AWS Lambda [\#524](https://github.com/fog/fog-aws/pull/524) ([GarrisonD](https://github.com/GarrisonD)) - Add support for generating tree hash tests by adding unaligned parts. 
[\#521](https://github.com/fog/fog-aws/pull/521) ([hkmaly](https://github.com/hkmaly)) ## [v3.5.0](https://github.com/fog/fog-aws/tree/v3.5.0) (2019-04-25) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.4.0...v3.5.0) **Closed issues:** - Missing AWS region: EU \(Stockholm\) eu-north-1 [\#514](https://github.com/fog/fog-aws/issues/514) - Support for AWS fargate [\#510](https://github.com/fog/fog-aws/issues/510) **Merged pull requests:** - Add AWS Stockholm region [\#515](https://github.com/fog/fog-aws/pull/515) ([fred-secludit](https://github.com/fred-secludit)) - Enhance/handle ELBv2 api version [\#512](https://github.com/fog/fog-aws/pull/512) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Enhance/add attribute db subnet group for db instance [\#511](https://github.com/fog/fog-aws/pull/511) ([KevinLoiseau](https://github.com/KevinLoiseau)) ## [v3.4.0](https://github.com/fog/fog-aws/tree/v3.4.0) (2019-03-11) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.3.0...v3.4.0) **Closed issues:** - Warning: possibly useless use of == in void context [\#498](https://github.com/fog/fog-aws/issues/498) - Cluster.ready? returns false [\#496](https://github.com/fog/fog-aws/issues/496) - With out AWS access key & secrect AWS services not working\(IAM Role associated\) [\#495](https://github.com/fog/fog-aws/issues/495) - "AWS::STS | assume role with web identity \(aws\)" interferes with "Fog::Compute\[:iam\] | roles \(aws, iam\)" [\#491](https://github.com/fog/fog-aws/issues/491) - Access S3 using a proxy [\#486](https://github.com/fog/fog-aws/issues/486) - Warning that doesn't make sense [\#479](https://github.com/fog/fog-aws/issues/479) - Undefined method `change\_resource\_record\_sets\_data' for Fog::AWS:Module called from fog/aws/requests/dns/change\_resource\_record\_sets.rb when attempting to modify a DNS record. [\#477](https://github.com/fog/fog-aws/issues/477) - Is DescribeImageAttribute support missing? [\#473](https://github.com/fog/fog-aws/issues/473) - How to fix deprecation warning: "The format Fog::CDN::AWS is deprecated" [\#466](https://github.com/fog/fog-aws/issues/466) - Test suite failures in "Fog::Compute\[:iam\] | roles" [\#296](https://github.com/fog/fog-aws/issues/296) - Support Amazon S3 Transfer Acceleration [\#250](https://github.com/fog/fog-aws/issues/250) - Creating VPC instances in AWS [\#116](https://github.com/fog/fog-aws/issues/116) **Merged pull requests:** - Avoid using bucket\_name.host if host is overriden. [\#507](https://github.com/fog/fog-aws/pull/507) ([deepfryed](https://github.com/deepfryed)) - Fix some requests when S3 acceleration is enabled [\#506](https://github.com/fog/fog-aws/pull/506) ([NARKOZ](https://github.com/NARKOZ)) - Add support for S3 transfer acceleration [\#505](https://github.com/fog/fog-aws/pull/505) ([NARKOZ](https://github.com/NARKOZ)) - Correct DynamoDB update\_item method [\#503](https://github.com/fog/fog-aws/pull/503) ([postmodern](https://github.com/postmodern)) - Add MaxResults filter to describe security groups [\#502](https://github.com/fog/fog-aws/pull/502) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Fix for Aurora Server Provisioning. 
[\#501](https://github.com/fog/fog-aws/pull/501) ([lockstone](https://github.com/lockstone)) - Fixes/fog/aws/rds/ready [\#497](https://github.com/fog/fog-aws/pull/497) ([villemuittari](https://github.com/villemuittari)) - Feature/adding modify instance placement [\#494](https://github.com/fog/fog-aws/pull/494) ([loperaja](https://github.com/loperaja)) - Add AMD CPU instance types [\#493](https://github.com/fog/fog-aws/pull/493) ([jfuechsl](https://github.com/jfuechsl)) - Update documentation for x-amz headers [\#492](https://github.com/fog/fog-aws/pull/492) ([knapo](https://github.com/knapo)) - Add missing generation 5 compute instance flavors [\#490](https://github.com/fog/fog-aws/pull/490) ([jfuechsl](https://github.com/jfuechsl)) - Add ability to force delete a bucket with objects [\#488](https://github.com/fog/fog-aws/pull/488) ([ramonpm](https://github.com/ramonpm)) - Modernize various tests to Ruby 2.x syntax [\#485](https://github.com/fog/fog-aws/pull/485) ([teancom](https://github.com/teancom)) - EYPP-6850 add m4.16xlarge flavor [\#480](https://github.com/fog/fog-aws/pull/480) ([thorn](https://github.com/thorn)) - pull request in attempt at fix for undefined method issue mentioned in fog/fog-aws\#477 [\#478](https://github.com/fog/fog-aws/pull/478) ([klarrimore](https://github.com/klarrimore)) - Changes to add describe\_image\_attribute support [\#476](https://github.com/fog/fog-aws/pull/476) ([keithjpaulson](https://github.com/keithjpaulson)) - add tags for describe address [\#474](https://github.com/fog/fog-aws/pull/474) ([toubs13](https://github.com/toubs13)) ## [v3.3.0](https://github.com/fog/fog-aws/tree/v3.3.0) (2018-09-18) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.2.0...v3.3.0) **Merged pull requests:** - Rename CDN::AWS to AWS::CDN [\#467](https://github.com/fog/fog-aws/pull/467) ([jaredbeck](https://github.com/jaredbeck)) ## [v3.2.0](https://github.com/fog/fog-aws/tree/v3.2.0) (2018-09-17) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.1.0...v3.2.0) **Merged pull requests:** - Rename Storage::AWS to AWS::Storage [\#470](https://github.com/fog/fog-aws/pull/470) ([sue445](https://github.com/sue445)) - Rename DNS::AWS to AWS::DNS [\#469](https://github.com/fog/fog-aws/pull/469) ([sue445](https://github.com/sue445)) - Rename Compute::AWS to AWS::Compute [\#468](https://github.com/fog/fog-aws/pull/468) ([sue445](https://github.com/sue445)) ## [v3.1.0](https://github.com/fog/fog-aws/tree/v3.1.0) (2018-09-17) [Full Changelog](https://github.com/fog/fog-aws/compare/v3.0.0...v3.1.0) **Closed issues:** - Option to disable ssl verification [\#465](https://github.com/fog/fog-aws/issues/465) - s3: fog returns bad URL \(with correct signature\) [\#462](https://github.com/fog/fog-aws/issues/462) - Getting permanent link without X-Amz-Expires=600 [\#459](https://github.com/fog/fog-aws/issues/459) - add region cn-northwest-1 [\#455](https://github.com/fog/fog-aws/issues/455) - Parameterize "RequestLimitExceeded" jitter magnitude [\#448](https://github.com/fog/fog-aws/issues/448) - Release new version to RubyGems [\#442](https://github.com/fog/fog-aws/issues/442) - Fog::Compute::AWS::Vpcs returns VPCs with nil ids [\#387](https://github.com/fog/fog-aws/issues/387) **Merged pull requests:** - Escape / in presigned URLs [\#463](https://github.com/fog/fog-aws/pull/463) ([alexcern](https://github.com/alexcern)) - Fix t1.micro bits [\#460](https://github.com/fog/fog-aws/pull/460) ([tas50](https://github.com/tas50)) - Storage region support for cn-northwest-1 
[\#458](https://github.com/fog/fog-aws/pull/458) ([deepfryed](https://github.com/deepfryed)) - Simplify constructor [\#457](https://github.com/fog/fog-aws/pull/457) ([lvangool](https://github.com/lvangool)) - Add missing attribute to RDS server [\#456](https://github.com/fog/fog-aws/pull/456) ([brianknight10](https://github.com/brianknight10)) - Fix & update aws flavor \(provided in GiB\) to Megabytes \(floor rounded\). [\#454](https://github.com/fog/fog-aws/pull/454) ([xward](https://github.com/xward)) - Update aws flavors cpu count for gpu oriented flavor. [\#453](https://github.com/fog/fog-aws/pull/453) ([xward](https://github.com/xward)) - Update aws flavors cpu count. [\#452](https://github.com/fog/fog-aws/pull/452) ([xward](https://github.com/xward)) - Parameterized retry [\#451](https://github.com/fog/fog-aws/pull/451) ([lvangool](https://github.com/lvangool)) - Fix c1.xlarge cpu count [\#449](https://github.com/fog/fog-aws/pull/449) ([romaintb](https://github.com/romaintb)) - Retry if instance not found when adding EC2 tags [\#446](https://github.com/fog/fog-aws/pull/446) ([tracemeyers](https://github.com/tracemeyers)) - Support new Paris and AP Osaka load balancers in DNS [\#445](https://github.com/fog/fog-aws/pull/445) ([mattheworiordan](https://github.com/mattheworiordan)) - Docs: Update changelog for 3.0.0 [\#444](https://github.com/fog/fog-aws/pull/444) ([jaredbeck](https://github.com/jaredbeck)) - Add encryption to EFS FileSystem creation [\#438](https://github.com/fog/fog-aws/pull/438) ([acant](https://github.com/acant)) - SetInstanceProtection endpoint for auto scaling groups support [\#436](https://github.com/fog/fog-aws/pull/436) ([thorn](https://github.com/thorn)) ## [v3.0.0](https://github.com/fog/fog-aws/tree/v3.0.0) (2018-04-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v2.0.1...v3.0.0) **Closed issues:** - Easily Delete S3 directory and it contents? [\#435](https://github.com/fog/fog-aws/issues/435) - S3 upload help -- likely user error :\) [\#432](https://github.com/fog/fog-aws/issues/432) - Fog not work without pry [\#317](https://github.com/fog/fog-aws/issues/317) **Merged pull requests:** - fix: attach volume on \#save, remove \#server= [\#443](https://github.com/fog/fog-aws/pull/443) ([lanej](https://github.com/lanej)) - Adding g3 flavors [\#440](https://github.com/fog/fog-aws/pull/440) ([AlexLamande](https://github.com/AlexLamande)) - Add c5 and m5 instance types. [\#439](https://github.com/fog/fog-aws/pull/439) ([rogersd](https://github.com/rogersd)) - Include link to full documentation [\#434](https://github.com/fog/fog-aws/pull/434) ([kylefox](https://github.com/kylefox)) - fog-core 2.x, fog-json 1.x [\#433](https://github.com/fog/fog-aws/pull/433) ([lanej](https://github.com/lanej)) ## [v2.0.1](https://github.com/fog/fog-aws/tree/v2.0.1) (2018-02-28) [Full Changelog](https://github.com/fog/fog-aws/compare/v2.0.0...v2.0.1) **Closed issues:** - Unable to use fog-aws with DigitalOcean Spaces: MissingContentLength [\#428](https://github.com/fog/fog-aws/issues/428) - Add new France region [\#424](https://github.com/fog/fog-aws/issues/424) - How to set root volume size with bootstrap method? 
[\#417](https://github.com/fog/fog-aws/issues/417) - Update Dependencies [\#227](https://github.com/fog/fog-aws/issues/227) **Merged pull requests:** - Expose S3 pre-signed object delete url [\#431](https://github.com/fog/fog-aws/pull/431) ([nolith](https://github.com/nolith)) - Fronzen strings fix [\#430](https://github.com/fog/fog-aws/pull/430) ([zhulik](https://github.com/zhulik)) - Expose elastic ip's private\_ip\_address [\#427](https://github.com/fog/fog-aws/pull/427) ([KevinLoiseau](https://github.com/KevinLoiseau)) - add france \(eu-west-3\) new region, fix \#424 [\#425](https://github.com/fog/fog-aws/pull/425) ([Val](https://github.com/Val)) ## [v2.0.0](https://github.com/fog/fog-aws/tree/v2.0.0) (2017-11-28) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.4.1...v2.0.0) **Closed issues:** - connect\_write timeout on AWS CodeBuild [\#413](https://github.com/fog/fog-aws/issues/413) - cannot load such file -- fog \(LoadError\) [\#401](https://github.com/fog/fog-aws/issues/401) - Missing file for extraction? [\#390](https://github.com/fog/fog-aws/issues/390) - Regression: IO stream sent to AWS fails [\#388](https://github.com/fog/fog-aws/issues/388) - Stack Level Too Deep on YAML Serialization [\#385](https://github.com/fog/fog-aws/issues/385) - models/elb/model\_tests does not properly cleanup [\#347](https://github.com/fog/fog-aws/issues/347) - Generates wrong url when region is not DEFAULT\_REGION [\#214](https://github.com/fog/fog-aws/issues/214) **Merged pull requests:** - upgrade rubyzip to \>= 1.2.1 [\#416](https://github.com/fog/fog-aws/pull/416) ([lanej](https://github.com/lanej)) - correction in iam/list\_access\_keys parser: Username should be UserName [\#415](https://github.com/fog/fog-aws/pull/415) ([patleb](https://github.com/patleb)) - Avoid creating connection if region is not nil [\#414](https://github.com/fog/fog-aws/pull/414) ([hideto0710](https://github.com/hideto0710)) - Resolving issue where `Fog::Json` was called instead of `Fog::JSON`. [\#412](https://github.com/fog/fog-aws/pull/412) ([mgarrick](https://github.com/mgarrick)) - Add t2.micro in flavors list [\#411](https://github.com/fog/fog-aws/pull/411) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Adding AWS P3 Tesla GPU instance types [\#409](https://github.com/fog/fog-aws/pull/409) ([hamelsmu](https://github.com/hamelsmu)) - Add jitter to exponential backoff [\#408](https://github.com/fog/fog-aws/pull/408) ([masstamike](https://github.com/masstamike)) - Add emulation of default VPC to mocked mode. [\#407](https://github.com/fog/fog-aws/pull/407) ([rzaharenkov](https://github.com/rzaharenkov)) - Update rds instance options model [\#406](https://github.com/fog/fog-aws/pull/406) ([carloslima](https://github.com/carloslima)) - Drop Ruby\<2.0 support [\#405](https://github.com/fog/fog-aws/pull/405) ([tbrisker](https://github.com/tbrisker)) - allow Gemfile-edge travis builds to fail [\#403](https://github.com/fog/fog-aws/pull/403) ([lanej](https://github.com/lanej)) - Add `default_for_az` attribute to subnet [\#402](https://github.com/fog/fog-aws/pull/402) ([rzaharenkov](https://github.com/rzaharenkov)) - bundler ~\> 1.15 [\#399](https://github.com/fog/fog-aws/pull/399) ([lanej](https://github.com/lanej)) - Fix detaching instances from auto scaling group. 
[\#397](https://github.com/fog/fog-aws/pull/397) ([rzaharenkov](https://github.com/rzaharenkov)) - Issue \#387 Fog::Compute::AWS::Vpcs returns VPCs with nil ids [\#396](https://github.com/fog/fog-aws/pull/396) ([maguec](https://github.com/maguec)) - feat\(CONTRIBUTORS\): Update [\#394](https://github.com/fog/fog-aws/pull/394) ([plribeiro3000](https://github.com/plribeiro3000)) - fix\(Tests\):Remove debugging [\#393](https://github.com/fog/fog-aws/pull/393) ([plribeiro3000](https://github.com/plribeiro3000)) - Migrate Service mapper from Fog [\#392](https://github.com/fog/fog-aws/pull/392) ([plribeiro3000](https://github.com/plribeiro3000)) - Add ability to encrypt a copy of an unencrypted snapshot [\#391](https://github.com/fog/fog-aws/pull/391) ([nodecarter](https://github.com/nodecarter)) - Fix VPC parser [\#389](https://github.com/fog/fog-aws/pull/389) ([ddiachkov](https://github.com/ddiachkov)) - fix default\_security\_group detection [\#348](https://github.com/fog/fog-aws/pull/348) ([lanej](https://github.com/lanej)) ## [v1.4.1](https://github.com/fog/fog-aws/tree/v1.4.1) (2017-08-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.4.0...v1.4.1) **Closed issues:** - retrieval of ipv6 vpc [\#379](https://github.com/fog/fog-aws/issues/379) - Timeout when trying to bootstrap or ssh spot request instances [\#372](https://github.com/fog/fog-aws/issues/372) - Why default VPC does not require Elastic IP to connect in internet [\#338](https://github.com/fog/fog-aws/issues/338) - Chunked images response causing Nokogiri::XML::SyntaxError [\#273](https://github.com/fog/fog-aws/issues/273) **Merged pull requests:** - Update changelog for 1.4.0 [\#383](https://github.com/fog/fog-aws/pull/383) ([greysteil](https://github.com/greysteil)) - Allow specifying kms key id to use [\#382](https://github.com/fog/fog-aws/pull/382) ([fcheung](https://github.com/fcheung)) - added support to retrieve and create vpc with ipv6 cidr block [\#381](https://github.com/fog/fog-aws/pull/381) ([chanakyacool](https://github.com/chanakyacool)) - Add MaxResults filter to describe reserved instances offerings [\#376](https://github.com/fog/fog-aws/pull/376) ([KevinLoiseau](https://github.com/KevinLoiseau)) - Fix Fog::Compute::AWS::Images\#all [\#375](https://github.com/fog/fog-aws/pull/375) ([eddiej](https://github.com/eddiej)) - Fix AWS credential mocking [\#374](https://github.com/fog/fog-aws/pull/374) ([v-yarotsky](https://github.com/v-yarotsky)) ## [v1.4.0](https://github.com/fog/fog-aws/tree/v1.4.0) (2017-06-14) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.3.0...v1.4.0) **Closed issues:** - Support REST Bucket Get v2 [\#369](https://github.com/fog/fog-aws/issues/369) - Fog::AWS::IAM::Error: InvalidAction =\> Could not find operation "ReplaceIamInstanceProfileAssociation" for version 2010-05-08 [\#368](https://github.com/fog/fog-aws/issues/368) - Multipart upload fails on empty files [\#364](https://github.com/fog/fog-aws/issues/364) - The action `ModifyVolume` is not valid for this web service. 
[\#363](https://github.com/fog/fog-aws/issues/363) - Cache/read local amazon data [\#354](https://github.com/fog/fog-aws/issues/354) **Merged pull requests:** - add NextContinuationToken support to GetBucket operation [\#370](https://github.com/fog/fog-aws/pull/370) ([khoan](https://github.com/khoan)) - Add a top-level require that matches the gem name [\#367](https://github.com/fog/fog-aws/pull/367) ([lanej](https://github.com/lanej)) - Fixed credential refresh when instance metadata host is inaccessible [\#366](https://github.com/fog/fog-aws/pull/366) ([ankane](https://github.com/ankane)) - Handle multipart upload of empty files [\#365](https://github.com/fog/fog-aws/pull/365) ([fcheung](https://github.com/fcheung)) - Add p2 instance types [\#362](https://github.com/fog/fog-aws/pull/362) ([caged](https://github.com/caged)) - Exponential backoff [\#361](https://github.com/fog/fog-aws/pull/361) ([VVMichaelSawyer](https://github.com/VVMichaelSawyer)) - Skip call to instance metadata host if region is specified [\#360](https://github.com/fog/fog-aws/pull/360) ([ankane](https://github.com/ankane)) ## [v1.3.0](https://github.com/fog/fog-aws/tree/v1.3.0) (2017-03-29) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.2.1...v1.3.0) **Closed issues:** - Do we need to list all files before creating one? [\#357](https://github.com/fog/fog-aws/issues/357) **Merged pull requests:** - Authorize vpc to rds sg [\#356](https://github.com/fog/fog-aws/pull/356) ([ehowe](https://github.com/ehowe)) - classic link enhancements [\#355](https://github.com/fog/fog-aws/pull/355) ([ehowe](https://github.com/ehowe)) - Add new i3 class instances. [\#353](https://github.com/fog/fog-aws/pull/353) ([rogersd](https://github.com/rogersd)) - Add check for self.etag before running gsub [\#351](https://github.com/fog/fog-aws/pull/351) ([dmcorboy](https://github.com/dmcorboy)) - Modify volume [\#350](https://github.com/fog/fog-aws/pull/350) ([ehowe](https://github.com/ehowe)) ## [v1.2.1](https://github.com/fog/fog-aws/tree/v1.2.1) (2017-02-27) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.2.0...v1.2.1) **Closed issues:** - Fog mock does not mimmick real behaviour for some Excon errors [\#341](https://github.com/fog/fog-aws/issues/341) **Merged pull requests:** - Spot fixes [\#349](https://github.com/fog/fog-aws/pull/349) ([ehowe](https://github.com/ehowe)) - add natGatewayId to describe\_route\_tables [\#346](https://github.com/fog/fog-aws/pull/346) ([mliao2](https://github.com/mliao2)) - Fog mock accuracy, fixes \#341 [\#344](https://github.com/fog/fog-aws/pull/344) ([easkay](https://github.com/easkay)) - Subnet [\#343](https://github.com/fog/fog-aws/pull/343) ([ehowe](https://github.com/ehowe)) - Fix multipart upload [\#340](https://github.com/fog/fog-aws/pull/340) ([nobmurakita](https://github.com/nobmurakita)) ## [v1.2.0](https://github.com/fog/fog-aws/tree/v1.2.0) (2017-01-20) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.1.0...v1.2.0) **Closed issues:** - Support for AWS Application Load Balancer \(ALB\) [\#335](https://github.com/fog/fog-aws/issues/335) **Merged pull requests:** - Better iam policies [\#339](https://github.com/fog/fog-aws/pull/339) ([ehowe](https://github.com/ehowe)) - Pin nokogiri gem for Ruby 1.9 and Ruby 2.0 [\#337](https://github.com/fog/fog-aws/pull/337) ([sodabrew](https://github.com/sodabrew)) - Fix parsing of the Reserved Instance 'recurringCharge' field and add 'scope' field [\#336](https://github.com/fog/fog-aws/pull/336) 
([sodabrew](https://github.com/sodabrew)) - Fixes / improvements for AutoScaling [\#334](https://github.com/fog/fog-aws/pull/334) ([lanej](https://github.com/lanej)) ## [v1.1.0](https://github.com/fog/fog-aws/tree/v1.1.0) (2016-12-16) [Full Changelog](https://github.com/fog/fog-aws/compare/v1.0.0...v1.1.0) **Closed issues:** - Support new Ohio region \(us-east-2\) [\#313](https://github.com/fog/fog-aws/issues/313) **Merged pull requests:** - Canada and London regions [\#333](https://github.com/fog/fog-aws/pull/333) ([mattheworiordan](https://github.com/mattheworiordan)) - Updated ELB Dual Stack hosted zone DNS records [\#332](https://github.com/fog/fog-aws/pull/332) ([mattheworiordan](https://github.com/mattheworiordan)) - Added support for attaching auto scaling groups to target groups [\#330](https://github.com/fog/fog-aws/pull/330) ([maf23](https://github.com/maf23)) - credential\_fetcher: Mark AWS metadata calls as idempotent [\#329](https://github.com/fog/fog-aws/pull/329) ([mtekel](https://github.com/mtekel)) ## [v1.0.0](https://github.com/fog/fog-aws/tree/v1.0.0) (2016-12-12) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.13.0...v1.0.0) **Merged pull requests:** - fix host header with another port on s3 [\#327](https://github.com/fog/fog-aws/pull/327) ([rodrigoapereira](https://github.com/rodrigoapereira)) - Add new t2.xlarge, t2.2xlarge and r4 class instances. [\#326](https://github.com/fog/fog-aws/pull/326) ([rogersd](https://github.com/rogersd)) - Fix the bug that can't create fifo queue in SQS. [\#323](https://github.com/fog/fog-aws/pull/323) ([ebihara99999](https://github.com/ebihara99999)) - data pipeline mocks [\#318](https://github.com/fog/fog-aws/pull/318) ([ehowe](https://github.com/ehowe)) ## [v0.13.0](https://github.com/fog/fog-aws/tree/v0.13.0) (2016-11-29) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.12.0...v0.13.0) **Closed issues:** - Fog::Compute::AWS::Image not properly loaded [\#324](https://github.com/fog/fog-aws/issues/324) - Add creation\_date field for aws images [\#320](https://github.com/fog/fog-aws/issues/320) - Bug: \[fog\]\[WARNING\] Unrecognized arguments: region, use\_iam\_profile [\#315](https://github.com/fog/fog-aws/issues/315) - Better contributing documentation [\#311](https://github.com/fog/fog-aws/issues/311) - AutoscalingGroups with a TargetGroup set are not parsed correctly [\#308](https://github.com/fog/fog-aws/issues/308) - autoscaling create launch config doesn't work with BlockDeviceMappings [\#307](https://github.com/fog/fog-aws/issues/307) - Is there a configuration setting for the AWS provider to adjust the url scheme for S3 buckets? 
[\#305](https://github.com/fog/fog-aws/issues/305) - DB Subnet Group id for Cluster returns nil [\#292](https://github.com/fog/fog-aws/issues/292) **Merged pull requests:** - Fixed some missing parts in change sets [\#322](https://github.com/fog/fog-aws/pull/322) ([nilroy](https://github.com/nilroy)) - Add creation date and enhanced networking support for images [\#321](https://github.com/fog/fog-aws/pull/321) ([puneetloya](https://github.com/puneetloya)) - Fix warnings in running tests [\#319](https://github.com/fog/fog-aws/pull/319) ([ebihara99999](https://github.com/ebihara99999)) - Add `Fog::AWS::STS.Mock#assume_role` [\#316](https://github.com/fog/fog-aws/pull/316) ([pedrommonteiro](https://github.com/pedrommonteiro)) - Ohio region [\#314](https://github.com/fog/fog-aws/pull/314) ([chanakyacool](https://github.com/chanakyacool)) - mime types gem update [\#312](https://github.com/fog/fog-aws/pull/312) ([lucianosousa](https://github.com/lucianosousa)) - fix S3 \#delete\_multiple\_objects for UTF-8 names [\#310](https://github.com/fog/fog-aws/pull/310) ([alepore](https://github.com/alepore)) - Support for target groups \(fix for \#308\) [\#309](https://github.com/fog/fog-aws/pull/309) ([msiuts](https://github.com/msiuts)) - create, describe, and destroy elastic file systems [\#304](https://github.com/fog/fog-aws/pull/304) ([ehowe](https://github.com/ehowe)) - Correct optional parameter naming in documentation for Fog::AWS::Auto… [\#302](https://github.com/fog/fog-aws/pull/302) ([ehealy](https://github.com/ehealy)) - Modify Db subnet group [\#293](https://github.com/fog/fog-aws/pull/293) ([chanakyacool](https://github.com/chanakyacool)) ## [v0.12.0](https://github.com/fog/fog-aws/tree/v0.12.0) (2016-09-22) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.11.0...v0.12.0) **Implemented enhancements:** - Add gestion of egress security group rules [\#290](https://github.com/fog/fog-aws/pull/290) ([KevinLoiseau](https://github.com/KevinLoiseau)) **Closed issues:** - Fog directory appends local system path with amazon url when i try to give dynamic fog directory [\#295](https://github.com/fog/fog-aws/issues/295) - Getting OperationAborted error on file storage operation [\#288](https://github.com/fog/fog-aws/issues/288) - AWS Elasticsearch API [\#286](https://github.com/fog/fog-aws/issues/286) - Disable chunked encoding [\#285](https://github.com/fog/fog-aws/issues/285) **Merged pull requests:** - add support endpoint and models/requests for trusted advisor checks [\#300](https://github.com/fog/fog-aws/pull/300) ([ehowe](https://github.com/ehowe)) - Add attribute is\_default in vpc [\#299](https://github.com/fog/fog-aws/pull/299) ([zhitongLBN](https://github.com/zhitongLBN)) - Cloud Formation: additional parameters [\#298](https://github.com/fog/fog-aws/pull/298) ([neillturner](https://github.com/neillturner)) - Cloud Formation: support for change sets, stack policy and other missing calls. [\#297](https://github.com/fog/fog-aws/pull/297) ([neillturner](https://github.com/neillturner)) ## [v0.11.0](https://github.com/fog/fog-aws/tree/v0.11.0) (2016-08-04) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.10.0...v0.11.0) **Merged pull requests:** - GitHub does no longer provide http:// pages [\#284](https://github.com/fog/fog-aws/pull/284) ([amatsuda](https://github.com/amatsuda)) - Skip multipart if body size is less than chunk. 
[\#283](https://github.com/fog/fog-aws/pull/283) ([brettcave](https://github.com/brettcave)) - ECS container credentials [\#281](https://github.com/fog/fog-aws/pull/281) ([ryansch](https://github.com/ryansch)) - test\(ci\): fix 1.9 builds with json \>= 2.0 [\#280](https://github.com/fog/fog-aws/pull/280) ([lanej](https://github.com/lanej)) - Change DBSubnetGroup to DBSubnetGroupName model cluster while creation [\#279](https://github.com/fog/fog-aws/pull/279) ([chanakyacool](https://github.com/chanakyacool)) ## [v0.10.0](https://github.com/fog/fog-aws/tree/v0.10.0) (2016-07-15) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.9.4...v0.10.0) **Closed issues:** - How to setup private files with CloudFront? [\#275](https://github.com/fog/fog-aws/issues/275) - Feature: Custom Managed Policies [\#272](https://github.com/fog/fog-aws/issues/272) - Support an IAM list\_attached\_role\_policies method [\#191](https://github.com/fog/fog-aws/issues/191) **Merged pull requests:** - RDS test fixes [\#276](https://github.com/fog/fog-aws/pull/276) ([MrPrimate](https://github.com/MrPrimate)) - Expanding IAM support [\#274](https://github.com/fog/fog-aws/pull/274) ([MrPrimate](https://github.com/MrPrimate)) - Rds snapshot improvements [\#269](https://github.com/fog/fog-aws/pull/269) ([tekken](https://github.com/tekken)) - add default region to use\_iam\_profile [\#268](https://github.com/fog/fog-aws/pull/268) ([shaiguitar](https://github.com/shaiguitar)) ## [v0.9.4](https://github.com/fog/fog-aws/tree/v0.9.4) (2016-06-28) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.9.3...v0.9.4) **Closed issues:** - S3: retry on 503 Service Unavailable [\#265](https://github.com/fog/fog-aws/issues/265) - Digest::Base Error [\#261](https://github.com/fog/fog-aws/issues/261) **Merged pull requests:** - Updated Region 'Mumbai' ap-south-1 [\#267](https://github.com/fog/fog-aws/pull/267) ([chanakyacool](https://github.com/chanakyacool)) - Replaces usage of Digest with OpenSSL::Digest [\#266](https://github.com/fog/fog-aws/pull/266) ([esthervillars](https://github.com/esthervillars)) - AWS DNS - support newer DNS hosted zone IDs for dualstack ELBs [\#263](https://github.com/fog/fog-aws/pull/263) ([mattheworiordan](https://github.com/mattheworiordan)) ## [v0.9.3](https://github.com/fog/fog-aws/tree/v0.9.3) (2016-06-20) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.9.2...v0.9.3) **Closed issues:** - Users list is empty in Fog::AWS::IAM::Groups [\#256](https://github.com/fog/fog-aws/issues/256) - I'd like to configure my Excon read\_timeout and write\_timeout [\#254](https://github.com/fog/fog-aws/issues/254) - Bump fog-core to \>=1.38.0 [\#247](https://github.com/fog/fog-aws/issues/247) - no implicit conversion of Array into String in `aws/storage.rb` from `bucket_name` in params. 
[\#246](https://github.com/fog/fog-aws/issues/246) - \[S3\] Bucket name gets duplicated in case of redirect from AWS [\#242](https://github.com/fog/fog-aws/issues/242) - CloudFormation stack tags cause describe\_stacks to break [\#240](https://github.com/fog/fog-aws/issues/240) **Merged pull requests:** - Parse EbsOptimized parameter in launch configuration description [\#259](https://github.com/fog/fog-aws/pull/259) ([djudd](https://github.com/djudd)) - Allow case-insensitive record comparison [\#258](https://github.com/fog/fog-aws/pull/258) ([matthewpick](https://github.com/matthewpick)) - Fix for empty ETag values [\#257](https://github.com/fog/fog-aws/pull/257) ([baryshev](https://github.com/baryshev)) - do not make requests if mocked. [\#252](https://github.com/fog/fog-aws/pull/252) ([shaiguitar](https://github.com/shaiguitar)) - Parse CloudWatch alarm actions as arrays instead of strings [\#245](https://github.com/fog/fog-aws/pull/245) ([eherot](https://github.com/eherot)) - Add support for CloudFormation stack tags. [\#241](https://github.com/fog/fog-aws/pull/241) ([jamesremuscat](https://github.com/jamesremuscat)) - Add log warning message about when not on us-region [\#200](https://github.com/fog/fog-aws/pull/200) ([kitofr](https://github.com/kitofr)) ## [v0.9.2](https://github.com/fog/fog-aws/tree/v0.9.2) (2016-03-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.9.1...v0.9.2) **Closed issues:** - CHANGELOG.md is out of date [\#235](https://github.com/fog/fog-aws/issues/235) **Merged pull requests:** - Aurora [\#238](https://github.com/fog/fog-aws/pull/238) ([ehowe](https://github.com/ehowe)) ## [v0.9.1](https://github.com/fog/fog-aws/tree/v0.9.1) (2016-03-04) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.8.2...v0.9.1) ## [v0.8.2](https://github.com/fog/fog-aws/tree/v0.8.2) (2016-03-04) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.9.0...v0.8.2) **Merged pull requests:** - autoscaler attach/detatch [\#229](https://github.com/fog/fog-aws/pull/229) ([shaiguitar](https://github.com/shaiguitar)) ## [v0.9.0](https://github.com/fog/fog-aws/tree/v0.9.0) (2016-03-03) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.8.1...v0.9.0) **Closed issues:** - Fog::Storage::AWS::File\#save deprecation warning without alternative [\#226](https://github.com/fog/fog-aws/issues/226) - Long format of aws resources [\#216](https://github.com/fog/fog-aws/issues/216) **Merged pull requests:** - Update README.md [\#233](https://github.com/fog/fog-aws/pull/233) ([h0lyalg0rithm](https://github.com/h0lyalg0rithm)) - fix mime-types CI issues, add 2.3.0 testing [\#231](https://github.com/fog/fog-aws/pull/231) ([lanej](https://github.com/lanej)) - support for rds clusters and aurora [\#230](https://github.com/fog/fog-aws/pull/230) ([ehowe](https://github.com/ehowe)) - Correct default DescribeAvailabilityZone filter to zone-name [\#225](https://github.com/fog/fog-aws/pull/225) ([gregburek](https://github.com/gregburek)) - Security Group perms of FromPort 0 and ToPort -1 [\#223](https://github.com/fog/fog-aws/pull/223) ([jacobo](https://github.com/jacobo)) - Page default parameters [\#222](https://github.com/fog/fog-aws/pull/222) ([ehowe](https://github.com/ehowe)) - rds enhancements [\#220](https://github.com/fog/fog-aws/pull/220) ([ehowe](https://github.com/ehowe)) - Added ap-northeast-2 to the fog mocks. 
[\#219](https://github.com/fog/fog-aws/pull/219) ([wyhaines](https://github.com/wyhaines)) - restore db instance fom db snapshot [\#217](https://github.com/fog/fog-aws/pull/217) ([ehowe](https://github.com/ehowe)) ## [v0.8.1](https://github.com/fog/fog-aws/tree/v0.8.1) (2016-01-08) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.8.0...v0.8.1) **Merged pull requests:** - Add new aws regions [\#213](https://github.com/fog/fog-aws/pull/213) ([atmos](https://github.com/atmos)) ## [v0.8.0](https://github.com/fog/fog-aws/tree/v0.8.0) (2016-01-04) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.6...v0.8.0) **Fixed bugs:** - IAM roles.all should paginate [\#176](https://github.com/fog/fog-aws/issues/176) **Closed issues:** - Fog gives wrong location for buckets when connected via non-default region [\#208](https://github.com/fog/fog-aws/issues/208) - Is there any way to skip object level `acl` setting while `public` option is true [\#207](https://github.com/fog/fog-aws/issues/207) - using/testing on ruby 1.9 [\#203](https://github.com/fog/fog-aws/issues/203) - S3 KMS encryption support [\#196](https://github.com/fog/fog-aws/issues/196) - Support S3 auto-expiring files? [\#194](https://github.com/fog/fog-aws/issues/194) - Fog::AWS::ELB::InvalidConfigurationRequest: policy cannot be enabled [\#193](https://github.com/fog/fog-aws/issues/193) - get\_https\_url generating negative expiry [\#188](https://github.com/fog/fog-aws/issues/188) - Streaming requests shouldn't be idempotent [\#181](https://github.com/fog/fog-aws/issues/181) - S3 connection hangs; does Fog support timeout? [\#180](https://github.com/fog/fog-aws/issues/180) - Doesn't work after upgrading to 0.1.2 [\#83](https://github.com/fog/fog-aws/issues/83) **Merged pull requests:** - When not specified, region for a bucket should be DEFAULT\_REGION. [\#211](https://github.com/fog/fog-aws/pull/211) ([jamesremuscat](https://github.com/jamesremuscat)) - Support NoncurrentVersion\[Expiration,Transition\] for s3 lifecycle. 
[\#210](https://github.com/fog/fog-aws/pull/210) ([xtoddx](https://github.com/xtoddx)) - Update dynamodb to use the latest API version [\#209](https://github.com/fog/fog-aws/pull/209) ([dmathieu](https://github.com/dmathieu)) - Make sure to send the KmsKeyId when creating an RDS cluster [\#206](https://github.com/fog/fog-aws/pull/206) ([drcapulet](https://github.com/drcapulet)) - Reset 'finished' when rewinding S3Streamer [\#205](https://github.com/fog/fog-aws/pull/205) ([jschneiderhan](https://github.com/jschneiderhan)) - Add mime-types to test section in Gemfile [\#204](https://github.com/fog/fog-aws/pull/204) ([kitofr](https://github.com/kitofr)) - filters on tags can pass an array [\#202](https://github.com/fog/fog-aws/pull/202) ([craiggenner](https://github.com/craiggenner)) - Document options for S3 server-side encryption [\#199](https://github.com/fog/fog-aws/pull/199) ([shuhei](https://github.com/shuhei)) - make net/ssh require optional [\#197](https://github.com/fog/fog-aws/pull/197) ([geemus](https://github.com/geemus)) - Cache cluster security group parser [\#190](https://github.com/fog/fog-aws/pull/190) ([eherot](https://github.com/eherot)) - Allow region to be set for STS [\#189](https://github.com/fog/fog-aws/pull/189) ([fcheung](https://github.com/fcheung)) - add cn support for s3 [\#187](https://github.com/fog/fog-aws/pull/187) ([ming535](https://github.com/ming535)) - mock instance stop and start properly [\#184](https://github.com/fog/fog-aws/pull/184) ([ehowe](https://github.com/ehowe)) - Disable idempotent option when block is passed to get\_object [\#183](https://github.com/fog/fog-aws/pull/183) ([ghost](https://github.com/ghost)) - Yield arguments to Mock\#get\_object block more similar to Excon [\#182](https://github.com/fog/fog-aws/pull/182) ([tdg5](https://github.com/tdg5)) - add IAM role paging [\#178](https://github.com/fog/fog-aws/pull/178) ([lanej](https://github.com/lanej)) - properly mock rds name update [\#170](https://github.com/fog/fog-aws/pull/170) ([ehowe](https://github.com/ehowe)) ## [v0.7.6](https://github.com/fog/fog-aws/tree/v0.7.6) (2015-08-26) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.5...v0.7.6) **Closed issues:** - mock directories.create destroys existing directory [\#172](https://github.com/fog/fog-aws/issues/172) **Merged pull requests:** - Add GovCloud region name to validation set. 
[\#175](https://github.com/fog/fog-aws/pull/175) ([triplepoint](https://github.com/triplepoint)) - Mocked put\_bucket no longer clobbers existing bucket [\#174](https://github.com/fog/fog-aws/pull/174) ([jgr](https://github.com/jgr)) ## [v0.7.5](https://github.com/fog/fog-aws/tree/v0.7.5) (2015-08-24) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.4...v0.7.5) **Closed issues:** - how to change filepath for html\_table\_reporter in reporter options [\#167](https://github.com/fog/fog-aws/issues/167) - Access Key, etc still required for Storage access when using use\_iam\_profile [\#162](https://github.com/fog/fog-aws/issues/162) - Support for KMS ID for EBS Volume [\#141](https://github.com/fog/fog-aws/issues/141) **Merged pull requests:** - validate rds server security group associations [\#173](https://github.com/fog/fog-aws/pull/173) ([lanej](https://github.com/lanej)) - format security groups when modifying db instance [\#171](https://github.com/fog/fog-aws/pull/171) ([michelleN](https://github.com/michelleN)) - standardize region validation [\#169](https://github.com/fog/fog-aws/pull/169) ([lanej](https://github.com/lanej)) - expose elb region [\#168](https://github.com/fog/fog-aws/pull/168) ([lanej](https://github.com/lanej)) - volume\#key\_id and encrypted tests [\#165](https://github.com/fog/fog-aws/pull/165) ([lanej](https://github.com/lanej)) - raise InvalidParameterCombination error [\#163](https://github.com/fog/fog-aws/pull/163) ([michelleN](https://github.com/michelleN)) - storage request bad xml schema for put bucket notification fix [\#161](https://github.com/fog/fog-aws/pull/161) ([bahchis](https://github.com/bahchis)) - Use regex instead of string matching to support redirect correctly when path\_style is set to true [\#159](https://github.com/fog/fog-aws/pull/159) ([drich10](https://github.com/drich10)) - update \#promote\_read\_replica mock [\#158](https://github.com/fog/fog-aws/pull/158) ([lanej](https://github.com/lanej)) ## [v0.7.4](https://github.com/fog/fog-aws/tree/v0.7.4) (2015-07-30) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.3...v0.7.4) **Fixed bugs:** - Route53 zone listing fix and support for private hosted zones [\#154](https://github.com/fog/fog-aws/pull/154) ([solud](https://github.com/solud)) **Merged pull requests:** - AutoScaling attach/detach ELB support + tests [\#156](https://github.com/fog/fog-aws/pull/156) ([nbfowler](https://github.com/nbfowler)) ## [v0.7.3](https://github.com/fog/fog-aws/tree/v0.7.3) (2015-07-10) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.2...v0.7.3) **Closed issues:** - "Error: The specified marker is not valid" after upgrade to 0.7.0 [\#148](https://github.com/fog/fog-aws/issues/148) **Merged pull requests:** - encrypted storage on rds [\#153](https://github.com/fog/fog-aws/pull/153) ([ehowe](https://github.com/ehowe)) ## [v0.7.2](https://github.com/fog/fog-aws/tree/v0.7.2) (2015-07-08) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.1...v0.7.2) **Fixed bugs:** - NoMethodError trying to create a new AWS Route53 entry using version 0.7.1 [\#150](https://github.com/fog/fog-aws/issues/150) **Merged pull requests:** - fix \#change\_resource\_record\_sets [\#151](https://github.com/fog/fog-aws/pull/151) ([lanej](https://github.com/lanej)) ## [v0.7.1](https://github.com/fog/fog-aws/tree/v0.7.1) (2015-07-08) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.7.0...v0.7.1) **Merged pull requests:** - Fix broken xmlns in DNS requests 
[\#149](https://github.com/fog/fog-aws/pull/149) ([decklin](https://github.com/decklin)) - Fix blank content-encoding headers [\#147](https://github.com/fog/fog-aws/pull/147) ([fcheung](https://github.com/fcheung)) ## [v0.7.0](https://github.com/fog/fog-aws/tree/v0.7.0) (2015-07-07) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.6.0...v0.7.0) **Closed issues:** - Add support for AWS Lambda [\#124](https://github.com/fog/fog-aws/issues/124) **Merged pull requests:** - Describe vpcPeeringConnectionId [\#146](https://github.com/fog/fog-aws/pull/146) ([fdr](https://github.com/fdr)) - Adds isDefault to parser for describe\_vpcs [\#144](https://github.com/fog/fog-aws/pull/144) ([gregburek](https://github.com/gregburek)) - Support kinesis [\#143](https://github.com/fog/fog-aws/pull/143) ([mikehale](https://github.com/mikehale)) - The :geo\_location attribute needs to be xml formatted before calling aws [\#142](https://github.com/fog/fog-aws/pull/142) ([carloslima](https://github.com/carloslima)) - Escape Lambda function name in request paths [\#140](https://github.com/fog/fog-aws/pull/140) ([nomadium](https://github.com/nomadium)) - list\_hosted\_zones expects that options to be hash with symbol as key [\#139](https://github.com/fog/fog-aws/pull/139) ([slashmili](https://github.com/slashmili)) ## [v0.6.0](https://github.com/fog/fog-aws/tree/v0.6.0) (2015-06-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.5.0...v0.6.0) **Merged pull requests:** - Add support for AWS Lambda service [\#123](https://github.com/fog/fog-aws/pull/123) ([nomadium](https://github.com/nomadium)) ## [v0.5.0](https://github.com/fog/fog-aws/tree/v0.5.0) (2015-06-17) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.4.1...v0.5.0) **Merged pull requests:** - add t2.large [\#137](https://github.com/fog/fog-aws/pull/137) ([lanej](https://github.com/lanej)) - Make Mock create\_vpc method arity match Real [\#135](https://github.com/fog/fog-aws/pull/135) ([fdr](https://github.com/fdr)) - Add support for EC2 Container Service [\#120](https://github.com/fog/fog-aws/pull/120) ([nomadium](https://github.com/nomadium)) ## [v0.4.1](https://github.com/fog/fog-aws/tree/v0.4.1) (2015-06-15) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.4.0...v0.4.1) **Closed issues:** - Fog doesn't support storage\_type or gp2 for RDS? 
[\#129](https://github.com/fog/fog-aws/issues/129) - Fog-aws not working with Hitachi [\#122](https://github.com/fog/fog-aws/issues/122) - "NoMethodError: undefined method `body' for \#\" [\#112](https://github.com/fog/fog-aws/issues/112) - Add support for EC2 Container Service \(ECS\) [\#93](https://github.com/fog/fog-aws/issues/93) **Merged pull requests:** - Fix attributes of flavors [\#134](https://github.com/fog/fog-aws/pull/134) ([yumminhuang](https://github.com/yumminhuang)) - Fix S3 signature v4 signing [\#133](https://github.com/fog/fog-aws/pull/133) ([fcheung](https://github.com/fcheung)) - Add New M4 Instance Type [\#132](https://github.com/fog/fog-aws/pull/132) ([yumminhuang](https://github.com/yumminhuang)) - raise correct error when exceeding address limit [\#131](https://github.com/fog/fog-aws/pull/131) ([lanej](https://github.com/lanej)) - make elb/policies collection standalone [\#128](https://github.com/fog/fog-aws/pull/128) ([lanej](https://github.com/lanej)) - model managed policies [\#126](https://github.com/fog/fog-aws/pull/126) ([lanej](https://github.com/lanej)) ## [v0.4.0](https://github.com/fog/fog-aws/tree/v0.4.0) (2015-05-27) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.3.0...v0.4.0) **Merged pull requests:** - model iam groups [\#121](https://github.com/fog/fog-aws/pull/121) ([lanej](https://github.com/lanej)) ## [v0.3.0](https://github.com/fog/fog-aws/tree/v0.3.0) (2015-05-21) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.2.2...v0.3.0) **Closed issues:** - How to determine the disableApiTermination attribute value [\#98](https://github.com/fog/fog-aws/issues/98) **Merged pull requests:** - support iam/get\_user without username [\#114](https://github.com/fog/fog-aws/pull/114) ([lanej](https://github.com/lanej)) - Added a new request - describe\_instance\_attribute [\#110](https://github.com/fog/fog-aws/pull/110) ([nilroy](https://github.com/nilroy)) ## [v0.2.2](https://github.com/fog/fog-aws/tree/v0.2.2) (2015-05-13) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.2.1...v0.2.2) ## [v0.2.1](https://github.com/fog/fog-aws/tree/v0.2.1) (2015-05-13) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.2.0...v0.2.1) **Merged pull requests:** - mocks for topic permissions [\#111](https://github.com/fog/fog-aws/pull/111) ([lanej](https://github.com/lanej)) ## [v0.2.0](https://github.com/fog/fog-aws/tree/v0.2.0) (2015-05-13) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.1.2...v0.2.0) **Implemented enhancements:** - update RDS to 2014-10-31 version [\#107](https://github.com/fog/fog-aws/pull/107) ([lanej](https://github.com/lanej)) **Closed issues:** - IAM authentication not compatible with GovCloud [\#100](https://github.com/fog/fog-aws/issues/100) - Enabling termination protection [\#95](https://github.com/fog/fog-aws/issues/95) - SSLv3 deprecation: action required? 
[\#88](https://github.com/fog/fog-aws/issues/88) **Merged pull requests:** - configure server attributes in mock [\#109](https://github.com/fog/fog-aws/pull/109) ([michelleN](https://github.com/michelleN)) - support aws kms [\#108](https://github.com/fog/fog-aws/pull/108) ([lanej](https://github.com/lanej)) - Another attempt to solve content-encoding header issues [\#106](https://github.com/fog/fog-aws/pull/106) ([fcheung](https://github.com/fcheung)) - default replica AutoMinorVersionUpgrade to master [\#104](https://github.com/fog/fog-aws/pull/104) ([michelleN](https://github.com/michelleN)) - Refresh credentials if needed when signing S3 URL [\#103](https://github.com/fog/fog-aws/pull/103) ([matkam](https://github.com/matkam)) - Allow the IAM constructor to accept a region [\#102](https://github.com/fog/fog-aws/pull/102) ([benbalter](https://github.com/benbalter)) - configure auto\_minor\_version\_upgrade in mock [\#101](https://github.com/fog/fog-aws/pull/101) ([michelleN](https://github.com/michelleN)) - Adding instanceTenancy to reserved instance parser. [\#97](https://github.com/fog/fog-aws/pull/97) ([dmbrooking](https://github.com/dmbrooking)) - Parse elasticache configuration endpoint from response [\#96](https://github.com/fog/fog-aws/pull/96) ([fcheung](https://github.com/fcheung)) - Fix mock VPC ELB creation in regions other than us-east-1 [\#94](https://github.com/fog/fog-aws/pull/94) ([mrpoundsign](https://github.com/mrpoundsign)) - Fix repository URL in README.md [\#91](https://github.com/fog/fog-aws/pull/91) ([tricknotes](https://github.com/tricknotes)) - adding support for d2 instance type [\#90](https://github.com/fog/fog-aws/pull/90) ([yumminhuang](https://github.com/yumminhuang)) - Support weight round robin mock [\#89](https://github.com/fog/fog-aws/pull/89) ([freddy61025](https://github.com/freddy61025)) - Update README.md [\#87](https://github.com/fog/fog-aws/pull/87) ([nomadium](https://github.com/nomadium)) - Add mock for EC2 request\_spot\_instances API request [\#86](https://github.com/fog/fog-aws/pull/86) ([nomadium](https://github.com/nomadium)) - Move more requires to autoload [\#85](https://github.com/fog/fog-aws/pull/85) ([plribeiro3000](https://github.com/plribeiro3000)) - Add mock for EC2 describe\_spot\_price\_history API request [\#84](https://github.com/fog/fog-aws/pull/84) ([nomadium](https://github.com/nomadium)) ## [v0.1.2](https://github.com/fog/fog-aws/tree/v0.1.2) (2015-04-07) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.1.1...v0.1.2) **Closed issues:** - Ruby warnings Comparable & Return nil [\#81](https://github.com/fog/fog-aws/issues/81) - CircleCI failing [\#80](https://github.com/fog/fog-aws/issues/80) - Heroku error [\#77](https://github.com/fog/fog-aws/issues/77) - Repeatable signed urls for the same expiry [\#65](https://github.com/fog/fog-aws/issues/65) **Merged pull requests:** - Handle missing parameters in describe\_spot\_price\_history request [\#82](https://github.com/fog/fog-aws/pull/82) ([nomadium](https://github.com/nomadium)) - create db instance in the correct region [\#79](https://github.com/fog/fog-aws/pull/79) ([lanej](https://github.com/lanej)) - Remove assignment within conditional in File\#body [\#78](https://github.com/fog/fog-aws/pull/78) ([greysteil](https://github.com/greysteil)) - mock DescribeDBEngineVersions [\#76](https://github.com/fog/fog-aws/pull/76) ([ehowe](https://github.com/ehowe)) - Fix blank content-encoding when none is supplied [\#75](https://github.com/fog/fog-aws/pull/75) 
([fcheung](https://github.com/fcheung)) - \[rds\] prevent final snapshot on replicas [\#74](https://github.com/fog/fog-aws/pull/74) ([lanej](https://github.com/lanej)) - Fix for `undefined method `map' for nil:NilClass` [\#73](https://github.com/fog/fog-aws/pull/73) ([mattheworiordan](https://github.com/mattheworiordan)) - Resource record sets bug fix + support eu-central-1 [\#72](https://github.com/fog/fog-aws/pull/72) ([mattheworiordan](https://github.com/mattheworiordan)) - Fix EC2 security groups where SSH inbound rule isn't first [\#71](https://github.com/fog/fog-aws/pull/71) ([ayumi](https://github.com/ayumi)) - eu-central missing from Fog::Compute::AWS::Mock [\#70](https://github.com/fog/fog-aws/pull/70) ([wyhaines](https://github.com/wyhaines)) - Remove executable bit from files. [\#69](https://github.com/fog/fog-aws/pull/69) ([voxik](https://github.com/voxik)) - Remove Mac specific files. [\#68](https://github.com/fog/fog-aws/pull/68) ([voxik](https://github.com/voxik)) - Stringify keys for query parameters [\#67](https://github.com/fog/fog-aws/pull/67) ([jfmyers9](https://github.com/jfmyers9)) - Mock method for AWS S3 post\_object\_hidden\_fields [\#63](https://github.com/fog/fog-aws/pull/63) ([byterussian](https://github.com/byterussian)) - Reduce loading time [\#62](https://github.com/fog/fog-aws/pull/62) ([plribeiro3000](https://github.com/plribeiro3000)) - Add support for cname buckets [\#61](https://github.com/fog/fog-aws/pull/61) ([dsgh](https://github.com/dsgh)) ## [v0.1.1](https://github.com/fog/fog-aws/tree/v0.1.1) (2015-02-25) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.1.0...v0.1.1) **Closed issues:** - head\_url signed [\#47](https://github.com/fog/fog-aws/issues/47) - AWS Credentials required when using IAM Profile [\#44](https://github.com/fog/fog-aws/issues/44) **Merged pull requests:** - Support for IAM managed policies [\#60](https://github.com/fog/fog-aws/pull/60) ([fcheung](https://github.com/fcheung)) - Fix for ScanFilter parameters [\#58](https://github.com/fog/fog-aws/pull/58) ([nawaidshamim](https://github.com/nawaidshamim)) - \[dns\] fix Records\#get, mock records and proper errors [\#57](https://github.com/fog/fog-aws/pull/57) ([lanej](https://github.com/lanej)) - \[aws|compute\] support c4.8xlarge flavor [\#56](https://github.com/fog/fog-aws/pull/56) ([ddoc](https://github.com/ddoc)) - \[aws|compute\] adding support for c4 instance class [\#55](https://github.com/fog/fog-aws/pull/55) ([ddoc](https://github.com/ddoc)) - not allowed to delete a "revoking" rds firewall [\#54](https://github.com/fog/fog-aws/pull/54) ([lanej](https://github.com/lanej)) - raise when destroying an ec2 firewall authorized to an rds firewall [\#53](https://github.com/fog/fog-aws/pull/53) ([lanej](https://github.com/lanej)) - Making it easier to get pre-signed head requests [\#51](https://github.com/fog/fog-aws/pull/51) ([mrloop](https://github.com/mrloop)) - Support customer encryption headers in multipart uploads [\#50](https://github.com/fog/fog-aws/pull/50) ([lautis](https://github.com/lautis)) - don't allow sg authorization to unknown sgs [\#49](https://github.com/fog/fog-aws/pull/49) ([lanej](https://github.com/lanej)) ## [v0.1.0](https://github.com/fog/fog-aws/tree/v0.1.0) (2015-02-03) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.8...v0.1.0) **Closed issues:** - AWS Launch Configuration missing Ebs.Volume\_Type [\#18](https://github.com/fog/fog-aws/issues/18) **Merged pull requests:** - Fix v4 signature when path has repeated slashes in the 
middle [\#46](https://github.com/fog/fog-aws/pull/46) ([fcheung](https://github.com/fcheung)) - get signin token for federation [\#45](https://github.com/fog/fog-aws/pull/45) ([ehowe](https://github.com/ehowe)) - add 'volumeType' and 'encrypted' to blockDeviceMapping parser [\#43](https://github.com/fog/fog-aws/pull/43) ([ichii386](https://github.com/ichii386)) - add missing mocks [\#41](https://github.com/fog/fog-aws/pull/41) ([michelleN](https://github.com/michelleN)) - Add idempotent excon option to some route53 API calls [\#40](https://github.com/fog/fog-aws/pull/40) ([josacar](https://github.com/josacar)) - Allow for AWS errors not specifying region [\#39](https://github.com/fog/fog-aws/pull/39) ([greysteil](https://github.com/greysteil)) - correct engine version param on rds replicas [\#38](https://github.com/fog/fog-aws/pull/38) ([lanej](https://github.com/lanej)) - default namespace and evaluation period on alarm [\#37](https://github.com/fog/fog-aws/pull/37) ([michelleN](https://github.com/michelleN)) - \[AWS|Autoscaling\] Add missing ebs attributes to describe\_launch\_configurations [\#35](https://github.com/fog/fog-aws/pull/35) ([fcheung](https://github.com/fcheung)) - \[AWS|Storage\] signed\_url should use v2 signature when aws\_signature\_version is 2 [\#34](https://github.com/fog/fog-aws/pull/34) ([fcheung](https://github.com/fcheung)) - BUGFIX: When fog\_credentials endpoint is set @region defaults to nil [\#33](https://github.com/fog/fog-aws/pull/33) ([nicholasklick](https://github.com/nicholasklick)) - \[AWS|Autoscaling\] Support classic link related properties for launch configurations [\#32](https://github.com/fog/fog-aws/pull/32) ([fcheung](https://github.com/fcheung)) - fix autoscaling activities collection setup [\#31](https://github.com/fog/fog-aws/pull/31) ([fcheung](https://github.com/fcheung)) - Add PlacementTenancy to launch configuration parser and test case [\#29](https://github.com/fog/fog-aws/pull/29) ([benpillet](https://github.com/benpillet)) - Use Fog::Formatador [\#27](https://github.com/fog/fog-aws/pull/27) ([ghost](https://github.com/ghost)) ## [v0.0.8](https://github.com/fog/fog-aws/tree/v0.0.8) (2015-01-27) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.7...v0.0.8) **Closed issues:** - NoMethodError - undefined method `signature\_parameters' for nil:NilClass [\#28](https://github.com/fog/fog-aws/issues/28) ## [v0.0.7](https://github.com/fog/fog-aws/tree/v0.0.7) (2015-01-23) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.6...v0.0.7) **Closed issues:** - SSL Error on S3 connection [\#9](https://github.com/fog/fog-aws/issues/9) **Merged pull requests:** - simulate sns confirmation message [\#36](https://github.com/fog/fog-aws/pull/36) ([lanej](https://github.com/lanej)) - Support for VPC Classic Link [\#3](https://github.com/fog/fog-aws/pull/3) ([fcheung](https://github.com/fcheung)) ## [v0.0.6](https://github.com/fog/fog-aws/tree/v0.0.6) (2015-01-12) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.5...v0.0.6) **Closed issues:** - missed files [\#1](https://github.com/fog/fog-aws/issues/1) **Merged pull requests:** - \[AWS|Core\] Fix signature v4 non canonicalising header case properly [\#4](https://github.com/fog/fog-aws/pull/4) ([fcheung](https://github.com/fcheung)) - another attempt at s3 region redirecting [\#2](https://github.com/fog/fog-aws/pull/2) ([geemus](https://github.com/geemus)) ## [v0.0.5](https://github.com/fog/fog-aws/tree/v0.0.5) (2015-01-06) [Full 
Changelog](https://github.com/fog/fog-aws/compare/v0.0.4...v0.0.5) ## [v0.0.4](https://github.com/fog/fog-aws/tree/v0.0.4) (2015-01-04) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.3...v0.0.4) ## [v0.0.3](https://github.com/fog/fog-aws/tree/v0.0.3) (2015-01-02) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.2...v0.0.3) ## [v0.0.2](https://github.com/fog/fog-aws/tree/v0.0.2) (2015-01-02) [Full Changelog](https://github.com/fog/fog-aws/compare/v0.0.1...v0.0.2) ## [v0.0.1](https://github.com/fog/fog-aws/tree/v0.0.1) (2015-01-02) [Full Changelog](https://github.com/fog/fog-aws/compare/rm...v0.0.1) ## [rm](https://github.com/fog/fog-aws/tree/rm) (2014-11-27) [Full Changelog](https://github.com/fog/fog-aws/compare/fog-brightbox_v0.0.1...rm) ## [fog-brightbox_v0.0.1](https://github.com/fog/fog-aws/tree/fog-brightbox_v0.0.1) (2014-02-19) [Full Changelog](https://github.com/fog/fog-aws/compare/d496bcd266d584ffcded6e265e8166138a3fb22a...fog-brightbox_v0.0.1) \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* fog-aws-3.18.0/CONTRIBUTING.md000066400000000000000000000015211437344660100154060ustar00rootroot00000000000000## Getting Involved New contributors are always welcome; when in doubt please ask questions. We strive to be an open and welcoming community. Please be nice to one another. ### Coding * Pick a task: * Offer feedback on open [pull requests](https://github.com/fog/fog-aws/pulls). * Review open [issues](https://github.com/fog/fog-aws/issues) for things to help on. * [Create an issue](https://github.com/fog/fog-aws/issues/new) to start a discussion on additions or features. * Fork the project, add your changes and tests to cover them in a topic branch. * Commit your changes and rebase against `fog/fog-aws` to ensure everything is up to date. * [Submit a pull request](https://github.com/fog/fog-aws/compare/). ### Non-Coding * Offer feedback on open [issues](https://github.com/fog/fog-aws/issues). * Organize or volunteer at events.
fog-aws-3.18.0/CONTRIBUTORS.md000066400000000000000000000104071437344660100154370ustar00rootroot00000000000000* Aakash Shah * Aaron Stone * Akira Matsuda * Alessandro Lepore * Alex Coomans * Alexander Stuart-Kregor * Andrew Kane * Anthony Mangano * Ayumi Yu * Ben Balter * Benjamin Pillet * Brett Cave * Brian Nelson * Caged * Carlos Lima * Chanakya Devraj * Corey Donohoe * Craig Genner * Damien Mathieu * Dan Brooking * Dan Rogers * Daniel Farina * Danny Guinther * David Judd * David Vaz * Decklin Foster * Duarte Henriques * Ed Healy * Eddie Johnston * Eric Herot * Esther Villars * Eugene Howe * Freddy Chu * Frederick Cheung * GitHub * Greg Burek * Grey Baker * Huang Yaming * ICHII Takashi * Jack Thomas * Jacob Burkhart * James Muscat * James Myers * James Rucker * Jon-Erik Schneiderhan * Jonathan Hanson * Jose Luis Salas * Josh Lane & Michelle Noorali * Josh Lane & Michelle Noorali * Josh Lane * Josh Lane * Josh Lane * Joshua Lane * Kevin Loiseau * KevinLoiseau * Khoa Nguyen * Kirk Haines * Kristoffer Roupé * Liu zhitong * Luciano Sousa * Martin Forssen * Mathew Kamkar * Matthew O'Riordan * Michael Hale * Michael Sawyer * Michal Tekel * Michelle Noorali & Josh Lane * Michelle Noorali * Miguel Landaeta * Miguel Landaeta * Miguel Landaeta * Milad Rastian * Moritz Siuts * Nawaid Shamim * Neill Turner * Nicholas Fowler * Nicholas Klick * Nilanjan Roy * Paulo Henrique Lopes Ribeiro * Paulo Ribeiro * Pedro Matos Monteiro * Puneet Loya * Robert von Massow * Rocco Galluzzo * Rodrigo de Almeida Pereira * Ryan Schlesinger * Ryunosuke SATO * Sergey Bahchissaraitsev * Shai Rosenfeld * Shuhei Kagawa * Suraj Shirvankar * Todd Willey * Ville Lautanala * Vlad Yarotsky * Vít Ondruch * Wesley Beary * Yaming Huang * Yusuke Ebihara * chanakya devraj * chanakyad-cuelogic * ddoc * geemus * huming * miliao * mrloop * nob.murakita * solud * starbelly fog-aws-3.18.0/Gemfile000066400000000000000000000003731437344660100144540ustar00rootroot00000000000000source 'https://rubygems.org' # Specify your gem's dependencies in fog-aws.gemspec gemspec group :test, :default do gem 'pry-nav' gem 'mime-types', '~> 3.1' end group :test do gem "simplecov" gem "codeclimate-test-reporter", "~> 1.0.0" end fog-aws-3.18.0/LICENSE.md000066400000000000000000000021751437344660100145670ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2014-2019 [CONTRIBUTORS.md](https://github.com/fog/fog-aws/blob/master/CONTRIBUTORS.md) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
fog-aws-3.18.0/README.md000066400000000000000000000142041437344660100144360ustar00rootroot00000000000000# Fog::Aws ![Gem Version](https://badge.fury.io/rb/fog-aws.svg) [![Build Status](https://github.com/fog/fog-aws/actions/workflows/ruby.yml/badge.svg)](https://github.com/fog/fog-aws/actions/workflows/ruby.yml) [![Test Coverage](https://codeclimate.com/github/fog/fog-aws/badges/coverage.svg)](https://codeclimate.com/github/fog/fog-aws) [![Code Climate](https://codeclimate.com/github/fog/fog-aws.svg)](https://codeclimate.com/github/fog/fog-aws) ## Installation Add this line to your application's Gemfile: ```ruby gem 'fog-aws' ``` And then execute: $ bundle Or install it yourself as: $ gem install fog-aws ## Usage Before you can use fog-aws, you must require it in your application: ```ruby require 'fog/aws' ``` Since it's a bad practice to have your credentials in source code, you should load them from the default fog configuration file: ```~/.fog```. This file could look like this: ``` default: aws_access_key_id: aws_secret_access_key: ``` ### EC2 #### Connecting to the EC2 Service: ```ruby ec2 = Fog::Compute.new :provider => 'AWS', :region => 'us-west-2' ``` You can review all the requests available with this service using the ```#requests``` method: ```ruby ec2.requests # => [:allocate_address, :assign_private_ip_addresses, :associate_address, ...] ``` #### Launch an EC2 on-demand instance: ```ruby response = ec2.run_instances( "ami-23ebb513", 1, 1, "InstanceType" => "t1.micro", "SecurityGroup" => "ssh", "KeyName" => "miguel" ) instance_id = response.body["instancesSet"].first["instanceId"] # => "i-02db5af4" instance = ec2.servers.get(instance_id) instance.wait_for { ready? } puts instance.public_ip_address # => "356.300.501.20" ``` #### Terminate an EC2 instance: ```ruby instance = ec2.servers.get("i-02db5af4") instance.destroy ``` `Fog::AWS` is much more than EC2, since it supports many other AWS services. The best way to learn which services are supported is to take a look at the source code. Reviewing the tests directory and playing with the library in ```bin/console``` are also very helpful resources. ### S3 #### Connecting to the S3 Service: ```ruby s3 = Fog::Storage.new(provider: 'AWS', region: 'eu-central-1') ``` #### Creating a file: ```ruby directory = s3.directories.new(key: 'gaudi-portal-dev') file = directory.files.create(key: 'user/1/Gemfile', body: File.open('Gemfile'), tags: 'Org-Id=1&Service-Name=My-Service') ``` #### Listing files: ```ruby directory = s3.directories.get('gaudi-portal-dev', prefix: 'user/1/') directory.files ``` **Warning!** `s3.directories.get` retrieves and caches metadata for the first 10,000 objects in the bucket, which can be very expensive. When possible use `s3.directories.new`. #### Generating a URL for a file: ```ruby directory.files.new(key: 'user/1/Gemfile').url(Time.now + 60) ``` ##### Generate download URL You should pass an options argument that contains the `query` key with `response-content-disposition` inside, indicating that the file is an attachment and the filename to be used when it is downloaded.
```ruby options = { query: { 'response-content-disposition' => "attachment; filename=#{key}" } } directory.files.new(key: 'user/1/Gemfile').url(Time.now + 60, options) ``` ##### Controlling credential refresh time with IAM authentication When using IAM authentication with [temporary security credentials](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html), generated S3 pre-signed URLs [only last as long as the temporary credential](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html). Generating the URLs in the following manner will return a URL that does not last as long as its requested expiration time if the remaining lifetime of the authentication token is shorter. ```ruby s3 = Fog::Storage.new(provider: 'AWS', use_iam_profile: true) directory = s3.directories.get('gaudi-portal-dev', prefix: 'user/1/') directory.files.new(key: 'user/1/Gemfile').url(Time.now + 60) ``` By default the temporary credentials in use are refreshed only within the last 15 seconds of their expiration time. A URL requested with a 60-second lifetime using the above example will, in the worst case, only remain valid for 15 seconds. The problem can be avoided by refreshing the token early and often, by setting the `aws_credentials_refresh_threshold_seconds` configuration option (default: 15), which controls when the refresh must occur. It is expressed in seconds before the temporary credential's expiration time. The following example ensures pre-signed URLs last as long as 60 seconds by automatically refreshing the credentials when their remaining lifetime is lower than 60 seconds: ```ruby s3 = Fog::Storage.new( provider: 'AWS', use_iam_profile: true, aws_credentials_refresh_threshold_seconds: 60 ) directory = s3.directories.get('gaudi-portal-dev', prefix: 'user/1/') directory.files.new(key: 'user/1/Gemfile').url(Time.now + 60) ``` #### Copying a file ```ruby directory = s3.directories.new(key: 'gaudi-portal-dev') file = directory.files.get('user/1/Gemfile') file.copy("target-bucket", "user/2/Gemfile.copy") ``` To speed transfers of large files, the `concurrency` option can be used to spawn multiple threads. Note that the file must be at least 5 MB for multipart uploads to work. For example: ```ruby directory = s3.directories.new(key: 'gaudi-portal-dev') file = directory.files.get('user/1/Gemfile') file.multipart_chunk_size = 10 * 1024 * 1024 file.concurrency = 10 file.copy("target-bucket", "user/2/Gemfile.copy") ``` ## Documentation See the [online documentation](http://www.rubydoc.info/github/fog/fog-aws) for a complete API reference. ## Development After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. ## Contributing 1. Fork it ( https://github.com/fog/fog-aws/fork ) 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) 5.
Create a new Pull Request fog-aws-3.18.0/Rakefile000066400000000000000000000005001437344660100146160ustar00rootroot00000000000000require "bundler/gem_tasks" require "github_changelog_generator/task" task :default => :test mock = ENV['FOG_MOCK'] || 'true' task :test do sh("export FOG_MOCK=#{mock} && bundle exec shindont") end GitHubChangelogGenerator::RakeTask.new :changelog do |config| config.user = 'fog' config.project = 'fog-aws' end fog-aws-3.18.0/bin/000077500000000000000000000000001437344660100137265ustar00rootroot00000000000000fog-aws-3.18.0/bin/console000077500000000000000000000005261437344660100153210ustar00rootroot00000000000000#!/usr/bin/env ruby require "bundler/setup" require "fog-aws" # You can add fixtures and/or initialization code here to make experimenting # with your gem easier. You can also use a different console, if you like. # (If you use this, don't forget to add pry to your Gemfile!) # require "pry" # Pry.start require "irb" IRB.start(__FILE__) fog-aws-3.18.0/bin/setup000077500000000000000000000002031437344660100150070ustar00rootroot00000000000000#!/usr/bin/env bash set -euo pipefail IFS=$'\n\t' set -vx bundle install # Do any other automated setup that you need to do here fog-aws-3.18.0/fog-aws.gemspec000066400000000000000000000027161437344660100160740ustar00rootroot00000000000000# coding: utf-8 lib = File.expand_path('../lib', __FILE__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) require 'fog/aws/version' Gem::Specification.new do |spec| spec.name = "fog-aws" spec.version = Fog::AWS::VERSION spec.authors = ["Josh Lane", "Wesley Beary"] spec.email = ["me@joshualane.com", "geemus@gmail.com"] spec.summary = %q{Module for the 'fog' gem to support Amazon Web Services.} spec.description = %q{This library can be used as a module for `fog` or as standalone provider to use the Amazon Web Services in applications..} spec.homepage = "https://github.com/fog/fog-aws" spec.license = "MIT" spec.files = Dir['lib/**/*.{rb,json}', 'CHANGELOG.md', 'CONTRIBUTING.md', 'CONTRIBUTORS.md', 'LICENSE.md', 'README.md', 'fog-aws.gemspec',] spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } spec.require_paths = ["lib"] spec.required_ruby_version = '>= 2.0.0' spec.add_development_dependency 'bundler' spec.add_development_dependency 'github_changelog_generator', '~> 1.16' spec.add_development_dependency 'rake', '>= 12.3.3' spec.add_development_dependency 'rubyzip', '~> 2.3.0' spec.add_development_dependency 'shindo', '~> 0.3' spec.add_dependency 'fog-core', '~> 2.1' spec.add_dependency 'fog-json', '~> 1.1' spec.add_dependency 'fog-xml', '~> 0.1' end fog-aws-3.18.0/gemfiles/000077500000000000000000000000001437344660100147515ustar00rootroot00000000000000fog-aws-3.18.0/gemfiles/Gemfile-edge000066400000000000000000000004471437344660100171530ustar00rootroot00000000000000source "https://rubygems.org" # Shared components gem "fog-core", :github => "fog/fog-core" gem "fog-json", :github => "fog/fog-json" group :test, :default do gem 'pry-nav' gem 'mime-types', '~> 3.1' end gem "codeclimate-test-reporter", group: :test, require: nil gemspec :path => "../" fog-aws-3.18.0/lib/000077500000000000000000000000001437344660100137245ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog-aws.rb000066400000000000000000000000221437344660100156060ustar00rootroot00000000000000require 'fog/aws' 
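# Descriptive note: lib/fog-aws.rb is a top-level shim matching the gem name, so that `require 'fog-aws'` also loads the library; it simply delegates to `require 'fog/aws'` (see the "Add a top-level require that matches the gem name" entry, PR #367, in the changelog).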
fog-aws-3.18.0/lib/fog/000077500000000000000000000000001437344660100144775ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws.rb000066400000000000000000000221271437344660100156220ustar00rootroot00000000000000require 'fog/core' require 'fog/xml' require 'fog/json' require File.expand_path('../aws/version', __FILE__) module Fog module AWS extend Fog::Provider autoload :CredentialFetcher, File.expand_path('../aws/credential_fetcher', __FILE__) autoload :Errors, File.expand_path('../aws/errors', __FILE__) autoload :Mock, File.expand_path('../aws/mock', __FILE__) autoload :ServiceMapper, File.expand_path('../aws/service_mapper', __FILE__) autoload :SignatureV4, File.expand_path('../aws/signaturev4', __FILE__) # Services autoload :AutoScaling, File.expand_path('../aws/auto_scaling', __FILE__) autoload :CDN, File.expand_path('../aws/cdn', __FILE__) autoload :CloudFormation, File.expand_path('../aws/cloud_formation', __FILE__) autoload :CloudWatch, File.expand_path('../aws/cloud_watch', __FILE__) autoload :Compute, File.expand_path('../aws/compute', __FILE__) autoload :DataPipeline, File.expand_path('../aws/data_pipeline', __FILE__) autoload :DNS, File.expand_path('../aws/dns', __FILE__) autoload :DynamoDB, File.expand_path('../aws/dynamodb', __FILE__) autoload :ECS, File.expand_path('../aws/ecs', __FILE__) autoload :EFS, File.expand_path('../aws/efs', __FILE__) autoload :ELB, File.expand_path('../aws/elb', __FILE__) autoload :ELBV2, File.expand_path('../aws/elbv2', __FILE__) autoload :EMR, File.expand_path('../aws/emr', __FILE__) autoload :ElasticBeanstalk, File.expand_path('../aws/beanstalk', __FILE__) autoload :Elasticache, File.expand_path('../aws/elasticache', __FILE__) autoload :Federation, File.expand_path('../aws/federation', __FILE__) autoload :Glacier, File.expand_path('../aws/glacier', __FILE__) autoload :IAM, File.expand_path('../aws/iam', __FILE__) autoload :Kinesis, File.expand_path('../aws/kinesis', __FILE__) autoload :KMS, File.expand_path('../aws/kms', __FILE__) autoload :Lambda, File.expand_path('../aws/lambda', __FILE__) autoload :RDS, File.expand_path('../aws/rds', __FILE__) autoload :Redshift, File.expand_path('../aws/redshift', __FILE__) autoload :SES, File.expand_path('../aws/ses', __FILE__) autoload :SNS, File.expand_path('../aws/sns', __FILE__) autoload :SQS, File.expand_path('../aws/sqs', __FILE__) autoload :STS, File.expand_path('../aws/sts', __FILE__) autoload :Storage, File.expand_path('../aws/storage', __FILE__) autoload :Support, File.expand_path('../aws/support', __FILE__) autoload :SimpleDB, File.expand_path('../aws/simpledb', __FILE__) service(:auto_scaling, 'AutoScaling') service(:beanstalk, 'ElasticBeanstalk') service(:cdn, 'CDN') service(:cloud_formation, 'CloudFormation') service(:cloud_watch, 'CloudWatch') service(:compute, 'Compute') service(:data_pipeline, 'DataPipeline') service(:dns, 'DNS') service(:dynamodb, 'DynamoDB') service(:elasticache, 'Elasticache') service(:ecs, 'ECS') service(:efs, 'EFS') service(:elb, 'ELB') service(:elbv2, 'ELBV2') service(:emr, 'EMR') service(:federation, 'Federation') service(:glacier, 'Glacier') service(:iam, 'IAM') service(:kinesis, 'Kinesis') service(:kms, 'KMS') service(:lambda, 'Lambda') service(:rds, 'RDS') service(:redshift, 'Redshift') service(:ses, 'SES') service(:simpledb, 'SimpleDB') service(:sns, 'SNS') service(:sqs, 'SQS') service(:storage, 'Storage') service(:sts, 'STS') service(:support, 'Support') def self.indexed_param(key, values) params = {} unless key.include?('%d') key << '.%d' end 
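# For example (illustrative values): indexed_param('ImageId', %w(ami-1 ami-2)) produces { 'ImageId.1' => 'ami-1', 'ImageId.2' => 'ami-2' }; hash values are flattened into nested keys such as 'ImageId.1.SubKey' => value.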
[*values].each_with_index do |value, index| if value.respond_to?('keys') k = format(key, index + 1) value.each do | vkey, vvalue | params["#{k}.#{vkey}"] = vvalue end else params[format(key, index + 1)] = value end end params end def self.serialize_keys(key, value, options = {}) case value when Hash value.each do | k, v | options.merge!(serialize_keys("#{key}.#{k}", v)) end return options when Array value.each_with_index do | it, idx | options.merge!(serialize_keys("#{key}.member.#{(idx + 1)}", it)) end return options else return {key => value} end end def self.indexed_request_param(name, values) idx = -1 Array(values).reduce({}) do |params, value| params["#{name}.#{idx += 1}"] = value params end end def self.indexed_filters(filters) params = {} filters.keys.each_with_index do |key, key_index| key_index += 1 params[format('Filter.%d.Name', key_index)] = key [*filters[key]].each_with_index do |value, value_index| value_index += 1 params[format('Filter.%d.Value.%d', key_index, value_index)] = value end end params end def self.escape(string) string.gsub(/([^a-zA-Z0-9_.\-~]+)/) { "%" + $1.unpack("H2" * $1.bytesize).join("%").upcase } end def self.signed_params_v4(params, headers, options={}) date = Fog::Time.now params = params.merge('Version' => options[:version]) headers = headers.merge('Host' => options[:host], 'x-amz-date' => date.to_iso8601_basic) headers['x-amz-security-token'] = options[:aws_session_token] if options[:aws_session_token] query = options[:query] || {} if !options[:body] body = '' for key in params.keys.sort unless (value = params[key]).nil? body << "#{key}=#{escape(value.to_s)}&" end end body.chop! else body = options[:body] end headers['Authorization'] = options[:signer].sign({:method => options[:method], :headers => headers, :body => body, :query => query, :path => options[:path]}, date) return body, headers end def self.signed_params(params, options = {}) params.merge!({ 'AWSAccessKeyId' => options[:aws_access_key_id], 'SignatureMethod' => 'HmacSHA256', 'SignatureVersion' => '2', 'Timestamp' => Time.now.utc.strftime("%Y-%m-%dT%H:%M:%SZ"), 'Version' => options[:version] }) params.merge!({ 'SecurityToken' => options[:aws_session_token] }) if options[:aws_session_token] body = '' for key in params.keys.sort unless (value = params[key]).nil? body << "#{key}=#{escape(value.to_s)}&" end end string_to_sign = "POST\n#{options[:host]}:#{options[:port]}\n#{options[:path]}\n" << body.chop signed_string = options[:hmac].sign(string_to_sign) body << "Signature=#{escape(Base64.encode64(signed_string).chomp!)}" body end def self.parse_security_group_options(group_name, options) options ||= Hash.new if group_name.is_a?(Hash) options = group_name elsif group_name if options.key?('GroupName') raise Fog::AWS::Compute::Error, 'Arguments specified both group_name and GroupName in options' end options = options.clone options['GroupName'] = group_name end name_specified = options.key?('GroupName') && !options['GroupName'].nil? group_id_specified = options.key?('GroupId') && !options['GroupId'].nil? unless name_specified || group_id_specified raise Fog::AWS::Compute::Error, 'Neither GroupName nor GroupId specified' end if name_specified && group_id_specified options.delete('GroupName') end options end def self.json_response?(response) return false unless response && response.headers response.get_header('Content-Type') =~ %r{application/.*json.*}i ? 
true : false end def self.regions @regions ||= [ 'af-south-1', 'ap-east-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3', 'ap-south-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-southeast-3', 'ap-southeast-4', 'ca-central-1', 'cn-north-1', 'cn-northwest-1', 'eu-central-1', 'eu-north-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-south-1', 'eu-south-2', 'me-south-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'sa-east-1', 'us-gov-east-1', 'us-gov-west-1' ] end def self.validate_region!(region, host=nil) if (!host || host.end_with?('.amazonaws.com')) && !regions.include?(region) raise ArgumentError, "Unknown region: #{region.inspect}" end end end end fog-aws-3.18.0/lib/fog/aws/000077500000000000000000000000001437344660100152715ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/auto_scaling.rb000066400000000000000000000231071437344660100202710ustar00rootroot00000000000000module Fog module AWS class AutoScaling < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class IdentifierTaken < Fog::Errors::Error; end class ResourceInUse < Fog::Errors::Error; end class ValidationError < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :persistent, :region, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/auto_scaling' request :attach_load_balancers request :attach_load_balancer_target_groups request :create_auto_scaling_group request :create_launch_configuration request :create_or_update_tags request :delete_auto_scaling_group request :delete_launch_configuration request :delete_notification_configuration request :delete_policy request :delete_scheduled_action request :delete_tags request :describe_adjustment_types request :describe_auto_scaling_groups request :describe_auto_scaling_instances request :describe_auto_scaling_notification_types request :describe_launch_configurations request :describe_metric_collection_types request :describe_notification_configurations request :describe_policies request :describe_scaling_activities request :describe_scaling_process_types request :describe_scheduled_actions request :describe_tags request :describe_termination_policy_types request :detach_load_balancers request :detach_load_balancer_target_groups request :detach_instances request :attach_instances request :disable_metrics_collection request :enable_metrics_collection request :execute_policy request :put_notification_configuration request :put_scaling_policy request :put_scheduled_update_group_action request :resume_processes request :set_desired_capacity request :set_instance_health request :set_instance_protection request :suspend_processes request :terminate_instance_in_auto_scaling_group request :update_auto_scaling_group model_path 'fog/aws/models/auto_scaling' model :activity collection :activities model :configuration collection :configurations model :group collection :groups model :instance collection :instances model :policy collection :policies ExpectedOptions = {} class Real include Fog::AWS::CredentialFetcher::ConnectionMethods attr_accessor :region # Initialize connection to AutoScaling # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # as = AutoScaling.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - 
config arguments for connection. Defaults to {}. # # ==== Returns # * AutoScaling object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.auto_scaling' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "autoscaling.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @port = options[:port] || 443 @persistent = options[:persistent] || false @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :aws_session_token => @aws_session_token, :method => 'POST', :signer => @signer, :host => @host, :path => @path, :port => @port, :version => '2011-01-01' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) begin @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? raise case match[:code] when 'AlreadyExists' Fog::AWS::AutoScaling::IdentifierTaken.slurp(error, match[:message]) when 'ResourceInUse' Fog::AWS::AutoScaling::ResourceInUse.slurp(error, match[:message]) when 'ValidationError' Fog::AWS::AutoScaling::ValidationError.slurp(error, CGI.unescapeHTML(match[:message])) else Fog::AWS::AutoScaling::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'autoscaling') end end class Mock include Fog::AWS::CredentialFetcher::ConnectionMethods attr_accessor :region def self.data @data ||= Hash.new do |hash, region| owner_id = Fog::AWS::Mock.owner_id hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :adjustment_types => [ 'ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity' ], :auto_scaling_groups => {}, :scaling_policies => {}, :health_states => [ 'Healthy', 'Unhealthy' ], :launch_configurations => {}, :metric_collection_types => { :granularities => [ '1Minute' ], :metrics => [ 'GroupMinSize', 'GroupMaxSize', 'GroupDesiredCapacity', 'GroupInServiceInstances', 'GroupPendingInstances', 'GroupTerminatingInstances', 'GroupTotalInstances' ] }, :notification_configurations => {}, :notification_types => [ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', 'autoscaling:TEST_NOTIFICATION' ], :owner_id => owner_id, :process_types => [ 'AZRebalance', 'AddToLoadBalancer', 'AlarmNotification', 'HealthCheck', 
'Launch', 'ReplaceUnhealthy', 'ScheduledActions', 'Terminate' ], :termination_policy_types => [ 'ClosestToNextInstanceHour', 'Default', 'NewestInstance', 'OldestInstance', 'OldestLaunchConfiguration' ] } end end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) end def region_data self.class.data[@region] end def data self.region_data[@aws_access_key_id] end def reset_data self.region_data.delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end end end end fog-aws-3.18.0/lib/fog/aws/beanstalk.rb000066400000000000000000000127001437344660100175620ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class InvalidParameterError < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/beanstalk' request :check_dns_availability request :create_application request :create_application_version request :create_configuration_template request :create_environment request :create_storage_location request :delete_application request :delete_application_version request :delete_configuration_template request :delete_environment_configuration request :describe_applications request :describe_application_versions request :describe_configuration_options request :describe_configuration_settings request :describe_environment_resources request :describe_environments request :describe_events request :list_available_solution_stacks request :rebuild_environment request :request_environment_info request :restart_app_server request :retrieve_environment_info request :swap_environment_cnames request :terminate_environment request :update_application request :update_application_version request :update_configuration_template request :update_environment request :validate_configuration_settings model_path 'fog/aws/models/beanstalk' model :application collection :applications model :environment collection :environments model :event collection :events model :template collection :templates model :version collection :versions class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} options[:region] ||= 'us-east-1' @host = options[:host] || "elasticbeanstalk.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.beanstalk' @region = options[:region] setup_credentials(options) end def reload @connection.reset end # Returns an array of available solutions stack details def solution_stacks list_available_solution_stacks.body['ListAvailableSolutionStacksResult']['SolutionStackDetails'] end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] 
@aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'elasticbeanstalk') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :method => "POST", :host => @host, :path => @path, :port => @port, :version => '2010-12-01' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? raise case match[:code] when 'InvalidParameterValue' Fog::AWS::ElasticBeanstalk::InvalidParameterError.slurp(error, match[:message]) else Fog::AWS::ElasticBeanstalk::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/cdn.rb000066400000000000000000000176711437344660100163760ustar00rootroot00000000000000module Fog module AWS class CDN < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :version, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name model_path 'fog/aws/models/cdn' model :distribution collection :distributions model :streaming_distribution collection :streaming_distributions request_path 'fog/aws/requests/cdn' request 'delete_distribution' request 'delete_streaming_distribution' request 'get_distribution' request 'get_distribution_list' request 'get_invalidation_list' request 'get_invalidation' request 'get_streaming_distribution' request 'get_streaming_distribution_list' request 'post_distribution' request 'post_streaming_distribution' request 'post_invalidation' request 'put_distribution_config' request 'put_streaming_distribution_config' class Mock def self.data @data ||= Hash.new do |hash, key| hash[key] = { :distributions => {}, :streaming_distributions => {}, :invalidations => {} } end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) end def data self.class.data[@aws_access_key_id] end def reset_data self.class.data.delete(@aws_access_key_id) end def signature(params) "foo" end def setup_credentials(options={}) @aws_access_key_id = options[:aws_access_key_id] end def self.distribution_id random_id(14) end def self.generic_id random_id(14) end def self.domain_name "#{random_id(12).downcase}.cloudfront.net" end def self.random_id(length) Fog::Mock.random_selection("abcdefghijklmnopqrstuvwxyz0123456789", length).upcase end CDN_ERRORS = { :access_denies => {:code => 'AccessDenied',:msg => 'Access denied.',:status => 403}, :inappropriate_xml => {:code => 'InappropriateXML',:msg => 'The XML document you provided was well-formed and valid, but not appropriate for this operation.',:status => 400}, :internal_error => {:code => 
'InternalError',:msg => 'We encountered an internal error. Please try again.',:status => 500}, :invalid_action => {:code => 'InvalidAction',:msg => 'The action specified is not valid.',:status => 400}, :invalid_argument => {:code => 'InvalidArgument',:msg => '%s', :status => 400}, :not_implemented => {:code => 'NotImplemented', :msg => 'Not implemented.',:status => 501}, :no_such_distribution => { :code => 'NoSuchDistribution', :msg => 'The specified distribution does not exist', :status => 404 }, :no_such_streaming_distribution => { :code => 'NoSuchStreamingDistribution', :msg => 'The specified streaming distribution does not exist', :status => 404 }, :no_such_invalidation => { :code => 'NoSuchInvalidation', :msg => 'The specified invalidation does not exist', :status => 404 }, :cname_exists => { :code => 'CNAMEAlreadyExists', :msg => 'One or more of the CNAMEs you provided are already associated with a different distribution', :status => 409 }, :illegal_update => { :code => 'IllegalUpdate', :msg => 'Origin and CallerReference cannot be updated.', :status => 400 }, :invalid_if_match_version => { :code => 'InvalidIfMatchVersion', :msg => 'The If-Match version is missing or not valid for the distribution.', :status => 400}, :distribution_not_disabled => { :code => 'DistributionNotDisabled', :msg => 'The distribution you are trying to delete has not been disabled.', :status => 409 }, } def self.error(code, argument = '') if error = CDN_ERRORS[code] raise_error(error[:status], error[:code], error[:msg] % argument) end end def self.raise_error(status, code, message='') response = Excon::Response.new response.status = status response.body = < Sender #{code} #{message}. #{Fog::AWS::Mock.request_id} EOF raise(Excon::Errors.status_error({:expects => 201}, response)) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Cloudfront # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # cdn = Fog::AWS::CDN.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * cdn object with connection to aws. 
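      #
      # ==== Usage sketch (illustrative only)
      # A minimal, hedged example of driving the service once constructed; it
      # assumes valid credentials and an existing distribution id, and uses only
      # requests declared for this service above.
      #   cdn = Fog::AWS::CDN.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   cdn.get_distribution_list                                # enumerate distributions
      #   cdn.post_invalidation(distribution_id, ['/index.html'])  # expire a cached path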
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.cdn' @connection_options = options[:connection_options] || {} @host = options[:host] || 'cloudfront.amazonaws.com' @path = options[:path] || '/' @persistent = options.fetch(:persistent, true) @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @version = options[:version] || '2010-11-01' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @hmac = Fog::HMAC.new('sha1', @aws_secret_access_key) end def request(params, &block) refresh_credentials_if_expired params[:headers] ||= {} params[:headers]['Date'] = Fog::Time.now.to_date_header params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['Authorization'] = "AWS #{@aws_access_key_id}:#{signature(params)}" params[:path] = "/#{@version}/#{params[:path]}" if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params, &block) end else _request(params, &block) end end def _request(params, &block) @connection.request(params, &block) end def signature(params) string_to_sign = params[:headers]['Date'] signed_string = @hmac.sign(string_to_sign) Base64.encode64(signed_string).chomp! end end end end # @deprecated module CDN # @deprecated class AWS < Fog::AWS::CDN # @deprecated # @overrides Fog::Service.new (from the fog-core gem) def self.new(*) Fog::Logger.deprecation 'Fog::CDN::AWS is deprecated, please use Fog::AWS::CDN.' 
super end end end end fog-aws-3.18.0/lib/fog/aws/cloud_formation.rb000066400000000000000000000120721437344660100210040ustar00rootroot00000000000000module Fog module AWS class CloudFormation < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :persistent, :region, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/cloud_formation' request :cancel_update_stack request :continue_update_rollback request :create_change_set request :create_stack request :update_stack request :delete_change_set request :delete_stack request :describe_account_limits request :describe_change_set request :describe_stack_events request :describe_stack_resource request :describe_stack_resources request :describe_stacks request :estimate_template_cost request :execute_change_set request :get_stack_policy request :get_template request :get_template_summary request :set_stack_policy request :signal_resource request :validate_template request :list_change_sets request :list_stacks request :list_stack_resources class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to CloudFormation # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # cf = CloudFormation.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * CloudFormation object with connection to AWS. 
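      #
      # ==== Usage sketch (illustrative only)
      # A hedged example of typical calls after construction; the stack name and
      # template path are placeholders, and create_stack/describe_stacks are the
      # requests declared for this service above.
      #   cf = Fog::AWS::CloudFormation.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   cf.create_stack('my-stack', 'TemplateBody' => File.read('template.json'))
      #   cf.describe_stacks('StackName' => 'my-stack').body['Stacks']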
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.cloud_formation' @connection_options = options[:connection_options] || {} options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "cloudformation.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'cloudformation') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :host => @host, :path => @path, :port => @port, :version => '2010-05-15', :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
raise case match[:code] when 'NotFound', 'ValidationError' Fog::AWS::CloudFormation::NotFound.slurp(error, match[:message]) else Fog::AWS::CloudFormation::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/cloud_watch.rb000066400000000000000000000123441437344660100201160ustar00rootroot00000000000000module Fog module AWS class CloudWatch < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/cloud_watch' request :list_metrics request :get_metric_statistics request :put_metric_data request :describe_alarms request :put_metric_alarm request :delete_alarms request :describe_alarm_history request :enable_alarm_actions request :disable_alarm_actions request :describe_alarms_for_metric request :set_alarm_state model_path 'fog/aws/models/cloud_watch' model :metric collection :metrics model :metric_statistic collection :metric_statistics model :alarm_datum collection :alarm_data model :alarm_history collection :alarm_histories model :alarm collection :alarms class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :metric_alarms => {} } end end end def self.reset @data = nil end def initialize(options={}) @aws_access_key_id = options[:aws_access_key_id] @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Cloudwatch # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # elb = CloudWatch.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1', etc. # # ==== Returns # * CloudWatch object with connection to AWS. 
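      #
      # ==== Usage sketch (illustrative only)
      # A hedged example of publishing and listing a custom metric; the namespace
      # and values are placeholders, and put_metric_data/list_metrics are requests
      # declared for this service above.
      #   cw = Fog::AWS::CloudWatch.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   cw.put_metric_data('MyApp', [{'MetricName' => 'JobsQueued', 'Unit' => 'Count', 'Value' => 3}])
      #   cw.list_metrics('Namespace' => 'MyApp')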
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.cloud_watch' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "monitoring.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'monitoring') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :host => @host, :path => @path, :port => @port, :version => '2010-08-01', :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) end end end end end fog-aws-3.18.0/lib/fog/aws/compute.rb000066400000000000000000000616031437344660100173000ustar00rootroot00000000000000module Fog module AWS class Compute < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class RequestLimitExceeded < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :endpoint, :region, :host, :path, :port, :scheme, :persistent, :aws_session_token, :use_iam_profile, :aws_credentials_expire_at, :instrumentor, :instrumentor_name, :version, :retry_request_limit_exceeded, :retry_jitter_magnitude secrets :aws_secret_access_key, :hmac, :aws_session_token model_path 'fog/aws/models/compute' model :address collection :addresses model :dhcp_options collection :dhcp_options model :flavor collection :flavors model :image collection :images model :internet_gateway collection :internet_gateways model :key_pair collection :key_pairs model :network_acl collection :network_acls model :network_interface collection :network_interfaces model :route_table collection :route_tables model :security_group collection :security_groups model :server collection :servers model :snapshot collection :snapshots model :tag collection :tags model :volume collection :volumes model :spot_request collection :spot_requests model :subnet collection :subnets model :vpc collection :vpcs request_path 'fog/aws/requests/compute' request :allocate_address request :assign_private_ip_addresses request :associate_address request :associate_dhcp_options request :attach_network_interface request :associate_route_table request :attach_classic_link_vpc request :attach_internet_gateway request :attach_volume request :authorize_security_group_egress request 
:authorize_security_group_ingress request :cancel_spot_instance_requests request :create_dhcp_options request :create_internet_gateway request :create_image request :create_key_pair request :create_network_acl request :create_network_acl_entry request :create_network_interface request :create_placement_group request :create_route request :create_route_table request :create_security_group request :create_snapshot request :create_spot_datafeed_subscription request :create_subnet request :create_tags request :create_volume request :create_vpc request :copy_image request :copy_snapshot request :delete_dhcp_options request :delete_internet_gateway request :delete_key_pair request :delete_network_acl request :delete_network_acl_entry request :delete_network_interface request :delete_security_group request :delete_placement_group request :delete_route request :delete_route_table request :delete_snapshot request :delete_spot_datafeed_subscription request :delete_subnet request :delete_tags request :delete_volume request :delete_vpc request :deregister_image request :describe_account_attributes request :describe_addresses request :describe_availability_zones request :describe_classic_link_instances request :describe_dhcp_options request :describe_images request :describe_image_attribute request :describe_instances request :describe_instance_attribute request :describe_internet_gateways request :describe_reserved_instances request :describe_instance_status request :describe_key_pairs request :describe_network_acls request :describe_network_interface_attribute request :describe_network_interfaces request :describe_route_tables request :describe_placement_groups request :describe_regions request :describe_reserved_instances_offerings request :describe_security_groups request :describe_snapshots request :describe_spot_datafeed_subscription request :describe_spot_instance_requests request :describe_spot_price_history request :describe_subnets request :describe_tags request :describe_volumes request :describe_volumes_modifications request :describe_volume_status request :describe_vpcs request :describe_vpc_attribute request :describe_vpc_classic_link request :describe_vpc_classic_link_dns_support request :detach_network_interface request :detach_internet_gateway request :detach_volume request :detach_classic_link_vpc request :disable_vpc_classic_link request :disable_vpc_classic_link_dns_support request :disassociate_address request :disassociate_route_table request :enable_vpc_classic_link request :enable_vpc_classic_link_dns_support request :get_console_output request :get_password_data request :import_key_pair request :modify_image_attribute request :modify_instance_attribute request :modify_instance_placement request :modify_network_interface_attribute request :modify_snapshot_attribute request :modify_subnet_attribute request :modify_volume request :modify_volume_attribute request :modify_vpc_attribute request :move_address_to_vpc request :purchase_reserved_instances_offering request :reboot_instances request :release_address request :replace_network_acl_association request :replace_network_acl_entry request :replace_route request :register_image request :request_spot_instances request :reset_network_interface_attribute request :restore_address_to_classic request :revoke_security_group_egress request :revoke_security_group_ingress request :run_instances request :terminate_instances request :start_instances request :stop_instances request :monitor_instances request :unmonitor_instances class 
InvalidURIError < Exception; end # deprecation class Real def modify_image_attributes(*params) Fog::Logger.deprecation("modify_image_attributes is deprecated, use modify_image_attribute instead [light_black](#{caller.first})[/]") modify_image_attribute(*params) end # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html def supported_platforms describe_account_attributes.body["accountAttributeSet"].find{ |h| h["attributeName"] == "supported-platforms" }["values"] end end class Mock MOCKED_TAG_TYPES = { 'acl' => 'network_acl', 'ami' => 'image', 'igw' => 'internet_gateway', 'i' => 'instance', 'rtb' => 'route_table', 'snap' => 'snapshot', 'vol' => 'volume', 'vpc' => 'vpc' } VPC_BLANK_VALUE = 'none' include Fog::AWS::CredentialFetcher::ConnectionMethods def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| owner_id = Fog::AWS::Mock.owner_id security_group_id = Fog::AWS::Mock.security_group_id region_hash[key] = { :deleted_at => {}, :addresses => {}, :images => {}, :image_launch_permissions => Hash.new do |permissions_hash, image_key| permissions_hash[image_key] = { :users => [] } end, :instances => {}, :reserved_instances => {}, :key_pairs => {}, :limits => { :addresses => 5 }, :owner_id => owner_id, :security_groups => { 'default' => { 'groupDescription' => 'default group', 'groupName' => 'default', 'groupId' => security_group_id, 'ipPermissionsEgress' => [], 'ipPermissions' => [ { 'groups' => [{'groupName' => 'default', 'userId' => owner_id, 'groupId' => security_group_id }], 'fromPort' => -1, 'toPort' => -1, 'ipProtocol' => 'icmp', 'ipRanges' => [], 'ipv6Ranges' => [] }, { 'groups' => [{'groupName' => 'default', 'userId' => owner_id, 'groupId' => security_group_id}], 'fromPort' => 0, 'toPort' => 65535, 'ipProtocol' => 'tcp', 'ipRanges' => [], 'ipv6Ranges' => [] }, { 'groups' => [{'groupName' => 'default', 'userId' => owner_id, 'groupId' => security_group_id}], 'fromPort' => 0, 'toPort' => 65535, 'ipProtocol' => 'udp', 'ipRanges' => [], 'ipv6Ranges' => [] } ], 'ownerId' => owner_id }, 'amazon-elb-sg' => { 'groupDescription' => 'amazon-elb-sg', 'groupName' => 'amazon-elb-sg', 'groupId' => 'amazon-elb', 'ownerId' => 'amazon-elb', 'ipPermissionsEgree' => [], 'ipPermissions' => [], }, }, :network_acls => {}, :network_interfaces => {}, :snapshots => {}, :volumes => {}, :internet_gateways => {}, :tags => {}, :tag_sets => Hash.new do |tag_set_hash, resource_id| tag_set_hash[resource_id] = {} end, :subnets => [], :vpcs => [], :dhcp_options => [], :route_tables => [], :account_attributes => [ { "values" => ["5"], "attributeName" => "vpc-max-security-groups-per-interface" }, { "values" => ["20"], "attributeName" => "max-instances" }, { "values" => ["EC2", "VPC"], "attributeName" => "supported-platforms" }, { "values" => [VPC_BLANK_VALUE], "attributeName" => "default-vpc" }, { "values" => ["5"], "attributeName" => "max-elastic-ips" }, { "values" => ["5"], "attributeName" => "vpc-max-elastic-ips" } ], :spot_requests => {}, :volume_modifications => {} } end end end def self.reset @data = nil end attr_accessor :region def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @aws_credentials_expire_at = Time::now + 20 setup_credentials(options) @region = options[:region] || 'us-east-1' if @endpoint = options[:endpoint] endpoint = URI.parse(@endpoint) @host = endpoint.host or raise InvalidURIError.new("could not parse endpoint: #{@endpoint}") @path = endpoint.path @port = endpoint.port @scheme = endpoint.scheme else @host = 
options[:host] || "ec2.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' end Fog::AWS.validate_region!(@region, @host) end def region_data self.class.data[@region] end def data self.region_data[@aws_access_key_id] end def reset_data self.region_data.delete(@aws_access_key_id) end def visible_images images = self.data[:images].values.reduce({}) do |h, image| h.update(image['imageId'] => image) end self.region_data.each do |aws_access_key_id, data| data[:image_launch_permissions].each do |image_id, list| if list[:users].include?(self.data[:owner_id]) images.update(image_id => data[:images][image_id]) end end end images end def supported_platforms describe_account_attributes.body["accountAttributeSet"].find{ |h| h["attributeName"] == "supported-platforms" }["values"] end def enable_ec2_classic set_supported_platforms(%w[EC2 VPC]) end def disable_ec2_classic set_supported_platforms(%w[VPC]) end def set_supported_platforms(values) self.data[:account_attributes].find { |h| h["attributeName"] == "supported-platforms" }["values"] = values end def default_vpc vpc_id = describe_account_attributes.body["accountAttributeSet"].find{ |h| h["attributeName"] == "default-vpc" }["values"].first vpc_id == VPC_BLANK_VALUE ? nil : vpc_id end def default_vpc=(value) self.data[:account_attributes].find { |h| h["attributeName"] == "default-vpc" }["values"] = [value] end def setup_default_vpc! return if default_vpc.present? disable_ec2_classic vpc_id = Fog::AWS::Mock.default_vpc_for(region) self.default_vpc = vpc_id data[:vpcs] << { 'vpcId' => vpc_id, 'state' => 'available', 'cidrBlock' => '172.31.0.0/16', 'dhcpOptionsId' => Fog::AWS::Mock.dhcp_options_id, 'tagSet' => {}, 'instanceTenancy' => 'default', 'enableDnsSupport' => true, 'enableDnsHostnames' => true, 'isDefault' => true } internet_gateway_id = Fog::AWS::Mock.internet_gateway_id data[:internet_gateways][internet_gateway_id] = { 'internetGatewayId' => internet_gateway_id, 'attachmentSet' => { 'vpcId' => vpc_id, 'state' => 'available' }, 'tagSet' => {} } data[:route_tables] << { 'routeTableId' => Fog::AWS::Mock.route_table_id, 'vpcId' => vpc_id, 'routes' => [ { 'destinationCidrBlock' => '172.31.0.0/16', 'gatewayId' => 'local', 'state' => 'active', 'origin' => 'CreateRouteTable' }, { 'destinationCidrBlock' => '0.0.0.0/0', 'gatewayId' => internet_gateway_id, 'state' => 'active', 'origin' => 'CreateRoute' } ] } describe_availability_zones.body['availabilityZoneInfo'].map { |z| z['zoneName'] }.each_with_index do |zone, i| data[:subnets] << { 'subnetId' => Fog::AWS::Mock.subnet_id, 'state' => 'available', 'vpcId' => vpc_id, 'cidrBlock' => "172.31.#{i}.0/16", 'availableIpAddressCount' => '251', 'availabilityZone' => zone, 'tagSet' => {}, 'mapPublicIpOnLaunch' => true, 'defaultForAz' => true } end end def tagged_resources(resources) Array(resources).map do |resource_id| if match = resource_id.match(/^(\w+)-[a-z0-9]{8,17}/i) id = match.captures.first else raise(Fog::Service::NotFound.new("Unknown resource id #{resource_id}")) end if MOCKED_TAG_TYPES.has_key? id type = MOCKED_TAG_TYPES[id] else raise(Fog::Service::NotFound.new("Mocking tags of resource #{resource_id} has not been implemented")) end case type when 'image' unless visible_images.has_key? resource_id raise(Fog::Service::NotFound.new("Cannot tag #{resource_id}, the image does not exist")) end when 'vpc' if self.data[:vpcs].select {|v| v['vpcId'] == resource_id }.empty? 
raise(Fog::Service::NotFound.new("Cannot tag #{resource_id}, the vpc does not exist")) end when 'route_table' unless self.data[:route_tables].detect { |r| r['routeTableId'] == resource_id } raise(Fog::Service::NotFound.new("Cannot tag #{resource_id}, the route table does not exist")) end else unless self.data[:"#{type}s"][resource_id] raise(Fog::Service::NotFound.new("Cannot tag #{resource_id}, the #{type} does not exist")) end end { 'resourceId' => resource_id, 'resourceType' => type } end end def apply_tag_filters(resources, filters, resource_id_key) tag_set_fetcher = lambda {|resource| self.data[:tag_sets][resource[resource_id_key]] } # tag-key: match resources tagged with this key (any value) if filters.key?('tag-key') value = filters.delete('tag-key') resources = resources.select{|r| tag_set_fetcher[r].key?(value)} end # tag-value: match resources tagged with this value (any key) if filters.key?('tag-value') value = filters.delete('tag-value') resources = resources.select{|r| tag_set_fetcher[r].values.include?(value)} end # tag:key: match resources tagged with a key-value pair. Value may be an array, which is OR'd. tag_filters = {} filters.keys.each do |key| tag_filters[key.gsub('tag:', '')] = filters.delete(key) if /^tag:/ =~ key end for tag_key, tag_value in tag_filters resources = resources.select{|r| [tag_value].flatten.include? tag_set_fetcher[r][tag_key]} end resources end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to EC2 # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # sdb = SimpleDB.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, # 'eu-west-1', 'us-east-1', and etc. # * aws_session_token<~String> - when using Session Tokens or Federated Users, a session_token must be presented # # ==== Returns # * EC2 object with connection to aws. 
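      #
      # ==== Usage sketch (illustrative only)
      # A hedged example using the server model/collection declared above; the
      # AMI id and flavor are placeholders.
      #   compute = Fog::AWS::Compute.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   server = compute.servers.create(:image_id => 'ami-xxxxxxxx', :flavor_id => 't2.micro')
      #   server.wait_for { ready? }
      #   server.public_ip_address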
attr_accessor :region def initialize(options={}) @connection_options = options[:connection_options] || {} @region = options[:region] ||= 'us-east-1' @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.compute' @version = options[:version] || '2016-11-15' @retry_request_limit_exceeded = options.fetch(:retry_request_limit_exceeded, true) @retry_jitter_magnitude = options[:retry_jitter_magnitude] || 0.1 @use_iam_profile = options[:use_iam_profile] setup_credentials(options) if @endpoint = options[:endpoint] endpoint = URI.parse(@endpoint) @host = endpoint.host or raise InvalidURIError.new("could not parse endpoint: #{@endpoint}") @path = endpoint.path @port = endpoint.port @scheme = endpoint.scheme else @host = options[:host] || "ec2.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' end Fog::AWS.validate_region!(@region, @host) @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'ec2') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, {'Content-Type' => 'application/x-www-form-urlencoded'}, { :host => @host, :path => @path, :port => @port, :version => @version, :signer => @signer, :aws_session_token => @aws_session_token, :method => "POST" } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser, retries = 0) max_retries = 10 begin @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? raise case match[:code] when 'NotFound', 'Unknown' Fog::AWS::Compute::NotFound.slurp(error, match[:message]) when 'RequestLimitExceeded' if @retry_request_limit_exceeded && retries < max_retries jitter = rand * 10 * @retry_jitter_magnitude wait_time = ((2.0 ** (1.0 + retries) * 100) / 1000.0) + jitter Fog::Logger.warning "Waiting #{wait_time} seconds to retry." sleep(wait_time) retries += 1 retry elsif @retry_request_limit_exceeded Fog::AWS::Compute::RequestLimitExceeded.slurp(error, "Max retries exceeded (#{max_retries}) #{match[:code]} => #{match[:message]}") else Fog::AWS::Compute::RequestLimitExceeded.slurp(error, "#{match[:code]} => #{match[:message]}") end else Fog::AWS::Compute::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end # @deprecated module Compute # @deprecated class AWS < Fog::AWS::Compute # @deprecated # @overrides Fog::Service.new (from the fog-core gem) def self.new(*) Fog::Logger.deprecation 'Fog::Compute::AWS is deprecated, please use Fog::AWS::Compute.' 
super end end end end fog-aws-3.18.0/lib/fog/aws/credential_fetcher.rb000066400000000000000000000152571437344660100214420ustar00rootroot00000000000000# frozen_string_literal: true require 'securerandom' module Fog module AWS module CredentialFetcher INSTANCE_METADATA_HOST = "http://169.254.169.254" INSTANCE_METADATA_TOKEN = "/latest/api/token" INSTANCE_METADATA_PATH = "/latest/meta-data/iam/security-credentials/" INSTANCE_METADATA_AZ = "/latest/meta-data/placement/availability-zone/" CONTAINER_CREDENTIALS_HOST = "http://169.254.170.2" module ServiceMethods def fetch_credentials(options) if options[:use_iam_profile] && Fog.mocking? return Fog::AWS::Compute::Mock.data[:iam_role_based_creds] end if options[:use_iam_profile] begin role_data = nil region = options[:region] || ENV["AWS_DEFAULT_REGION"] if ENV["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"] connection = options[:connection] || Excon.new(CONTAINER_CREDENTIALS_HOST) credential_path = options[:credential_path] || ENV["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"] role_data = connection.get(:path => credential_path, :idempotent => true, :expects => 200).body session = Fog::JSON.decode(role_data) if region.nil? connection = options[:metadata_connection] || Excon.new(INSTANCE_METADATA_HOST) token_header = fetch_credentials_token_header(connection, options[:disable_imds_v2]) region = connection.get(:path => INSTANCE_METADATA_AZ, :idempotent => true, :expects => 200, :headers => token_header).body[0..-2] end elsif ENV["AWS_WEB_IDENTITY_TOKEN_FILE"] params = { :Action => "AssumeRoleWithWebIdentity", :RoleArn => options[:role_arn] || ENV.fetch("AWS_ROLE_ARN"), :RoleSessionName => options[:role_session_name] || ENV["AWS_ROLE_SESSION_NAME"] || "fog-aws-#{SecureRandom.hex}", :WebIdentityToken => File.read(options[:aws_web_identity_token_file] || ENV.fetch("AWS_WEB_IDENTITY_TOKEN_FILE")), :Version => "2011-06-15", } sts_endpoint = if ENV["AWS_STS_REGIONAL_ENDPOINTS"] == "regional" && region "https://sts.#{region}.amazonaws.com" else "https://sts.amazonaws.com" end connection = options[:connection] || Excon.new(sts_endpoint, :query => params) document = Nokogiri::XML(connection.get(:idempotent => true, :expects => 200).body) session = { "AccessKeyId" => document.css("AccessKeyId").children.text, "SecretAccessKey" => document.css("SecretAccessKey").children.text, "Token" => document.css("SessionToken").children.text, "Expiration" => document.css("Expiration").children.text, } if region.nil? 
connection = options[:metadata_connection] || Excon.new(INSTANCE_METADATA_HOST) token_header = fetch_credentials_token_header(connection, options[:disable_imds_v2]) region = connection.get(:path => INSTANCE_METADATA_AZ, :idempotent => true, :expects => 200, :headers => token_header).body[0..-2] end else connection = options[:connection] || Excon.new(INSTANCE_METADATA_HOST) token_header = fetch_credentials_token_header(connection, options[:disable_imds_v2]) role_name = connection.get(:path => INSTANCE_METADATA_PATH, :idempotent => true, :expects => 200, :headers => token_header).body role_data = connection.get(:path => INSTANCE_METADATA_PATH+role_name, :idempotent => true, :expects => 200, :headers => token_header).body session = Fog::JSON.decode(role_data) region ||= connection.get(:path => INSTANCE_METADATA_AZ, :idempotent => true, :expects => 200, :headers => token_header).body[0..-2] end credentials = {} credentials[:aws_access_key_id] = session['AccessKeyId'] credentials[:aws_secret_access_key] = session['SecretAccessKey'] credentials[:aws_session_token] = session['Token'] credentials[:aws_credentials_expire_at] = Time.xmlschema session['Expiration'] # set region by default to the one the instance is in. credentials[:region] = region credentials[:sts_endpoint] = sts_endpoint if sts_endpoint #these indicate the metadata service is unavailable or has no profile setup credentials rescue Excon::Error => e Fog::Logger.warning("Unable to fetch credentials: #{e.message}") super end else super end end def fetch_credentials_token_header(connection, disable_imds_v2) return nil if disable_imds_v2 token = connection.put( :path => INSTANCE_METADATA_TOKEN, :idempotent => true, :expects => 200, :retry_interval => 1, :retry_limit => 3, :read_timeout => 1, :write_timeout => 1, :connect_timeout => 1, :headers => { "X-aws-ec2-metadata-token-ttl-seconds" => "300" } ).body { "X-aws-ec2-metadata-token" => token } rescue Excon::Error nil end end module ConnectionMethods def refresh_credentials_if_expired refresh_credentials if credentials_expired? end private # When defined, 'aws_credentials_refresh_threshold_seconds' controls # when the credential needs to be refreshed, expressed in seconds before # the current credential's expiration time def credentials_refresh_threshold @aws_credentials_refresh_threshold_seconds || 15 end def credentials_expired? @use_iam_profile && (!@aws_credentials_expire_at || (@aws_credentials_expire_at && Fog::Time.now > @aws_credentials_expire_at - credentials_refresh_threshold)) #new credentials become available from around 5 minutes before expiration time end def refresh_credentials if @use_iam_profile new_credentials = service.fetch_credentials :use_iam_profile => @use_iam_profile, :region => @region if new_credentials.any? 
setup_credentials new_credentials return true else false end else false end end end end end end fog-aws-3.18.0/lib/fog/aws/data_pipeline.rb000066400000000000000000000141211437344660100204130ustar00rootroot00000000000000module Fog module AWS class DataPipeline < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/data_pipeline' request :activate_pipeline request :create_pipeline request :deactivate_pipeline request :delete_pipeline request :describe_pipelines request :list_pipelines request :put_pipeline_definition request :get_pipeline_definition request :query_objects request :describe_objects model_path 'fog/aws/models/data_pipeline' model :pipeline collection :pipelines class Mock include Fog::AWS::CredentialFetcher::ConnectionMethods def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :pipelines => {}, :pipeline_definitions => {}, } end end end def self.reset @data = nil end def data self.class.data[@region][@aws_access_key_id] end def reset self.class.reset end attr_accessor :region def initialize(options={}) @region = options[:region] || "us-east-1" @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] end def stringify_keys(object) case object when Hash object.inject({}) { |h,(k,v)| h[k.to_s] = stringify_keys(v); h } when Array object.map { |v| stringify_keys(v) } else object end end def find_pipeline(id) pipeline = self.data[:pipelines].values.detect { |p| p["pipelineId"] == id } if pipeline.nil? || pipeline[:deleted] raise Fog::AWS::DataPipeline::NotFound.new("Pipeline with id: #{id} does not exist") end pipeline end end class Real attr_reader :region include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to DataPipeline # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # datapipeline = DataPipeline.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1' and etc. # # ==== Returns # * DataPipeline object with connection to AWS. 
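      #
      # ==== Usage sketch (illustrative only)
      # A hedged example of enumerating pipelines with the requests declared
      # above; response keys follow the Data Pipeline JSON API.
      #   dp = Fog::AWS::DataPipeline.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   ids = dp.list_pipelines.body['pipelineIdList'].map { |p| p['id'] }
      #   dp.describe_pipelines(ids) unless ids.empty?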
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.data_pipeline' @connection_options = options[:connection_options] || {} @version = '2012-10-29' @region = options[:region] || 'us-east-1' @host = options[:host] || "datapipeline.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def owner_id @owner_id ||= security_groups.get('default').owner_id end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'datapipeline') end def request(params) refresh_credentials_if_expired # Params for all DataPipeline requests params.merge!({ :expects => 200, :method => :post, :path => '/', }) date = Fog::Time.now params[:headers] = { 'Date' => date.to_date_header, 'Host' => @host, 'X-Amz-Date' => date.to_iso8601_basic, 'Content-Type' => 'application/x-amz-json-1.1', 'Content-Length' => params[:body].bytesize.to_s, }.merge!(params[:headers] || {}) params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['Authorization'] = @signer.sign(params, date) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params) end else _request(params) end end def _request(params) response = @connection.request(params) unless response.body.empty? response.body = Fog::JSON.decode(response.body) end response rescue Excon::Error::BadRequest => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
if %w(PipelineNotFoundException PipelineDeletedException).include?(match[:code]) raise Fog::AWS::DataPipeline::NotFound.slurp(error, match[:message]) end end end end end end fog-aws-3.18.0/lib/fog/aws/dns.rb000066400000000000000000000273471437344660100164170ustar00rootroot00000000000000module Fog module AWS class DNS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :version, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name model_path 'fog/aws/models/dns' model :record collection :records model :zone collection :zones request_path 'fog/aws/requests/dns' request :create_health_check request :create_hosted_zone request :delete_health_check request :get_health_check request :get_hosted_zone request :delete_hosted_zone request :list_health_checks request :list_hosted_zones request :change_resource_record_sets request :list_resource_record_sets request :get_change class Mock include Fog::AWS::CredentialFetcher::ConnectionMethods def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :buckets => {}, :limits => { :duplicate_domains => 5 }, :zones => {}, :changes => {} } end end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @region = options[:region] end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end def signature(params) "foo" end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Route 53 DNS service # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # dns = Fog::AWS::DNS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * dns object with connection to aws. 
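      #
      # ==== Usage sketch (illustrative only)
      # A hedged example using the zone/record models declared above; the domain
      # and address are placeholders.
      #   dns = Fog::AWS::DNS.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key
      #   )
      #   zone = dns.zones.create(:domain => 'example.com')
      #   zone.records.create(:name => 'www.example.com', :type => 'A', :ttl => 300, :value => ['203.0.113.10'])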
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.dns' @connection_options = options[:connection_options] || {} @host = options[:host] || 'route53.amazonaws.com' @path = options[:path] || '/' @persistent = options.fetch(:persistent, true) @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @version = options[:version] || '2013-04-01' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @hmac = Fog::HMAC.new('sha1', @aws_secret_access_key) end def request(params, &block) refresh_credentials_if_expired params[:headers] ||= {} params[:headers]['Date'] = Fog::Time.now.to_date_header params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['X-Amzn-Authorization'] = "AWS3-HTTPS AWSAccessKeyId=#{@aws_access_key_id},Algorithm=HmacSHA1,Signature=#{signature(params)}" params[:path] = "/#{@version}/#{params[:path]}" if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params, &block) end else _request(params, &block) end end def _request(params, &block) @connection.request(params, &block) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) if match.empty? raise else raise case match[:code] when 'NoSuchHostedZone', 'NoSuchChange' then Fog::AWS::DNS::NotFound.slurp(error, match[:message]) else Fog::AWS::DNS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end def signature(params) string_to_sign = params[:headers]['Date'] signed_string = @hmac.sign(string_to_sign) Base64.encode64(signed_string).chomp! 
end end def self.hosted_zone_for_alias_target(dns_name) hosted_zones = if dns_name.match(/^dualstack\./) elb_dualstack_hosted_zone_mapping else elb_hosted_zone_mapping end Hash[hosted_zones.select { |k, _| dns_name =~ /\A.+\.#{k}\.elb\.amazonaws\.com\.?\z/ }].values.last end def self.elb_hosted_zone_mapping @elb_hosted_zone_mapping ||= { "ap-northeast-1" => "Z2YN17T5R711GT", "ap-southeast-1" => "Z1WI8VXHPB1R38", "ap-southeast-2" => "Z2999QAZ9SRTIC", "eu-west-1" => "Z3NF1Z3NOM5OY2", "eu-central-1" => "Z215JYRZR1TBD5", "sa-east-1" => "Z2ES78Y61JGQKS", "us-east-1" => "Z3DZXE0Q79N41H", "us-west-1" => "Z1M58G0W56PQJA", "us-west-2" => "Z33MTJ483KN6FU", } end # See https://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region # This needs to be kept in sync manually sadly for now as seemingly this data is not available via an API def self.elb_dualstack_hosted_zone_mapping @elb_dualstack_hosted_zone_mapping ||= { "ap-northeast-1" => "Z14GRHDCWA56QT", "ap-northeast-2" => "ZWKZPGTI48KDX", "ap-northeast-3" => "Z5LXEXXYW11ES", "ap-south-1" => "ZP97RAFLXTNZK", "ap-southeast-1" => "Z1LMS91P8CMLE5", "ap-southeast-2" => "Z1GM3OXH4ZPM65", "ca-central-1" => "ZQSVJUPU6J1EY", "eu-central-1" => "Z215JYRZR1TBD5", "eu-west-1" => "Z32O12XQLNTSW2", "eu-west-2" => "ZHURV8PSTC4K8", "eu-west-3" => "Z3Q77PNBQS71R4", "us-east-1" => "Z35SXDOTRQ7X7K", "us-east-2" => "Z3AADJGX6KTTL2", "us-west-1" => "Z368ELLRRE2KJ0", "us-west-2" => "Z1H1FL5HABSF5", "sa-east-1" => "Z2P70J7HTTTPLU", } end # Returns the xml request for a given changeset def self.change_resource_record_sets_data(zone_id, change_batch, version, options = {}) # AWS methods return zone_ids that looks like '/hostedzone/id'. Let the caller either use # that form or just the actual id (which is what this request needs) zone_id = zone_id.sub('/hostedzone/', '') optional_tags = '' options.each do |option, value| case option when :comment optional_tags += "#{value}" end end #build XML if change_batch.count > 0 changes = "#{optional_tags}" change_batch.each do |change_item| action_tag = %Q{#{change_item[:action]}} name_tag = %Q{#{change_item[:name]}} type_tag = %Q{#{change_item[:type]}} # TTL must be omitted if using an alias record ttl_tag = '' ttl_tag += %Q{#{change_item[:ttl]}} unless change_item[:alias_target] weight_tag = '' set_identifier_tag = '' region_tag = '' if change_item[:set_identifier] set_identifier_tag += %Q{#{change_item[:set_identifier]}} if change_item[:weight] # Weighted Record weight_tag += %Q{#{change_item[:weight]}} elsif change_item[:region] # Latency record region_tag += %Q{#{change_item[:region]}} end end failover_tag = if change_item[:failover] %Q{#{change_item[:failover]}} end geolocation_tag = if change_item[:geo_location] xml_geo = change_item[:geo_location].map { |k,v| "<#{k}>#{v}" }.join %Q{#{xml_geo}} end resource_records = change_item[:resource_records] || [] resource_record_tags = '' resource_records.each do |record| resource_record_tags += %Q{#{record}} end # ResourceRecords must be omitted if using an alias record resource_tag = '' resource_tag += %Q{#{resource_record_tags}} if resource_records.any? alias_target_tag = '' if change_item[:alias_target] # Accept either underscore or camel case for hash keys. 
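            # When :hosted_zone_id / :HostedZoneId is not supplied, the code below
            # falls back to hosted_zone_for_alias_target, which looks the zone up
            # in the ELB hosted-zone tables defined earlier in this file.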
dns_name = change_item[:alias_target][:dns_name] || change_item[:alias_target][:DNSName] hosted_zone_id = change_item[:alias_target][:hosted_zone_id] || change_item[:alias_target][:HostedZoneId] || AWS.hosted_zone_for_alias_target(dns_name) evaluate_target_health = change_item[:alias_target][:evaluate_target_health] || change_item[:alias_target][:EvaluateTargetHealth] || false evaluate_target_health_xml = !evaluate_target_health.nil? ? %Q{#{evaluate_target_health}} : '' alias_target_tag += %Q{#{hosted_zone_id}#{dns_name}#{evaluate_target_health_xml}} end health_check_id_tag = if change_item[:health_check_id] %Q{#{change_item[:health_check_id]}} end change_tags = %Q{#{action_tag}#{name_tag}#{type_tag}#{set_identifier_tag}#{weight_tag}#{region_tag}#{failover_tag}#{geolocation_tag}#{ttl_tag}#{resource_tag}#{alias_target_tag}#{health_check_id_tag}} changes += change_tags end changes += '' end %Q{#{changes}} end end end # @deprecated module DNS # @deprecated class AWS < Fog::AWS::DNS # @deprecated # @overrides Fog::Service.new (from the fog-core gem) def self.new(*) Fog::Logger.deprecation 'Fog::DNS::AWS is deprecated, please use Fog::AWS::DNS.' super end end end end fog-aws-3.18.0/lib/fog/aws/dynamodb.rb000066400000000000000000000110741437344660100174160ustar00rootroot00000000000000module Fog module AWS class DynamoDB < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :aws_session_token, :host, :path, :port, :scheme, :persistent, :region, :use_iam_profile, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/dynamodb' request :batch_get_item request :batch_write_item request :create_table request :delete_item request :delete_table request :describe_table request :get_item request :list_tables request :put_item request :query request :scan request :update_item request :update_table class Mock def self.data @data ||= Hash.new do |hash, key| hash[key] = { :domains => {} } end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) end def data self.class.data[@aws_access_key_id] end def reset_data self.class.data.delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to DynamoDB # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # ddb = DynamoDB.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. 
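        #
        # ==== Example (illustrative sketch; not exhaustive)
        # ddb.list_tables
        # # response bodies are JSON-decoded Hashes (see Real#_request below)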
# # ==== Returns # * DynamoDB object with connection to aws def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.dynamodb' @host = options[:host] || "dynamodb.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || '443' @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'dynamodb') end def reload @connection.reset end def request(params) refresh_credentials_if_expired # defaults for all dynamodb requests params.merge!({ :expects => 200, :method => :post, :path => '/' }) # setup headers and sign with signature v4 date = Fog::Time.now params[:headers] = { 'Content-Type' => 'application/x-amz-json-1.0', 'Date' => date.to_iso8601_basic, 'Host' => @host, }.merge!(params[:headers]) params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['Authorization'] = @signer.sign(params, date) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params) end else _request(params) end end def _request(params) response = @connection.request(params) unless response.body.empty? response.body = Fog::JSON.decode(response.body) end response end end end end end fog-aws-3.18.0/lib/fog/aws/ecs.rb000066400000000000000000000136641437344660100164020ustar00rootroot00000000000000module Fog module AWS class ECS < Fog::Service requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :version, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/ecs' request :list_clusters request :create_cluster request :delete_cluster request :describe_clusters request :list_task_definitions request :describe_task_definition request :deregister_task_definition request :register_task_definition request :list_task_definition_families request :list_services request :describe_services request :create_service request :delete_service request :update_service request :list_container_instances request :describe_container_instances request :deregister_container_instance request :list_tasks request :describe_tasks request :run_task request :start_task request :stop_task class Real attr_reader :region include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to ECS # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # ecs = ECS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1' and etc. 
# # ==== Returns # * ECS object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.ecs' @connection_options = options[:connection_options] || {} @region = options[:region] || 'us-east-1' @host = options[:host] || "ecs.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version = options[:version] || '2014-11-13' setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'ecs') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, {'Content-Type' => 'application/x-www-form-urlencoded' }, { :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port, :version => @version, :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
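          # match is the Hash extracted by Fog::AWS::Errors.match_error, e.g.
          # (hypothetical values): { :code => 'ClusterNotFoundException', :message => '...' }.
          # Only a code of 'NotFound' gets its own exception class below; every other
          # code is re-raised as a generic Fog::AWS::ECS::Error carrying code and message.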
raise case match[:code] when 'NotFound' Fog::AWS::ECS::NotFound.slurp(error, match[:message]) else Fog::AWS::ECS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :clusters => [], :task_definitions => [], :services => [], :container_instances => [], :tasks => [] } end end end def self.reset @data = nil end attr_accessor :region def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) setup_credentials(options) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end end end end fog-aws-3.18.0/lib/fog/aws/efs.rb000066400000000000000000000147051437344660100164020ustar00rootroot00000000000000module Fog module AWS class EFS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class FileSystemInUse < Fog::Errors::Error; end class IncorrectFileSystemLifeCycleState < Fog::Errors::Error; end class InvalidSubnet < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :instrumentor, :instrumentor_name model_path 'fog/aws/models/efs' request_path 'fog/aws/requests/efs' model :file_system model :mount_target collection :file_systems collection :mount_targets request :create_file_system request :create_mount_target request :delete_file_system request :delete_mount_target request :describe_file_systems request :describe_mount_target_security_groups request :describe_mount_targets request :modify_mount_target_security_groups class Mock include Fog::AWS::CredentialFetcher::ConnectionMethods def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :file_systems => {}, :mount_targets => {}, :security_groups => {} } end end end def self.reset @data = nil end def data self.class.data[@region][@aws_access_key_id] end def reset self.class.reset end attr_accessor :region def initialize(options={}) @region = options[:region] || "us-east-1" @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] end def mock_compute @mock_compute ||= Fog::AWS::Compute.new(:aws_access_key_id => @aws_access_key_id, :aws_secret_access_key => @aws_secret_access_key, :region => @region) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods def initialize(options={}) @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.efs' @region = options[:region] || 'us-east-1' @host = options[:host] || "elasticfilesystem.#{@region}.amazonaws.com" @port = options[:port] || 443 @scheme = options[:scheme] || "https" @persistent = options[:persistent] || false @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version = options[:version] || '2015-02-01' @path = options[:path] || "/#{@version}/" setup_credentials(options) end def reload @connection.reset end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] 
@aws_credentials_expire_at = options[:aws_credentials_expire_at] #global services that have no region are signed with the us-east-1 region #the only exception is GovCloud, which requires the region to be explicitly specified as us-gov-west-1 @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'elasticfilesystem') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) expects = params.delete(:expects) || 200 path = @path + params.delete(:path) method = params.delete(:method) || 'GET' request_body = Fog::JSON.encode(params) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => "application/x-amz-json-1.0", }, { :host => @host, :path => path, :port => @port, :version => @version, :signer => @signer, :aws_session_token => @aws_session_token, :method => method, :body => request_body } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser, method, path, expects) end else _request(body, headers, idempotent, parser, method, path, expects) end end def _request(body, headers, idempotent, parser, method, path, expects) response = @connection.request({ :body => body, :expects => expects, :idempotent => idempotent, :headers => headers, :method => method, :parser => parser, :path => path }) unless response.body.empty? response.body = Fog::JSON.decode(response.body) end response rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? if match[:code] == "IncorrectFileSystemLifeCycleState" raise Fog::AWS::EFS::IncorrectFileSystemLifeCycleState.slurp(error, match[:message]) elsif match[:code] == 'FileSystemInUse' raise Fog::AWS::EFS::FileSystemInUse.slurp(error, match[:message]) elsif match[:code].match(/(FileSystem|MountTarget)NotFound/) raise Fog::AWS::EFS::NotFound.slurp(error, match[:message]) end raise case match[:message] when /invalid ((file system)|(mount target)|(security group)) id/i Fog::AWS::EFS::NotFound.slurp(error, match[:message]) when /invalid subnet id/i Fog::AWS::EFS::InvalidSubnet.slurp(error, match[:message]) else Fog::AWS::EFS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/elasticache.rb000066400000000000000000000202331437344660100200630ustar00rootroot00000000000000module Fog module AWS class Elasticache < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class IdentifierTaken < Fog::Errors::Error; end class InvalidInstance < Fog::Errors::Error; end class AuthorizationAlreadyExists < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/elasticache' request :create_cache_cluster request :delete_cache_cluster request :describe_cache_clusters request :modify_cache_cluster request :reboot_cache_cluster request :create_cache_parameter_group request :delete_cache_parameter_group request :describe_cache_parameter_groups request :modify_cache_parameter_group request :reset_cache_parameter_group request :describe_engine_default_parameters request :describe_cache_parameters request :describe_reserved_cache_nodes request :create_cache_security_group request :delete_cache_security_group request :describe_cache_security_groups request 
:authorize_cache_security_group_ingress request :revoke_cache_security_group_ingress request :create_cache_subnet_group request :describe_cache_subnet_groups request :delete_cache_subnet_group request :describe_events model_path 'fog/aws/models/elasticache' model :cluster collection :clusters model :security_group collection :security_groups model :parameter_group collection :parameter_groups model :subnet_group collection :subnet_groups class Real include Fog::AWS::CredentialFetcher::ConnectionMethods def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.elasticache' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "elasticache.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new( "#{@scheme}://#{@host}:#{@port}#{@path}", options[:persistent] ) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'elasticache') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :method => 'POST', :host => @host, :path => @path, :port => @port, :version => '2013-06-15' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
raise case match[:code] when 'CacheSecurityGroupNotFound', 'CacheParameterGroupNotFound', 'CacheClusterNotFound' Fog::AWS::Elasticache::NotFound.slurp(error, match[:message]) when 'CacheSecurityGroupAlreadyExists' Fog::AWS::Elasticache::IdentifierTaken.slurp(error, match[:message]) when 'InvalidParameterValue' Fog::AWS::Elasticache::InvalidInstance.slurp(error, match[:message]) else Fog::AWS::Elasticache::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end class Mock include Fog::AWS::CredentialFetcher::ConnectionMethods def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :clusters => {}, # cache cluster data, indexed by cluster ID :security_groups => {}, # security groups :subnet_groups => {}, :parameter_groups => {"default.memcached1.4" => { "CacheParameterGroupFamily"=>"memcached1.4", "Description"=>"Default parameter group for memcached1.4", "CacheParameterGroupName"=>"default.memcached1.4" }, "default.redis2.6" => {"CacheParameterGroupFamily"=>"redis2.6", "Description"=>"Default parameter group for redis2.6", "CacheParameterGroupName"=>"default.redis2.6" } } } end end end def self.reset @data = nil end def initialize(options={}) @aws_credentials_expire_at = Time::now + 20 setup_credentials(options) @region = options[:region] || 'us-east-1' unless ['ap-south-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-central-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ca-central-1', 'sa-east-1', 'cn-north-1', 'cn-northwest-1', 'ap-east-1', 'us-gov-west-1'].include?(@region) raise ArgumentError, "Unknown region: #{@region.inspect}" end end def region_data self.class.data[@region] end def data self.region_data[@aws_access_key_id] end def reset_data self.region_data.delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end # returns an Array of (Mock) elasticache nodes, representated as Hashes def create_cache_nodes(cluster_id, num_nodes = 1, port = '11211') (1..num_nodes).map do |node_number| node_id = "%04d" % node_number { # each hash represents a cache cluster node "CacheNodeId" => node_id, "Port" => port, "ParameterGroupStatus" => "in-sync", "CacheNodeStatus" => "available", "CacheNodeCreateTime" => Time.now.utc.to_s, "Address" => "#{cluster_id}.#{node_id}.use1.cache.amazonaws.com" } end end end end end end fog-aws-3.18.0/lib/fog/aws/elb.rb000066400000000000000000000220751437344660100163660ustar00rootroot00000000000000module Fog module AWS class ELB < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class DuplicatePolicyName < Fog::Errors::Error; end class IdentifierTaken < Fog::Errors::Error; end class InvalidInstance < Fog::Errors::Error; end class InvalidConfigurationRequest < Fog::Errors::Error; end class PolicyNotFound < Fog::Errors::Error; end class PolicyTypeNotFound < Fog::Errors::Error; end class Throttled < Fog::Errors::Error; end class TooManyPolicies < Fog::Errors::Error; end class ValidationError < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :version, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/elb' request :configure_health_check request :create_app_cookie_stickiness_policy request :create_lb_cookie_stickiness_policy request :create_load_balancer 
request :create_load_balancer_listeners request :create_load_balancer_policy request :delete_load_balancer request :delete_load_balancer_listeners request :delete_load_balancer_policy request :deregister_instances_from_load_balancer request :describe_instance_health request :describe_load_balancers request :describe_load_balancer_attributes request :describe_load_balancer_policies request :describe_load_balancer_policy_types request :disable_availability_zones_for_load_balancer request :enable_availability_zones_for_load_balancer request :modify_load_balancer_attributes request :register_instances_with_load_balancer request :set_load_balancer_listener_ssl_certificate request :set_load_balancer_policies_of_listener request :attach_load_balancer_to_subnets request :detach_load_balancer_from_subnets request :apply_security_groups_to_load_balancer request :set_load_balancer_policies_for_backend_server request :add_tags request :describe_tags request :remove_tags model_path 'fog/aws/models/elb' model :load_balancer collection :load_balancers model :policy collection :policies model :listener collection :listeners model :backend_server_description collection :backend_server_descriptions class Mock require 'fog/aws/elb/policy_types' def self.data @data ||= Hash.new do |hash, region| owner_id = Fog::AWS::Mock.owner_id hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :owner_id => owner_id, :load_balancers => {}, :policy_types => Fog::AWS::ELB::Mock::POLICY_TYPES } end end end def self.dns_name(name, region) "#{name}-#{Fog::Mock.random_hex(8)}.#{region}.elb.amazonaws.com" end def self.reset @data = nil end attr_reader :region def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) Fog::AWS.validate_region!(@region) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'elasticloadbalancing') end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to ELB # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # elb = ELB.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1', etc. # # ==== Returns # * ELB object with connection to AWS. 
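        #
        # ==== Example (illustrative sketch; credentials and region are placeholders)
        # elb = Fog::AWS::ELB.new(
        #   :aws_access_key_id => your_aws_access_key_id,
        #   :aws_secret_access_key => your_aws_secret_access_key,
        #   :region => 'us-east-1'
        # )
        # elb.describe_load_balancers          # raw request interface
        # elb.load_balancers                   # model collection declared above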
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.elb' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "elasticloadbalancing.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version ||= options[:version] || '2012-06-01' setup_credentials(options) end attr_reader :region def reload @connection.reset end private def setup_credentials(options={}) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'elasticloadbalancing') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port, :version => @version, :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? raise case match[:code] when 'CertificateNotFound' Fog::AWS::IAM::NotFound.slurp(error, match[:message]) when 'DuplicateLoadBalancerName' Fog::AWS::ELB::IdentifierTaken.slurp(error, match[:message]) when 'DuplicatePolicyName' Fog::AWS::ELB::DuplicatePolicyName.slurp(error, match[:message]) when 'InvalidInstance' Fog::AWS::ELB::InvalidInstance.slurp(error, match[:message]) when 'InvalidConfigurationRequest' # when do they fucking use this shit? 
Fog::AWS::ELB::InvalidConfigurationRequest.slurp(error, match[:message]) when 'LoadBalancerNotFound' Fog::AWS::ELB::NotFound.slurp(error, match[:message]) when 'PolicyNotFound' Fog::AWS::ELB::PolicyNotFound.slurp(error, match[:message]) when 'PolicyTypeNotFound' Fog::AWS::ELB::PolicyTypeNotFound.slurp(error, match[:message]) when 'Throttling' Fog::AWS::ELB::Throttled.slurp(error, match[:message]) when 'TooManyPolicies' Fog::AWS::ELB::TooManyPolicies.slurp(error, match[:message]) when 'ValidationError' Fog::AWS::ELB::ValidationError.slurp(error, match[:message]) else Fog::AWS::ELB::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/elb/000077500000000000000000000000001437344660100160335ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/elb/policy_types.rb000066400000000000000000000301311437344660100211010ustar00rootroot00000000000000class Fog::AWS::ELB::Mock POLICY_TYPES = [{ "Description" => "", "PolicyAttributeTypeDescriptions" => [{ "AttributeName"=>"CookieName", "AttributeType"=>"String", "Cardinality"=>"ONE", "DefaultValue"=>"", "Description"=>"" }], "PolicyTypeName"=>"AppCookieStickinessPolicyType" }, { "Description" => "", "PolicyAttributeTypeDescriptions" => [{ "AttributeName"=>"CookieExpirationPeriod", "AttributeType"=>"String", "Cardinality"=>"ONE", "DefaultValue"=>"", "Description"=>"" }], "PolicyTypeName"=>"LBCookieStickinessPolicyType" }, { "Description" => "Policy containing a list of public keys to accept when authenticating the back-end server(s). This policy cannot be applied directly to back-end servers or listeners but must be part of a BackendServerAuthenticationPolicyType.", "PolicyAttributeTypeDescriptions" => [{ "AttributeName"=>"PublicKey", "AttributeType"=>"String", "Cardinality"=>"ONE", "DefaultValue"=>"", "Description"=>"" }], "PolicyTypeName"=>"PublicKeyPolicyType" }, { "Description" => "Listener policy that defines the ciphers and protocols that will be accepted by the load balancer. 
This policy can be associated only with HTTPS/SSL listeners.", "PolicyAttributeTypeDescriptions" => [{ "AttributeName"=>"Protocol-SSLv2", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EDH-DSS-DES-CBC3-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-RSA-CAMELLIA128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DES-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-RC4-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-CAMELLIA128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"PSK-RC4-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"PSK-AES128-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-EDH-RSA-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"CAMELLIA128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-DSS-AES128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EDH-RSA-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-RSA-SEED-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-DES-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-RSA-CAMELLIA256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-DES-CBC3-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DES-CBC3-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-RC2-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EDH-DSS-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"PSK-AES256-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-AES256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-DES-CBC3-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"AES128-SHA", "AttributeType"=>"Boolean", 
"Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"DHE-DSS-SEED-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-CAMELLIA256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-RC4-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EDH-RSA-DES-CBC3-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-DES-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"Protocol-TLSv1", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"PSK-3DES-EDE-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"SEED-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-DSS-CAMELLIA256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"IDEA-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"RC2-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-AES128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"RC4-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"AES256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"Protocol-SSLv3", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"EXP-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DES-CBC3-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"DHE-RSA-AES128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-EDH-DSS-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-RC2-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-RSA-AES256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"KRB5-DES-CBC3-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"TRUE", "Description"=>"" }, { "AttributeName"=>"EXP-RC2-CBC-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { 
"AttributeName"=>"EXP-ADH-RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-RC4-MD5", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"CAMELLIA256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-DSS-CAMELLIA128-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-KRB5-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"EXP-ADH-DES-CBC-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"DHE-DSS-AES256-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }, { "AttributeName"=>"ADH-SEED-SHA", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"FALSE", "Description"=>"" }], "PolicyTypeName"=>"SSLNegotiationPolicyType" }, { "Description"=>"Policy that controls whether to include the IP address and port of the originating request for TCP messages. This policy operates on TCP/SSL listeners only", "PolicyAttributeTypeDescriptions"=>[{ "AttributeName"=>"ProxyProtocol", "AttributeType"=>"Boolean", "Cardinality"=>"ONE", "DefaultValue"=>"", "Description"=>"" }], "PolicyTypeName"=>"ProxyProtocolPolicyType" }] end fog-aws-3.18.0/lib/fog/aws/elbv2.rb000066400000000000000000000037161437344660100166370ustar00rootroot00000000000000module Fog module AWS class ELBV2 < ELB requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :version, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/elbv2' request :add_tags request :create_load_balancer request :describe_tags request :remove_tags request :describe_load_balancers request :describe_listeners class Real < ELB::Real def initialize(options={}) @version = '2015-12-01' super(options) end end class Mock def self.data @data ||= Hash.new do |hash, region| owner_id = Fog::AWS::Mock.owner_id hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :owner_id => owner_id, :load_balancers_v2 => {} } end end end def self.dns_name(name, region) "#{name}-#{Fog::Mock.random_hex(8)}.#{region}.elb.amazonaws.com" end def self.reset @data = nil end attr_reader :region def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) Fog::AWS.validate_region!(@region) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key,@region,'elasticloadbalancing') end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end end end end fog-aws-3.18.0/lib/fog/aws/emr.rb000066400000000000000000000111201437344660100163740ustar00rootroot00000000000000module Fog module AWS class EMR < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class IdentifierTaken < Fog::Errors::Error; end requires 
:aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/emr' request :add_instance_groups request :add_job_flow_steps request :describe_job_flows request :modify_instance_groups request :run_job_flow request :set_termination_protection request :terminate_job_flows # model_path 'fog/aws/models/rds' # model :server # collection :servers # model :snapshot # collection :snapshots # model :parameter_group # collection :parameter_groups # # model :parameter # collection :parameters # # model :security_group # collection :security_groups class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to EMR # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # emr = EMR.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, in 'eu-west-1', 'us-east-1' and etc. # # ==== Returns # * EMR object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.emr' options[:region] ||= 'us-east-1' @host = options[:host] || "elasticmapreduce.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @region = options[:region] setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'elasticmapreduce') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :method => 'POST', :host => @host, :path => @path, :port => @port, :version => '2009-03-31' #'2010-07-28' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) end end end end end fog-aws-3.18.0/lib/fog/aws/errors.rb000066400000000000000000000026331437344660100171360ustar00rootroot00000000000000module Fog module AWS module Errors def self.match_error(error) if !Fog::AWS.json_response?(error.response) matchers = [ lambda {|s| 
s.match(/(?:.*<Code>(.*)<\/Code>)(?:.*<Message>(.*)<\/Message>)/m)},
            lambda {|s| s.match(/.*<(.+Exception)>(?:.*<Message>(.*)<\/Message>)/m)}
          ]

          [error.message, error.response.body].each(&Proc.new {|s|
            matchers.each do |matcher|
              match = matcher.call(s)
              return {:code => match[1].split('.').last, :message => match[2]} if match
            end
          })
        else
          begin
            full_msg_error = Fog::JSON.decode(error.response.body)
            if (full_msg_error.has_key?('Message') || full_msg_error.has_key?('message')) && (error.response.headers.has_key?('x-amzn-ErrorType') || full_msg_error.has_key?('__type'))
              matched_error = {
                :code => full_msg_error['__type'] || error.response.headers['x-amzn-ErrorType'].split(':').first,
                :message => full_msg_error['Message'] || full_msg_error['message']
              }
              return matched_error
            end
          rescue Fog::JSON::DecodeError => e
            Fog::Logger.warning("Error parsing response json - #{e}")
          end
        end
        {} # we did not match the message or response body
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/federation.rb000066400000000000000000000025621437344660100177430ustar00rootroot00000000000000
module Fog
  module AWS
    class Federation < Fog::Service
      extend Fog::AWS::CredentialFetcher::ServiceMethods

      recognizes :instrumentor, :instrumentor_name

      request_path 'fog/aws/requests/federation'
      request 'get_signin_token'

      class Mock
        def self.data
          @data ||= {}
        end

        def self.reset
          @data = nil
        end

        def initialize(options={})
        end

        def data
          self.class.data
        end

        def reset_data
          self.class.reset
        end
      end

      class Real
        include Fog::AWS::CredentialFetcher::ConnectionMethods

        def initialize(options={})
          @instrumentor = options[:instrumentor]
          @instrumentor_name = options[:instrumentor_name] || 'fog.aws.federation'
          @connection_options = options[:connection_options] || {}
          @host = 'signin.aws.amazon.com'
          @path = '/federation'
          @scheme = 'https'
          @connection = Excon.new("#{@scheme}://#{@host}#{@path}")
        end

        def request(action, session)
          response = @connection.get(
            :query => "Action=#{action}&SessionType=json&Session=#{session}",
            :expects => 200
          ).body
          Fog::JSON.decode(response)
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/glacier.rb000066400000000000000000000210301437344660100172200ustar00rootroot00000000000000
module Fog
  module AWS
    class Glacier < Fog::Service
      extend Fog::AWS::CredentialFetcher::ServiceMethods

      requires :aws_access_key_id, :aws_secret_access_key
      recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name

      request_path 'fog/aws/requests/glacier'
      request :abort_multipart_upload
      request :complete_multipart_upload
      request :create_archive
      request :create_vault
      request :delete_archive
      request :delete_vault
      request :delete_vault_notification_configuration
      request :describe_job
      request :describe_vault
      request :get_job_output
      request :get_vault_notification_configuration
      request :initiate_job
      request :initiate_multipart_upload
      request :list_jobs
      request :list_multipart_uploads
      request :list_parts
      request :list_vaults
      request :set_vault_notification_configuration
      request :upload_part

      model_path 'fog/aws/models/glacier'
      model :vault
      collection :vaults

      MEGABYTE = 1024*1024

      class TreeHash
        def self.digest(body)
          new.add_part(body)
        end

        def initialize
          @last_chunk_digest = nil # Digest OBJECT for last chunk (Digest::SHA256)
          @last_chunk_digest_temp = nil # Digest VALUE for last chunk
          @last_chunk_length = 0 # Length of last chunk, always smaller than 1MB.
          @digest_stack = [] # First position on stack corresponds to 1MB, second 2MB, third 4MB, fourth 8MB and so on.
# In any time, the size of all already added parts is equal to sum of all existing (non-nil) # positions multiplied by that number, plus last_chunk_length for the remainder smaller than # one megabyte. So, if last_chunk_length is half megabyte, stack[0] is filled, stack[1] and # stack[2] empty and stack[3] filled, the size is 0.5MB + 1x1MB + 0x2MB + 0x4MB + 1x8MB = 9.5MB. end def update_digest_stack(digest, stack) stack.each_with_index{|s,i| if s digest = Digest::SHA256.digest(s + digest) stack[i] = nil else stack[i] = digest # Update this position with value obtained in previous run of cycle. digest = nil break end } stack << digest if digest end def reduce_digest_stack(digest, stack) stack.each_with_index{|s,i| unless digest digest = stack[i] next end if stack[i] digest = Digest::SHA256.digest(stack[i] + digest) end } digest end def add_part(bytes) part = self.digest_for_part(bytes) part.unpack('H*').first end def prepare_body_for_slice(body) if body.respond_to? :byteslice r = yield(body, :byteslice) else if body.respond_to? :encoding old_encoding = body.encoding body.force_encoding('BINARY') end r = yield(body, :slice) if body.respond_to? :encoding body.force_encoding(old_encoding) end end r end def digest_for_part(body) part_stack = [] part_temp = nil body_size = body.bytesize prepare_body_for_slice(body) {|body, slice| start_offset = 0 if @last_chunk_length != 0 start_offset = MEGABYTE - @last_chunk_length @last_chunk_hash.update(body.send(slice, 0, start_offset)) hash = @last_chunk_hash.digest @last_chunk_digest_temp = hash if body_size > start_offset @last_chunk_length = 0 @last_chunk_hash = nil @last_chunk_digest_temp = nil update_digest_stack(hash, @digest_stack) else part_temp = hash @last_chunk_digest_temp = hash @last_chunk_length += body_size next end end whole_chunk_count = (body_size - start_offset) / MEGABYTE whole_chunk_count.times.each {|chunk_index| hash = Digest::SHA256.digest(body.send(slice, start_offset + chunk_index * MEGABYTE, MEGABYTE)) update_digest_stack(hash, part_stack) update_digest_stack(hash, @digest_stack) } rest_size = body_size - start_offset - whole_chunk_count * MEGABYTE if rest_size > 0 || whole_chunk_count == 0 @last_chunk_hash = Digest::SHA256.new @last_chunk_length = rest_size @last_chunk_hash.update(body.send(slice, start_offset + whole_chunk_count * MEGABYTE, rest_size)) hash = @last_chunk_hash.digest @last_chunk_digest_temp = hash part_temp = hash end } reduce_digest_stack(part_temp, part_stack) end def digest reduce_digest_stack(@last_chunk_digest_temp, @digest_stack) end def hexdigest digest.unpack('H*').first end end class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Glacier # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # ses = SES.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'us-east-1' and etc. # # ==== Returns # * Glacier object with connection to AWS. 
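        #
        # ==== Example (illustrative sketch; vault name and body chunk are placeholders)
        # glacier = Fog::AWS::Glacier.new(
        #   :aws_access_key_id => your_aws_access_key_id,
        #   :aws_secret_access_key => your_aws_secret_access_key
        # )
        # glacier.create_vault('my-vault')
        #
        # # TreeHash (defined above) computes the SHA256 tree hash Glacier expects:
        # tree_hash = Fog::AWS::Glacier::TreeHash.new
        # tree_hash.add_part(chunk)   # returns the hex tree hash of this part
        # tree_hash.hexdigest         # hex tree hash of everything added so far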
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.glacier' @connection_options = options[:connection_options] || {} @host = options[:host] || "glacier.#{@region}.amazonaws.com" @version = '2012-06-01' @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'glacier') end def request(params, &block) refresh_credentials_if_expired date = Fog::Time.now params[:headers]['Date'] = date.to_date_header params[:headers]['x-amz-date'] = date.to_iso8601_basic params[:headers]['Host'] = @host params[:headers]['x-amz-glacier-version'] = @version params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['Authorization'] = @signer.sign params, date if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params, &block) end else _request(params, &block) end end def _request(params, &block) response = @connection.request(params, &block) if response.headers['Content-Type'] == 'application/json' && response.body.size > 0 #body will be empty if the streaming form has been used response.body = Fog::JSON.decode(response.body) end response end end end end end fog-aws-3.18.0/lib/fog/aws/iam.rb000066400000000000000000000302301437344660100163620ustar00rootroot00000000000000module Fog module AWS class IAM < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class EntityAlreadyExists < Fog::AWS::IAM::Error; end class KeyPairMismatch < Fog::AWS::IAM::Error; end class LimitExceeded < Fog::AWS::IAM::Error; end class MalformedCertificate < Fog::AWS::IAM::Error; end class ValidationError < Fog::AWS::IAM::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :persistent, :instrumentor, :instrumentor_name, :aws_session_token, :use_iam_profile, :aws_credentials_expire_at, :region request_path 'fog/aws/requests/iam' request :add_user_to_group request :add_role_to_instance_profile request :attach_group_policy request :attach_role_policy request :attach_user_policy request :create_access_key request :create_account_alias request :create_group request :create_instance_profile request :create_login_profile request :create_policy request :create_policy_version request :create_role request :create_user request :delete_access_key request :delete_account_password_policy request :delete_account_alias request :delete_group request :delete_group_policy request :delete_instance_profile request :delete_login_profile request :delete_policy request :delete_policy_version request :delete_role request :delete_role_policy request :delete_server_certificate request :delete_signing_certificate request :delete_user request :delete_user_policy request :detach_group_policy request :detach_role_policy request :detach_user_policy request :get_account_password_policy request 
:get_account_summary request :get_group request :get_group_policy request :get_instance_profile request :get_login_profile request :get_policy request :get_policy_version request :get_role request :get_role_policy request :get_server_certificate request :get_user request :get_user_policy request :list_access_keys request :list_account_aliases request :list_attached_group_policies request :list_attached_role_policies request :list_attached_user_policies request :list_group_policies request :list_groups request :list_groups_for_user request :list_instance_profiles request :list_instance_profiles_for_role request :list_mfa_devices request :list_policies request :list_policy_versions request :list_role_policies request :list_roles request :list_server_certificates request :list_signing_certificates request :list_user_policies request :list_users request :put_group_policy request :put_role_policy request :put_user_policy request :remove_role_from_instance_profile request :remove_user_from_group request :set_default_policy_version request :update_access_key request :update_group request :update_login_profile request :update_account_password_policy request :update_assume_role_policy request :update_server_certificate request :update_signing_certificate request :update_user request :upload_server_certificate request :upload_signing_certificate model_path 'fog/aws/models/iam' model :access_key collection :access_keys model :group collection :groups model :instance_profile collection :instance_profiles model :managed_policy collection :managed_policies model :policy collection :policies model :role collection :roles model :user collection :users require 'fog/aws/iam/default_policies' class Mock def self.data @data ||= Hash.new do |hash, key| owner_id = Fog::AWS::Mock.owner_id hash[key] = { :owner_id => owner_id, :instance_profiles => {}, :server_certificates => {}, :access_keys => [{ "Status" => "Active", "AccessKeyId" => key }], :devices => [{ :enable_date => Time.now, :serial_number => 'R1234', :user_name => 'Bob' }], :markers => Hash.new { |mhash, mkey| mhash[mkey] = [] }, :managed_policies => Fog::AWS::IAM::Mock.default_policies.inject({}) { |r,p| r.merge(p['Arn'] => p) }, :managed_policy_versions => Fog::AWS::IAM::Mock.default_policy_versions.inject({}) { |r,(arn,pv)| r.merge(arn => {pv["VersionId"] => pv}) }, :users => Hash.new do |uhash, ukey| uhash[ukey] = { :access_keys => [], :arn => "arn:aws:iam::#{owner_id}:user/#{ukey}", :attached_policies => [], :created_at => Time.now, :path => '/', :policies => {}, :user_id => Fog::AWS::Mock.key_id } end, :groups => Hash.new do |ghash, gkey| ghash[gkey] = { :arn => "arn:aws:iam::#{owner_id}:group/#{gkey}", :attached_policies => [], :created_at => Time.now, :group_id => Fog::AWS::Mock.key_id, :members => [], :policies => {} } end, :roles => Hash.new do |rhash, rkey| rhash[rkey] = { :role_id => Fog::AWS::Mock.key_id, :arn => "arn:aws:iam:#{owner_id}:role/#{rkey}", :create_date => Time.now, :assume_role_policy_document => { "Version" => "2012-10-17", "Statement" => [ { "Effect" => "Allow", "Principal" => { "Service" => [ "ec2.amazonaws.com" ] }, "Action" => ["sts:AssumeRole"] } ] }, } end } end end def self.reset @data = nil end def self.server_certificate_id Fog::Mock.random_hex(16) end attr_reader :current_user_name def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @aws_credentials_expire_at = Time::now + 20 setup_credentials(options) end def data self.class.data[@root_access_key_id] end def account_id self.data[:owner_id] end 
def reset_data self.class.data.delete(@root_access_key_id) current_user end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] existing_user = nil @root_access_key_id, _ = self.class.data.find { |_, d| d[:users].find { |_, user| existing_user = user[:access_keys].find { |key| key["AccessKeyId"] == @aws_access_key_id } } } @root_access_key_id ||= @aws_access_key_id @current_user_name = existing_user ? existing_user["UserName"] : "root" end def current_user unless self.data[:users].key?("root") root = self.data[:users]["root"] # sets the hash root[:arn].gsub!("user/", "") # root user doesn't have "user/" key prefix end self.data[:users][self.current_user_name] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to IAM # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # iam = IAM.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * IAM object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.iam' @host = options[:host] || 'iam.amazonaws.com' @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @region = options[:region] || "us-east-1" @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] #global services that have no region are signed with the us-east-1 region #the only exception is GovCloud, which requires the region to be explicitly specified as us-gov-west-1 @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'iam') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :signer => @signer, :aws_session_token => @aws_session_token, :host => @host, :path => @path, :port => @port, :version => '2010-05-08', :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
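          # match is the { :code =>, :message => } Hash from Fog::AWS::Errors.match_error.
          # Callers typically rescue the mapped classes, e.g. (user name is hypothetical):
          #
          #   begin
          #     iam.delete_user('no-such-user')
          #   rescue Fog::AWS::IAM::NotFound
          #     # 'NoSuchEntity' from the API is re-raised as NotFound below
          #   end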
raise case match[:code] when 'CertificateNotFound', 'NoSuchEntity' Fog::AWS::IAM::NotFound.slurp(error, match[:message]) when 'EntityAlreadyExists', 'KeyPairMismatch', 'LimitExceeded', 'MalformedCertificate', 'ValidationError' Fog::AWS::IAM.const_get(match[:code]).slurp(error, match[:message]) else Fog::AWS::IAM::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/iam/000077500000000000000000000000001437344660100160375ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/iam/default_policies.json000066400000000000000000001410571437344660100222550ustar00rootroot00000000000000[ { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:08 UTC", "PolicyId": "ANPAI23HZ27SI6FQMGNQ2", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDirectConnectReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectReadOnlyAccess", "CreateDate": "2015-02-06 18:40:08 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:27 UTC", "PolicyId": "ANPAI2D5NJKMU274MET4E", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonGlacierReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonGlacierReadOnlyAccess", "CreateDate": "2015-02-06 18:40:27 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-11 17:21:45 UTC", "PolicyId": "ANPAI2DV5ULJSO2FYVPYG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSMarketplaceFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceFullAccess", "CreateDate": "2015-02-11 17:21:45 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:52 UTC", "PolicyId": "ANPAI3R4QMOG6Q5A4VWVG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRDSFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRDSFullAccess", "CreateDate": "2015-02-06 18:40:52 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:15 UTC", "PolicyId": "ANPAI3VAJF5ZCRZ7MCQE6", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2FullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonEC2FullAccess", "CreateDate": "2015-02-06 18:40:15 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:19 UTC", "PolicyId": "ANPAI47KNGXDAXFD4SDHG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSElasticBeanstalkReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkReadOnlyAccess", "CreateDate": "2015-02-06 18:40:19 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:07 UTC", "PolicyId": "ANPAI65L554VRJ33ECQS6", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSQSFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess", "CreateDate": "2015-02-06 18:41:07 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 15:53:53 UTC", "PolicyId": "ANPAI6E2CYYMI4XI7AA5K", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AWSLambdaFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess", "CreateDate": "2015-02-06 18:40:45 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:23 UTC", "PolicyId": "ANPAI7QIUU4GC66SF26WE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCloudHSMRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSCloudHSMRole", "CreateDate": 
"2015-02-06 18:41:23 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:38 UTC", "PolicyId": "ANPAI7XKCFMBPM3QQRRVQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "IAMFullAccess", "Arn": "arn:aws:iam::aws:policy/IAMFullAccess", "CreateDate": "2015-02-06 18:40:38 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:20 UTC", "PolicyId": "ANPAIA2V44CPHAUAAECKG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElastiCacheFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", "CreateDate": "2015-02-06 18:40:20 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-05-19 18:10:14 UTC", "PolicyId": "ANPAIAZKXZ27TAJ4PVWGK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2RoleforAWSCodeDeploy", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy", "CreateDate": "2015-05-19 18:10:14 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:48 UTC", "PolicyId": "ANPAICN26VXMXASXKOQCG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSOpsWorksFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksFullAccess", "CreateDate": "2015-02-06 18:40:48 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-05-13 21:21:30 UTC", "PolicyId": "ANPAIDI2BQT2LKXZG36TW", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AmazonElasticMapReduceRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", "CreateDate": "2015-02-06 18:41:20 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:57 UTC", "PolicyId": "ANPAIDRINP6PPTRXYVQCI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRoute53DomainsReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsReadOnlyAccess", "CreateDate": "2015-02-06 18:40:57 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:27 UTC", "PolicyId": "ANPAIDUTMOKHJFAPJV45W", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSOpsWorksRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksRole", "CreateDate": "2015-02-06 18:41:27 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:04 UTC", "PolicyId": "ANPAIFE3AV6VE7EANYBVM", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "SimpleWorkflowFullAccess", "Arn": "arn:aws:iam::aws:policy/SimpleWorkflowFullAccess", "CreateDate": "2015-02-06 18:41:04 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:58 UTC", "PolicyId": "ANPAIFIR6V6BVTRAHWINE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonS3FullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonS3FullAccess", "CreateDate": "2015-02-06 18:40:58 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:10 UTC", "PolicyId": "ANPAIFKCTUVOPD5NICXJK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSStorageGatewayReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayReadOnlyAccess", "CreateDate": "2015-02-06 18:41:10 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-05-13 21:27:21 UTC", "PolicyId": "ANPAIGALS5RCDLZLB3PGS", "DefaultVersionId": "v2", "IsAttachable": 
true, "PolicyName": "AmazonElasticMapReduceforEC2Role", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role", "CreateDate": "2015-02-06 18:41:21 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:51 UTC", "PolicyId": "ANPAIGD46KSON64QBSEZM", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRedshiftReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftReadOnlyAccess", "CreateDate": "2015-02-06 18:40:51 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:17 UTC", "PolicyId": "ANPAIGDT4SV4GSETWTBZK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2ReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess", "CreateDate": "2015-02-06 18:40:17 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:23 UTC", "PolicyId": "ANPAIHP6NH2S6GYFCOINC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticMapReduceReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess", "CreateDate": "2015-02-06 18:40:23 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:12 UTC", "PolicyId": "ANPAIHWYO6WSDNCG64M2W", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDirectoryServiceReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceReadOnlyAccess", "CreateDate": "2015-02-06 18:41:12 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-07 18:01:28 UTC", "PolicyId": "ANPAIICZJNOJN36GTG6CM", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AmazonVPCReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess", "CreateDate": "2015-02-06 18:41:17 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:34 UTC", "PolicyId": "ANPAIJIKLU2IJ7WJ6DZFG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMobileAnalyticsFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFullAccess", "CreateDate": "2015-02-06 18:40:34 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-03-19 19:11:56 UTC", "PolicyId": "ANPAIKCP6XS3ESGF4GLO2", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AWSDataPipelineRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSDataPipelineRole", "CreateDate": "2015-02-06 18:41:24 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:00 UTC", "PolicyId": "ANPAIKEABORKUXN6DEAZU", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudWatchFullAccess", "Arn": "arn:aws:iam::aws:policy/CloudWatchFullAccess", "CreateDate": "2015-02-06 18:40:00 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-21 16:40:17 UTC", "PolicyId": "ANPAILL3HVNFSB6DCOWYQ", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "ReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/ReadOnlyAccess", "CreateDate": "2015-02-06 18:39:48 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:12:19 UTC", "PolicyId": "ANPAILOI4HTQSFTF3GQSC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningBatchPredictionsAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningBatchPredictionsAccess", "CreateDate": "2015-04-09 17:12:19 UTC" }, { "AttachmentCount": 0, 
"Description": "", "Path": "/", "UpdateDate": "2015-05-19 18:21:32 UTC", "PolicyId": "ANPAILZHHKCKB4NE7XOIQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCodeDeployReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployReadOnlyAccess", "CreateDate": "2015-05-19 18:21:32 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:56 UTC", "PolicyId": "ANPAIM6OOWKQ7L7VBOZOC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudSearchFullAccess", "Arn": "arn:aws:iam::aws:policy/CloudSearchFullAccess", "CreateDate": "2015-02-06 18:39:56 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:51 UTC", "PolicyId": "ANPAIMBQYQZM7F63DA2UU", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCloudHSMFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSCloudHSMFullAccess", "CreateDate": "2015-02-06 18:39:51 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-05-18 23:28:05 UTC", "PolicyId": "ANPAIMRTKHWK7ESSNETSW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2SpotFleetRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole", "CreateDate": "2015-05-18 23:28:05 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:25 UTC", "PolicyId": "ANPAIN5WGARIKZ3E2UQOU", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticTranscoderJobsSubmitter", "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderJobsSubmitter", "CreateDate": "2015-02-06 18:40:25 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:11 UTC", "PolicyId": "ANPAINAW5ANUWTH3R4ANI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDirectoryServiceFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceFullAccess", "CreateDate": "2015-02-06 18:41:11 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:11 UTC", "PolicyId": "ANPAINUGF2JSOSUY76KYA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonDynamoDBFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess", "CreateDate": "2015-02-06 18:40:11 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:03 UTC", "PolicyId": "ANPAINV2XPFRMWJJNSCGI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSESReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSESReadOnlyAccess", "CreateDate": "2015-02-06 18:41:03 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:22 UTC", "PolicyId": "ANPAIO2VMUPGDC5PZVXVA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AutoScalingNotificationAccessRole", "Arn": "arn:aws:iam::aws:policy/service-role/AutoScalingNotificationAccessRole", "CreateDate": "2015-02-06 18:41:22 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:30 UTC", "PolicyId": "ANPAIOCMTDT5RLKZ2CAJO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonKinesisReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonKinesisReadOnlyAccess", "CreateDate": "2015-02-06 18:40:30 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-19 18:13:23 UTC", "PolicyId": "ANPAIONKN3TJZUKXCHXWC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": 
"AWSCodeDeployFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployFullAccess", "CreateDate": "2015-05-19 18:13:23 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-09 15:09:29 UTC", "PolicyId": "ANPAIP7WNAGMIPYNW4WQG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaDynamoDBExecutionRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole", "CreateDate": "2015-04-09 15:09:29 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:56 UTC", "PolicyId": "ANPAIPAFBMIYUILMOKL6G", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRoute53DomainsFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsFullAccess", "CreateDate": "2015-02-06 18:40:56 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:21 UTC", "PolicyId": "ANPAIPDACSNQHSENWAKM2", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElastiCacheReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheReadOnlyAccess", "CreateDate": "2015-02-06 18:40:21 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-27 16:25:25 UTC", "PolicyId": "ANPAIPN5S4NE5JJOKVC4Y", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticFileSystemReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemReadOnlyAccess", "CreateDate": "2015-05-27 16:25:25 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-04 17:54:14 UTC", "PolicyId": "ANPAIPRV52SH6HDCCFY6U", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "CloudFrontFullAccess", "Arn": "arn:aws:iam::aws:policy/CloudFrontFullAccess", "CreateDate": "2015-02-06 18:39:50 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-09 17:05:26 UTC", "PolicyId": "ANPAIQ5UDYYMNN42BM4AK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningRoleforRedshiftDataSource", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMachineLearningRoleforRedshiftDataSource", "CreateDate": "2015-04-09 17:05:26 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:36 UTC", "PolicyId": "ANPAIQLKQ4RXPUBBVVRDE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMobileAnalyticsNon-financialReportAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsNon-financialReportAccess", "CreateDate": "2015-02-06 18:40:36 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:58 UTC", "PolicyId": "ANPAIQNUJTQYDRJPC3BNK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCloudTrailFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailFullAccess", "CreateDate": "2015-02-06 18:39:58 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-03-24 17:22:23 UTC", "PolicyId": "ANPAIQOKZ5BGKLCMTXH4W", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonCognitoDeveloperAuthenticatedIdentities", "Arn": "arn:aws:iam::aws:policy/AmazonCognitoDeveloperAuthenticatedIdentities", "CreateDate": "2015-03-24 17:22:23 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-02 17:36:23 UTC", "PolicyId": "ANPAIQRXRDRGJUA33ELIO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSConfigRole", "Arn": 
"arn:aws:iam::aws:policy/service-role/AWSConfigRole", "CreateDate": "2015-04-02 17:36:23 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:50 UTC", "PolicyId": "ANPAISEKCHH4YDB46B5ZO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRedshiftFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess", "CreateDate": "2015-02-06 18:40:50 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:14 UTC", "PolicyId": "ANPAISRCSSJNS3QPKZJPM", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonZocaloReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonZocaloReadOnlyAccess", "CreateDate": "2015-02-06 18:41:14 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:52 UTC", "PolicyId": "ANPAISVCBSY7YDBOT67KE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCloudHSMReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSCloudHSMReadOnlyAccess", "CreateDate": "2015-02-06 18:39:52 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:55 UTC", "PolicyId": "ANPAITOYK2ZAOQFXV2JNC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRoute53ReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess", "CreateDate": "2015-02-06 18:40:55 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:16 UTC", "PolicyId": "ANPAIU6NBZVF2PCRW36ZW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2ReportsAccess", "Arn": "arn:aws:iam::aws:policy/AmazonEC2ReportsAccess", "CreateDate": "2015-02-06 18:40:16 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:08 UTC", "PolicyId": "ANPAIUGSSQY362XGCM6KW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSQSReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSQSReadOnlyAccess", "CreateDate": "2015-02-06 18:41:08 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:29 UTC", "PolicyId": "ANPAIVF32HAMOXCUYRAYE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonKinesisFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonKinesisFullAccess", "CreateDate": "2015-02-06 18:40:29 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:40:02 UTC", "PolicyId": "ANPAIW5VYBCGEX56JCINC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningReadOnlyAccess", "CreateDate": "2015-04-09 17:40:02 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:29 UTC", "PolicyId": "ANPAIWKFXRLQG2ROKKXLE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "RDSCloudHsmAuthorizationRole", "Arn": "arn:aws:iam::aws:policy/service-role/RDSCloudHsmAuthorizationRole", "CreateDate": "2015-02-06 18:41:29 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:25:41 UTC", "PolicyId": "ANPAIWKW6AGSGYOQ5ERHC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningFullAccess", "CreateDate": "2015-04-09 17:25:41 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:46 UTC", "PolicyId": 
"ANPAIWMBCKSKIEE64ZLYK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AdministratorAccess", "Arn": "arn:aws:iam::aws:policy/AdministratorAccess", "CreateDate": "2015-02-06 18:39:46 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:44:06 UTC", "PolicyId": "ANPAIWMCNQPRWMWT36GVQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningRealTimePredictionOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningRealTimePredictionOnlyAccess", "CreateDate": "2015-04-09 17:44:06 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-18 19:38:41 UTC", "PolicyId": "ANPAIWTTSFJ7KKJE3MWGA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSConfigUserAccess", "Arn": "arn:aws:iam::aws:policy/AWSConfigUserAccess", "CreateDate": "2015-02-18 19:38:41 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-21 16:47:16 UTC", "PolicyId": "ANPAIX2T3QCXHR2OGGCTO", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "SecurityAudit", "Arn": "arn:aws:iam::aws:policy/SecurityAudit", "CreateDate": "2015-02-06 18:41:01 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:12 UTC", "PolicyId": "ANPAIY2XFNA232XJ6J7X2", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonDynamoDBReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", "CreateDate": "2015-02-06 18:40:12 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:06 UTC", "PolicyId": "ANPAIZGQCQTFOFPMHSB6W", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSNSReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess", "CreateDate": "2015-02-06 18:41:06 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-21 17:16:31 UTC", "PolicyId": "ANPAIZP5JFP3AMSGINBB2", "DefaultVersionId": "v3", "IsAttachable": true, "PolicyName": "AmazonElasticMapReduceFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess", "CreateDate": "2015-02-06 18:40:22 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:59 UTC", "PolicyId": "ANPAIZTJ4DXE7G6AGAE6M", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonS3ReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", "CreateDate": "2015-02-06 18:40:59 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:18 UTC", "PolicyId": "ANPAIZYX2YLLBW2LJVUFW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSElasticBeanstalkFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkFullAccess", "CreateDate": "2015-02-06 18:40:18 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-05-04 18:05:37 UTC", "PolicyId": "ANPAJ2NKMKD73QS5NBFLA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCodeDeployRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole", "CreateDate": "2015-05-04 18:05:37 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:02 UTC", "PolicyId": "ANPAJ2P4NXCHAT7NDPNR4", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSESFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSESFullAccess", "CreateDate": "2015-02-06 18:41:02 UTC" }, { "AttachmentCount": 0, "Description": 
"", "Path": "/", "UpdateDate": "2015-02-06 18:40:03 UTC", "PolicyId": "ANPAJ2YIYDYSNNEHK3VKW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudWatchLogsReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess", "CreateDate": "2015-02-06 18:40:03 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:49 UTC", "PolicyId": "ANPAJ3AB5ZBFPCQGTVDU4", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSOpsWorksRegisterCLI", "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksRegisterCLI", "CreateDate": "2015-02-06 18:40:49 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:14 UTC", "PolicyId": "ANPAJ3ORT7KDISSXGHJXA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonDynamoDBFullAccesswithDataPipeline", "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccesswithDataPipeline", "CreateDate": "2015-02-06 18:40:14 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-03-19 19:21:14 UTC", "PolicyId": "ANPAJ3Z5I2WAJE5DN2J36", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AmazonEC2RoleforDataPipelineRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforDataPipelineRole", "CreateDate": "2015-02-06 18:41:25 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:02 UTC", "PolicyId": "ANPAJ3ZGNWK2R5HW5BQFO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudWatchLogsFullAccess", "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess", "CreateDate": "2015-02-06 18:40:02 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:24 UTC", "PolicyId": "ANPAJ4D5OJU75P5ZJZVNY", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticTranscoderFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderFullAccess", "CreateDate": "2015-02-06 18:40:24 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:37 UTC", "PolicyId": "ANPAJ5TAWBBQC2FAL3G6G", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMobileAnalyticsWriteOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsWriteOnlyAccess", "CreateDate": "2015-02-06 18:40:37 UTC" }, { "AttachmentCount": 1, "Description": "", "Path": "/", "UpdateDate": "2015-02-19 22:40:45 UTC", "PolicyId": "ANPAJ6YATONJHICG3DJ3U", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AWSConnector", "Arn": "arn:aws:iam::aws:policy/AWSConnector", "CreateDate": "2015-02-11 17:14:31 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-24 16:54:35 UTC", "PolicyId": "ANPAJALOYVTPDZEMIACSM", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2ContainerServiceFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess", "CreateDate": "2015-04-24 16:54:35 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-03-24 17:06:46 UTC", "PolicyId": "ANPAJBFTRZD2GQGJHSVQK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonCognitoReadOnly", "Arn": "arn:aws:iam::aws:policy/AmazonCognitoReadOnly", "CreateDate": "2015-03-24 17:06:46 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-07 18:07:43 UTC", "PolicyId": "ANPAJBWPGNOVKZD3JI2P2", "DefaultVersionId": "v3", "IsAttachable": true, "PolicyName": "AmazonVPCFullAccess", 
"Arn": "arn:aws:iam::aws:policy/AmazonVPCFullAccess", "CreateDate": "2015-02-06 18:41:16 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:43 UTC", "PolicyId": "ANPAJCQCT4JGTLC6722MQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSImportExportFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSImportExportFullAccess", "CreateDate": "2015-02-06 18:40:43 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:18:09 UTC", "PolicyId": "ANPAJDRUNIC2RYAMAT3CK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningCreateOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningCreateOnlyAccess", "CreateDate": "2015-04-09 17:18:09 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-03-12 20:16:47 UTC", "PolicyId": "ANPAJDU7KJADWBSEQ3E7S", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AWSCloudTrailReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailReadOnlyAccess", "CreateDate": "2015-02-06 18:39:59 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:46 UTC", "PolicyId": "ANPAJE5FX7FQZSU5XAKGO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaExecute", "Arn": "arn:aws:iam::aws:policy/AWSLambdaExecute", "CreateDate": "2015-02-06 18:40:46 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:09 UTC", "PolicyId": "ANPAJG5SSPAVOGK3SIDGU", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSStorageGatewayFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayFullAccess", "CreateDate": "2015-02-06 18:41:09 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:26 UTC", "PolicyId": "ANPAJGPP7GPMJRRJMEP3Q", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticTranscoderReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderReadOnlyAccess", "CreateDate": "2015-02-06 18:40:26 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:42 UTC", "PolicyId": "ANPAJHF7J65E2QFKCWAJM", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonWorkMailReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailReadOnlyAccess", "CreateDate": "2015-02-06 18:40:42 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-09 15:14:16 UTC", "PolicyId": "ANPAJHOLKJPXV4GBRMJUQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaKinesisExecutionRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole", "CreateDate": "2015-04-09 15:14:16 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:54 UTC", "PolicyId": "ANPAJHXQTPI5I5JKAIU74", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "ResourceGroupsandTagEditorReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorReadOnlyAccess", "CreateDate": "2015-02-06 18:39:54 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 17:32:41 UTC", "PolicyId": "ANPAJJL3PC3VCSVZP6OCI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMachineLearningManageRealTimeEndpointOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningManageRealTimeEndpointOnlyAccess", "CreateDate": "2015-04-09 17:32:41 UTC" }, { 
"AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-04 17:58:09 UTC", "PolicyId": "ANPAJJZMNYOTZCNQP36LG", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "CloudFrontReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/CloudFrontReadOnlyAccess", "CreateDate": "2015-02-06 18:39:55 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:30 UTC", "PolicyId": "ANPAJK5GQB7CIK7KHY2GA", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSNSRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSNSRole", "CreateDate": "2015-02-06 18:41:30 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:35 UTC", "PolicyId": "ANPAJKJHO2R27TXKCWBU4", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonMobileAnalyticsFinancialReportAccess", "Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFinancialReportAccess", "CreateDate": "2015-02-06 18:40:35 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-21 16:03:51 UTC", "PolicyId": "ANPAJKSO7NDY4T57MWDSQ", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "IAMReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/IAMReadOnlyAccess", "CreateDate": "2015-02-06 18:40:39 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:53 UTC", "PolicyId": "ANPAJKTTTYV2IIHKLZ346", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRDSReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess", "CreateDate": "2015-02-06 18:40:53 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-03-24 17:14:56 UTC", "PolicyId": "ANPAJKW5H2HNCPGCYGR6Y", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonCognitoPowerUser", "Arn": "arn:aws:iam::aws:policy/AmazonCognitoPowerUser", "CreateDate": "2015-03-24 17:14:56 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-27 16:22:28 UTC", "PolicyId": "ANPAJKXTMNVQGIDNCKPBC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticFileSystemFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess", "CreateDate": "2015-05-27 16:22:28 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:13 UTC", "PolicyId": "ANPAJLCDXYRINDMUXEVL6", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonZocaloFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonZocaloFullAccess", "CreateDate": "2015-02-06 18:41:13 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 15:59:32 UTC", "PolicyId": "ANPAJLDG7J3CGUHFN4YN6", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AWSLambdaReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess", "CreateDate": "2015-02-06 18:40:44 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:19 UTC", "PolicyId": "ANPAJLIB4VSBVO47ZSBB6", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSAccountUsageReportAccess", "Arn": "arn:aws:iam::aws:policy/AWSAccountUsageReportAccess", "CreateDate": "2015-02-06 18:41:19 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-03-19 18:45:18 UTC", "PolicyId": "ANPAJLYJCVHC7TQHCSQDS", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2ContainerServiceforEC2Role", "Arn": 
"arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role", "CreateDate": "2015-03-19 18:45:18 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:09 UTC", "PolicyId": "ANPAJLZZXU2YQVGL4QDNC", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonAppStreamFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonAppStreamFullAccess", "CreateDate": "2015-02-06 18:40:09 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:01 UTC", "PolicyId": "ANPAJN23PDQP7SZQAE3QE", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudWatchReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess", "CreateDate": "2015-02-06 18:40:01 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-09 15:03:43 UTC", "PolicyId": "ANPAJNCQGXC42545SKXIK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaBasicExecutionRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", "CreateDate": "2015-04-09 15:03:43 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:53 UTC", "PolicyId": "ANPAJNOS54ZFXN4T2Y34A", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "ResourceGroupsandTagEditorFullAccess", "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess", "CreateDate": "2015-02-06 18:39:53 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:40 UTC", "PolicyId": "ANPAJNPP7PPPPMJRV2SA4", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSKeyManagementServicePowerUser", "Arn": "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser", "CreateDate": "2015-02-06 18:40:40 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:42 UTC", "PolicyId": "ANPAJNTV4OG52ESYZHCNK", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSImportExportReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSImportExportReadOnlyAccess", "CreateDate": "2015-02-06 18:40:42 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:26 UTC", "PolicyId": "ANPAJNW3WMKVXFJ2KPIQ2", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonElasticTranscoderRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticTranscoderRole", "CreateDate": "2015-02-06 18:41:26 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-04-09 16:14:19 UTC", "PolicyId": "ANPAJO53W2XHNACG7V77Q", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonEC2ContainerServiceRole", "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole", "CreateDate": "2015-04-09 16:14:19 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:31 UTC", "PolicyId": "ANPAJOOM6LETKURTJ3XZ2", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSMarketplaceRead-only", "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceRead-only", "CreateDate": "2015-02-06 18:40:31 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-04-09 14:03:18 UTC", "PolicyId": "ANPAJPRL4KYETIH7XGTSS", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonWorkSpacesApplicationManagerAdminAccess", "Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesApplicationManagerAdminAccess", "CreateDate": "2015-04-09 
14:03:18 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:07 UTC", "PolicyId": "ANPAJQF2QKZSK74KTIHOW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDirectConnectFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectFullAccess", "CreateDate": "2015-02-06 18:40:07 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:18 UTC", "PolicyId": "ANPAJQRYCWMFX5J3E333K", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSAccountActivityAccess", "Arn": "arn:aws:iam::aws:policy/AWSAccountActivityAccess", "CreateDate": "2015-02-06 18:41:18 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:28 UTC", "PolicyId": "ANPAJQSTZJWB2AXXAKHVQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonGlacierFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonGlacierFullAccess", "CreateDate": "2015-02-06 18:40:28 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-03-24 18:16:18 UTC", "PolicyId": "ANPAJQVKNMT7SVATQ4AUY", "DefaultVersionId": "v2", "IsAttachable": true, "PolicyName": "AmazonWorkMailFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailFullAccess", "CreateDate": "2015-02-06 18:40:41 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:32 UTC", "PolicyId": "ANPAJRDW2WIFN7QLUAKBQ", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSMarketplaceManageSubscriptions", "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceManageSubscriptions", "CreateDate": "2015-02-06 18:40:32 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:11 UTC", "PolicyId": "ANPAJSNKQX2OW67GF4S7E", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSSupportAccess", "Arn": "arn:aws:iam::aws:policy/AWSSupportAccess", "CreateDate": "2015-02-06 18:41:11 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:47 UTC", "PolicyId": "ANPAJTHQ3EKCQALQDYG5G", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaInvocation-DynamoDB", "Arn": "arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB", "CreateDate": "2015-02-06 18:40:47 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-05-19 18:18:43 UTC", "PolicyId": "ANPAJUWEPOMGLMVXJAPUI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCodeDeployDeployerAccess", "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployDeployerAccess", "CreateDate": "2015-05-19 18:18:43 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:06 UTC", "PolicyId": "ANPAJW53AHN6ZWYGU2GNO", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDataPipelinePowerUser", "Arn": "arn:aws:iam::aws:policy/AWSDataPipelinePowerUser", "CreateDate": "2015-02-06 18:40:06 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:41:05 UTC", "PolicyId": "ANPAJWEKLCXXUNT2SOLSG", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonSNSFullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonSNSFullAccess", "CreateDate": "2015-02-06 18:41:05 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:57 UTC", "PolicyId": "ANPAJWPLX7N7BCC3RZLHW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "CloudSearchReadOnlyAccess", "Arn": 
"arn:aws:iam::aws:policy/CloudSearchReadOnlyAccess", "CreateDate": "2015-02-06 18:39:57 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:49 UTC", "PolicyId": "ANPAJWVBEE4I2POWLODLW", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSCloudFormationReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess", "CreateDate": "2015-02-06 18:39:49 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:54 UTC", "PolicyId": "ANPAJWVDLG5RPST6PHQ3A", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonRoute53FullAccess", "Arn": "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", "CreateDate": "2015-02-06 18:40:54 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/service-role/", "UpdateDate": "2015-02-06 18:41:28 UTC", "PolicyId": "ANPAJX4DPCRGTC4NFDUXI", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSLambdaRole", "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaRole", "CreateDate": "2015-02-06 18:41:28 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:10 UTC", "PolicyId": "ANPAJXIFDGB4VBX23DX7K", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AmazonAppStreamReadOnlyAccess", "Arn": "arn:aws:iam::aws:policy/AmazonAppStreamReadOnlyAccess", "CreateDate": "2015-02-06 18:40:10 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:39:47 UTC", "PolicyId": "ANPAJYRXTHIB4FOVS3ZXS", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "PowerUserAccess", "Arn": "arn:aws:iam::aws:policy/PowerUserAccess", "CreateDate": "2015-02-06 18:39:47 UTC" }, { "AttachmentCount": 0, "Description": "", "Path": "/", "UpdateDate": "2015-02-06 18:40:05 UTC", "PolicyId": "ANPAJZVYL5DGR3IHUEA2O", "DefaultVersionId": "v1", "IsAttachable": true, "PolicyName": "AWSDataPipelineFullAccess", "Arn": "arn:aws:iam::aws:policy/AWSDataPipelineFullAccess", "CreateDate": "2015-02-06 18:40:05 UTC" } ] fog-aws-3.18.0/lib/fog/aws/iam/default_policies.rb000066400000000000000000000005721437344660100217030ustar00rootroot00000000000000module Fog module AWS class IAM class Mock def self.default_policies Fog::JSON.decode(File.read(File.expand_path("../default_policies.json", __FILE__))) end def self.default_policy_versions Fog::JSON.decode(File.read(File.expand_path("../default_policy_versions.json", __FILE__))) end end end end end fog-aws-3.18.0/lib/fog/aws/iam/default_policy_versions.json000066400000000000000000002533561437344660100237030ustar00rootroot00000000000000{ "arn:aws:iam::aws:policy/AWSDirectConnectReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "directconnect:Describe*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonGlacierReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "glacier:ListVaults", "glacier:DescribeVault", "glacier:GetVaultNotifications", "glacier:ListJobs", "glacier:DescribeJob", "glacier:GetJobOutput" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSMarketplaceFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "aws-marketplace:*", "cloudformation:CreateStack", "cloudformation:DescribeStackResource", "cloudformation:DescribeStackResources", 
"cloudformation:DescribeStacks", "cloudformation:List*", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DeleteSecurityGroup", "ec2:DescribeAccountAttributes", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeKeyPairs", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonRDSFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "rds:*", "cloudwatch:DescribeAlarms", "cloudwatch:GetMetricStatistics", "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "sns:ListSubscriptions", "sns:ListTopics" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonEC2FullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": "ec2:*", "Effect": "Allow", "Resource": "*" }, { "Effect": "Allow", "Action": "elasticloadbalancing:*", "Resource": "*" }, { "Effect": "Allow", "Action": "cloudwatch:*", "Resource": "*" }, { "Effect": "Allow", "Action": "autoscaling:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSElasticBeanstalkReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "elasticbeanstalk:Check*", "elasticbeanstalk:Describe*", "elasticbeanstalk:List*", "elasticbeanstalk:RequestEnvironmentInfo", "elasticbeanstalk:RetrieveEnvironmentInfo", "ec2:Describe*", "elasticloadbalancing:Describe*", "autoscaling:Describe*", "cloudwatch:Describe*", "cloudwatch:List*", "cloudwatch:Get*", "s3:Get*", "s3:List*", "sns:Get*", "sns:List*", "cloudformation:Describe*", "cloudformation:Get*", "cloudformation:List*", "cloudformation:Validate*", "cloudformation:Estimate*", "rds:Describe*", "sqs:Get*", "sqs:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSQSFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "sqs:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSLambdaFullAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:*", "cognito-identity:ListIdentityPools", "cognito-sync:GetCognitoEvents", "cognito-sync:SetCognitoEvents", "dynamodb:*", "iam:ListAttachedRolePolicies", "iam:ListRolePolicies", "iam:ListRoles", "iam:PassRole", "kinesis:DescribeStream", "kinesis:ListStreams", "kinesis:PutRecord", "lambda:*", "logs:*", "s3:*", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic", "sns:ListTopics", "sns:Subscribe", "sns:Unsubscribe" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:PutMetricData", "ds:CreateComputer", "ds:DescribeDirectories", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogGroups", "logs:DescribeLogStreams", "logs:PutLogEvents", "ssm:DescribeAssociation", "ssm:GetDocument", "ssm:ListAssociations", 
"ssm:UpdateAssociationStatus" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSCloudHSMRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:CreateNetworkInterface", "ec2:CreateTags", "ec2:DeleteNetworkInterface", "ec2:DescribeNetworkInterfaceAttribute", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:DetachNetworkInterface" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/IAMFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "iam:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": "elasticache:*", "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "s3:GetObject", "s3:GetObjectVersion", "s3:ListObjects" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSOpsWorksFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "opsworks:*", "ec2:DescribeAvailabilityZones", "ec2:DescribeKeyPairs", "ec2:DescribeSecurityGroups", "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancers", "iam:GetRolePolicy", "iam:ListInstanceProfiles", "iam:ListRoles", "iam:ListUsers", "iam:PassRole" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Resource": "*", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:CancelSpotInstanceRequests", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags", "ec2:DescribeAvailabilityZones", "ec2:DescribeAccountAttributes", "ec2:DescribeInstances", "ec2:DescribeInstanceStatus", "ec2:DescribeKeyPairs", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcEndpoints", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcs", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:RequestSpotInstances", "ec2:RunInstances", "ec2:TerminateInstances", "iam:GetRole", "iam:GetRolePolicy", "iam:ListInstanceProfiles", "iam:ListRolePolicies", "iam:PassRole", "s3:CreateBucket", "s3:Get*", "s3:List*", "sdb:BatchPutAttributes", "sdb:Select", "sqs:CreateQueue", "sqs:Delete*", "sqs:GetQueue*", "sqs:ReceiveMessage" ] } ] } }, "arn:aws:iam::aws:policy/AmazonRoute53DomainsReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "route53domains:Get*", "route53domains:List*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/service-role/AWSOpsWorksRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:GetMetricStatistics", 
"ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeInstances", "ec2:DescribeKeyPairs", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancers", "iam:GetRolePolicy", "iam:ListInstanceProfiles", "iam:ListRoles", "iam:ListUsers", "iam:PassRole", "opsworks:*", "rds:*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/SimpleWorkflowFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "swf:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonS3FullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "s3:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSStorageGatewayReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "storagegateway:List*", "storagegateway:Describe*" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "ec2:DescribeSnapshots" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Resource": "*", "Action": [ "cloudwatch:*", "dynamodb:*", "ec2:Describe*", "elasticmapreduce:Describe*", "elasticmapreduce:ListBootstrapActions", "elasticmapreduce:ListClusters", "elasticmapreduce:ListInstanceGroups", "elasticmapreduce:ListInstances", "elasticmapreduce:ListSteps", "kinesis:CreateStream", "kinesis:DeleteStream", "kinesis:DescribeStream", "kinesis:GetRecords", "kinesis:GetShardIterator", "kinesis:MergeShards", "kinesis:PutRecord", "kinesis:SplitShard", "rds:Describe*", "s3:*", "sdb:*", "sns:*", "sqs:*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonRedshiftReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "redshift:Describe*", "redshift:ViewQueriesInConsole", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:DescribeInternetGateways", "sns:Get*", "sns:List*", "cloudwatch:Describe*", "cloudwatch:List*", "cloudwatch:Get*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "ec2:Describe*", "Resource": "*" }, { "Effect": "Allow", "Action": "elasticloadbalancing:Describe*", "Resource": "*" }, { "Effect": "Allow", "Action": [ "cloudwatch:ListMetrics", "cloudwatch:GetMetricStatistics", "cloudwatch:Describe*" ], "Resource": "*" }, { "Effect": "Allow", "Action": "autoscaling:Describe*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "elasticmapreduce:Describe*", "elasticmapreduce:List*", "s3:GetObject", "s3:ListAllMyBuckets", "s3:ListBucket", "sdb:Select", "cloudwatch:GetMetricStatistics" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSDirectoryServiceReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": 
"2012-10-17", "Statement": [ { "Action": [ "ds:Check*", "ds:Describe*", "ds:Get*", "ds:List*", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSubnets", "ec2:DescribeVpcs" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:DescribeAddresses", "ec2:DescribeCustomerGateways", "ec2:DescribeDhcpOptions", "ec2:DescribeInternetGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaces", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcEndpoints", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcPeeringConnection", "ec2:DescribeVpcs", "ec2:DescribeVpnConnections", "ec2:DescribeVpnGateways" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "mobileanalytics:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSDataPipelineRole": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:*", "datapipeline:DescribeObjects", "datapipeline:EvaluateExpression", "dynamodb:BatchGetItem", "dynamodb:DescribeTable", "dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan", "dynamodb:UpdateTable", "ec2:AuthorizeSecurityGroupIngress", "ec2:CancelSpotInstanceRequests", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags", "ec2:Describe*", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:RequestSpotInstances", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances", "elasticmapreduce:*", "iam:GetRole", "iam:GetRolePolicy", "iam:ListRolePolicies", "iam:ListInstanceProfiles", "iam:PassRole", "rds:DescribeDBInstances", "rds:DescribeDBSecurityGroups", "redshift:DescribeClusters", "redshift:DescribeClusterSecurityGroups", "s3:CreateBucket", "s3:DeleteObject", "s3:Get*", "s3:List*", "s3:Put*", "sdb:BatchPutAttributes", "sdb:Select*", "sns:GetTopicAttributes", "sns:ListTopics", "sns:Publish", "sns:Subscribe", "sns:Unsubscribe" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/CloudWatchFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "autoscaling:Describe*", "cloudwatch:*", "logs:*", "sns:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/ReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "appstream:Get*", "autoscaling:Describe*", "cloudformation:DescribeStacks", "cloudformation:DescribeStackEvents", "cloudformation:DescribeStackResource", "cloudformation:DescribeStackResources", "cloudformation:GetTemplate", "cloudformation:List*", "cloudfront:Get*", "cloudfront:List*", "cloudtrail:DescribeTrails", "cloudtrail:GetTrailStatus", "cloudwatch:Describe*", "cloudwatch:Get*", "cloudwatch:List*", "directconnect:Describe*", "dynamodb:GetItem", "dynamodb:BatchGetItem", "dynamodb:Query", "dynamodb:Scan", "dynamodb:DescribeTable", "dynamodb:ListTables", "ec2:Describe*", "ecs:Describe*", "ecs:List*", "elasticache:Describe*", "elasticbeanstalk:Check*", "elasticbeanstalk:Describe*", "elasticbeanstalk:List*", 
"elasticbeanstalk:RequestEnvironmentInfo", "elasticbeanstalk:RetrieveEnvironmentInfo", "elasticloadbalancing:Describe*", "elasticmapreduce:Describe*", "elasticmapreduce:List*", "elastictranscoder:Read*", "elastictranscoder:List*", "iam:List*", "iam:GenerateCredentialReport", "iam:Get*", "kinesis:Describe*", "kinesis:Get*", "kinesis:List*", "opsworks:Describe*", "opsworks:Get*", "route53:Get*", "route53:List*", "redshift:Describe*", "redshift:ViewQueriesInConsole", "rds:Describe*", "rds:ListTagsForResource", "s3:Get*", "s3:List*", "sdb:GetAttributes", "sdb:List*", "sdb:Select*", "ses:Get*", "ses:List*", "sns:Get*", "sns:List*", "sqs:GetQueueAttributes", "sqs:ListQueues", "sqs:ReceiveMessage", "storagegateway:List*", "storagegateway:Describe*", "tag:get*", "trustedadvisor:Describe*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningBatchPredictionsAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:CreateBatchPrediction", "machinelearning:DeleteBatchPrediction", "machinelearning:DescribeBatchPredictions", "machinelearning:GetBatchPrediction", "machinelearning:UpdateBatchPrediction" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCodeDeployReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "codedeploy:Batch*", "codedeploy:Get*", "codedeploy:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudSearchFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudsearch:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCloudHSMFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "cloudhsm:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:DescribeImages", "ec2:DescribeSubnets", "ec2:RequestSpotInstances", "ec2:TerminateInstances" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonElasticTranscoderJobsSubmitter": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "elastictranscoder:Read*", "elastictranscoder:List*", "elastictranscoder:*Job", "elastictranscoder:*Preset", "s3:List*", "iam:List*", "sns:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSDirectoryServiceFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "ds:*", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateNetworkInterface", "ec2:CreateSecurityGroup", "ec2:DeleteNetworkInterface", "ec2:DeleteSecurityGroup", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "dynamodb:*", "cloudwatch:DeleteAlarms", "cloudwatch:DescribeAlarmHistory", "cloudwatch:DescribeAlarms", 
"cloudwatch:DescribeAlarmsForMetric", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "cloudwatch:PutMetricAlarm", "datapipeline:ActivatePipeline", "datapipeline:CreatePipeline", "datapipeline:DeletePipeline", "datapipeline:DescribeObjects", "datapipeline:DescribePipelines", "datapipeline:GetPipelineDefinition", "datapipeline:ListPipelines", "datapipeline:PutPipelineDefinition", "datapipeline:QueryObjects", "iam:ListRoles", "sns:CreateTopic", "sns:DeleteTopic", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic", "sns:ListTopics", "sns:Subscribe", "sns:Unsubscribe" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSESReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ses:Get*", "ses:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AutoScalingNotificationAccessRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Resource": "*", "Action": [ "sqs:SendMessage", "sqs:GetQueueUrl", "sns:Publish" ] } ] } }, "arn:aws:iam::aws:policy/AmazonKinesisReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "kinesis:Get*", "kinesis:List*", "kinesis:Describe*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCodeDeployFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": "codedeploy:*", "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "dynamodb:DescribeStream", "dynamodb:GetRecords", "dynamodb:GetShardIterator", "dynamodb:ListStreams", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonRoute53DomainsFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "route53:CreateHostedZone", "route53domains:*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonElastiCacheReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "elasticache:Describe*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticFileSystemReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "ec2:DescribeAvailabilityZones", "ec2:DescribeNetworkInterfaceAttribute", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "elasticfilesystem:Describe*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudFrontFullAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "s3:ListAllMyBuckets" ], "Effect": "Allow", "Resource": "arn:aws:s3:::*" }, { "Action": [ "cloudfront:*", "iam:ListServerCertificates" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonMachineLearningRoleforRedshiftDataSource": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": 
"Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup", "ec2:DescribeInternetGateways", "ec2:DescribeSecurityGroups", "ec2:RevokeSecurityGroupIngress", "redshift:AuthorizeClusterSecurityGroupIngress", "redshift:CreateClusterSecurityGroup", "redshift:DescribeClusters", "redshift:DescribeClusterSecurityGroups", "redshift:ModifyCluster", "redshift:RevokeClusterSecurityGroupIngress", "s3:GetBucketLocation", "s3:GetBucketPolicy", "s3:GetObject", "s3:PutBucketPolicy", "s3:PutObject" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMobileAnalyticsNon-financialReportAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "mobileanalytics:GetReports", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCloudTrailFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sns:AddPermission", "sns:CreateTopic", "sns:DeleteTopic", "sns:ListTopics", "sns:SetTopicAttributes" ], "Resource": "arn:aws:sns:*" }, { "Effect": "Allow", "Action": [ "s3:CreateBucket", "s3:DeleteBucket", "s3:ListAllMyBuckets", "s3:PutBucketPolicy", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject" ], "Resource": "arn:aws:s3:::*" }, { "Effect": "Allow", "Action": "cloudtrail:*", "Resource": "*" }, { "Effect": "Allow", "Action": [ "logs:CreateLogGroup" ], "Resource": "arn:aws:logs:*" }, { "Effect": "Allow", "Action": [ "iam:PassRole", "iam:ListRoles", "iam:GetRolePolicy" ], "Resource": "arn:aws:iam::*" } ] } }, "arn:aws:iam::aws:policy/AmazonCognitoDeveloperAuthenticatedIdentities": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cognito-identity:GetOpenIdTokenForDeveloperIdentity", "cognito-identity:LookupDeveloperIdentity", "cognito-identity:MergeDeveloperIdentities", "cognito-identity:UnlinkDeveloperIdentity" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSConfigRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudtrail:DescribeTrails", "ec2:Describe*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "redshift:*", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:DescribeInternetGateways", "sns:CreateTopic", "sns:Get*", "sns:List*", "cloudwatch:Describe*", "cloudwatch:Get*", "cloudwatch:List*", "cloudwatch:PutMetricAlarm", "cloudwatch:EnableAlarmActions", "cloudwatch:DisableAlarmActions" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonZocaloReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "zocalo:Describe*", "ds:DescribeDirectories", "ec2:DescribeVpcs", "ec2:DescribeSubnets" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCloudHSMReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudhsm:Get*", "cloudhsm:List*", "cloudhsm:Describe*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess": { 
"VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "route53:Get*", "route53:List*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonEC2ReportsAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": "ec2-reports:*", "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSQSReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "sqs:GetQueueAttributes", "sqs:ListQueues" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonKinesisFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "kinesis:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:Describe*", "machinelearning:Get*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/RDSCloudHsmAuthorizationRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudhsm:CreateLunaClient", "cloudhsm:GetClientConfiguration", "cloudhsm:DeleteLunaClient", "cloudhsm:DescribeLunaClient", "cloudhsm:ModifyLunaClient", "cloudhsm:DescribeHapg", "cloudhsm:ModifyHapg", "cloudhsm:GetConfig" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AdministratorAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningRealTimePredictionOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:Predict" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSConfigUserAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "config:Get*", "config:Describe*", "config:Deliver*", "tag:GetResources", "tag:GetTagKeys" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/SecurityAudit": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "autoscaling:Describe*", "cloudformation:DescribeStack*", "cloudformation:GetTemplate", "cloudformation:ListStack*", "cloudfront:Get*", "cloudfront:List*", "cloudwatch:Describe*", "directconnect:Describe*", "dynamodb:ListTables", "ec2:Describe*", "ecs:Describe*", "ecs:List*", "elasticbeanstalk:Describe*", "elasticache:Describe*", "elasticloadbalancing:Describe*", "elasticmapreduce:DescribeJobFlows", "glacier:ListVaults", "iam:GenerateCredentialReport", "iam:Get*", "iam:List*", "rds:Describe*", "rds:DownloadDBLogFilePortion", "rds:ListTagsForResource", "redshift:Describe*", "route53:GetHostedZone", "route53:ListHostedZones", "route53:ListResourceRecordSets", "s3:GetBucket*", "s3:GetLifecycleConfiguration", "s3:GetObjectAcl", 
"s3:GetObjectVersionAcl", "s3:ListAllMyBuckets", "sdb:DomainMetadata", "sdb:ListDomains", "sns:GetTopicAttributes", "sns:ListTopics", "sqs:GetQueueAttributes", "sqs:ListQueues" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudwatch:DescribeAlarmHistory", "cloudwatch:DescribeAlarms", "cloudwatch:DescribeAlarmsForMetric", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "datapipeline:DescribeObjects", "datapipeline:DescribePipelines", "datapipeline:GetPipelineDefinition", "datapipeline:ListPipelines", "datapipeline:QueryObjects", "dynamodb:BatchGetItem", "dynamodb:DescribeTable", "dynamodb:GetItem", "dynamodb:ListTables", "dynamodb:Query", "dynamodb:Scan", "sns:ListSubscriptionsByTopic", "sns:ListTopics" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sns:GetTopicAttributes", "sns:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess": { "VersionId": "v3", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudwatch:*", "ec2:AuthorizeSecurityGroupIngress", "ec2:CancelSpotInstanceRequests", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags", "ec2:DescribeAvailabilityZones", "ec2:DescribeAccountAttributes", "ec2:DescribeInstances", "ec2:DescribeKeyPairs", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcs", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:RequestSpotInstances", "ec2:RunInstances", "ec2:TerminateInstances", "elasticmapreduce:*", "iam:GetPolicy", "iam:GetPolicyVersion", "iam:ListRoles", "iam:PassRole", "kms:List*", "s3:*", "sdb:*", "support:CreateCase", "support:DescribeServices", "support:DescribeSeverityLevels" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "s3:Get*", "s3:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSElasticBeanstalkFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "elasticbeanstalk:*", "ec2:*", "elasticloadbalancing:*", "autoscaling:*", "cloudwatch:*", "s3:*", "sns:*", "cloudformation:*", "rds:*", "sqs:*", "iam:PassRole" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "autoscaling:CompleteLifecycleAction", "autoscaling:DeleteLifecycleHook", "autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeLifecycleHooks", "autoscaling:PutLifecycleHook", "autoscaling:RecordLifecycleActionHeartbeat", "ec2:DescribeInstances", "ec2:DescribeInstanceStatus", "tag:GetTags", "tag:GetResources" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSESFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ 
"ses:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "logs:Describe*", "logs:Get*", "logs:TestMetricFilter" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSOpsWorksRegisterCLI": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "opsworks:AssignInstance", "opsworks:CreateStack", "opsworks:CreateLayer", "opsworks:DeregisterInstance", "opsworks:DescribeInstances", "opsworks:DescribeStackProvisioningParameters", "opsworks:DescribeStacks", "opsworks:UnassignInstance" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "ec2:DescribeInstances" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "iam:AddUserToGroup", "iam:CreateAccessKey", "iam:CreateGroup", "iam:CreateUser", "iam:ListInstanceProfiles", "iam:PassRole", "iam:PutUserPolicy" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccesswithDataPipeline": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudwatch:DeleteAlarms", "cloudwatch:DescribeAlarmHistory", "cloudwatch:DescribeAlarms", "cloudwatch:DescribeAlarmsForMetric", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "cloudwatch:PutMetricAlarm", "dynamodb:*", "sns:CreateTopic", "sns:DeleteTopic", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic", "sns:ListTopics", "sns:Subscribe", "sns:Unsubscribe" ], "Effect": "Allow", "Resource": "*", "Sid": "DDBConsole" }, { "Action": [ "datapipeline:*", "iam:ListRoles" ], "Effect": "Allow", "Resource": "*", "Sid": "DDBConsoleImportExport" }, { "Effect": "Allow", "Action": [ "iam:GetRolePolicy", "iam:PassRole" ], "Resource": [ "*" ], "Sid": "IAMEDPRoles" }, { "Action": [ "ec2:CreateTags", "ec2:DescribeInstances", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances", "elasticmapreduce:*", "datapipeline:*" ], "Effect": "Allow", "Resource": "*", "Sid": "EMR" }, { "Action": [ "s3:DeleteObject", "s3:Get*", "s3:List*", "s3:Put*" ], "Effect": "Allow", "Resource": [ "*" ], "Sid": "S3" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforDataPipelineRole": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:*", "datapipeline:*", "dynamodb:*", "ec2:Describe*", "elasticmapreduce:AddJobFlowSteps", "elasticmapreduce:Describe*", "elasticmapreduce:ListInstance*", "rds:Describe*", "redshift:DescribeClusters", "redshift:DescribeClusterSecurityGroups", "s3:*", "sdb:*", "sns:*", "sqs:*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "logs:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticTranscoderFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "elastictranscoder:*", "cloudfront:*", "s3:List*", "s3:Put*", "s3:Get*", "s3:*MultipartUpload*", "iam:CreateRole", "iam:GetRolePolicy", "iam:PassRole", "iam:PutRolePolicy", "iam:List*", "sns:CreateTopic", "sns:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMobileAnalyticsWriteOnlyAccess": { "VersionId": 
"v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "mobileanalytics:PutEvents", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSConnector": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "iam:GetUser", "Resource": "*" }, { "Effect": "Allow", "Action": [ "s3:ListAllMyBuckets" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "s3:CreateBucket", "s3:DeleteBucket", "s3:DeleteObject", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:PutObject", "s3:PutObjectAcl" ], "Resource": "arn:aws:s3:::import-to-ec2-*" }, { "Effect": "Allow", "Action": [ "ec2:CancelConversionTask", "ec2:CancelExportTask", "ec2:CreateImage", "ec2:CreateInstanceExportTask", "ec2:CreateTags", "ec2:CreateVolume", "ec2:DeleteTags", "ec2:DeleteVolume", "ec2:DescribeConversionTasks", "ec2:DescribeExportTasks", "ec2:DescribeImages", "ec2:DescribeInstanceAttribute", "ec2:DescribeInstanceStatus", "ec2:DescribeInstances", "ec2:DescribeRegions", "ec2:DescribeTags", "ec2:DetachVolume", "ec2:ImportInstance", "ec2:ImportVolume", "ec2:ModifyInstanceAttribute", "ec2:RunInstances", "ec2:StartInstances", "ec2:StopInstances", "ec2:TerminateInstances" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "SNS:Publish" ], "Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*" } ] } }, "arn:aws:iam::aws:policy/AmazonSSMFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:PutMetricData", "ds:CreateComputer", "ds:DescribeDirectories", "ec2:DescribeInstanceStatus", "logs:*", "ssm:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:Describe*", "elasticloadbalancing:*", "ecs:*", "iam:ListInstanceProfiles", "iam:ListRoles", "iam:PassRole" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonCognitoReadOnly": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cognito-identity:Describe*", "cognito-identity:Get*", "cognito-identity:List*", "cognito-sync:Describe*", "cognito-sync:Get*", "cognito-sync:List*", "iam:ListOpenIdConnectProviders", "iam:ListRoles", "sns:ListPlatformApplications" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonVPCFullAccess": { "VersionId": "v3", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:AcceptVpcPeeringConnection", "ec2:AllocateAddress", "ec2:AssociateAddress", "ec2:AssociateDhcpOptions", "ec2:AssociateRouteTable", "ec2:AttachClassicLinkVpc", "ec2:AttachInternetGateway", "ec2:AttachVpnGateway", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateCustomerGateway", "ec2:CreateDhcpOptions", "ec2:CreateInternetGateway", "ec2:CreateNetworkAcl", "ec2:CreateNetworkAclEntry", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpcEndpoint", "ec2:CreateVpcPeeringConnection", "ec2:CreateVpnConnection", "ec2:CreateVpnConnectionRoute", "ec2:CreateVpnGateway", "ec2:DeleteCustomerGateway", "ec2:DeleteDhcpOptions", "ec2:DeleteInternetGateway", "ec2:DeleteNetworkAcl", "ec2:DeleteNetworkAclEntry", 
"ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", "ec2:DeleteSubnet", "ec2:DeleteTags", "ec2:DeleteVpc", "ec2:DeleteVpcEndpoints", "ec2:DeleteVpcPeeringConnection", "ec2:DeleteVpnConnection", "ec2:DeleteVpnGateway", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", "ec2:DescribeCustomerGateways", "ec2:DescribeDhcpOptions", "ec2:DescribeInstances", "ec2:DescribeInternetGateways", "ec2:DescribeKeyPairs", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaces", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcClassicLink", "ec2:DescribeVpcEndpoints", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcPeeringConnections", "ec2:DescribeVpcs", "ec2:DescribeVpnConnections", "ec2:DescribeVpnGateways", "ec2:DetachClassicLinkVpc", "ec2:DetachInternetGateway", "ec2:DetachVpnGateway", "ec2:DisableVpcClassicLink", "ec2:DisableVgwRoutePropagation", "ec2:DisassociateAddress", "ec2:DisassociateRouteTable", "ec2:EnableVpcClassicLink", "ec2:EnableVgwRoutePropagation", "ec2:ModifySubnetAttribute", "ec2:ModifyVpcAttribute", "ec2:ModifyVpcEndpoint", "ec2:RejectVpcPeeringConnection", "ec2:ReleaseAddress", "ec2:ReplaceNetworkAclAssociation", "ec2:ReplaceNetworkAclEntry", "ec2:ReplaceRouteTableAssociation", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSImportExportFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "importexport:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningCreateOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:Create*", "machinelearning:Delete*", "machinelearning:Describe*", "machinelearning:Get*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCloudTrailReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "s3:GetObject" ], "Resource": "arn:aws:s3:::*" }, { "Effect": "Allow", "Action": [ "cloudtrail:GetTrailStatus", "cloudtrail:DescribeTrails", "cloudtrail:LookupEvents", "s3:ListAllMyBuckets" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSLambdaExecute": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "logs:*" ], "Resource": "arn:aws:logs:*:*:*" }, { "Effect": "Allow", "Action": [ "s3:GetObject", "s3:PutObject" ], "Resource": "arn:aws:s3:::*" } ] } }, "arn:aws:iam::aws:policy/AWSStorageGatewayFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "storagegateway:*" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "ec2:DescribeSnapshots", "ec2:DeleteSnapshot" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticTranscoderReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "elastictranscoder:Read*", "elastictranscoder:List*", "s3:List*", "iam:List*", "sns:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonWorkMailReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", 
"Statement": [ { "Effect": "Allow", "Action": [ "ses:Describe*", "ses:Get*", "workmail:Describe*", "workmail:Get*", "workmail:List*", "workmail:Search*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "kinesis:DescribeStream", "kinesis:GetRecords", "kinesis:GetShardIterator", "kinesis:ListStreams", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "tag:getResources", "tag:getTagKeys", "tag:getTagValues" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonMachineLearningManageRealTimeEndpointOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "machinelearning:CreateRealtimeEndpoint", "machinelearning:DeleteRealtimeEndpoint" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudFrontReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudfront:Get*", "cloudfront:List*", "iam:ListServerCertificates", "route53:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonSNSRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", "logs:PutMetricFilter", "logs:PutRetentionPolicy" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFinancialReportAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "mobileanalytics:GetReports", "mobileanalytics:GetFinancialReports" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/IAMReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "iam:GenerateCredentialReport", "iam:GenerateServiceLastAccessedDetails", "iam:Get*", "iam:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "rds:Describe*", "rds:ListTagsForResource", "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeSecurityGroups", "ec2:DescribeVpcs" ], "Effect": "Allow", "Resource": "*" }, { "Action": [ "cloudwatch:GetMetricStatistics" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonCognitoPowerUser": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cognito-identity:*", "cognito-sync:*", "iam:ListRoles", "iam:ListOpenIdConnectProviders", "sns:ListPlatformApplications" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface", "ec2:DescribeAvailabilityZones", "ec2:DescribeNetworkInterfaceAttribute", 
"ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:ModifyNetworkInterfaceAttribute", "elasticfilesystem:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonZocaloFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "zocalo:*", "ds:*", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateNetworkInterface", "ec2:CreateSecurityGroup", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVpc", "ec2:DescribeAvailabilityZones", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:DeleteNetworkInterface", "ec2:DeleteSecurityGroup", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudwatch:Describe*", "cloudwatch:Get*", "cloudwatch:List*", "cognito-identity:ListIdentityPools", "cognito-sync:GetCognitoEvents", "dynamodb:BatchGetItem", "dynamodb:DescribeStream", "dynamodb:DescribeTable", "dynamodb:GetItem", "dynamodb:ListStreams", "dynamodb:ListTables", "dynamodb:Query", "dynamodb:Scan", "iam:ListRoles", "kinesis:DescribeStream", "kinesis:ListStreams", "lambda:List*", "lambda:Get*", "logs:DescribeMetricFilters", "logs:GetLogEvents", "logs:DescribeLogGroups", "logs:DescribeLogStreams", "s3:Get*", "s3:List*", "sns:ListTopics", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSAccountUsageReportAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "aws-portal:ViewUsage" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ecs:CreateCluster", "ecs:DeregisterContainerInstance", "ecs:DiscoverPollEndpoint", "ecs:Poll", "ecs:RegisterContainerInstance", "ecs:Submit*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonAppStreamFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "appstream:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "autoscaling:Describe*", "cloudwatch:Describe*", "cloudwatch:Get*", "cloudwatch:List*", "logs:Get*", "logs:Describe*", "logs:TestMetricFilter", "sns:Get*", "sns:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "tag:getResources", "tag:getTagKeys", "tag:getTagValues", "tag:addResourceTags", "tag:removeResourceTags" ], "Resource": "*" } ] } }, 
"arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "kms:CreateAlias", "kms:CreateKey", "kms:DeleteAlias", "kms:Describe*", "kms:GenerateRandom", "kms:Get*", "kms:List*", "iam:ListGroups", "iam:ListRoles", "iam:ListUsers" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSImportExportReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "importexport:ListJobs", "importexport:GetStatus" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonElasticTranscoderRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Sid": "1", "Effect": "Allow", "Action": [ "s3:ListBucket", "s3:Put*", "s3:Get*", "s3:*MultipartUpload*" ], "Resource": [ "*" ] }, { "Sid": "2", "Effect": "Allow", "Action": [ "sns:Publish" ], "Resource": [ "*" ] }, { "Sid": "3", "Effect": "Deny", "Action": [ "s3:*Policy*", "sns:*Permission*", "sns:*Delete*", "s3:*Delete*", "sns:*Remove*" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:Describe*", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:Describe*", "elasticloadbalancing:RegisterInstancesWithLoadBalancer" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ssm:Describe*", "ssm:Get*", "ssm:List*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSMarketplaceRead-only": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "aws-marketplace:ViewSubscriptions", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeKeyPairs", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonWorkSpacesApplicationManagerAdminAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "wam:AuthenticatePackager", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSDirectConnectFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "directconnect:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSAccountActivityAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "aws-portal:ViewBilling" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonGlacierFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": "glacier:*", "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AmazonWorkMailFullAccess": { "VersionId": "v2", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ds:AuthorizeApplication", "ds:CheckAlias", "ds:CreateAlias", 
"ds:CreateDirectory", "ds:CreateDomain", "ds:DeleteAlias", "ds:DeleteDirectory", "ds:DescribeDirectories", "ds:ExtendDirectory", "ds:GetDirectoryLimits", "ds:ListAuthorizedApplications", "ds:UnauthorizeApplication", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateNetworkInterface", "ec2:CreateSecurityGroup", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVpc", "ec2:DeleteSecurityGroup", "ec2:DeleteSubnet", "ec2:DeleteVpc", "ec2:DescribeAvailabilityZones", "ec2:DescribeDomains", "ec2:DescribeRouteTables", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "kms:DescribeKey", "kms:ListAliases", "ses:*", "workmail:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSMarketplaceManageSubscriptions": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "aws-marketplace:ViewSubscriptions", "aws-marketplace:Subscribe", "aws-marketplace:Unsubscribe" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSSupportAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "support:*" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "dynamodb:DescribeStream", "dynamodb:GetRecords", "dynamodb:GetShardIterator", "dynamodb:ListStreams" ], "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCodeDeployDeployerAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "codedeploy:Batch*", "codedeploy:CreateDeployment", "codedeploy:Get*", "codedeploy:List*", "codedeploy:RegisterApplicationRevision" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSDataPipelinePowerUser": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "s3:List*", "dynamodb:DescribeTable", "rds:DescribeDBInstances", "rds:DescribeDBSecurityGroups", "redshift:DescribeClusters", "redshift:DescribeClusterSecurityGroups", "sns:ListTopics", "iam:PassRole", "iam:ListRoles", "iam:PutRolePolicy", "iam:GetRolePolicy", "iam:GetInstanceProfiles", "iam:ListInstanceProfiles", "iam:CreateInstanceProfile", "iam:AddRoleToInstanceProfile", "datapipeline:*", "cloudwatch:*" ], "Effect": "Allow", "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonSNSFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "sns:*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/CloudSearchReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "cloudsearch:Describe*", "cloudsearch:List*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "cloudformation:DescribeStacks", "cloudformation:DescribeStackEvents", "cloudformation:DescribeStackResource", "cloudformation:DescribeStackResources", "cloudformation:GetTemplate", "cloudformation:List*" ], "Resource": "*" } 
] } }, "arn:aws:iam::aws:policy/AmazonRoute53FullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "route53:*" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "elasticloadbalancing:DescribeLoadBalancers" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/service-role/AWSLambdaRole": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "lambda:InvokeFunction" ], "Resource": [ "*" ] } ] } }, "arn:aws:iam::aws:policy/AmazonAppStreamReadOnlyAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "appstream:Get*" ], "Effect": "Allow", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/PowerUserAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "NotAction": "iam:*", "Resource": "*" } ] } }, "arn:aws:iam::aws:policy/AWSDataPipelineFullAccess": { "VersionId": "v1", "IsDefaultVersion": true, "Document": { "Version": "2012-10-17", "Statement": [ { "Action": [ "s3:List*", "dynamodb:DescribeTable", "rds:DescribeDBInstances", "rds:DescribeDBSecurityGroups", "redshift:DescribeClusters", "redshift:DescribeClusterSecurityGroups", "sns:CreateTopic", "sns:ListTopics", "sns:Subscribe", "iam:PassRole", "iam:ListRoles", "iam:CreateRole", "iam:PutRolePolicy", "iam:GetRolePolicy", "iam:GetInstanceProfiles", "iam:ListInstanceProfiles", "iam:CreateInstanceProfile", "iam:AddRoleToInstanceProfile", "datapipeline:*", "cloudwatch:*" ], "Effect": "Allow", "Resource": [ "*" ] } ] } } } fog-aws-3.18.0/lib/fog/aws/iam/paged_collection.rb000066400000000000000000000024121437344660100216560ustar00rootroot00000000000000module Fog module AWS class IAM class PagedCollection < Fog::Collection def self.inherited(klass) klass.send(:attribute, :truncated, :aliases => 'IsTruncated', :type => :boolean) klass.send(:attribute, :marker, :aliases => 'Marker') super end def each_entry(*args, &block) to_a.each(*args, &block) end def each(options={}) limit = options[:limit] || 100 if !block_given? self else subset = dup.all subset.each_entry { |f| yield f } while subset.truncated subset. all(:marker => subset.marker, :limit => limit). each_entry { |f| yield f } end self end end protected def page_params(options={}) marker = options.fetch(:marker) { options.fetch('Marker') { self.marker } } limit = options.fetch(:limit) { options['MaxItems'] } params = {} if marker && !marker.empty? 
params.merge!('Marker' => marker) end if limit params.merge!('MaxItems' => limit) end params end end end end end fog-aws-3.18.0/lib/fog/aws/kinesis.rb000066400000000000000000000154651437344660100172760ustar00rootroot00000000000000module Fog module AWS class Kinesis < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class ExpiredIterator < Fog::Errors::Error; end class LimitExceeded < Fog::Errors::Error; end class ResourceInUse < Fog::Errors::Error; end class ResourceNotFound < Fog::Errors::Error; end class ExpiredIterator < Fog::Errors::Error; end class InvalidArgument < Fog::Errors::Error; end class ProvisionedThroughputExceeded < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/kinesis' request :add_tags_to_stream request :create_stream request :delete_stream request :describe_stream request :get_records request :get_shard_iterator request :list_streams request :list_tags_for_stream request :merge_shards request :put_record request :put_records request :remove_tags_from_stream request :split_shard class Real include Fog::AWS::CredentialFetcher::ConnectionMethods def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.kinesis' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "kinesis.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || true @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version = "20131202" setup_credentials(options) end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'kinesis') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) date = Fog::Time.now headers = { 'X-Amz-Target' => params['X-Amz-Target'], 'Content-Type' => 'application/x-amz-json-1.1', 'Host' => @host, 'x-amz-date' => date.to_iso8601_basic } headers['x-amz-security-token'] = @aws_session_token if @aws_session_token body = MultiJson.dump(params[:body]) headers['Authorization'] = @signer.sign({:method => "POST", :headers => headers, :body => body, :query => {}, :path => @path}, date) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
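# Translate the AWS error code extracted above into the matching
# Kinesis-specific exception class; codes that are not recognized fall
# through to the generic Fog::AWS::Kinesis::Error below.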
raise case match[:code] when 'ExpiredIteratorException' Fog::AWS::Kinesis::ExpiredIterator.slurp(error, match[:message]) when 'LimitExceededException' Fog::AWS::Kinesis::LimitExceeded.slurp(error, match[:message]) when 'ResourceInUseException' Fog::AWS::Kinesis::ResourceInUse.slurp(error, match[:message]) when 'ResourceNotFoundException' Fog::AWS::Kinesis::ResourceNotFound.slurp(error, match[:message]) when 'ExpiredIteratorException' Fog::AWS::Kinesis::ExpiredIterator.slurp(error, match[:message]) when 'InvalidArgumentException' Fog::AWS::Kinesis::InvalidArgument.slurp(error, match[:message]) when 'ProvisionedThroughputExceededException' Fog::AWS::Kinesis::ProvisionedThroughputExceeded.slurp(error, match[:message]) else Fog::AWS::Kinesis::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end class Mock @mutex = Mutex.new def self.data @mutex.synchronize do @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :kinesis_streams => {} } end end yield @data if block_given? end end def self.reset @mutex.synchronize do @data = nil end end def initialize(options={}) @account_id = Fog::AWS::Mock.owner_id @aws_access_key_id = options[:aws_access_key_id] @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) end def data self.class.data do |data| data[@region][@aws_access_key_id] end end def reset_data self.class.data do |data| data[@region].delete(@aws_access_key_id) end end def self.next_sequence_number @mutex.synchronize do @sequence_number ||= -1 @sequence_number += 1 @sequence_number.to_s end end def next_sequence_number; self.class.next_sequence_number; end def self.next_shard_id @mutex.synchronize do @shard_id ||= -1 @shard_id += 1 "shardId-#{@shard_id.to_s.rjust(12, "0")}" end end def next_shard_id; self.class.next_shard_id; end end end end end fog-aws-3.18.0/lib/fog/aws/kms.rb000066400000000000000000000137431437344660100164200ustar00rootroot00000000000000module Fog module AWS class KMS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods DependencyTimeoutException = Class.new(Fog::Errors::Error) DisabledException = Class.new(Fog::Errors::Error) InvalidArnException = Class.new(Fog::Errors::Error) InvalidGrantTokenException = Class.new(Fog::Errors::Error) InvalidKeyUsageException = Class.new(Fog::Errors::Error) KMSInternalException = Class.new(Fog::Errors::Error) KeyUnavailableException = Class.new(Fog::Errors::Error) MalformedPolicyDocumentException = Class.new(Fog::Errors::Error) NotFoundException = Class.new(Fog::Errors::Error) requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/kms' request :list_keys request :create_key request :describe_key model_path 'fog/aws/models/kms' model :key collection :keys class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, access_key| region_hash[access_key] = { :keys => {}, } end end end def self.reset data.clear end attr_reader :account_id def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @account_id = Fog::AWS::Mock.owner_id @region = options[:region] || 'us-east-1' setup_credentials(options) Fog::AWS.validate_region!(@region) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, 
@aws_secret_access_key, @region, 'kms') end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to KMS # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # kms = KMS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1', etc. # # ==== Returns # * KMS object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.kms' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "kms.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options={}) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'kms') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port, :version => '2014-11-01', :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) if match.empty? 
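# No recognizable AWS error payload: re-raise the original
# Excon::Errors::HTTPStatusError untouched. Recognized codes are mapped
# below to the KMS exception classes defined at the top of this service
# (DisabledException, NotFoundException, and so on).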
raise elsif Fog::AWS::KMS.const_defined?(match[:code]) raise Fog::AWS::KMS.const_get(match[:code]).slurp(error, match[:message]) else raise Fog::AWS::KMS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/lambda.rb000066400000000000000000000156161437344660100170470ustar00rootroot00000000000000module Fog module AWS class Lambda < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :persistent, :region, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :version, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/lambda' request :create_function request :delete_function request :get_function request :get_function_configuration request :invoke request :list_functions request :update_function_code request :update_function_configuration request :get_policy request :add_permission request :remove_permission request :create_event_source_mapping request :delete_event_source_mapping request :get_event_source_mapping request :list_event_source_mappings request :update_event_source_mapping class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :functions => {}, :permissions => {}, :event_source_mappings => {} } end end end attr_reader :region attr_reader :account_id attr_reader :aws_access_key_id def initialize(options={}) @region = options[:region] || 'us-east-1' @aws_access_key_id = options[:aws_access_key_id] @account_id = Fog::AWS::Mock.owner_id @module = "lambda" Fog::AWS.validate_region!(@region) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Lambda # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # lambda = Lambda.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * Lambda object with connection to AWS. 
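      #
      # ==== Additional example (a minimal sketch; the region value is illustrative)
      #   # :region is among the options this service recognizes, so a
      #   # connection can be pointed at a non-default region:
      #   lambda = Lambda.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key,
      #     :region                => 'eu-west-1'
      #   )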
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.lambda' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "lambda.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @version = options[:version] || '2015-03-31' @connection = Fog::Core::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end attr_reader :region def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'lambda') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) path = params.delete(:path) request_path = "/#{@version}#{path}" query = params.delete(:query) || {} method = params.delete(:method) || 'POST' expects = params.delete(:expects) || 200 headers = { 'Content-Type' => 'application/json' } headers.merge!(params[:headers] || {}) request_path_to_sign = case path when %r{^/functions/([0-9a-zA-Z\:\-\_]+)(/.+)?$} "/#{@version}/functions/#{Fog::AWS.escape($~[1])}#{$~[2]}" else request_path end body, headers = AWS.signed_params_v4( params, headers, { :method => method, :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => request_path_to_sign, :port => @port, :query => query, :body => params[:body] } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(method, request_path, query, body, headers, expects, idempotent, parser) end else _request(method, request_path, query, body, headers, expects, idempotent, parser) end end def _request(method, path, query, body, headers, expects, idempotent, parser=nil) response = process_response(@connection.request({ :path => path, :query => query, :body => body, :expects => expects, :idempotent => idempotent, :headers => headers, :method => method }), parser) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? raise Fog::AWS::Lambda::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end def process_response(response, parser) if response && response.body && response.body.is_a?(String) && !response.body.strip.empty? 
&& Fog::AWS.json_response?(response) begin response.body = Fog::JSON.decode(response.body) response.body = parser.process(response.body) if parser rescue Fog::JSON::DecodeError => e Fog::Logger.warning("Error parsing response json - #{e}") response.body = {} end end response end end end end end fog-aws-3.18.0/lib/fog/aws/mock.rb000066400000000000000000000122121437344660100165450ustar00rootroot00000000000000module Fog module AWS class Mock def self.arn(vendor, account_id, path, region = nil) "arn:aws:#{vendor}:#{region}:#{account_id}:#{path}" end def self.availability_zone(region) "#{region}#{Fog::Mock.random_selection('abcd', 1)}" end def self.box_usage sprintf("%0.10f", rand / 100).to_f end def self.console_output # "[ 0.000000] Linux version 2.6.18-xenU-ec2-v1.2 (root@domU-12-31-39-07-51-82) (gcc version 4.1.2 20070626 (Red Hat 4.1.2-13)) #2 SMP Wed Aug 19 09:04:38 EDT 2009" Base64.decode64("WyAwLjAwMDAwMF0gTGludXggdmVyc2lvbiAyLjYuMTgteGVuVS1lYzItdjEu\nMiAocm9vdEBkb21VLTEyLTMxLTM5LTA3LTUxLTgyKSAoZ2NjIHZlcnNpb24g\nNC4xLjIgMjAwNzA2MjYgKFJlZCBIYXQgNC4xLjItMTMpKSAjMiBTTVAgV2Vk\nIEF1ZyAxOSAwOTowNDozOCBFRFQgMjAwOQ==\n") end def self.dns_name_for(ip_address) "ec2-#{ip_address.gsub('.','-')}.compute-1.amazonaws.com" end def self.private_dns_name_for(ip_address) "ip-#{ip_address.gsub('.','-')}.ec2.internal" end def self.image path = [] (rand(3) + 2).times do path << Fog::Mock.random_letters(rand(9) + 8) end { "imageOwnerId" => Fog::Mock.random_letters(rand(5) + 4), "blockDeviceMapping" => [], "productCodes" => [], "kernelId" => kernel_id, "ramdiskId" => ramdisk_id, "imageState" => "available", "imageId" => image_id, "architecture" => "i386", "isPublic" => true, "imageLocation" => path.join('/'), "imageType" => "machine", "rootDeviceType" => ["ebs","instance-store"][rand(2)], "rootDeviceName" => "/dev/sda1" } end def self.image_id "ami-#{Fog::Mock.random_hex(8)}" end def self.key_fingerprint fingerprint = [] 20.times do fingerprint << Fog::Mock.random_hex(2) end fingerprint.join(':') end def self.instance_id "i-#{Fog::Mock.random_hex(8)}" end def self.ip_address Fog::Mock.random_ip end def self.private_ip_address ip_address.gsub(/^\d{1,3}\./,"10.") end def self.kernel_id "aki-#{Fog::Mock.random_hex(8)}" end def self.key_material OpenSSL::PKey::RSA.generate(1024).to_s end def self.owner_id Fog::Mock.random_numbers(12) end def self.ramdisk_id "ari-#{Fog::Mock.random_hex(8)}" end def self.request_id request_id = [] request_id << Fog::Mock.random_hex(8) 3.times do request_id << Fog::Mock.random_hex(4) end request_id << Fog::Mock.random_hex(12) request_id.join('-') end class << self alias_method :reserved_instances_id, :request_id alias_method :reserved_instances_offering_id, :request_id alias_method :sqs_message_id, :request_id alias_method :sqs_sender_id, :request_id end def self.reservation_id "r-#{Fog::Mock.random_hex(8)}" end def self.snapshot_id "snap-#{Fog::Mock.random_hex(8)}" end def self.volume_id "vol-#{Fog::Mock.random_hex(8)}" end def self.security_group_id "sg-#{Fog::Mock.random_hex(8)}" end def self.network_acl_id "acl-#{Fog::Mock.random_hex(8)}" end def self.network_acl_association_id "aclassoc-#{Fog::Mock.random_hex(8)}" end def self.network_interface_id "eni-#{Fog::Mock.random_hex(8)}" end def self.internet_gateway_id "igw-#{Fog::Mock.random_hex(8)}" end def self.dhcp_options_id "dopt-#{Fog::Mock.random_hex(8)}" end def self.vpc_id "vpc-#{Fog::Mock.random_hex(8)}" end def self.subnet_id "subnet-#{Fog::Mock.random_hex(8)}" end def self.zone_id "zone-#{Fog::Mock.random_hex(8)}" end def 
self.route_table_id "rtb-#{Fog::Mock.random_hex(8)}" end def self.change_id Fog::Mock.random_letters_and_numbers(14) end def self.nameservers [ 'ns-2048.awsdns-64.com', 'ns-2049.awsdns-65.net', 'ns-2050.awsdns-66.org', 'ns-2051.awsdns-67.co.uk' ] end def self.key_id(length=21) #Probably close enough Fog::Mock.random_selection('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',length) end def self.rds_address(db_name,region) "#{db_name}.#{Fog::Mock.random_letters(rand(12) + 4)}.#{region}.rds.amazonaws.com" end def self.spot_instance_request_id "sir-#{Fog::Mock.random_letters_and_numbers(8)}" end def self.data_pipeline_id "df-#{Fog::Mock.random_letters_and_numbers(19).capitalize}" end def self.spot_product_descriptions [ 'Linux/UNIX', 'Windows', 'SUSE Linux' ] end def self.default_vpc_for(region) @default_vpcs ||= {} @default_vpcs[region] ||= vpc_id end end end end fog-aws-3.18.0/lib/fog/aws/models/000077500000000000000000000000001437344660100165545ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/000077500000000000000000000000001437344660100212245ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/activities.rb000066400000000000000000000020131437344660100237110ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/activity' module Fog module AWS class AutoScaling class Activities < Fog::Collection model Fog::AWS::AutoScaling::Activity attribute :filters # Creates a new scaling policy. def initialize(attributes={}) self.filters ||= {} super end def all(filters_arg = filters) data = [] next_token = nil filters = filters_arg loop do result = service.describe_scaling_activities(filters.merge('NextToken' => next_token)).body['DescribeScalingActivitiesResult'] data += result['Activities'] next_token = result['NextToken'] break if next_token.nil? end load(data) end def get(identity) data = service.describe_scaling_activities('ActivityId' => identity).body['DescribeScalingActivitiesResult']['Activities'].first new(data) unless data.nil? 
end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/activity.rb000066400000000000000000000016041437344660100234060ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Activity < Fog::Model identity :id, :aliases => 'ActivityId' attribute :auto_scaling_group_name, :aliases => 'AutoScalingGroupName' attribute :cause, :aliases => 'Cause' attribute :description, :aliases => 'Description' attribute :end_time, :aliases => 'EndTime' attribute :progress, :aliases => 'Progress' attribute :start_time, :aliases => 'StartTime' attribute :status_code, :aliases => 'StatusCode' attribute :status_message, :aliases => 'StatusMessage' def group service.groups.get(attributes['AutoScalingGroupName']) end def save raise "Operation not supported" end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/configuration.rb000066400000000000000000000055261437344660100244300ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Configuration < Fog::Model identity :id, :aliases => 'LaunchConfigurationName' attribute :arn, :aliases => 'LaunchConfigurationARN' attribute :associate_public_ip, :aliases => 'AssociatePublicIpAddress' attribute :block_device_mappings, :aliases => 'BlockDeviceMappings' attribute :created_at, :aliases => 'CreatedTime' attribute :ebs_optimized, :aliases => 'EbsOptimized' attribute :iam_instance_profile, :aliases => 'IamInstanceProfile' attribute :image_id, :aliases => 'ImageId' #attribute :instance_monitoring, :aliases => 'InstanceMonitoring' attribute :instance_monitoring, :aliases => 'InstanceMonitoring', :squash => 'Enabled' attribute :instance_type, :aliases => 'InstanceType' attribute :kernel_id, :aliases => 'KernelId' attribute :key_name, :aliases => 'KeyName' attribute :ramdisk_id, :aliases => 'RamdiskId' attribute :security_groups, :aliases => 'SecurityGroups' attribute :user_data, :aliases => 'UserData' attribute :spot_price, :aliases => 'SpotPrice' attribute :placement_tenancy, :aliases => 'PlacementTenancy' attribute :classic_link_vpc_id, :aliases => 'ClassicLinkVPCId' attribute :classic_link_security_groups, :aliases => 'ClassicLinkVPCSecurityGroups' def initialize(attributes={}) #attributes[:availability_zones] ||= %w(us-east-1a us-east-1b us-east-1c us-east-1d) #attributes['ListenerDescriptions'] ||= [{ # 'Listener' => {'LoadBalancerPort' => 80, 'InstancePort' => 80, 'Protocol' => 'http'}, # 'PolicyNames' => [] #}] #attributes['Policies'] ||= {'AppCookieStickinessPolicies' => [], 'LBCookieStickinessPolicies' => []} super end def ready? # AutoScaling requests are synchronous true end def save requires :id requires :image_id requires :instance_type options = Hash[self.class.aliases.map { |key, value| [key, send(value)] }] options.delete_if { |key, value| value.nil? 
} service.create_launch_configuration(image_id, instance_type, id, options) #, listeners.map{|l| l.to_params}) # reload instead of merge attributes b/c some attrs (like HealthCheck) # may be set, but only the DNS name is returned in the create_load_balance # API call reload end def reload super self end def destroy requires :id service.delete_launch_configuration(id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/configurations.rb000066400000000000000000000017051437344660100246060ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/configuration' module Fog module AWS class AutoScaling class Configurations < Fog::Collection model Fog::AWS::AutoScaling::Configuration # Creates a new launch configuration def initialize(attributes={}) super end def all data = [] next_token = nil loop do result = service.describe_launch_configurations('NextToken' => next_token).body['DescribeLaunchConfigurationsResult'] data += result['LaunchConfigurations'] next_token = result['NextToken'] break if next_token.nil? end load(data) end def get(identity) data = service.describe_launch_configurations('LaunchConfigurationNames' => identity).body['DescribeLaunchConfigurationsResult']['LaunchConfigurations'].first new(data) unless data.nil? end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/group.rb000066400000000000000000000145321437344660100227120ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Group < Fog::Model identity :id, :aliases => 'AutoScalingGroupName' attribute :arn, :aliases => 'AutoScalingGroupARN' attribute :availability_zones, :aliases => 'AvailabilityZones' attribute :created_at, :aliases => 'CreatedTime' attribute :default_cooldown, :aliases => 'DefaultCooldown' attribute :desired_capacity, :aliases => 'DesiredCapacity' attribute :enabled_metrics, :aliases => 'EnabledMetrics' attribute :health_check_grace_period, :aliases => 'HealthCheckGracePeriod' attribute :health_check_type, :aliases => 'HealthCheckType' attribute :instances, :aliases => 'Instances' attribute :launch_configuration_name, :aliases => 'LaunchConfigurationName' attribute :load_balancer_names, :aliases => 'LoadBalancerNames' attribute :max_size, :aliases => 'MaxSize' attribute :min_size, :aliases => 'MinSize' attribute :placement_group, :aliases => 'PlacementGroup' attribute :suspended_processes, :aliases => 'SuspendedProcesses' attribute :tags, :aliases => 'Tags' attribute :termination_policies, :aliases => 'TerminationPolicies' attribute :vpc_zone_identifier, :aliases => 'VPCZoneIdentifier' attribute :target_group_arns, :aliases => 'TargetGroupARNs' def initialize(attributes={}) self.instances = [] self.default_cooldown = 300 self.desired_capacity = 0 self.enabled_metrics = [] self.health_check_grace_period = 0 self.health_check_type = 'EC2' self.load_balancer_names = [] self.max_size = 0 self.min_size = 0 self.suspended_processes = [] self.tags = {} self.termination_policies = ['Default'] self.target_group_arns = [] super end def activities requires :id activities = Fog::AWS::AutoScaling::Activities.new(:service => service, :filters => {'AutoScalingGroupName' => id}) end def attach_load_balancers(*load_balancer_names) requires :id service.attach_load_balancers(id, 'LoadBalancerNames' => load_balancer_names) reload end def configuration requires :launch_configuration_name service.configurations.get(launch_configuration_name) end def detach_load_balancers(*load_balancer_names) requires :id service.detach_load_balancers(id, 'LoadBalancerNames' => 
load_balancer_names) reload end def detach_instances(*instance_ids) requires :id service.detach_instances(id, 'InstanceIds' => instance_ids) reload end def attach_instances(*instance_ids) requires :id service.attach_instances(id, 'InstanceIds' => instance_ids) reload end def attach_load_balancer_target_groups(*target_group_arns) requires :id service.attach_load_balancer_target_groups(id, 'TargetGroupARNs' => target_group_arns) reload end def detach_load_balancer_target_groups(*target_group_arns) requires :id service.detach_load_balancer_target_groups(id, 'TargetGroupARNs' => target_group_arns) reload end def disable_metrics_collection(metrics = {}) requires :id service.disable_metrics_collection(id, 'Metrics' => metrics) reload end def enable_metrics_collection(granularity = '1Minute', metrics = {}) requires :id service.enable_metrics_collection(id, granularity, 'Metrics' => metrics) reload end def set_instance_protection(instance_ids, protected_from_scale_in) requires :id service.set_instance_protection( id, 'InstanceIds' => instance_ids, 'ProtectedFromScaleIn' => protected_from_scale_in ) reload end def instances Fog::AWS::AutoScaling::Instances.new(:service => service).load(attributes[:instances]) end def instances_in_service attributes[:instances].select {|hash| hash['LifecycleState'] == 'InService'}.map {|hash| hash['InstanceId']} end def instances_out_service attributes[:instances].select {|hash| hash['LifecycleState'] == 'OutOfService'}.map {|hash| hash['InstanceId']} end def resume_processes(processes = []) requires :id service.resume_processes(id, 'ScalingProcesses' => processes) reload end def suspend_processes(processes = []) requires :id service.suspend_processes(id, 'ScalingProcesses' => processes) reload end def ready? # Is this useful? #instances_in_service.length == desired_capacity #instances_in_service.length >= min_size true end def save requires :id requires :availability_zones requires :launch_configuration_name requires :max_size requires :min_size service.create_auto_scaling_group(id, availability_zones, launch_configuration_name, max_size, min_size, filtered_options(:create_auto_scaling_group)) reload end #def reload # super # self #end def destroy(options = { :force => false }) requires :id opts = {} opts.merge!({'ForceDelete' => true}) if options[:force] service.delete_auto_scaling_group(id, opts) end def update requires :id service.update_auto_scaling_group(id, filtered_options(:update_auto_scaling_group) ) reload end def filtered_options(method) Hash[options.select{|k,_| ExpectedOptions[method].include?(k)}] end def options ret = Hash[self.class.aliases.map { |key, value| [key, send(value)] }] ret.delete_if { |key, value| value.nil? } ret end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/groups.rb000066400000000000000000000020461437344660100230720ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/group' module Fog module AWS class AutoScaling class Groups < Fog::Collection model Fog::AWS::AutoScaling::Group attribute :filters # Creates a new auto scaling group. def initialize(attributes={}) self.filters = attributes super end def all(filters_arg = filters) data = [] next_token = nil filters = filters_arg loop do result = service.describe_auto_scaling_groups(filters.merge('NextToken' => next_token)).body['DescribeAutoScalingGroupsResult'] data += result['AutoScalingGroups'] next_token = result['NextToken'] break if next_token.nil? 
end load(data) end def get(identity) data = service.describe_auto_scaling_groups('AutoScalingGroupNames' => identity).body['DescribeAutoScalingGroupsResult']['AutoScalingGroups'].first new(data) unless data.nil? end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/instance.rb000066400000000000000000000030011437344660100233470ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Instance < Fog::Model identity :id, :aliases => 'InstanceId' attribute :auto_scaling_group_name, :aliases => 'AutoScalingGroupName' attribute :availability_zone, :aliases => 'AvailabilityZone' attribute :health_status, :aliases => 'HealthStatus' attribute :launch_configuration_name, :aliases => 'LaunchConfigurationName' attribute :life_cycle_state, :aliases => 'LifecycleState' def initialize(attributes={}) super end def group service.groups.get(attributes['AutoScalingGroupName']) end def configuration service.configurations.get(attributes['LaunchConfigurationName']) end def set_health(health_status, options) requires :id service.set_instance_health(health_status, id, options) reload end def terminate(should_decrement_desired_capacity) requires :id service.terminate_instance_in_auto_scaling_group(id, should_decrement_desired_capacity) reload end def healthy? health_status == 'Healthy' end def ready? life_cycle_state == 'InService' end def reload super self end #def destroy # requires :id # service.delete_auto_scaling_group(id) #end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/instances.rb000066400000000000000000000015021437344660100235360ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/instance' module Fog module AWS class AutoScaling class Instances < Fog::Collection model Fog::AWS::AutoScaling::Instance def all data = [] next_token = nil loop do result = service.describe_auto_scaling_instances('NextToken' => next_token).body['DescribeAutoScalingInstancesResult'] data += result['AutoScalingInstances'] next_token = result['NextToken'] break if next_token.nil? end load(data) end def get(identity) data = service.describe_auto_scaling_instances('InstanceIds' => identity).body['DescribeAutoScalingInstancesResult']['AutoScalingInstances'].first new(data) unless data.nil? end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/policies.rb000066400000000000000000000021111437344660100233530ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/policy' module Fog module AWS class AutoScaling class Policies < Fog::Collection model Fog::AWS::AutoScaling::Policy attribute :filters # Creates a new scaling policy. def initialize(attributes={}) self.filters = attributes super(attributes) end def all(filters_arg = filters) data = [] next_token = nil self.filters = filters_arg loop do result = service.describe_policies(filters.merge('NextToken' => next_token)).body['DescribePoliciesResult'] data += result['ScalingPolicies'] next_token = result['NextToken'] break if next_token.nil? end load(data) end def get(identity, auto_scaling_group = nil) data = service.describe_policies('PolicyNames' => identity, 'AutoScalingGroupName' => auto_scaling_group).body['DescribePoliciesResult']['ScalingPolicies'].first new(data) unless data.nil? 
end end end end end fog-aws-3.18.0/lib/fog/aws/models/auto_scaling/policy.rb000066400000000000000000000030061437344660100230470ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Policy < Fog::Model identity :id, :aliases => 'PolicyName' attribute :arn, :aliases => 'PolicyARN' attribute :adjustment_type, :aliases => 'AdjustmentType' attribute :alarms, :aliases => 'Alarms' attribute :auto_scaling_group_name, :aliases => 'AutoScalingGroupName' attribute :cooldown, :aliases => 'Cooldown' attribute :min_adjustment_step, :aliases => 'MinAdjustmentStep' attribute :scaling_adjustment, :aliases => 'ScalingAdjustment' def initialize(attributes) attributes['AdjustmentType'] ||= 'ChangeInCapacity' attributes['ScalingAdjustment'] ||= 1 super end # TODO: implement #alarms # TODO: implement #auto_scaling_group def save requires :id requires :adjustment_type requires :auto_scaling_group_name requires :scaling_adjustment options = Hash[self.class.aliases.map { |key, value| [key, send(value)] }] options.delete_if { |key, value| value.nil? } service.put_scaling_policy(adjustment_type, auto_scaling_group_name, id, scaling_adjustment, options) reload end def destroy requires :id requires :auto_scaling_group_name service.delete_policy(auto_scaling_group_name, id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/000077500000000000000000000000001437344660100205205ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/beanstalk/application.rb000066400000000000000000000027161437344660100233560ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Application < Fog::Model identity :name, :aliases => 'ApplicationName' attribute :template_names, :aliases => 'ConfigurationTemplates' attribute :created_at, :aliases => 'DateCreated' attribute :updated_at, :aliases => 'DateUpdated' attribute :description, :aliases => 'Description' attribute :version_names, :aliases => 'Versions' def initialize(attributes={}) super end def environments requires :name service.environments.all({'ApplicationName' => name}) end def events requires :name service.events.all({'ApplicationName' => name}) end def templates requires :name service.templates.all({'ApplicationName' => name}) end def versions requires :name service.versions.all({'ApplicationName' => name}) end def destroy requires :name service.delete_application(name) true end def save requires :name options = { 'ApplicationName' => name } options['Description'] = description unless description.nil? 
data = service.create_application(options).body['CreateApplicationResult']['Application'] merge_attributes(data) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/applications.rb000066400000000000000000000012261437344660100235340ustar00rootroot00000000000000require 'fog/aws/models/beanstalk/application' module Fog module AWS class ElasticBeanstalk class Applications < Fog::Collection model Fog::AWS::ElasticBeanstalk::Application def all(application_names=[]) data = service.describe_applications(application_names).body['DescribeApplicationsResult']['Applications'] load(data) # data is an array of attribute hashes end def get(application_name) if data = service.describe_applications([application_name]).body['DescribeApplicationsResult']['Applications'].first new(data) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/environment.rb000066400000000000000000000115071437344660100234150ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Environment < Fog::Model identity :name, :aliases => 'EnvironmentName' attribute :id, :aliases => 'EnvironmentId' attribute :application_name, :aliases => 'ApplicationName' attribute :cname, :aliases => 'CNAME' attribute :cname_prefix, :aliases => 'CNAMEPrefix' attribute :created_at, :aliases => 'DateCreated' attribute :updated_at, :aliases => 'DateUpdated' attribute :updated_at, :aliases => 'DateUpdated' attribute :description, :aliases => 'Description' attribute :endpoint_url, :aliases => 'EndpointURL' attribute :health, :aliases => 'Health' attribute :resources, :aliases => 'Resources' attribute :solution_stack_name, :aliases => 'SolutionStackName' attribute :status, :aliases => 'Status' attribute :template_name, :aliases => 'TemplateName' attribute :version_label, :aliases => 'VersionLabel' attribute :option_settings, :aliases => 'OptionSettings' attribute :options_to_remove, :aliases => 'OptionsToRemove' def healthy? health == 'Green' end def ready? status == 'Ready' end def terminated? status == 'Terminated' end # Returns the current live resources for this environment def live_resources requires :id data = service.describe_environment_resources({'EnvironmentId' => id}).body['DescribeEnvironmentResourcesResult']['EnvironmentResources'] data.delete('EnvironmentName') # Delete the environment name from the result, only return actual resources data end # Returns the load balancer object associated with the environment. def load_balancer(elb_connection = Fog::AWS[:elb]) if resources.nil? 
elb_connection.load_balancers.get(live_resources['LoadBalancers'].first['Name']) else elb_connection.load_balancers.get(resources['LoadBalancer']['LoadBalancerName']) end end # Return events related to this version def events requires :id service.events.all({'EnvironmentId' => id}) end # Restarts the app servers in this environment def restart_app_server requires :id service.restart_app_server({'EnvironmentId' => id}) reload end # Rebuilds the environment def rebuild requires :id service.rebuild_environment({'EnvironmentId' => id}) reload end def swap_cnames(source) requires :name service.swap_environment_cnames({ 'SourceEnvironmentName' => source.name, 'DestinationEnvironmentName' => name }) source.reload reload end # Return the version object for this environment def version requires :application_name, :version_label service.versions.get(application_name, version_label) end # Update the running version of this environment def version=(new_version) requires :id if new_version.is_a?(String) new_version_label = new_version elsif new_version.is_a?(Fog::AWS::ElasticBeanstalk::Version) new_version_label = new_version.label else raise "Unknown type for new_version, must be either String or Fog::AWS::ElasticBeanstalk::Version" end if new_version.nil? raise "Version label not specified." end data = service.update_environment({ 'EnvironmentId' => id, 'VersionLabel' => new_version_label }).body['UpdateEnvironmentResult'] merge_attributes(data) end def destroy requires :id service.terminate_environment({'EnvironmentId' => id}) true end def save requires :name, :application_name requires_one :template_name, :solution_stack_name options = { 'ApplicationName' => application_name, 'CNAMEPrefix' => cname_prefix, 'Description' => description, 'EnvironmentName' => name, 'OptionSettings' => option_settings, 'OptionsToRemove' => options_to_remove, 'SolutionStackName' => solution_stack_name, 'TemplateName' => template_name, 'VersionLabel' => version_label } options.delete_if {|key, value| value.nil?} data = service.create_environment(options).body['CreateEnvironmentResult'] merge_attributes(data) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/environments.rb000066400000000000000000000013571437344660100236020ustar00rootroot00000000000000require 'fog/aws/models/beanstalk/environment' module Fog module AWS class ElasticBeanstalk class Environments < Fog::Collection model Fog::AWS::ElasticBeanstalk::Environment def all(options={}) data = service.describe_environments(options).body['DescribeEnvironmentsResult']['Environments'] load(data) # data is an array of attribute hashes end # Gets an environment given a name. 
# def get(environment_name) options = { 'EnvironmentNames' => [environment_name] } if data = service.describe_environments(options).body['DescribeEnvironmentsResult']['Environments'].first new(data) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/event.rb000066400000000000000000000010741437344660100221700ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Event < Fog::Model attribute :application_name, :aliases => 'ApplicationName' attribute :environment_name, :aliases => 'EnvironmentName' attribute :date, :aliases => 'EventDate' attribute :message, :aliases => 'Message' attribute :request_id, :aliases => 'RequestId' attribute :severity, :aliases => 'Severity' attribute :template_name, :aliases => 'TemplateName' attribute :version_label, :aliases => 'VersionLabel' end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/events.rb000066400000000000000000000006141437344660100223520ustar00rootroot00000000000000require 'fog/aws/models/beanstalk/event' module Fog module AWS class ElasticBeanstalk class Events < Fog::Collection model Fog::AWS::ElasticBeanstalk::Event def all(options={}) data = service.describe_events(options).body['DescribeEventsResult']['Events'] load(data) # data is an array of attribute hashes end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/template.rb000066400000000000000000000051041437344660100226600ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Template < Fog::Model attribute :name, :aliases => 'TemplateName' attribute :application_name, :aliases => 'ApplicationName' attribute :created_at, :aliases => 'DateCreated' attribute :updated_at, :aliases => 'DateUpdated' attribute :deployment_status, :aliases => 'DeploymentStatus' attribute :description, :aliases => 'Description' attribute :environment_id attribute :environment_name, :aliases => 'EnvironmentName' attribute :solution_stack_name, :aliases => 'SolutionStackName' attribute :source_configuration attribute :option_settings, :aliases => 'OptionSettings' def initialize(attributes={}) super end # Returns an array of options that may be set on this template def options requires :name, :application_name data = service.describe_configuration_options({ 'ApplicationName' => application_name, 'TemplateName' => name }) data.body['DescribeConfigurationOptionsResult']['Options'] end def destroy requires :name, :application_name service.delete_configuration_template(application_name, name) true end def save requires :name, :application_name options = { 'ApplicationName' => application_name, 'Description' => description, 'EnvironmentId' => environment_id, 'OptionSettings' => option_settings, 'SolutionStackName' => solution_stack_name, 'SourceConfiguration' => source_configuration, 'TemplateName' => name } options.delete_if {|key, value| value.nil?} data = service.create_configuration_template(options).body['CreateConfigurationTemplateResult'] merge_attributes(data) true end def modify(new_attributes) requires :name, :application_name options = { 'ApplicationName' => application_name, 'Description' => new_attributes[:description], 'OptionSettings' => new_attributes[:option_settings], 'TemplateName' => name } options.delete_if {|key, value| value.nil?} data = service.update_configuration_template(options).body['UpdateConfigurationTemplateResult'] merge_attributes(data) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/templates.rb000066400000000000000000000046201437344660100230450ustar00rootroot00000000000000require 
'fog/aws/models/beanstalk/template' module Fog module AWS class ElasticBeanstalk class Templates < Fog::Collection model Fog::AWS::ElasticBeanstalk::Template # Describes all configuration templates, may optionally pass an ApplicationName filter # # Note: This is currently an expensive operation requiring multiple API calls due to a lack of # a describe configuration templates call in the AWS API. def all(options={}) application_filter = [] if options.key?('ApplicationName') application_filter << options['ApplicationName'] end # Initialize with empty array data = [] applications = service.describe_applications(application_filter).body['DescribeApplicationsResult']['Applications'] applications.each { |application| application['ConfigurationTemplates'].each { |template_name| begin options = { 'ApplicationName' => application['ApplicationName'], 'TemplateName' => template_name } settings = service.describe_configuration_settings(options).body['DescribeConfigurationSettingsResult']['ConfigurationSettings'] if settings.length == 1 # Add to data data << settings.first end rescue Fog::AWS::ElasticBeanstalk::InvalidParameterError # Ignore end } } load(data) # data is an array of attribute hashes end def get(application_name, template_name) options = { 'ApplicationName' => application_name, 'TemplateName' => template_name } result = nil # There is no describe call for templates, so we must use describe_configuration_settings. Unfortunately, # it throws an exception if template name doesn't exist, which is inconsistent, catch and return nil begin data = service.describe_configuration_settings(options).body['DescribeConfigurationSettingsResult']['ConfigurationSettings'] if data.length == 1 result = new(data.first) end rescue Fog::AWS::ElasticBeanstalk::InvalidParameterError end result end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/version.rb000066400000000000000000000051111437344660100225300ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Version < Fog::Model attribute :label, :aliases => 'VersionLabel' attribute :application_name, :aliases => 'ApplicationName' attribute :created_at, :aliases => 'DateCreated' attribute :updated_at, :aliases => 'DateUpdated' attribute :description, :aliases => 'Description' attribute :source_bundle, :aliases => 'SourceBundle' attribute :auto_create_application # FIXME - should be write only def initialize(attributes={}) super end # Return events related to this version def events requires :label, :application_name service.events.all({ 'ApplicationName' => application_name, 'VersionLabel' => label }) end # Returns environments running this version def environments requires :label, :application_name service.environments.all({ 'ApplicationName' => application_name, 'VersionLabel' => label }) end def destroy(delete_source_bundle = nil) requires :label, :application_name service.delete_application_version(application_name, label, delete_source_bundle) true end def save requires :label, :application_name options = { 'ApplicationName' => application_name, 'AutoCreateApplication' => auto_create_application, 'Description' => description, 'SourceBundle' => source_bundle, 'VersionLabel' => label } options.delete_if {|key, value| value.nil?} data = service.create_application_version(options).body['CreateApplicationVersionResult']['ApplicationVersion'] merge_attributes(data) true end # Updates the version label with the current property values. 
Currently only updates description def update requires :label, :application_name options = { 'ApplicationName' => application_name, 'Description' => description, 'VersionLabel' => label } options.delete_if {|key, value| value.nil?} data = service.update_application_version(options).body['UpdateApplicationVersionResult']['ApplicationVersion'] merge_attributes(data) end end end end end fog-aws-3.18.0/lib/fog/aws/models/beanstalk/versions.rb000066400000000000000000000017361437344660100227240ustar00rootroot00000000000000require 'fog/aws/models/beanstalk/version' module Fog module AWS class ElasticBeanstalk class Versions < Fog::Collection model Fog::AWS::ElasticBeanstalk::Version def all(options={}) data = service.describe_application_versions(options).body['DescribeApplicationVersionsResult']['ApplicationVersions'] load(data) # data is an array of attribute hashes end def get(application_name, version_label) if data = service.describe_application_versions({ 'ApplicationName' => application_name, 'VersionLabels' => [version_label] }).body['DescribeApplicationVersionsResult']['ApplicationVersions'] if data.length == 1 new(data.first) end end end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/000077500000000000000000000000001437344660100173205ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/cdn/distribution.rb000066400000000000000000000062051437344660100223670ustar00rootroot00000000000000require 'fog/aws/models/cdn/invalidations' require 'fog/aws/models/cdn/distribution_helper' module Fog module AWS class CDN class Distribution < Fog::Model include Fog::AWS::CDN::DistributionHelper identity :id, :aliases => 'Id' attribute :caller_reference, :aliases => 'CallerReference' attribute :last_modified_time, :aliases => 'LastModifiedTime' attribute :status, :aliases => 'Status' attribute :s3_origin, :aliases => 'S3Origin' attribute :custom_origin, :aliases => 'CustomOrigin' attribute :cname, :aliases => 'CNAME' attribute :comment, :aliases => 'Comment' attribute :enabled, :aliases => 'Enabled' attribute :in_progress_invalidation_batches, :aliases => 'InProgressInvalidationBatches' attribute :logging, :aliases => 'Logging' attribute :trusted_signers, :aliases => 'TrustedSigners' attribute :default_root_object,:aliases => 'DefaultRootObject' attribute :domain, :aliases => 'DomainName' attribute :etag, :aliases => ['Etag', 'ETag'] # items part of DistributionConfig CONFIG = [ :caller_reference, :origin, :cname, :comment, :enabled, :logging, :trusted_signers, :default_root_object ] def initialize(new_attributes = {}) super(distribution_config_to_attributes(new_attributes)) end def invalidations @invalidations ||= begin Fog::AWS::CDN::Invalidations.new( :distribution => self, :service => service ) end end def save requires_one :s3_origin, :custom_origin options = attributes_to_options response = identity ? 
put_distribution_config(identity, etag, options) : post_distribution(options) etag = response.headers['ETag'] merge_attributes(response.body) true end private def delete_distribution(identity, etag) service.delete_distribution(identity, etag) end def put_distribution_config(identity, etag, options) service.put_distribution_config(identity, etag, options) end def post_distribution(options = {}) service.post_distribution(options) end def attributes_to_options options = { 'CallerReference' => caller_reference, 'S3Origin' => s3_origin, 'CustomOrigin' => custom_origin, 'CNAME' => cname, 'Comment' => comment, 'Enabled' => enabled, 'Logging' => logging, 'TrustedSigners' => trusted_signers, 'DefaultRootObject' => default_root_object } options.reject! { |k,v| v.nil? } options.reject! { |k,v| v.respond_to?(:empty?) && v.empty? } options end def distribution_config_to_attributes(new_attributes = {}) new_attributes.merge(new_attributes.delete('DistributionConfig') || {}) end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/distribution_helper.rb000066400000000000000000000026671437344660100237360ustar00rootroot00000000000000module Fog module AWS class CDN module DistributionHelper def destroy requires :identity, :etag, :caller_reference raise "Distribution must be disabled to be deleted" unless disabled? delete_distribution(identity, etag) true end def enabled? requires :identity !!enabled and ready? end def disabled? requires :identity not enabled? and ready? end def custom_origin? requires :identity not custom_origin.nil? end def ready? requires :identity status == 'Deployed' end def enable requires :identity reload if etag.nil? or caller_reference.nil? unless enabled? self.enabled = true response = put_distribution_config(identity, etag, attributes_to_options) etag = response.headers['ETag'] merge_attributes(response.body) end true end def disable requires :identity reload if etag.nil? or caller_reference.nil? if enabled? 
self.enabled = false response = put_distribution_config(identity, etag, attributes_to_options) etag = response.headers['ETag'] merge_attributes(response.body) end true end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/distributions.rb000066400000000000000000000014031437344660100225450ustar00rootroot00000000000000require 'fog/aws/models/cdn/distribution' require 'fog/aws/models/cdn/distributions_helper' module Fog module AWS class CDN class Distributions < Fog::Collection include Fog::AWS::CDN::DistributionsHelper model Fog::AWS::CDN::Distribution attribute :marker, :aliases => 'Marker' attribute :max_items, :aliases => 'MaxItems' attribute :is_truncated, :aliases => 'IsTruncated' def get_distribution(dist_id) service.get_distribution(dist_id) end def list_distributions(options = {}) service.get_distribution_list(options) end alias_method :each_distribution_this_page, :each alias_method :each, :each_distribution end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/distributions_helper.rb000066400000000000000000000023661437344660100241150ustar00rootroot00000000000000module Fog module AWS class CDN module DistributionsHelper def all(options = {}) merge_attributes(options) data = list_distributions(options).body merge_attributes('IsTruncated' => data['IsTruncated'], 'Marker' => data['Marker'], 'MaxItems' => data['MaxItems']) if summary = data['DistributionSummary'] load(summary.map { |a| { 'DistributionConfig' => a } }) else load((data['StreamingDistributionSummary'] || {}).map { |a| { 'StreamingDistributionConfig' => a }}) end end def get(dist_id) response = get_distribution(dist_id) data = response.body.merge({'ETag' => response.headers['ETag']}) new(data) rescue Excon::Errors::NotFound nil end def each_distribution if !block_given? self else subset = dup.all subset.each_distribution_this_page {|f| yield f} while subset.is_truncated subset = subset.all('Marker' => subset.marker, 'MaxItems' => 1000) subset.each_distribution_this_page {|f| yield f} end self end end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/invalidation.rb000066400000000000000000000033241437344660100223300ustar00rootroot00000000000000module Fog module AWS class CDN class Invalidation < Fog::Model identity :id, :aliases => 'Id' attribute :status, :aliases => 'Status' attribute :create_time, :aliases => 'CreateTime' attribute :caller_reference, :aliases => 'CallerReference' attribute :paths, :aliases => 'Paths' def initialize(new_attributes={}) new_attributes[:caller_reference] ||= Time.now.utc.to_i.to_s super(invalidation_to_attributes(new_attributes)) end def distribution @distribution end def ready? requires :id, :status status == 'Completed' end def save requires :paths, :caller_reference raise "Submitted invalidation cannot be submitted again" if persisted? 
response = service.post_invalidation(distribution.identity, paths, caller_reference) merge_attributes(invalidation_to_attributes(response.body)) true end def destroy # invalidations can't be removed, but tests are requiring they do :) true end private def distribution=(dist) @distribution = dist end def invalidation_to_attributes(new_attributes={}) invalidation_batch = new_attributes.delete('InvalidationBatch') || {} if invalidation_batch['Path'] new_attributes[:paths] = invalidation_batch['Path'] end if invalidation_batch['CallerReference'] new_attributes[:caller_reference] = invalidation_batch['CallerReference'] end new_attributes end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/invalidations.rb000066400000000000000000000025761437344660100225230ustar00rootroot00000000000000require 'fog/aws/models/cdn/invalidation' module Fog module AWS class CDN class Invalidations < Fog::Collection attribute :is_truncated, :aliases => ['IsTruncated'] attribute :max_items, :aliases => ['MaxItems'] attribute :next_marker, :aliases => ['NextMarker'] attribute :marker, :aliases => ['Marker'] attribute :distribution model Fog::AWS::CDN::Invalidation def all(options = {}) requires :distribution options[:max_items] ||= max_items options.delete_if {|key, value| value.nil?} data = service.get_invalidation_list(distribution.identity, options).body merge_attributes(data.reject {|key, value| !['IsTruncated', 'MaxItems', 'NextMarker', 'Marker'].include?(key)}) load(data['InvalidationSummary']) end def get(invalidation_id) requires :distribution data = service.get_invalidation(distribution.identity, invalidation_id).body if data invalidation = new(data) else nil end rescue Excon::Errors::NotFound nil end def new(attributes = {}) requires :distribution super({ :distribution => distribution }.merge!(attributes)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/streaming_distribution.rb000066400000000000000000000047251437344660100244450ustar00rootroot00000000000000require 'fog/aws/models/cdn/invalidations' require 'fog/aws/models/cdn/distribution_helper' module Fog module AWS class CDN class StreamingDistribution < Fog::Model include Fog::AWS::CDN::DistributionHelper identity :id, :aliases => 'Id' attribute :caller_reference, :aliases => 'CallerReference' attribute :last_modified_time, :aliases => 'LastModifiedTime' attribute :status, :aliases => 'Status' attribute :s3_origin, :aliases => 'S3Origin' attribute :cname, :aliases => 'CNAME' attribute :comment, :aliases => 'Comment' attribute :enabled, :aliases => 'Enabled' attribute :logging, :aliases => 'Logging' attribute :domain, :aliases => 'DomainName' attribute :etag, :aliases => ['Etag', 'ETag'] # items part of DistributionConfig CONFIG = [ :caller_reference, :cname, :comment, :enabled, :logging ] def initialize(new_attributes = {}) super(distribution_config_to_attributes(new_attributes)) end def save requires_one :s3_origin options = attributes_to_options response = identity ? 
put_distribution_config(identity, etag, options) : post_distribution(options) etag = response.headers['ETag'] merge_attributes(response.body) true end private def delete_distribution(identity, etag) service.delete_streaming_distribution(identity, etag) end def put_distribution_config(identity, etag, options) service.put_streaming_distribution_config(identity, etag, options) end def post_distribution(options = {}) service.post_streaming_distribution(options) end def attributes_to_options options = { 'CallerReference' => caller_reference, 'S3Origin' => s3_origin, 'CNAME' => cname, 'Comment' => comment, 'Enabled' => enabled, 'Logging' => logging, } options.reject! { |k,v| v.nil? } options.reject! { |k,v| v.respond_to?(:empty?) && v.empty? } options end def distribution_config_to_attributes(new_attributes = {}) new_attributes.merge(new_attributes.delete('StreamingDistributionConfig') || {}) end end end end end fog-aws-3.18.0/lib/fog/aws/models/cdn/streaming_distributions.rb000066400000000000000000000014631437344660100246240ustar00rootroot00000000000000require 'fog/aws/models/cdn/streaming_distribution' require 'fog/aws/models/cdn/distributions_helper' module Fog module AWS class CDN class StreamingDistributions < Fog::Collection include Fog::AWS::CDN::DistributionsHelper model Fog::AWS::CDN::StreamingDistribution attribute :marker, :aliases => 'Marker' attribute :max_items, :aliases => 'MaxItems' attribute :is_truncated, :aliases => 'IsTruncated' def get_distribution(dist_id) service.get_streaming_distribution(dist_id) end def list_distributions(options = {}) service.get_streaming_distribution_list(options) end alias_method :each_distribution_this_page, :each alias_method :each, :each_distribution end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/000077500000000000000000000000001437344660100210505ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarm.rb000066400000000000000000000041271437344660100224750ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Alarm < Fog::Model identity :id, :aliases => 'AlarmName' attribute :actions_enabled, :aliases => 'ActionsEnabled' attribute :alarm_actions, :aliases => 'AlarmActions' attribute :arn, :aliases => 'AlarmArn' attribute :alarm_configuration_updated_timestamp, :aliases => 'AlarmConfigurationUpdatedTimestamp' attribute :alarm_description, :aliases => 'AlarmDescription' attribute :comparison_operator, :aliases => 'ComparisonOperator' attribute :dimensions, :aliases => 'Dimensions' attribute :evaluation_periods, :aliases => 'EvaluationPeriods' attribute :insufficient_data_actions, :aliases => 'InsufficientDataActions' attribute :metric_name, :aliases => 'MetricName' attribute :namespace, :aliases => 'Namespace' attribute :ok_actions, :aliases => 'OKActions' attribute :period, :aliases => 'Period' attribute :state_reason, :aliases => 'StateReason' attribute :state_reason_data, :aliases => 'StateReasonData' attribute :state_updated_timestamp, :aliases => 'StateUpdatedTimestamp' attribute :state_value, :aliases => 'StateValue' attribute :statistic, :aliases => 'Statistic' attribute :threshold, :aliases => 'Threshold' attribute :unit, :aliases => 'Unit' def initialize(attributes) self.namespace ||= "AWS/EC2" self.evaluation_periods ||= 1 super end def save requires :id requires :comparison_operator requires :metric_name requires :period requires :statistic requires :threshold requires :namespace requires :evaluation_periods options = Hash[self.class.aliases.map { |key, value| [key, send(value)] }] 
options.delete_if { |key, value| value.nil? } service.put_metric_alarm(options) reload end def destroy requires :id service.delete_alarms(id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarm_data.rb000066400000000000000000000022751437344660100234700ustar00rootroot00000000000000require 'fog/aws/models/cloud_watch/alarm_datum' module Fog module AWS class CloudWatch class AlarmData < Fog::Collection model Fog::AWS::CloudWatch::AlarmDatum def all(conditions={}) data = service.describe_alarms(conditions).body['DescribeAlarmsResult']['MetricAlarms'] load(data) # data is an array of attribute hashes end def get(namespace, metric_name, dimensions=nil, period=nil, statistic=nil, unit=nil) list_opts = {'Namespace' => namespace, 'MetricName' => metric_name} if dimensions dimensions_array = dimensions.map do |name, value| {'Name' => name, 'Value' => value} end list_opts.merge!('Dimensions' => dimensions_array) end if period list_opts.merge!('Period' => period) end if statistic list_opts.merge!('Statistic' => statistic) end if unit list_opts.merge!('Unit' => unit) end data = service.describe_alarms_for_metric(list_opts).body['DescribeAlarmsForMetricResult']['MetricAlarms'] load(data) end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarm_datum.rb000066400000000000000000000054341437344660100236710ustar00rootroot00000000000000module Fog module AWS class CloudWatch class AlarmDatum < Fog::Model attribute :alarm_name, :aliases => 'AlarmName' attribute :metric_name, :aliases => 'MetricName' attribute :namespace, :aliases => 'Namespace' attribute :dimensions, :aliases => 'Dimensions' attribute :alarm_description, :aliases => 'AlarmDescription' attribute :alarm_arn, :aliases => 'AlarmArn' attribute :state_value, :aliases => 'StateValue' attribute :statistic, :aliases => 'Statistic' attribute :comparison_operator, :aliases => 'ComparisonOperator' attribute :state_reason, :aliases => 'StateReason' attribute :action_enabled, :aliases => 'ActionsEnabled' attribute :period, :aliases => 'Period' attribute :evaluation_periods, :aliases => 'EvaluationPeriods' attribute :threshold, :aliases => 'Threshold' attribute :alarm_actions, :aliases => 'AlarmActions' attribute :ok_actions, :aliases => 'OKActions' attribute :insufficient_actions, :aliases => 'InsufficientDataActions' attribute :unit, :aliases => 'Unit' attribute :state_updated_timestamp, :aliases => 'StateUpdatedTimestamp' attribute :alarm_configuration_updated_timestamp, :aliases => 'AlarmConfigurationUpdatedTimestamp' def save requires :alarm_name requires :comparison_operator requires :evaluation_periods requires :metric_name requires :namespace requires :period requires :statistic requires :threshold alarm_definition = { 'AlarmName' => alarm_name, 'ComparisonOperator' => comparison_operator, 'EvaluationPeriods' => evaluation_periods, 'MetricName' => metric_name, 'Namespace' => namespace, 'Period' => period, 'Statistic' => statistic, 'Threshold' => threshold } alarm_definition.merge!('ActionsEnabled' => action_enabled) if action_enabled alarm_definition.merge!('AlarmActions' => alarm_actions) if alarm_actions alarm_definition.merge!('AlarmDescription' => alarm_description) if alarm_description #dimension is an array of Name/Value pairs, ex. 
[{'Name'=>'host', 'Value'=>'localhost'},{'Name'=>'version', 'Value'=>'0.11.0'}] alarm_definition.merge!('Dimensions' => dimensions) if dimensions alarm_definition.merge!('InsufficientDataActions' => insufficient_actions) if insufficient_actions alarm_definition.merge!('OKActions' => ok_actions) if ok_actions alarm_definition.merge!('Unit' => unit) if unit service.put_metric_alarm(alarm_definition) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarm_histories.rb000066400000000000000000000006671437344660100245730ustar00rootroot00000000000000require 'fog/aws/models/cloud_watch/alarm_history' module Fog module AWS class CloudWatch class AlarmHistories < Fog::Collection model Fog::AWS::CloudWatch::AlarmHistory def all(conditions={}) data = service.describe_alarm_history(conditions).body['DescribeAlarmHistoryResult']['AlarmHistoryItems'] load(data) # data is an array of attribute hashes end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarm_history.rb000066400000000000000000000006151437344660100242540ustar00rootroot00000000000000module Fog module AWS class CloudWatch class AlarmHistory < Fog::Model attribute :alarm_name, :aliases => 'AlarmName' attribute :end_date, :aliases => 'EndDate' attribute :history_item_type, :aliases => 'HistoryItemType' attribute :max_records, :aliases => 'MaxRecords' attribute :start_date, :aliases => 'StartDate' end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/alarms.rb000066400000000000000000000021621437344660100226550ustar00rootroot00000000000000require 'fog/aws/models/cloud_watch/alarm' module Fog module AWS class CloudWatch class Alarms < Fog::Collection model Fog::AWS::CloudWatch::Alarm def all data = [] next_token = nil loop do body = service.describe_alarms('NextToken' => next_token).body data += body['DescribeAlarmsResult']['MetricAlarms'] next_token = body['ResponseMetadata']['NextToken'] break if next_token.nil? end load(data) end def get(identity) data = service.describe_alarms('AlarmNames' => identity).body['DescribeAlarmsResult']['MetricAlarms'].first new(data) unless data.nil? 
end #alarm_names is an array of alarm names def delete(alarm_names) service.delete_alarms(alarm_names) true end def disable(alarm_names) service.disable_alarm_actions(alarm_names) true end def enable(alarm_names) service.enable_alarm_actions(alarm_names) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/metric.rb000066400000000000000000000004111437344660100226540ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Metric < Fog::Model attribute :name, :aliases => 'MetricName' attribute :namespace, :aliases => 'Namespace' attribute :dimensions, :aliases => 'Dimensions' end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/metric_statistic.rb000066400000000000000000000025261437344660100247540ustar00rootroot00000000000000module Fog module AWS class CloudWatch class MetricStatistic < Fog::Model attribute :label, :aliases => 'Label' attribute :minimum, :aliases => 'Minimum' attribute :maximum, :aliases => 'Maximum' attribute :sum, :aliases => 'Sum' attribute :average, :aliases => 'Average' attribute :sample_count, :aliases => 'SampleCount' attribute :timestamp, :aliases => 'Timestamp' attribute :unit, :aliases => 'Unit' attribute :metric_name, :aliases => 'MetricName' attribute :namespace, :aliases => 'Namespace' attribute :dimensions, :aliases => 'Dimensions' attribute :value def save requires :metric_name requires :namespace requires :unit put_opts = {'MetricName' => metric_name, 'Unit' => unit} put_opts.merge!('Dimensions' => dimensions) if dimensions if value put_opts.merge!('Value' => value) else put_opts.merge!('StatisticValues' => { 'Minimum' => minimum, 'Maximum' => maximum, 'Sum' => sum, 'Average' => average, 'SampleCount' => sample_count }) end service.put_metric_data(namespace, [put_opts]) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/metric_statistics.rb000066400000000000000000000015131437344660100251320ustar00rootroot00000000000000require 'fog/aws/models/cloud_watch/metric_statistic' module Fog module AWS class CloudWatch class MetricStatistics < Fog::Collection model Fog::AWS::CloudWatch::MetricStatistic def all(conditions) metricName = conditions['MetricName'] namespace = conditions['Namespace'] dimensions = conditions['Dimensions'] get_metric_opts = {"StartTime" => (Time.now-3600).iso8601, "EndTime" => Time.now.iso8601, "Period" => 300}.merge(conditions) data = service.get_metric_statistics(get_metric_opts).body['GetMetricStatisticsResult']['Datapoints'] data.map! { |datum| datum.merge('MetricName' => metricName, 'Namespace' => namespace, 'Dimensions' => dimensions) } load(data) # data is an array of attribute hashes end end end end end fog-aws-3.18.0/lib/fog/aws/models/cloud_watch/metrics.rb000066400000000000000000000026601437344660100230470ustar00rootroot00000000000000require 'fog/aws/models/cloud_watch/metric' module Fog module AWS class CloudWatch class Metrics < Fog::Collection attribute :next_token, :aliases => 'NextToken' model Fog::AWS::CloudWatch::Metric def all(conditions={}) result = service.list_metrics(conditions).body['ListMetricsResult'] merge_attributes("NextToken" => result["NextToken"]) load(result['Metrics']) # an array of attribute hashes end alias_method :each_metric_this_page, :each def each if !block_given? 
self else subset = dup.all subset.each_metric_this_page {|m| yield m } while next_token = subset.next_token subset = subset.all("NextToken" => next_token) subset.each_metric_this_page {|m| yield m } end self end end def get(namespace, metric_name, dimensions=nil) list_opts = {'Namespace' => namespace, 'MetricName' => metric_name} if dimensions dimensions_array = dimensions.map do |name, value| {'Name' => name, 'Value' => value} end # list_opts.merge!('Dimensions' => dimensions_array) end if data = service.list_metrics(list_opts).body['ListMetricsResult']['Metrics'].first new(data) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/000077500000000000000000000000001437344660100202305ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/compute/address.rb000066400000000000000000000051201437344660100222000ustar00rootroot00000000000000module Fog module AWS class Compute class Address < Fog::Model identity :public_ip, :aliases => 'publicIp' attribute :private_ip_address, :aliases => 'privateIpAddress' attribute :allocation_id, :aliases => 'allocationId' attribute :association_id, :aliases => 'associationId' attribute :server_id, :aliases => 'instanceId' attribute :network_interface_id, :aliases => 'networkInterfaceId' attribute :network_interface_owner_id, :aliases => 'networkInterfaceOwnerId' attribute :tags, :aliases => 'tagSet' attribute :domain def initialize(attributes = {}) # assign server first to prevent race condition with persisted? self.server = attributes.delete(:server) super end def destroy requires :public_ip service.release_address(allocation_id || public_ip) true end def change_scope if self.domain == 'standard' service.move_address_to_vpc(self.identity) wait_for { self.domain == 'vpc' } else service.restore_address_to_classic(self.identity) wait_for { self.domain == 'standard' } end end def server=(new_server) if new_server associate(new_server) else disassociate end end def server service.servers.get(server_id) end def save raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if persisted? data = service.allocate_address(domain).body new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) if @server self.server = @server end true end private def associate(new_server) unless persisted? @server = new_server else @server = nil self.server_id = new_server.id service.associate_address(server_id, public_ip, network_interface_id, allocation_id) end end def disassociate @server = nil self.server_id = nil if persisted? if association_id service.disassociate_address(nil, association_id) else service.disassociate_address(public_ip) end end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/addresses.rb000066400000000000000000000050351437344660100225350ustar00rootroot00000000000000require 'fog/aws/models/compute/address' module Fog module AWS class Compute class Addresses < Fog::Collection attribute :filters attribute :server model Fog::AWS::Compute::Address # Used to create an IP address # # ==== Returns # #>> AWS.addresses.create # # # The IP address can be retrieved by running AWS.addresses.get("test"). See get method below. # def initialize(attributes) self.filters ||= {} super end # AWS.addresses.all # # ==== Returns # # Returns an array of all IP addresses # #>> AWS.addresses.all # , # ....... 
# # ] # > #>> def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.deprecation("all with #{filters_arg.class} param is deprecated, use all('public-ip' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'public-ip' => [*filters_arg]} end self.filters = filters_arg data = service.describe_addresses(filters).body load( data['addressesSet'].map do |address| address.reject {|key, value| value.nil? || value.empty? } end ) if server self.replace(self.select {|address| address.server_id == server.id}) end self end # Used to retrieve an IP address # # public_ip is required to get the associated IP information. # # You can run the following command to get the details: # AWS.addresses.get("76.7.46.54") def get(public_ip) if public_ip self.class.new(:service => service).all('public-ip' => public_ip).first end end def new(attributes = {}) if server super({ :server => server }.merge!(attributes)) else super(attributes) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/dhcp_option.rb000066400000000000000000000030751437344660100230700ustar00rootroot00000000000000module Fog module AWS class Compute class DhcpOption < Fog::Model identity :id, :aliases => 'dhcpOptionsId' attribute :dhcp_configuration_set, :aliases => 'dhcpConfigurationSet' attribute :tag_set, :aliases => 'tagSet' def initialize(attributes={}) super end # Associates an existing dhcp configration set with a VPC # # dhcp_option.attach(dopt-id, vpc-id) # # ==== Returns # # True or false depending on the result # def associate(vpc_id) requires :id service.associate_dhcp_options(id, vpc_id) reload end # Removes an existing dhcp configuration set # # dhcp_option.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :id service.delete_dhcp_options(id) true end # Create a dhcp configuration set # # >> g = AWS.dhcp_options.new() # >> g.save # # == Returns: # # requestId and a dhcpOptions object # def save requires :dhcp_configuration_set data = service.create_dhcp_options(dhcp_configuration_set).body['dhcpOptionsSet'].first new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) true true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/dhcp_options.rb000066400000000000000000000044131437344660100232500ustar00rootroot00000000000000require 'fog/aws/models/compute/dhcp_option' module Fog module AWS class Compute class DhcpOptions < Fog::Collection attribute :filters model Fog::AWS::Compute::DhcpOption # Creates a new dhcp option # # AWS.dhcp_options.new # # ==== Returns # # Returns the details of the new DHCP options # #>> AWS.dhcp_options.new #=> # def initialize(attributes) self.filters ||= {} super end # Returns an array of all DhcpOptions that have been created # # AWS.dhcp_options.all # # ==== Returns # # Returns an array of all DhcpOptions # #>> AWS.dhcp_options.all #"vpc-some-id", "state"=>"available"}, #tag_set={} #> #] #> # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.warning("all with #{filters_arg.class} param is deprecated, use all('internet-gateway-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'dhcp-options-id' => [*filters_arg]} end filters = filters_arg data = service.describe_dhcp_options(filters).body load(data['dhcpOptionsSet']) end # Used to retrieve an DhcpOption # # You can run the following command to get the details: # AWS.dhcp_options.get("dopt-12345678") # # ==== Returns # #>> AWS.dhcp_options.get("dopt-12345678") #=> "vpc-12345678", 
"state"=>"available"}, #tag_set={} #> # def get(dhcp_options_id) if dhcp_options_id self.class.new(:service => service).all('dhcp-options-id' => dhcp_options_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/flavor.rb000066400000000000000000000005111437344660100220430ustar00rootroot00000000000000module Fog module AWS class Compute class Flavor < Fog::Model identity :id attribute :bits attribute :cores attribute :disk attribute :name attribute :ram attribute :ebs_optimized_available attribute :instance_store_volumes end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/flavors.rb000066400000000000000000004177021437344660100222440ustar00rootroot00000000000000require 'fog/aws/models/compute/flavor' # To compute RAM from AWS doc https://aws.amazon.com/fr/ec2/instance-types # we can use this formula: RAM (in MB) = AWS_RAM (in GiB) * 1073.742 MB/GiB module Fog module AWS class Compute FLAVORS = [ { :id => 'a1.medium', :name => 'A1 Medium Instance', :bits => 64, :cores => 1, :disk => 0, :ram => 2147, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'a1.large', :name => 'A1 Large Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 4295, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'a1.xlarge', :name => 'A1 Extra Large Instance', :bits => 64, :cores => 4, :disk => 0, :ram => 8590, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'a1.2xlarge', :name => 'A1 Double Extra Large Instance', :bits => 64, :cores => 8, :disk => 0, :ram => 17180, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'a1.4xlarge', :name => 'A1 Quadruple Extra Large Instance', :bits => 64, :cores => 16, :disk => 0, :ram => 34360, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'a1.metal', :name => 'A1 Metal', :bits => 64, :cores => 16, :disk => 0, :ram => 34360, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't1.micro', :name => 'T1 Micro Instance', :bits => 32, :cores => 1, :disk => 0, :ram => 658, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.nano', :name => 'Nano Instance', :bits => 64, :cores => 1, :disk => 0, :ram => 536, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.micro', :name => 'T2 Micro Instance', :bits => 64, :cores => 1, :disk => 0, :ram => 1073, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.small', :name => 'T2 Small Instance', :bits => 64, :cores => 1, :disk => 0, :ram => 2147, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.medium', :name => 'T2 Medium Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 4294, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.large', :name => 'T2 Large Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 8589, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.xlarge', :name => 'T2 Extra Large Instance', :bits => 64, :cores => 4, :disk => 0, :ram => 17179, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't2.2xlarge', :name => 'T2 Double Extra Large Instance', :bits => 64, :cores => 8, :disk => 0, :ram => 34359, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 't3.nano', :name => 'T3 Nano', :bits => 64, :cores => 2, :disk => 0, :ram => 536, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.micro', :name 
=> 'T3 Micro', :bits => 64, :cores => 2, :disk => 0, :ram => 1073, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.small', :name => 'T3 Small', :bits => 64, :cores => 2, :disk => 0, :ram => 2147, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.medium', :name => 'T3 Medium', :bits => 64, :cores => 2, :disk => 0, :ram => 4294, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.large', :name => 'T3 Large', :bits => 64, :cores => 2, :disk => 0, :ram => 8589, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.xlarge', :name => 'T3 Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 17179, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3.2xlarge', :name => 'T3 Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 34359, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.nano', :name => 'T3a Nano', :bits => 64, :cores => 2, :disk => 0, :ram => 536, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.micro', :name => 'T3a Micro', :bits => 64, :cores => 2, :disk => 0, :ram => 1073, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.small', :name => 'T3a Small', :bits => 64, :cores => 2, :disk => 0, :ram => 2147, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.medium', :name => 'T3a Medium', :bits => 64, :cores => 2, :disk => 0, :ram => 4294, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.large', :name => 'T3a Large', :bits => 64, :cores => 2, :disk => 0, :ram => 8589, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.xlarge', :name => 'T3a Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 17179, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't3a.2xlarge', :name => 'T3a Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 34359, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.nano', :name => 'T4G Nano Instace', :bits => 64, :cores => 2, :disk => 0, :ram => 537, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.micro', :name => 'T4G Micro Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 1073, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.small', :name => 'T4G Small Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 2147, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.medium', :name => 'T4G Medium Instace', :bits => 64, :cores => 2, :disk => 0, :ram => 4295, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.large', :name => 'T4G Large Instance', :bits => 64, :cores => 2, :disk => 0, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.xlarge', :name => 'T4G Extra Large Instance', :bits => 64, :cores => 4, :disk => 0, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 't4g.2xlarge', :name => 'T4G Double Extra Large Instance', :bits => 64, :cores => 8, :disk => 0, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.medium', :name => 'M6G Medium', :bits => 64, :cores => 1, :disk => 0, :ram => 4295, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.large', :name => 'M6G Large', :bits => 64, 
:cores => 2, :disk => 0, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.xlarge', :name => 'M6G Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.2xlarge', :name => 'M6G Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.4xlarge', :name => 'M6G Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.8xlarge', :name => 'M6G Octuple Extra Large', :bits => 64, :cores => 32, :disk => 0, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.12xlarge', :name => 'M6G Twelve Extra Large', :bits => 64, :cores => 48, :disk => 0, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.16xlarge', :name => 'M6G Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 0, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6g.metal', :name => 'M6G Metal', :bits => 64, :cores => 64, :disk => 0, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'm6gd.medium', :name => 'M6GD Medium', :bits => 64, :cores => 1, :disk => 59, :ram => 4295, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.large', :name => 'M6GD Large', :bits => 64, :cores => 2, :disk => 118, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.xlarge', :name => 'M6GD Extra Large', :bits => 64, :cores => 4, :disk => 237, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.2xlarge', :name => 'M6GD Double Extra Large', :bits => 64, :cores => 8, :disk => 474, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.4xlarge', :name => 'M6GD Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 950, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.8xlarge', :name => 'M6GD Octuple Extra Large', :bits => 64, :cores => 32, :disk => 1900, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'm6gd.12xlarge', :name => 'M6GD Twelve Extra Large', :bits => 64, :cores => 48, :disk => 2850, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'm6gd.16xlarge', :name => 'M6GD Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 3800, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'm6gd.metal', :name => 'M6GD Metal', :bits => 64, :cores => 64, :disk => 3800, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'm1.small', :name => 'M1 Small Instance', :bits => 32, :cores => 1, :disk => 160, :ram => 1825, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'm1.medium', :name => 'M1 Medium Instance', :bits => 32, :cores => 1, :disk => 400, :ram => 4026, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'm1.large', :name => 'M1 Large Instance', :bits => 64, :cores => 2, :disk => 850, :ram => 8053, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'm1.xlarge', :name => 'M1 Extra Large Instance', :bits => 64, :cores => 4, :disk 
=> 1690, :ram => 16106, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'c1.medium', :bits => 32, :cores => 2, :disk => 350, :name => 'High-CPU Medium', :ram => 1825, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'c1.xlarge', :name => 'High-CPU Extra Large', :bits => 64, :cores => 8, :disk => 1690, :ram => 7516, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'c3.large', :name => 'C3 Large', :bits => 64, :cores => 2, :disk => 32, :ram => 4026, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => 'c3.xlarge', :name => 'C3 Extra Large', :bits => 64, :cores => 4, :disk => 80, :ram => 8053, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c3.2xlarge', :name => 'C3 Double Extra Large', :bits => 64, :cores => 8, :disk => 160, :ram => 16106, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c3.4xlarge', :name => 'C3 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 320, :ram => 32212, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c3.8xlarge', :name => 'C3 Eight Extra Large', :bits => 64, :cores => 32, :disk => 640, :ram => 64424, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => 'c4.large', :name => 'C4 Large', :bits => 64, :cores => 2, :disk => 0, :ram => 4026, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c4.xlarge', :name => 'C4 Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 8053, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c4.2xlarge', :name => 'C4 Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 16106, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c4.4xlarge', :name => 'C4 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 32212, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c4.8xlarge', :name => 'C4 Eight Extra Large', :bits => 64, :cores => 36, :disk => 0, :ram => 64424, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.large', :name => 'C5 Large', :bits => 64, :cores => 2, :disk => 0, :ram => 4294, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.xlarge', :name => 'C5 Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 8589, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.2xlarge', :name => 'C5 Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 17179, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.4xlarge', :name => 'C5 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 34359, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.9xlarge', :name => 'C5 Nine Extra Large', :bits => 64, :cores => 36, :disk => 0, :ram => 77309, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.12xlarge', :name => 'C5 Twelve Extra Large', :bits => 64, :cores => 48, :disk => 0, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.18xlarge', :name => 'C5 Eighteen Extra Large', :bits => 64, :cores => 72, :disk => 0, :ram => 154618, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5.24xlarge', :name => 'C5 Twenty-Four Extra Large', :bits => 64, :cores => 96, :disk => 0, :ram => 206158, :ebs_optimized_available => true, 
:instance_store_volumes => 0 }, { :id => 'c5.metal', :name => 'C5 Metal', :bits => 64, :cores => 96, :disk => 0, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5d.large', :name => 'C5d Large', :bits => 64, :cores => 2, :disk => 50, :ram => 4294, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c5d.xlarge', :name => 'C5d Extra Large', :bits => 64, :cores => 4, :disk => 100, :ram => 8589, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c5d.2xlarge', :name => 'C5d Double Extra Large', :bits => 64, :cores => 8, :disk => 200, :ram => 17179, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c5d.4xlarge', :name => 'C5d Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 400, :ram => 34359, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c5d.9xlarge', :name => 'C5d Nine Extra Large', :bits => 64, :cores => 36, :disk => 900, :ram => 77309, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c5d.12xlarge', :name => 'C5d Twelve Extra Large', :bits => 64, :cores => 48, :disk => 1800, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c5d.18xlarge', :name => 'C5d Eighteen Extra Large', :bits => 64, :cores => 72, :disk => 1800, :ram => 154618, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c5d.24xlarge', :name => 'C5d Twenty-four Extra Large', :bits => 64, :cores => 96, :disk => 3600, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'c5d.metal', :name => 'C5d Metal', :bits => 64, :cores => 96, :disk => 3600, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'c5n.large', :name => 'C5n Large', :bits => 64, :cores => 2, :disk => 0, :ram => 5637, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.xlarge', :name => 'C5n Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 11274, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.2xlarge', :name => 'C5n Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 22549, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.4xlarge', :name => 'C5n Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 45097, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.9xlarge', :name => 'C5n Nine Extra Large', :bits => 64, :cores => 36, :disk => 0, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.18xlarge', :name => 'C5n Eighteen Extra Large', :bits => 64, :cores => 72, :disk => 0, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c5n.metal', :name => 'C5n Metal', :bits => 64, :cores => 72, :disk => 0, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.medium', :name => 'C6G Medium', :bits => 64, :cores => 1, :disk => 0, :ram => 2147, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.large', :name => 'C6G Large', :bits => 64, :cores => 2, :disk => 0, :ram => 4295, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.xlarge', :name => 'C6G Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.2xlarge', :name => 'C6G 
Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.4xlarge', :name => 'C6G Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.8xlarge', :name => 'C6G Octuple Extra Large', :bits => 64, :cores => 32, :disk => 0, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.12xlarge', :name => 'C6G Twelve Extra Large', :bits => 64, :cores => 48, :disk => 0, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.16xlarge', :name => 'C6G Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 0, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6g.metal', :name => 'C6G Metal', :bits => 64, :cores => 64, :disk => 0, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'c6gd.medium', :name => 'C6GD Medium', :bits => 64, :cores => 1, :disk => 59, :ram => 2147, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.large', :name => 'C6GD Large', :bits => 64, :cores => 2, :disk => 118, :ram => 4295, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.xlarge', :name => 'C6GD Extra Large', :bits => 64, :cores => 4, :disk => 237, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.2xlarge', :name => 'C6GD Double Extra Large', :bits => 64, :cores => 8, :disk => 474, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.4xlarge', :name => 'C6GD Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 950, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.8xlarge', :name => 'C6GD Octuple Extra Large', :bits => 64, :cores => 32, :disk => 1900, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'c6gd.12xlarge', :name => 'C6GD Twelve Extra Large', :bits => 64, :cores => 48, :disk => 2850, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c6gd.16xlarge', :name => 'C6GD Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 3800, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'c6gd.metal', :name => 'C6GD Metal', :bits => 64, :cores => 64, :disk => 3800, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'g2.2xlarge', :name => 'GPU Double Extra Large', :bits => 64, :cores => 8, :disk => 60, :ram => 16106, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g2.8xlarge', :name => 'GPU Eight Extra Large', :bits => 64, :cores => 32, :disk => 240, :ram => 64424, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'hs1.8xlarge', :name => 'High Storage Eight Extra Large', :bits => 64, :cores => 16, :disk => 50331648, :ram => 125627, :ebs_optimized_available => false, :instance_store_volumes => 24 }, { :id => 'm2.xlarge', :name => 'High-Memory Extra Large', :bits => 64, :cores => 2, :disk => 420, :ram => 18360, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'm2.2xlarge', :name => 'High Memory Double Extra Large', :bits => 64, :cores => 4, :disk => 850, :ram => 36721, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { 
:id => 'm2.4xlarge', :name => 'High Memory Quadruple Extra Large', :bits => 64, :cores => 8, :disk => 1690, :ram => 73443, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'cr1.8xlarge', :name => 'High Memory Eight Extra Large', :bits => 64, :cores => 32, :disk => 240, :ram => 261993, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => 'm3.medium', :name => 'M3 Medium', :bits => 64, :cores => 1, :disk => 4, :ram => 4026, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'm3.large', :name => 'M3 Large', :bits => 64, :cores => 2, :disk => 32, :ram => 8053, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => 'm3.xlarge', :name => 'M3 Extra Large', :bits => 64, :cores => 4, :disk => 80, :ram => 16106, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'm3.2xlarge', :name => 'M3 Double Extra Large', :bits => 64, :cores => 8, :disk => 160, :ram => 32212, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "hi1.4xlarge", :name => "High I/O Quadruple Extra Large Instance", :bits => 64, :cores => 35, :disk => 2048, :ram => 61952, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => 'cc1.4xlarge', :name => 'Cluster Compute Quadruple Extra Large', :bits => 64, :cores => 33.5, :disk => 1690, :ram => 23552, :ebs_optimized_available => false, :instance_store_volumes => 0 }, { :id => 'cc2.8xlarge', :name => 'Cluster Compute Eight Extra Large', :bits => 64, :cores => 32, :disk => 3370, :ram => 64961, :ebs_optimized_available => false, :instance_store_volumes => 4 }, { :id => 'cg1.4xlarge', :name => 'Cluster GPU Quadruple Extra Large', :bits => 64, :cores => 33.5, :disk => 1690, :ram => 22528, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => 'i2.xlarge', :name => 'I2 Extra Large', :bits => 64, :cores => 4, :disk => 800, :ram => 32749, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i2.2xlarge', :name => 'I2 Double Extra Large', :bits => 64, :cores => 8, :disk => 1600, :ram => 65498, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'i2.4xlarge', :name => 'I2 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 3200, :ram => 130996, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'i2.8xlarge', :name => 'I2 Eight Extra Large', :bits => 64, :cores => 32, :disk => 6400, :ram => 261993, :ebs_optimized_available => false, :instance_store_volumes => 8 }, { :id => 'i3.large', :name => 'I3 Large', :bits => 64, :cores => 2, :disk => 475, :ram => 16374, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3.xlarge', :name => 'I3 Extra Large', :bits => 64, :cores => 4, :disk => 950, :ram => 32749, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3.2xlarge', :name => 'I3 Double Extra Large', :bits => 64, :cores => 8, :disk => 1900, :ram => 65498, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3.4xlarge', :name => 'I3 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 3800, :ram => 130996, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'i3.8xlarge', :name => 'I3 Eight Extra Large', :bits => 64, :cores => 32, :disk => 7600, :ram => 261993, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'i3.16xlarge', :name => 'I3 Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 15200, :ram => 523986, 
:ebs_optimized_available => true, :instance_store_volumes => 8 }, { :id => 'i3.metal', :name => 'I3 Metal', :bits => 64, :cores => 72, :disk => 15200, :ram => 549756, :ebs_optimized_available => true, :instance_store_volumes => 8 }, { :id => 'i3en.large', :name => 'I3en Large', :bits => 64, :cores => 2, :disk => 1250, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3en.xlarge', :name => 'I3en Extra Large', :bits => 64, :cores => 4, :disk => 2500, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3en.2xlarge', :name => 'I3en Double Extra Large', :bits => 64, :cores => 8, :disk => 5000, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'i3en.3xlarge', :name => 'I3en Triple Extra Large', :bits => 64, :cores => 8, :disk => 7500, :ram => 103079, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'i3en.6xlarge', :name => 'I3en Sextuple Extra Large', :bits => 64, :cores => 24, :disk => 15000, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'i3en.12xlarge', :name => 'I3en Twelve Extra Large', :bits => 64, :cores => 24, :disk => 15000, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'i3en.16xlarge', :name => 'I3en Sixteen Extra Large', :bits => 64, :cores => 48, :disk => 30000, :ram => 412317, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'i3en.24xlarge', :name => 'I3en Twenty-four Extra Large', :bits => 64, :cores => 96, :disk => 60000, :ram => 824634, :ebs_optimized_available => true, :instance_store_volumes => 8 }, { :id => 'i3en.metal', :name => 'I3en Metal', :bits => 64, :cores => 96, :disk => 60000, :ram => 824634, :ebs_optimized_available => true, :instance_store_volumes => 8 }, { :id => "r3.large", :name => "R3 Large", :bits => 64, :cores => 2, :ram => 16374, :disk => 32, :ebs_optimized_available => false, :instance_store_volumes => 1 }, { :id => "r3.xlarge", :name => "R3 Extra Large", :bits => 64, :cores => 4, :ram => 32749, :disk => 80, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r3.2xlarge", :name => "R3 Double Extra Large", :bits => 64, :cores => 8, :ram => 65498, :disk => 160, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r3.4xlarge", :name => "R3 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 130996, :disk => 320, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r3.8xlarge", :name => "R3 Eight Extra Large", :bits => 64, :cores => 32, :ram => 261993, :disk => 640, :ebs_optimized_available => false, :instance_store_volumes => 2 }, { :id => "r4.large", :name => "R4 Large", :bits => 64, :cores => 2, :ram => 16374, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r4.xlarge", :name => "R4 Extra Large", :bits => 64, :cores => 4, :ram => 32749, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r4.2xlarge", :name => "R4 Double Extra Large", :bits => 64, :cores => 8, :ram => 65498, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r4.4xlarge", :name => "R4 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 130996, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r4.8xlarge", :name => "R4 Eight Extra Large", :bits => 64, :cores => 32, :ram => 261993, :disk => 0, 
:ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r4.16xlarge", :name => "R4 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 523986, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.large", :name => "R5 Large", :bits => 64, :cores => 2, :ram => 17179, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.xlarge", :name => "R5 Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.2xlarge", :name => "R5 Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.4xlarge", :name => "R5 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.8xlarge", :name => "R5 Octuple Extra Large", :bits => 64, :cores => 32, :ram => 274878, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.12xlarge", :name => "R5 Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.16xlarge", :name => "R5 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 549756, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.24xlarge", :name => "R5 Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5.metal", :name => "R5 Metal", :bits => 64, :cores => 96, :ram => 824633, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5d.large", :name => "R5d Large", :bits => 64, :cores => 2, :ram => 17179, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5d.xlarge", :name => "R5d Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5d.2xlarge", :name => "R5d Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5d.4xlarge", :name => "R5d Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5d.8xlarge", :name => "R5d Octuple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5d.12xlarge", :name => "R5d Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5d.16xlarge", :name => "R5d Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 549756, :disk => 2400, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "r5d.24xlarge", :name => "R5d Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "r5d.metal", :name => "R5d Metal", :bits => 64, :cores => 96, :ram => 824633, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "r5a.large", :name => "R5 (AMD) Large", :bits => 64, :cores => 2, :ram => 17179, :disk => 0, :ebs_optimized_available 
=> true, :instance_store_volumes => 0 }, { :id => "r5a.xlarge", :name => "R5 (AMD) Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5a.2xlarge", :name => "R5 (AMD) Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5a.4xlarge", :name => "R5 (AMD) Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5a.12xlarge", :name => "R5 (AMD) Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5a.24xlarge", :name => "R5 (AMD) Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5ad.large", :name => "R5d (AMD) Large", :bits => 64, :cores => 2, :ram => 17179, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5ad.xlarge", :name => "R5d (AMD) Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5ad.2xlarge", :name => "R5d (AMD) Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5ad.4xlarge", :name => "R5d (AMD) Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5ad.12xlarge", :name => "R5d (AMD) Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5ad.24xlarge", :name => "R5d (AMD) Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "r5n.large", :name => "R5n Large", :bits => 64, :cores => 2, :ram => 17179, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.xlarge", :name => "R5n Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.2xlarge", :name => "R5n Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.4xlarge", :name => "R5n Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.8xlarge", :name => "R5n Octuple Extra Large", :bits => 64, :cores => 32, :ram => 274878, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.12xlarge", :name => "R5n Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.16xlarge", :name => "R5n Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 549756, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5n.24xlarge", :name => "R5n Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "r5dn.large", :name => "R5dn Large", 
:bits => 64, :cores => 2, :ram => 17179, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5dn.xlarge", :name => "R5dn Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5dn.2xlarge", :name => "R5dn Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "r5dn.4xlarge", :name => "R5dn Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 137438, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5dn.8xlarge", :name => "R5dn Octuple Extra Large", :bits => 64, :cores => 32, :ram => 274878, :disk => 1200, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5dn.12xlarge", :name => "R5dn Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "r5dn.16xlarge", :name => "R5dn Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 549756, :disk => 2400, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "r5dn.24xlarge", :name => "R5dn Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 824633, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => 'r6g.medium', :name => 'R6G Medium', :bits => 64, :cores => 1, :disk => 0, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.large', :name => 'R6G Large', :bits => 64, :cores => 2, :disk => 0, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.xlarge', :name => 'R6G Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.2xlarge', :name => 'R6G Double Extra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.4xlarge', :name => 'R6G Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.8xlarge', :name => 'R6G Octuple Extra Large', :bits => 64, :cores => 32, :disk => 0, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.12xlarge', :name => 'R6G Twelve Extra Large', :bits => 64, :cores => 48, :disk => 0, :ram => 412317, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.16xlarge', :name => 'R6G Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 0, :ram => 549756, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6g.metal', :name => 'R6G Metal', :bits => 64, :cores => 64, :disk => 0, :ram => 549756, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'r6gd.medium', :name => 'R6GD Medium', :bits => 64, :cores => 1, :disk => 59, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.large', :name => 'R6GD Large', :bits => 64, :cores => 2, :disk => 118, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.xlarge', :name => 'R6GD Extra Large', :bits => 64, :cores => 4, :disk => 237, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.2xlarge', :name => 'R6GD Double Extra Large', :bits 
=> 64, :cores => 8, :disk => 474, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.4xlarge', :name => 'R6GD Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 950, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.8xlarge', :name => 'R6GD Octuple Extra Large', :bits => 64, :cores => 32, :disk => 1900, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'r6gd.12xlarge', :name => 'R6GD Twelve Extra Large', :bits => 64, :cores => 48, :disk => 2850, :ram => 412317, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'r6gd.16xlarge', :name => 'R6GD Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 3800, :ram => 549756, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'r6gd.metal', :name => 'R6GD Metal', :bits => 64, :cores => 64, :disk => 3800, :ram => 549756, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "x1.16xlarge", :name => "X1 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 1047972, :disk => 1920, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1.32xlarge", :name => "X1 Thirty-two Extra Large", :bits => 64, :cores => 128, :ram => 2095944, :disk => 3840, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "x1e.xlarge", :name => "X1e Extra Large", :bits => 64, :cores => 4, :ram => 130997, :disk => 120, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1e.2xlarge", :name => "X1e Double Extra Large", :bits => 64, :cores => 8, :ram => 261993, :disk => 240, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1e.4xlarge", :name => "X1e Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 523986, :disk => 480, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1e.8xlarge", :name => "X1e Octuple Extra Large", :bits => 64, :cores => 32, :ram => 1043677, :disk => 960, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1e.16xlarge", :name => "X1e Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 2095944, :disk => 1920, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "x1e.32xlarge", :name => "X1e Thirty-two Extra Large", :bits => 64, :cores => 128, :ram => 3118147, :disk => 3840, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'x2gd.medium', :name => 'X2gd Medium Instance', :bits => 64, :cores => 1, :disk => 59, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.large', :name => 'X2gd Large Instance', :bits => 64, :cores => 2, :disk => 118, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.xlarge', :name => 'X2gd Extra Large Instance', :bits => 64, :cores => 4, :disk => 237, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.2xlarge', :name => 'X2gd Double Extra Large Instance', :bits => 64, :cores => 8, :disk => 475, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.4xlarge', :name => 'X2gd Quadruple Extra Large Instance', :bits => 64, :cores => 16, :disk => 950, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.8xlarge', :name => 'X2gd Octuple Extra Large Instance', :bits => 64, :cores => 32, :disk => 1900, :ram => 549756, 
:ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.12xlarge', :name => 'X2gd Twelve Extra Large Instance', :bits => 64, :cores => 48, :disk => 2850, :ram => 824634, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.16xlarge', :name => 'X2gd Sixteen Extra Large Instance', :bits => 64, :cores => 64, :disk => 3800, :ram => 1099512, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'x2gd.metal', :name => 'X2gd Metal', :bits => 64, :cores => 64, :disk => 3800, :ram => 1099512, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "u-6tb1.metal", :name => "U 6TB Metal", :bits => 64, :cores => 448, :ram => 6597071, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "u-9tb1.metal", :name => "U 9 TB Metal", :bits => 64, :cores => 448, :ram => 9895606, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "u-12tb1.metal", :name => "U 12 TB Metal", :bits => 64, :cores => 448, :ram => 13194141, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "u-18tb1.metal", :name => "U 18 TB Metal", :bits => 64, :cores => 448, :ram => 19791212, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "u-24tb1.metal", :name => "U 24 TB Metal", :bits => 64, :cores => 448, :ram => 26388283, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "z1d.large", :name => "Z1d Large", :bits => 64, :cores => 2, :ram => 17180, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "z1d.xlarge", :name => "Z1d Extra Large", :bits => 64, :cores => 4, :ram => 34359, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "z1d.2xlarge", :name => "Z1d Double Extra Large", :bits => 64, :cores => 8, :ram => 68719, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "z1d.3xlarge", :name => "Z1d Triple Extra Large", :bits => 64, :cores => 12, :ram => 103079, :disk => 450, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "z1d.6xlarge", :name => "Z1d Sextuple Large", :bits => 64, :cores => 24, :ram => 206158, :disk => 900, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "z1d.12xlarge", :name => "Z1d Twelve Extra Large", :bits => 64, :cores => 48, :ram => 412316, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "z1d.metal", :name => "Z1d Metal", :bits => 64, :cores => 48, :ram => 412316, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "d2.xlarge", :name => "D2 Extra Large", :bits => 64, :cores => 4, :ram => 32749, :disk => 6000, :ebs_optimized_available => true, :instance_store_volumes => 3 }, { :id => "d2.2xlarge", :name => "D2 Double Extra Large", :bits => 64, :cores => 8, :ram => 65498, :disk => 12000, :ebs_optimized_available => true, :instance_store_volumes => 6 }, { :id => "d2.4xlarge", :name => "D2 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 130996, :disk => 24000, :ebs_optimized_available => true, :instance_store_volumes => 12 }, { :id => "d2.8xlarge", :name => "D2 Eight Extra Large", :bits => 64, :cores => 36, :ram => 261993, :disk => 48000, :ebs_optimized_available => true, :instance_store_volumes => 24 }, { :id => "h1.2xlarge", :name => "H1 Double Extra Large", :bits => 64, :cores => 8, :ram => 34360, :disk 
=> 2000, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "h1.4xlarge", :name => "H1 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 4000, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "h1.8xlarge", :name => "H1 Octuple Extra Large", :bits => 64, :cores => 32, :ram => 137439, :disk => 8000, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "h1.16xlarge", :name => "H1 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 274878, :disk => 16000, :ebs_optimized_available => true, :instance_store_volumes => 8 }, { :id => "m4.large", :name => "M4 Large", :bits => 64, :cores => 2, :ram => 8589, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m4.xlarge", :name => "M4 Extra Large", :bits => 64, :cores => 4, :ram => 17179, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m4.2xlarge", :name => "M4 Double Extra Large", :bits => 64, :cores => 8, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m4.4xlarge", :name => "M4 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m4.10xlarge", :name => "M4 Ten Extra Large", :bits => 64, :cores => 40, :ram => 171798, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m4.16xlarge", :name => "M4 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 262144, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.large", :name => "M5 Large", :bits => 64, :cores => 2, :ram => 8589, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.xlarge", :name => "M5 Extra Large", :bits => 64, :cores => 4, :ram => 17179, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.2xlarge", :name => "M5 Double Extra Large", :bits => 64, :cores => 8, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.4xlarge", :name => "M5 Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.8xlarge", :name => "M5 Octuple Extra Large", :bits => 64, :cores => 32, :ram => 137439, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.12xlarge", :name => "M5 Twelve Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.16xlarge", :name => "M5 Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 274878, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.24xlarge", :name => "M5 Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5.metal", :name => "M5 Metal", :bits => 64, :cores => 96, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5d.large", :name => "M5d Large", :bits => 64, :cores => 2, :ram => 8589, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "m5d.xlarge", :name => "M5d Extra Large", :bits => 64, :cores => 4, :ram => 17179, :disk => 150, :ebs_optimized_available => true, 
:instance_store_volumes => 1 }, { :id => "m5d.2xlarge", :name => "M5d Double Extra Large", :bits => 64, :cores => 8, :ram => 34359, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "m5d.4xlarge", :name => "M5d Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5d.8xlarge", :name => "M5d Octuple Extra Large", :bits => 64, :cores => 32, :ram => 137439, :disk => 1200, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5d.12xlarge", :name => "M5d Twelve Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5d.16xlarge", :name => "M5d Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 274878, :disk => 2400, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "m5d.24xlarge", :name => "M5d Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 412316, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "m5d.metal", :name => "M5d Metal", :bits => 64, :cores => 96, :ram => 412316, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "m5a.large", :name => "M5 (AMD) Large", :bits => 64, :cores => 2, :ram => 8589, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.xlarge", :name => "M5 (AMD) Extra Large", :bits => 64, :cores => 4, :ram => 17179, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.2xlarge", :name => "M5 (AMD) Double Extra Large", :bits => 64, :cores => 8, :ram => 34359, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.4xlarge", :name => "M5 (AMD) Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.8xlarge", :name => "M5 (AMD) Eight Extra Large", :bits => 64, :cores => 32, :ram => 137438, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.12xlarge", :name => "M5 (AMD) Twelve Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.16xlarge", :name => "M5 (AMD) Sixteen Extra Large", :bits => 64, :cores => 32, :ram => 274877, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5a.24xlarge", :name => "M5 (AMD) Twenty Four Extra Large", :bits => 64, :cores => 96, :ram => 412316, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.large", :name => "M5ad (AMD) Large", :bits => 64, :cores => 2, :ram => 8589, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.xlarge", :name => "M5ad (AMD) Extra Large", :bits => 64, :cores => 4, :ram => 17179, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.2xlarge", :name => "M5ad (AMD) Double Extra Large", :bits => 64, :cores => 8, :ram => 34359, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.4xlarge", :name => "M5ad (AMD) Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.12xlarge", :name => "M5ad (AMD) Twelve 
Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5ad.24xlarge", :name => "M5ad (AMD) Twenty-four Extra Large", :bits => 64, :cores => 96, :ram => 412316, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.large", :name => "M5n Large", :bits => 64, :cores => 2, :ram => 8590, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.xlarge", :name => "M5n Extra Large", :bits => 64, :cores => 4, :ram => 17180, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.2xlarge", :name => "M5n Double Extra Large", :bits => 64, :cores => 8, :ram => 34360, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.4xlarge", :name => "M5n Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.8xlarge", :name => "M5n Octuple Extra Large", :bits => 64, :cores => 32, :ram => 137439, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.12xlarge", :name => "M5n Twelve Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.16xlarge", :name => "M5n Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 274878, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5n.24xlarge", :name => "M5n Twenty-Four Extra Large", :bits => 64, :cores => 96, :ram => 412317, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "m5dn.large", :name => "M5dn Large", :bits => 64, :cores => 2, :ram => 8590, :disk => 75, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "m5dn.xlarge", :name => "M5dn Extra Large", :bits => 64, :cores => 4, :ram => 17180, :disk => 150, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "m5dn.2xlarge", :name => "M5dn Double Extra Large", :bits => 64, :cores => 8, :ram => 34360, :disk => 300, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => "m5dn.4xlarge", :name => "M5dn Quadruple Extra Large", :bits => 64, :cores => 16, :ram => 68719, :disk => 600, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5dn.8xlarge", :name => "M5dn Octuple Extra Large", :bits => 64, :cores => 32, :ram => 137439, :disk => 1200, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5dn.12xlarge", :name => "M5dn Twelve Extra Large", :bits => 64, :cores => 48, :ram => 206158, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => "m5dn.16xlarge", :name => "M5dn Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 274878, :disk => 2400, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "m5dn.24xlarge", :name => "M5dn Twenty-Four Extra Large", :bits => 64, :cores => 96, :ram => 412317, :disk => 3600, :ebs_optimized_available => true, :instance_store_volumes => 4 }, { :id => "p2.xlarge", :name => "General Purpose GPU Extra Large", :bits => 64, :cores => 4, :ram => 65498, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "p2.8xlarge", :name => "General Purpose GPU Eight Extra Large", :bits => 64, :cores => 32, :ram => 523986, :disk => 0, :ebs_optimized_available => 
true, :instance_store_volumes => 0 }, { :id => "p2.16xlarge", :name => "General Purpose GPU Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 785979, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "p3.2xlarge", :name => "Tesla GPU Two Extra Large", :bits => 64, :cores => 8, :ram => 65498, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "p3.8xlarge", :name => "Tesla GPU Eight Extra Large", :bits => 64, :cores => 32, :ram => 261993, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "p3.16xlarge", :name => "Tesla GPU Sixteen Extra Large", :bits => 64, :cores => 64, :ram => 523986, :disk => 0, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => "p3dn.24xlarge", :name => "Tesla GPU Twenty-four Extra Large", :bits => 64, :cores => 96, :ram => 824634, :disk => 1800, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'inf1.xlarge', :name => 'Inf1 Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 8590, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'inf1.2xlarge', :name => 'Inf1 Double xtra Large', :bits => 64, :cores => 8, :disk => 0, :ram => 17180, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'inf1.6xlarge', :name => 'Inf1 Sextuple Extra Large', :bits => 64, :cores => 24, :disk => 0, :ram => 51540, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'inf1.24xlarge', :name => 'Inf1 Twenty-four Extra Large', :bits => 64, :cores => 96, :disk => 0, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'g3s.xlarge', :name => 'G3s Extra Large', :bits => 64, :cores => 4, :disk => 0, :ram => 32749, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'g3.4xlarge', :name => 'G3 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 0, :ram => 130996, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'g3.8xlarge', :name => 'G3 Octuple Extra Large', :bits => 64, :cores => 32, :disk => 0, :ram => 261993, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'g3.16xlarge', :name => 'G3 Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 0, :ram => 523986, :ebs_optimized_available => true, :instance_store_volumes => 0 }, { :id => 'g3dn.xlarge', :name => 'G3dn Extra Large', :bits => 64, :cores => 4, :disk => 125, :ram => 171780, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.2xlarge', :name => 'G3dn Double Extra Large', :bits => 64, :cores => 8, :disk => 225, :ram => 34360, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.4xlarge', :name => 'G3dn Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 225, :ram => 68719, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.8xlarge', :name => 'G3dn Octuple Extra Large', :bits => 64, :cores => 32, :disk => 900, :ram => 137439, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.16xlarge', :name => 'G3dn Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 900, :ram => 274878, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.12xlarge', :name => 'G3dn Twelve Extra Large (4GPU)', :bits => 64, :cores => 48, :disk => 900, :ram => 206158, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'g3dn.metal', :name => 
'G3dn Metal (8GPU)', :bits => 64, :cores => 96, :disk => 1800, :ram => 412317, :ebs_optimized_available => true, :instance_store_volumes => 2 }, { :id => 'f1.2xlarge', :name => 'F1 Double Extra Large', :bits => 64, :cores => 8, :disk => 470, :ram => 130997, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'f1.4xlarge', :name => 'F1 Quadruple Extra Large', :bits => 64, :cores => 16, :disk => 940, :ram => 261993, :ebs_optimized_available => true, :instance_store_volumes => 1 }, { :id => 'f1.16xlarge', :name => 'F1 Sixteen Extra Large', :bits => 64, :cores => 64, :disk => 3760, :ram => 1047972, :ebs_optimized_available => true, :instance_store_volumes => 4 } ] class Flavors < Fog::Collection model Fog::AWS::Compute::Flavor # Returns an array of all flavors that have been created # # AWS.flavors.all # # ==== Returns # # Returns an array of all available instances and their general information # #>> AWS.flavors.all # , # , # , # , # , # , # , # , # , # , # , # , # , # , # # ] # > # def all load(Fog::AWS::Compute::FLAVORS) self end # Used to retrieve a flavor # flavor_id is required to get the associated flavor information. # flavors available currently: # # t1.micro # m1.small, m1.medium, m1.large, m1.xlarge # c1.medium, c1.xlarge # c3.large, c3.xlarge, c3.2xlarge, c3.4xlarge, c3.8xlarge # g2.2xlarge # hs1.8xlarge # m2.xlarge, m2.2xlarge, m2.4xlarge # m3.xlarge, m3.2xlarge # cr1.8xlarge # cc1.4xlarge # cc2.8xlarge # cg1.4xlarge # i2.xlarge, i2.2xlarge, i2.4xlarge, i2.8xlarge # # You can run the following command to get the details: # AWS.flavors.get("t1.micro") # # ==== Returns # #>> AWS.flavors.get("t1.micro") # # def get(flavor_id) self.class.new(:service => service).all.find {|flavor| flavor.id == flavor_id} end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/image.rb000066400000000000000000000034641437344660100216460ustar00rootroot00000000000000module Fog module AWS class Compute class Image < Fog::Model identity :id, :aliases => 'imageId' attribute :architecture attribute :block_device_mapping, :aliases => 'blockDeviceMapping' attribute :description attribute :location, :aliases => 'imageLocation' attribute :owner_id, :aliases => 'imageOwnerId' attribute :owner_alias, :aliases => 'imageOwnerAlias' attribute :state, :aliases => 'imageState' attribute :type, :aliases => 'imageType' attribute :is_public, :aliases => 'isPublic' attribute :kernel_id, :aliases => 'kernelId' attribute :platform attribute :product_codes, :aliases => 'productCodes' attribute :ramdisk_id, :aliases => 'ramdiskId' attribute :root_device_type, :aliases => 'rootDeviceType' attribute :root_device_name, :aliases => 'rootDeviceName' attribute :tags, :aliases => 'tagSet' attribute :name attribute :virtualization_type, :aliases => 'virtualizationType' attribute :creation_date, :aliases => 'creationDate' attribute :ena_support, :aliases => 'enaSupport' def deregister(delete_snapshot = false) service.deregister_image(id) if(delete_snapshot && root_device_type == "ebs") block_device = block_device_mapping.find {|block_device| block_device['deviceName'] == root_device_name} service.snapshots.new(:id => block_device['snapshotId']).destroy else true end end def ready? 
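# A minimal deregistration sketch (assumptions: `AWS` is an existing
# Fog::AWS::Compute connection and the AMI id below is illustrative):
#
#   image = AWS.images.get('ami-0123456789abcdef0')
#   image.deregister(true)  # when the image is EBS-backed this also destroys
#                           # the root snapshot; omit the argument to keep it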
state == 'available' end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/images.rb000066400000000000000000000025641437344660100220310ustar00rootroot00000000000000require 'fog/aws/models/compute/image' module Fog module AWS class Compute class Images < Fog::Collection attribute :filters model Fog::AWS::Compute::Image # Creates a new Amazon machine image # # AWS.images.new # # ==== Returns # # Returns the details of the new image # #>> AWS.images.new # # def initialize(attributes) self.filters ||= {} super end def all(filters_arg = filters) filters = filters_arg data = service.describe_images(filters).body load(data['imagesSet']) end def get(image_id) if image_id self.class.new(:service => service).all('image-id' => image_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/internet_gateway.rb000066400000000000000000000035211437344660100241270ustar00rootroot00000000000000module Fog module AWS class Compute class InternetGateway < Fog::Model identity :id, :aliases => 'internetGatewayId' attribute :attachment_set, :aliases => 'attachmentSet' attribute :tag_set, :aliases => 'tagSet' def initialize(attributes={}) super end # Attaches an existing internet gateway # # internet_gateway.attach(igw-id, vpc-id) # # ==== Returns # # True or false depending on the result # def attach(vpc_id) requires :id service.attach_internet_gateway(id, vpc_id) reload end # Detaches an existing internet gateway # # internet_gateway.detach(igw-id, vpc-id) # # ==== Returns # # True or false depending on the result # def detach(vpc_id) requires :id service.detach_internet_gateway(id, vpc_id) reload end # Removes an existing internet gateway # # internet_gateway.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :id service.delete_internet_gateway(id) true end # Create an internet gateway # # >> g = AWS.internet_gateways.new() # >> g.save # # == Returns: # # requestId and a internetGateway object # def save data = service.create_internet_gateway.body['internetGatewaySet'].first new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) true true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/internet_gateways.rb000066400000000000000000000045441437344660100243200ustar00rootroot00000000000000require 'fog/aws/models/compute/internet_gateway' module Fog module AWS class Compute class InternetGateways < Fog::Collection attribute :filters model Fog::AWS::Compute::InternetGateway # Creates a new internet gateway # # AWS.internet_gateways.new # # ==== Returns # # Returns the details of the new InternetGateway # #>> AWS.internet_gateways.new #=> # def initialize(attributes) self.filters ||= {} super end # Returns an array of all InternetGateways that have been created # # AWS.internet_gateways.all # # ==== Returns # # Returns an array of all InternetGateways # #>> AWS.internet_gateways.all #"vpc-some-id", "state"=>"available"}, #tag_set={} #> #] #> # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.warning("all with #{filters_arg.class} param is deprecated, use all('internet-gateway-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'internet-gateway-id' => [*filters_arg]} end filters = filters_arg data = service.describe_internet_gateways(filters).body load(data['internetGatewaySet']) end # Used to retrieve an InternetGateway # # You can run the following command to get the details: # AWS.internet_gateways.get("igw-12345678") # # ==== Returns # #>> 
AWS.internet_gateways.get("igw-12345678") #=> "vpc-12345678", "state"=>"available"}, #tag_set={} #> # def get(internet_gateway_id) if internet_gateway_id self.class.new(:service => service).all('internet-gateway-id' => internet_gateway_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/key_pair.rb000066400000000000000000000024471437344660100223670ustar00rootroot00000000000000module Fog module AWS class Compute class KeyPair < Fog::Model identity :name, :aliases => 'keyName' attribute :fingerprint, :aliases => 'keyFingerprint' attribute :private_key, :aliases => 'keyMaterial' attr_accessor :public_key def destroy requires :name service.delete_key_pair(name) true end def save requires :name data = if public_key service.import_key_pair(name, public_key).body else service.create_key_pair(name).body end new_attributes = data.reject {|key,value| !['keyFingerprint', 'keyMaterial', 'keyName'].include?(key)} merge_attributes(new_attributes) true end def write(path="#{ENV['HOME']}/.ssh/fog_#{Fog.credential.to_s}_#{name}.pem") if writable? split_private_key = private_key.split(/\n/) File.open(path, "w") do |f| split_private_key.each {|line| f.puts line} f.chmod 0600 end "Key file built: #{path}" else "Invalid private key" end end def writable? !!(private_key && ENV.key?('HOME')) end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/key_pairs.rb000066400000000000000000000102221437344660100225400ustar00rootroot00000000000000require 'fog/aws/models/compute/key_pair' module Fog module AWS class Compute class KeyPairs < Fog::Collection attribute :filters attribute :key_name model Fog::AWS::Compute::KeyPair # Used to create a key pair. There are 3 arguments and only name is required. You can generate a new key_pair as follows: # AWS.key_pairs.create(:name => "test", :fingerprint => "123", :private_key => '234234') # # ==== Returns # # # # The key_pair can be retrieved by running AWS.key_pairs.get("test"). See get method below. # def initialize(attributes) self.filters ||= {} super end # Returns an array of all key pairs that have been created # # AWS.key_pairs.all # # ==== Returns # # # ] #> # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.deprecation("all with #{filters_arg.class} param is deprecated, use all('key-name' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'key-name' => [*filters_arg]} end filters = filters_arg data = service.describe_key_pairs(filters).body load(data['keySet']) end # Used to retrieve a key pair that was created with the AWS.key_pairs.create method. # The name is required to get the associated key_pair information. 
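# A minimal key pair sketch (assumptions: `AWS` is an existing
# Fog::AWS::Compute connection; the names and paths are illustrative):
#
#   key_pair = AWS.key_pairs.create(:name => "fog-example")
#   key_pair.write("#{ENV['HOME']}/.ssh/fog-example.pem")  # private key, written with mode 0600
#
#   # import an existing public key instead of having EC2 generate one:
#   AWS.key_pairs.new(:name => "imported", :public_key => File.read("id_rsa.pub")).save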
# # You can run the following command to get the details: # AWS.key_pairs.get("test") # # ==== Returns # #>> AWS.key_pairs.get("test") # # def get(key_name) if key_name self.class.new(:service => service).all('key-name' => key_name).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/network_acl.rb000066400000000000000000000162331437344660100230720ustar00rootroot00000000000000module Fog module AWS class Compute class NetworkAcl < Fog::Model ICMP = 1 TCP = 6 UDP = 17 identity :network_acl_id, :aliases => 'networkAclId' attribute :vpc_id, :aliases => 'vpcId' attribute :default attribute :entries, :aliases => 'entrySet' attribute :associations, :aliases => 'associationSet' attribute :tags, :aliases => 'tagSet' # Add an inbound rule, shortcut method for #add_rule def add_inbound_rule(rule_number, protocol, rule_action, cidr_block, options = {}) add_rule(rule_number, protocol, rule_action, cidr_block, false, options) end # Add an outbound rule, shortcut method for #add_rule def add_outbound_rule(rule_number, protocol, rule_action, cidr_block, options = {}) add_rule(rule_number, protocol, rule_action, cidr_block, true, options) end # Add a new rule # # network_acl.add_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0', true, 'PortRange.From' => 22, 'PortRange.To' => 22) # # ==== Parameters # * rule_number<~Integer> - The rule number for the entry, between 100 and 32766 # * protocol<~Integer> - The IP protocol to which the rule applies. You can use -1 to mean all protocols. # * rule_action<~String> - Allows or denies traffic that matches the rule. (either allow or deny) # * cidr_block<~String> - The CIDR range to allow or deny # * egress<~Boolean> - Indicates whether this rule applies to egress traffic from the subnet (true) or ingress traffic to the subnet (false). # * options<~Hash>: # * 'Icmp.Code' - ICMP code, required if protocol is 1 # * 'Icmp.Type' - ICMP type, required if protocol is 1 # * 'PortRange.From' - The first port in the range, required if protocol is 6 (TCP) or 17 (UDP) # * 'PortRange.To' - The last port in the range, required if protocol is 6 (TCP) or 17 (UDP) # # ==== Returns # # True or false depending on the result # def add_rule(rule_number, protocol, rule_action, cidr_block, egress, options = {}) requires :network_acl_id service.create_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options) true end # Remove an inbound rule, shortcut method for #remove_rule def remove_inbound_rule(rule_number) remove_rule(rule_number, false) end # Remove an outbound rule, shortcut method for #remove_rule def remove_outbound_rule(rule_number) remove_rule(rule_number, true) end # Update a specific rule number # # network_acl.remove_rule(100, true) # # ==== Parameters # * rule_number<~Integer> - The rule number for the entry, between 100 and 32766 # * egress<~Boolean> - Indicates whether this rule applies to egress traffic from the subnet (true) or ingress traffic to the subnet (false). 
# # ==== Returns # # True or false depending on the result # def remove_rule(rule_number, egress) requires :network_acl_id service.delete_network_acl_entry(network_acl_id, rule_number, egress) true end # Update an inbound rule, shortcut method for #update_rule def update_inbound_rule(rule_number, protocol, rule_action, cidr_block, options = {}) update_rule(rule_number, protocol, rule_action, cidr_block, false, options) end # Update an outbound rule, shortcut method for #update_rule def update_outbound_rule(rule_number, protocol, rule_action, cidr_block, options = {}) update_rule(rule_number, protocol, rule_action, cidr_block, true, options) end # Update a specific rule number # # network_acl.update_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0', true, 'PortRange.From' => 22, 'PortRange.To' => 22) # # ==== Parameters # * rule_number<~Integer> - The rule number for the entry, between 100 and 32766 # * protocol<~Integer> - The IP protocol to which the rule applies. You can use -1 to mean all protocols. # * rule_action<~String> - Allows or denies traffic that matches the rule. (either allow or deny) # * cidr_block<~String> - The CIDR range to allow or deny # * egress<~Boolean> - Indicates whether this rule applies to egress traffic from the subnet (true) or ingress traffic to the subnet (false). # * options<~Hash>: # * 'Icmp.Code' - ICMP code, required if protocol is 1 # * 'Icmp.Type' - ICMP type, required if protocol is 1 # * 'PortRange.From' - The first port in the range, required if protocol is 6 (TCP) or 17 (UDP) # * 'PortRange.To' - The last port in the range, required if protocol is 6 (TCP) or 17 (UDP) # # ==== Returns # # True or false depending on the result # def update_rule(rule_number, protocol, rule_action, cidr_block, egress, options = {}) requires :network_acl_id service.replace_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options) true end # Associate a subnet with this network ACL # # network_acl.associate_with(subnet) # # ==== Parameters # * subnet<~Subnet> - Subnet object to associate with this network ACL # # ==== Returns # # True or false depending on the result # def associate_with(subnet) requires :network_acl_id # We have to manually find out the network ACL the subnet is currently associated with old_id = service.network_acls.all('association.subnet-id' => subnet.subnet_id).first.associations.find { |a| a['subnetId'] == subnet.subnet_id }['networkAclAssociationId'] service.replace_network_acl_association(old_id, network_acl_id) true end # Removes an existing network ACL # # network_acl.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :network_acl_id service.delete_network_acl(network_acl_id) true end # Create a network ACL # # >> g = AWS.network_acls.new(:vpc_id => 'vpc-abcdefgh') # >> g.save def save requires :vpc_id data = service.create_network_acl(vpc_id).body['networkAcl'] new_attributes = data.reject { |key,value| key == 'tagSet' } merge_attributes(new_attributes) if tags = self.tags # expect eventual consistency Fog.wait_for { self.reload rescue nil } service.create_tags( self.identity, tags ) end true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/network_acls.rb000066400000000000000000000101141437344660100232450ustar00rootroot00000000000000require 'fog/aws/models/compute/network_acl' module Fog module AWS class Compute class NetworkAcls < Fog::Collection attribute :filters model Fog::AWS::Compute::NetworkAcl # Creates a new network ACL # # 
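# A minimal network ACL sketch (assumptions: `AWS` is an existing
# Fog::AWS::Compute connection; the VPC id and the `subnet` object are illustrative):
#
#   acl = AWS.network_acls.create(:vpc_id => 'vpc-abcdefgh')
#   acl.add_inbound_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0',
#                        'PortRange.From' => 22, 'PortRange.To' => 22)
#   acl.associate_with(subnet)  # replaces the subnet's current ACL association
#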
AWS.network_acls.new # # ==== Returns # # Returns the details of the new network ACL # #>> # def initialize(attributes) self.filters ||= {} super end # Returns an array of all network ACLs that have been created # # AWS.network_acls.all # # ==== Returns # # Returns an array of all network ACLs # #>> AWS.network_acls.all # {}, # "portRange" => {}, # "ruleNumber" => 32767, # "protocol" => -1, # "ruleAction" => "deny", # "egress" => false, # "cidrBlock" => "0.0.0.0/0" # }, # { # "icmpTypeCode" => {}, # "portRange" => {}, # "ruleNumber" => 32767, # "protocol" => -1, # "ruleAction" => "deny", # "egress" => true, # "cidrBlock" => "0.0.0.0/0" # } # ], # associations=[ # { # "networkAclAssociationId" => "aclassoc-abcdefgh", # "networkAclId" => "acl-abcdefgh", # "subnetId" => "subnet-abcdefgh" # } # ], # tags={} # > # ] # > # def all(filters_arg = filters) filters = filters_arg data = service.describe_network_acls(filters).body load(data['networkAclSet']) end # Used to retrieve a network interface # network interface id is required to get any information # # You can run the following command to get the details: # AWS.network_interfaces.get("eni-11223344") # # ==== Returns # #>> AWS.network_acls.get("acl-abcdefgh") # {}, # "portRange" => {}, # "ruleNumber" => 32767, # "protocol" => -1, # "ruleAction" => "deny", # "egress" => false, # "cidrBlock" => "0.0.0.0/0" # }, # { # "icmpTypeCode" => {}, # "portRange" => {}, # "ruleNumber" => 32767, # "protocol" => -1, # "ruleAction" => "deny", # "egress" => true, # "cidrBlock" => "0.0.0.0/0" # } # ], # associations=[ # { # "networkAclAssociationId" => "aclassoc-abcdefgh", # "networkAclId" => "acl-abcdefgh", # "subnetId" => "subnet-abcdefgh" # } # ], # tags={} # > def get(nacl_id) self.class.new(:service => service).all('network-acl-id' => nacl_id).first if nacl_id end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/network_interface.rb000066400000000000000000000055031437344660100242710ustar00rootroot00000000000000module Fog module AWS class Compute class NetworkInterface < Fog::Model identity :network_interface_id, :aliases => 'networkInterfaceId' attribute :state attribute :request_id, :aliases => 'requestId' attribute :network_interface_id, :aliases => 'networkInterfaceId' attribute :subnet_id, :aliases => 'subnetId' attribute :vpc_id, :aliases => 'vpcId' attribute :availability_zone, :aliases => 'availabilityZone' attribute :description, :aliases => 'description' attribute :owner_id, :aliases => 'ownerId' attribute :requester_id, :aliases => 'requesterId' attribute :requester_managed, :aliases => 'requesterManaged' attribute :status, :aliases => 'status' attribute :mac_address, :aliases => 'macAddress' attribute :private_ip_address, :aliases => 'privateIpAddress' attribute :private_ip_addresses, :aliases => 'privateIpAddresses' attribute :private_dns_name, :aliases => 'privateDnsName' attribute :source_dest_check, :aliases => 'sourceDestCheck' attribute :group_set, :aliases => 'groupSet' attribute :attachment, :aliases => 'attachment' attribute :association, :aliases => 'association' attribute :tag_set, :aliases => 'tagSet' # Removes an existing network interface # # network_interface.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :network_interface_id service.delete_network_interface(network_interface_id) true end # Create a network_interface # # >> g = AWS.network_interfaces.new(:subnet_id => "subnet-someId", options) # >> g.save # # options is an optional hash which may contain 'PrivateIpAddress', 
'Description', 'GroupSet' # # == Returns: # # requestId and a networkInterface object # def save requires :subnet_id options = { 'PrivateIpAddress' => private_ip_address, 'Description' => description, 'GroupSet' => group_set, } options.delete_if {|key, value| value.nil?} data = service.create_network_interface(subnet_id, options).body['networkInterface'] new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/network_interfaces.rb000066400000000000000000000075061437344660100244610ustar00rootroot00000000000000require 'fog/aws/models/compute/network_interface' module Fog module AWS class Compute class NetworkInterfaces < Fog::Collection attribute :filters model Fog::AWS::Compute::NetworkInterface # Creates a new network interface # # AWS.network_interfaces.new # # ==== Returns # # Returns the details of the new network interface # #>> AWS.network_interfaces.new # # def initialize(attributes) self.filters ||= {} super end # Returns an array of all network interfaces that have been created # # AWS.network_interfaces.all # # ==== Returns # # Returns an array of all network interfaces # #>> AWS.network_interfaves.all # # ] # > # def all(filters_arg = filters) filters = filters_arg data = service.describe_network_interfaces(filters).body load(data['networkInterfaceSet']) end # Used to retrieve a network interface # network interface id is required to get any information # # You can run the following command to get the details: # AWS.network_interfaces.get("eni-11223344") # # ==== Returns # #>> AWS.NetworkInterface.get("eni-11223344") # # def get(nic_id) if nic_id self.class.new(:service => service).all('network-interface-id' => nic_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/route_table.rb000066400000000000000000000030341437344660100230620ustar00rootroot00000000000000module Fog module AWS class Compute class RouteTable < Fog::Model identity :id, :aliases => 'routeTableId' attribute :vpc_id, :aliases => 'vpcId' attribute :routes, :aliases => 'routeSet' attribute :associations, :aliases => 'associationSet' attribute :tags, :aliases => 'tagSet' def initialize(attributes={}) super end # Remove an existing route table # # route_tables.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :id service.delete_route_table(id) true end # Create a route table # # >> routetable = connection.route_tables.new # >> routetable.save # # == Returns: # # True or an exception depending on the result. Keep in mind that this *creates* a new route table. 
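# A minimal sketch (assumptions: `AWS` is an existing Fog::AWS::Compute
# connection and the VPC id is illustrative):
#
#   route_table = AWS.route_tables.create(:vpc_id => 'vpc-abcdefgh')
#   route_table.destroy  # remove it again once it is no longer needed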
# def save requires :vpc_id data = service.create_route_table(vpc_id).body['routeTable'].first new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) true end private def associationSet=(new_association_set) merge_attributes(new_association_set.first || {}) end def routeSet=(new_route_set) merge_attributes(new_route_set || {}) end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/route_tables.rb000066400000000000000000000043141437344660100232470ustar00rootroot00000000000000require 'fog/aws/models/compute/route_table' module Fog module AWS class Compute class RouteTables < Fog::Collection attribute :filters model Fog::AWS::Compute::RouteTable # Creates a new route table # # AWS.route_tables.new # # ==== Returns # # Returns the details of the new route table # #>> AWS.route_tables.new # # def initialize(attributes) self.filters ||= {} super end # Returns an array of all route tables that have been created # # AWS.route_tables.all # # ==== Returns # # Returns an array of all route tables # #>> AWS.route_tables.all # # ] # > # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.warning("all with #{filters_arg.class} param is deprecated, use all('route-table-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'route-table-id' => [*filters_arg]} end filters = filters_arg data = service.describe_route_tables(filters).body load(data['routeTableSet']) end # Used to retrieve a route table # route_table_id is required to get the associated route table information. # # You can run the following command to get the details: # AWS.route_tables.get("rtb-41e8552f") # # ==== Returns # #>> AWS.route_tables.get("rtb-41e8552f") # # def get(route_table_id) if route_table_id self.class.new(:service => service).all('route-table-id' => route_table_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/security_group.rb000066400000000000000000000265171437344660100236530ustar00rootroot00000000000000module Fog module AWS class Compute class SecurityGroup < Fog::Model identity :name, :aliases => 'groupName' attribute :description, :aliases => 'groupDescription' attribute :group_id, :aliases => 'groupId' attribute :ip_permissions, :aliases => 'ipPermissions' attribute :ip_permissions_egress, :aliases => 'ipPermissionsEgress' attribute :owner_id, :aliases => 'ownerId' attribute :vpc_id, :aliases => 'vpcId' attribute :tags, :aliases => 'tagSet' # Authorize access by another security group # # >> g = AWS.security_groups.all(:description => "something").first # >> g.authorize_group_and_owner("some_group_name", "1234567890") # # == Parameters: # group:: # The name of the security group you're granting access to. # # owner:: # The owner id for security group you're granting access to. 
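# A minimal cross-group grant sketch (assumptions: `AWS` is an existing
# Fog::AWS::Compute connection; the account and group ids are illustrative).
# The non-deprecated equivalent uses the :group option of authorize_port_range:
#
#   web = AWS.security_groups.get('web')
#   web.authorize_port_range(5432..5432, :group => {'123456789012' => 'sg-0abc1234'})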
# # == Returns: # # An excon response object representing the result # # "some-id-string", # "return"=>true}, # headers{"Transfer-Encoding"=>"chunked", # "Date"=>"Mon, 27 Dec 2010 22:12:57 GMT", # "Content-Type"=>"text/xml;charset=UTF-8", # "Server"=>"AmazonEC2"} # def authorize_group_and_owner(group, owner = nil) Fog::Logger.deprecation("authorize_group_and_owner is deprecated, use authorize_port_range with :group option instead") requires_one :name, :group_id service.authorize_security_group_ingress( name, 'GroupId' => group_id, 'SourceSecurityGroupName' => group, 'SourceSecurityGroupOwnerId' => owner ) end # Authorize a new port range for a security group # # >> g = AWS.security_groups.all(:description => "something").first # >> g.authorize_port_range(20..21) # # == Parameters: # range:: # A Range object representing the port range you want to open up. E.g., 20..21 # # options:: # A hash that can contain any of the following keys: # :cidr_ip (defaults to "0.0.0.0/0") # :cidr_ipv6 cannot be used with :cidr_ip # :group - ("account:group_name" or "account:group_id"), cannot be used with :cidr_ip or :cidr_ipv6 # :ip_protocol (defaults to "tcp") # # == Returns: # # An excon response object representing the result # # "some-id-string", # "return"=>true}, # headers{"Transfer-Encoding"=>"chunked", # "Date"=>"Mon, 27 Dec 2010 22:12:57 GMT", # "Content-Type"=>"text/xml;charset=UTF-8", # "Server"=>"AmazonEC2"} # def authorize_port_range(range, options = {}) requires_one :name, :group_id ip_permission = fetch_ip_permission(range, options) if options[:direction].nil? || options[:direction] == 'ingress' authorize_port_range_ingress group_id, ip_permission elsif options[:direction] == 'egress' authorize_port_range_egress group_id, ip_permission end end def authorize_port_range_ingress(group_id, ip_permission) service.authorize_security_group_ingress( name, 'GroupId' => group_id, 'IpPermissions' => [ ip_permission ] ) end def authorize_port_range_egress(group_id, ip_permission) service.authorize_security_group_egress( name, 'GroupId' => group_id, 'IpPermissions' => [ ip_permission ] ) end # Removes an existing security group # # security_group.destroy # # ==== Returns # # True or false depending on the result # def destroy requires_one :name, :group_id if group_id.nil? service.delete_security_group(name) else service.delete_security_group(nil, group_id) end true end # Revoke access by another security group # # >> g = AWS.security_groups.all(:description => "something").first # >> g.revoke_group_and_owner("some_group_name", "1234567890") # # == Parameters: # group:: # The name of the security group you're revoking access to. # # owner:: # The owner id for security group you're revoking access access to. 
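# A minimal port range sketch (assumption: `group` is a saved
# Fog::AWS::Compute::SecurityGroup; the CIDR blocks are illustrative):
#
#   group.authorize_port_range(80..80)                          # tcp from 0.0.0.0/0
#   group.authorize_port_range(443..443, :cidr_ipv6 => '::/0')  # IPv6 ingress
#   group.authorize_port_range(53..53, :ip_protocol => 'udp',
#                              :direction => 'egress')          # outbound rule
#   group.revoke_port_range(80..80)                             # undo the first rule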
# # == Returns: # # An excon response object representing the result # # "some-id-string", # "return"=>true}, # headers{"Transfer-Encoding"=>"chunked", # "Date"=>"Mon, 27 Dec 2010 22:12:57 GMT", # "Content-Type"=>"text/xml;charset=UTF-8", # "Server"=>"AmazonEC2"} # def revoke_group_and_owner(group, owner = nil) Fog::Logger.deprecation("revoke_group_and_owner is deprecated, use revoke_port_range with :group option instead") requires_one :name, :group_id service.revoke_security_group_ingress( name, 'GroupId' => group_id, 'SourceSecurityGroupName' => group, 'SourceSecurityGroupOwnerId' => owner ) end # Revoke an existing port range for a security group # # >> g = AWS.security_groups.all(:description => "something").first # >> g.revoke_port_range(20..21) # # == Parameters: # range:: # A Range object representing the port range you want to open up. E.g., 20..21 # # options:: # A hash that can contain any of the following keys: # :cidr_ip (defaults to "0.0.0.0/0") # :cidr_ipv6 cannot be used with :cidr_ip # :group - ("account:group_name" or "account:group_id"), cannot be used with :cidr_ip or :cidr_ipv6 # :ip_protocol (defaults to "tcp") # # == Returns: # # An excon response object representing the result # # "some-id-string", # "return"=>true}, # headers{"Transfer-Encoding"=>"chunked", # "Date"=>"Mon, 27 Dec 2010 22:12:57 GMT", # "Content-Type"=>"text/xml;charset=UTF-8", # "Server"=>"AmazonEC2"} # def revoke_port_range(range, options = {}) requires_one :name, :group_id ip_permission = fetch_ip_permission(range, options) if options[:direction].nil? || options[:direction] == 'ingress' revoke_port_range_ingress group_id, ip_permission elsif options[:direction] == 'egress' revoke_port_range_egress group_id, ip_permission end end def revoke_port_range_ingress(group_id, ip_permission) service.revoke_security_group_ingress( name, 'GroupId' => group_id, 'IpPermissions' => [ ip_permission ] ) end def revoke_port_range_egress(group_id, ip_permission) service.revoke_security_group_egress( name, 'GroupId' => group_id, 'IpPermissions' => [ ip_permission ] ) end # Reload a security group # # >> g = AWS.security_groups.get(:name => "some_name") # >> g.reload # # == Returns: # # Up to date model or an exception def reload if group_id.nil? super service.delete_security_group(name) else requires :group_id data = begin collection.get_by_id(group_id) rescue Excon::Errors::SocketError nil end return unless data merge_attributes(data.attributes) self end end # Create a security group # # >> g = AWS.security_groups.new(:name => "some_name", :description => "something") # >> g.save # # == Returns: # # True or an exception depending on the result. Keep in mind that this *creates* a new security group. # As such, it yields an InvalidGroup.Duplicate exception if you attempt to save an existing group. # def save requires :description, :name data = service.create_security_group(name, description, vpc_id).body new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) if tags = self.tags # expect eventual consistency Fog.wait_for { self.reload rescue nil } service.create_tags( self.group_id, tags ) end true end private # # +group_arg+ may be a string or a hash with one key & value. # # If group_arg is a string, it is assumed to be the group name, # and the UserId is assumed to be self.owner_id. # # The "account:group" form is deprecated. # # If group_arg is a hash, the key is the UserId and value is the group. 
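# Illustrative examples of the accepted forms (the account id and group
# values below are made up):
#
#   group_info('sg-0abc1234')                   # group id, UserId defaults to owner_id
#   group_info('my-group')                      # group name, UserId defaults to owner_id
#   group_info('123456789012' => 'my-group')    # explicit account => group
#   group_info('123456789012:my-group')         # deprecated "account:group" form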
def group_info(group_arg) if Hash === group_arg account = group_arg.keys.first group = group_arg.values.first elsif group_arg.match(/:/) account, group = group_arg.split(':') Fog::Logger.deprecation("'account:group' argument is deprecated. Use {account => group} or just group instead") else requires :owner_id account = owner_id group = group_arg end info = { 'UserId' => account } if group.start_with?("sg-") # we're dealing with a security group id info['GroupId'] = group else # this has to be a security group name info['GroupName'] = group end info end def fetch_ip_permission(range, options) ip_permission = { 'FromPort' => range.begin, 'ToPort' => range.end, 'IpProtocol' => options[:ip_protocol] || 'tcp' } if options[:group].nil? if options[:cidr_ipv6].nil? ip_permission['IpRanges'] = [ { 'CidrIp' => options[:cidr_ip] || '0.0.0.0/0' } ] else ip_permission['Ipv6Ranges'] = [ { 'CidrIpv6' => options[:cidr_ipv6] } ] end else ip_permission['Groups'] = [ group_info(options[:group]) ] end ip_permission end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/security_groups.rb000066400000000000000000000106251437344660100240270ustar00rootroot00000000000000require 'fog/aws/models/compute/security_group' module Fog module AWS class Compute class SecurityGroups < Fog::Collection attribute :filters model Fog::AWS::Compute::SecurityGroup # Creates a new security group # # AWS.security_groups.new # # ==== Returns # # Returns the details of the new image # #>> AWS.security_groups.new # # def initialize(attributes) self.filters ||= {} super end # Returns an array of all security groups that have been created # # AWS.security_groups.all # # ==== Returns # # Returns an array of all security groups # #>> AWS.security_groups.all # [{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>-1, "toPort"=>-1, "ipRanges"=>[], "ipProtocol"=>"icmp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"tcp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"udp"}], # owner_id="312571045469" # vpc_id=nill # > # ] # > # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.deprecation("all with #{filters_arg.class} param is deprecated, use all('group-name' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'group-name' => [*filters_arg]} end self.filters = filters_arg data = service.describe_security_groups(filters).body load(data['securityGroupInfo']) end # Used to retrieve a security group # group name is required to get the associated flavor information. # # You can run the following command to get the details: # AWS.security_groups.get("default") # # ==== Returns # #>> AWS.security_groups.get("default") # [{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>-1, "toPort"=>-1, "ipRanges"=>[], "ipProtocol"=>"icmp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"tcp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"udp"}], # owner_id="312571045469" # vpc_id=nil # > # def get(group_name) if group_name self.class.new(:service => service).all('group-name' => group_name).first end end # Used to retrieve a security group # group id is required to get the associated flavor information. 
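# Collections also accept EC2 filters (assumptions: `AWS` is an existing
# Fog::AWS::Compute connection; the filter values are illustrative):
#
#   AWS.security_groups.all('vpc-id' => 'vpc-abcdefgh')
#   AWS.security_groups.all('group-name' => ['default', 'web'])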
# # You can run the following command to get the details: # AWS.security_groups.get_by_id("default") # # ==== Returns # #>> AWS.security_groups.get_by_id("sg-123456") # [{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>-1, "toPort"=>-1, "ipRanges"=>[], "ipProtocol"=>"icmp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"tcp"}, {"groups"=>[{"groupName"=>"default", "userId"=>"312571045469"}], "fromPort"=>0, "toPort"=>65535, "ipRanges"=>[], "ipProtocol"=>"udp"}], # owner_id="312571045469" # > # def get_by_id(group_id) if group_id self.class.new(:service => service).all('group-id' => group_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/server.rb000066400000000000000000000245611437344660100220730ustar00rootroot00000000000000require 'fog/compute/models/server' module Fog module AWS class Compute class Server < Fog::Compute::Server extend Fog::Deprecation deprecate :ip_address, :public_ip_address identity :id, :aliases => 'instanceId' attr_accessor :architecture attribute :ami_launch_index, :aliases => 'amiLaunchIndex' attribute :associate_public_ip, :aliases => 'associatePublicIP' attribute :availability_zone, :aliases => 'availabilityZone' attribute :block_device_mapping, :aliases => 'blockDeviceMapping' attribute :hibernation_options, :aliases => 'hibernationOptions' attribute :network_interfaces, :aliases => 'networkInterfaces' attribute :client_token, :aliases => 'clientToken' attribute :disable_api_termination, :aliases => 'disableApiTermination' attribute :dns_name, :aliases => 'dnsName' attribute :ebs_optimized, :aliases => 'ebsOptimized' attribute :groups attribute :flavor_id, :aliases => 'instanceType' attribute :hypervisor attribute :iam_instance_profile, :aliases => 'iamInstanceProfile' attribute :image_id, :aliases => 'imageId' attr_accessor :instance_initiated_shutdown_behavior attribute :kernel_id, :aliases => 'kernelId' attribute :key_name, :aliases => 'keyName' attribute :created_at, :aliases => 'launchTime' attribute :lifecycle, :aliases => 'instanceLifecycle' attribute :monitoring, :squash => 'state' attribute :placement_group, :aliases => 'groupName' attribute :platform, :aliases => 'platform' attribute :product_codes, :aliases => 'productCodes' attribute :private_dns_name, :aliases => 'privateDnsName' attribute :private_ip_address, :aliases => 'privateIpAddress' attribute :public_ip_address, :aliases => 'ipAddress' attribute :ramdisk_id, :aliases => 'ramdiskId' attribute :reason attribute :requester_id, :aliases => 'requesterId' attribute :root_device_name, :aliases => 'rootDeviceName' attribute :root_device_type, :aliases => 'rootDeviceType' attribute :security_group_ids, :aliases => 'securityGroupIds' attribute :source_dest_check, :aliases => 'sourceDestCheck' attribute :spot_instance_request_id, :aliases => 'spotInstanceRequestId' attribute :state, :aliases => 'instanceState', :squash => 'name' attribute :state_reason, :aliases => 'stateReason' attribute :subnet_id, :aliases => 'subnetId' attribute :tenancy attribute :tags, :aliases => 'tagSet' attribute :tag_specifications, :aliases => 'tagSpecifications' attribute :user_data attribute :virtualization_type, :aliases => 'virtualizationType' attribute :vpc_id, :aliases => 'vpcId' attr_accessor :password attr_writer :iam_instance_profile_name, :iam_instance_profile_arn def initialize(attributes={}) self.groups ||= ["default"] unless (attributes[:subnet_id] || attributes[:security_group_ids] || 
attributes[:network_interfaces]) self.flavor_id ||= 't1.micro' # Old 'connection' is renamed as service and should be used instead prepare_service_value(attributes) self.image_id ||= begin self.username ||= 'ubuntu' case @service.instance_variable_get(:@region) # Ubuntu 10.04 LTS 64bit (EBS) when 'ap-northeast-1' 'ami-5e0fa45f' when 'ap-southeast-1' 'ami-f092eca2' when 'ap-southeast-2' 'ami-fb8611c1' # Ubuntu 12.04 LTS 64bit (EBS) when 'eu-west-1' 'ami-3d1f2b49' when 'sa-east-1' 'ami-d0429ccd' when 'us-east-1' 'ami-3202f25b' when 'us-west-1' 'ami-f5bfefb0' when 'us-west-2' 'ami-e0ec60d0' end end super end def addresses requires :id service.addresses(:server => self) end def console_output requires :id service.get_console_output(id) end def destroy requires :id service.terminate_instances(id) true end remove_method :flavor_id def flavor_id @flavor && @flavor.id || attributes[:flavor_id] end def flavor=(new_flavor) @flavor = new_flavor end def flavor @flavor ||= service.flavors.all.find {|flavor| flavor.id == flavor_id} end def key_pair requires :key_name service.key_pairs.all({'key-name' => key_name}).first end def key_pair=(new_keypair) self.key_name = new_keypair && new_keypair.name end def ready? state == 'running' end def reboot requires :id service.reboot_instances(id) true end def run_instance_options raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if persisted? requires :image_id options = { 'BlockDeviceMapping' => block_device_mapping, 'HibernationOptions' => hibernation_options, 'NetworkInterfaces' => network_interfaces, 'ClientToken' => client_token, 'DisableApiTermination' => disable_api_termination, 'EbsOptimized' => ebs_optimized, 'IamInstanceProfile.Arn' => @iam_instance_profile_arn, 'IamInstanceProfile.Name' => @iam_instance_profile_name, 'InstanceInitiatedShutdownBehavior' => instance_initiated_shutdown_behavior, 'InstanceType' => flavor_id, 'KernelId' => kernel_id, 'KeyName' => key_name, 'Monitoring.Enabled' => monitoring, 'Placement.AvailabilityZone' => availability_zone, 'Placement.GroupName' => placement_group, 'Placement.Tenancy' => tenancy, 'PrivateIpAddress' => private_ip_address, 'RamdiskId' => ramdisk_id, 'SecurityGroup' => groups, 'SecurityGroupId' => security_group_ids, 'SubnetId' => subnet_id, 'UserData' => user_data, 'TagSpecifications' => tag_specifications, } options.delete_if {|key, value| value.nil?} # If subnet is defined then this is a Virtual Private Cloud. # subnet & security group cannot co-exist. Attempting to specify # both subnet and groups will cause an error. Instead please make # use of Security Group Ids when working in a VPC. 
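# For example (a sketch; the ids are illustrative and `AWS` is an existing
# Fog::AWS::Compute connection):
#
#   AWS.servers.create(
#     :image_id            => 'ami-0123456789abcdef0',
#     :flavor_id           => 'm5.large',
#     :subnet_id           => 'subnet-abcdefgh',
#     :security_group_ids  => ['sg-0abc1234'],
#     :associate_public_ip => true
#   )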
if subnet_id options.delete('SecurityGroup') if associate_public_ip options['NetworkInterface.0.DeviceIndex'] = 0 options['NetworkInterface.0.AssociatePublicIpAddress'] = associate_public_ip options['NetworkInterface.0.SubnetId'] = options['SubnetId'] options.delete('SubnetId') if options['SecurityGroupId'].kind_of?(Array) options['SecurityGroupId'].each {|id| options["NetworkInterface.0.SecurityGroupId.#{options['SecurityGroupId'].index(id)}"] = id } else options["NetworkInterface.0.SecurityGroupId.0"] = options['SecurityGroupId'] end options.delete('SecurityGroupId') if private_ip_address options.delete('PrivateIpAddress') options['NetworkInterface.0.PrivateIpAddress'] = private_ip_address end end else options.delete('SubnetId') end options end def save servers = service.servers.save_many(self, 1, 1) merge_attributes(servers.first.attributes) true end def setup(credentials = {}) requires :ssh_ip_address, :username commands = [ %{mkdir .ssh}, %{passwd -l #{username}}, %{echo "#{Fog::JSON.encode(Fog::JSON.sanitize(attributes))}" >> ~/attributes.json} ] if public_key commands << %{echo "#{public_key}" >> ~/.ssh/authorized_keys} end # wait for aws to be ready wait_for { sshable?(credentials) } Fog::SSH.new(ssh_ip_address, username, credentials).run(commands) end def start requires :id service.start_instances(id) true end def stop(options = {}) requires :id service.stop_instances(id, options) true end def volumes requires :id service.volumes(:server => self) end #I tried to call it monitoring= and be smart with attributes[] #but in #save a merge_attribute is called after run_instance #thus making an un-necessary request. Use this until finding a clever solution def monitor=(new_monitor) if persisted? case new_monitor when true response = service.monitor_instances(identity) when false response = service.unmonitor_instances(identity) else raise ArgumentError.new("only Boolean allowed here") end end self.monitoring = new_monitor end private def placement=(new_placement) if new_placement.is_a?(Hash) merge_attributes(new_placement) else self.attributes[:placement] = new_placement end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/servers.rb000066400000000000000000000163241437344660100222540ustar00rootroot00000000000000require 'fog/aws/models/compute/server' module Fog module AWS class Compute class Servers < Fog::Collection attribute :filters model Fog::AWS::Compute::Server # Creates a new server # # AWS.servers.new # # ==== Returns # # Returns the details of the new server # #>> AWS.servers.new # # def initialize(attributes) self.filters ||= {} super end def all(filters = self.filters) unless filters.is_a?(Hash) Fog::Logger.deprecation("all with #{filters.class} param is deprecated, use all('instance-id' => []) instead [light_black](#{caller.first})[/]") filters = {'instance-id' => [*filters]} end self.filters = filters data = service.describe_instances(filters).body load( data['reservationSet'].map do |reservation| reservation['instancesSet'].map do |instance| instance.merge(:groups => reservation['groupSet'], :security_group_ids => reservation['groupIds']) end end.flatten ) end # Create between m and n servers with the server options specified in # new_attributes. Equivalent to this loop, but happens in 1 request: # # 1.upto(n).map { create(new_attributes) } # # See the AWS RunInstances API. 
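# A minimal sketch (assumptions: `AWS` is an existing Fog::AWS::Compute
# connection and the AMI id is illustrative):
#
#   servers = AWS.servers.create_many(2, 4, :image_id  => 'ami-0123456789abcdef0',
#                                           :flavor_id => 'm5.large')
#   servers.each { |server| server.wait_for { ready? } }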
def create_many(min_servers = 1, max_servers = nil, new_attributes = {}) max_servers ||= min_servers template = new(new_attributes) save_many(template, min_servers, max_servers) end # Bootstrap between m and n servers with the server options specified in # new_attributes. Equivalent to this loop, but happens in 1 AWS request # and the machines' spinup will happen in parallel: # # 1.upto(n).map { bootstrap(new_attributes) } # # See the AWS RunInstances API. def bootstrap_many(min_servers = 1, max_servers = nil, new_attributes = {}) template = service.servers.new(new_attributes) _setup_bootstrap(template) servers = save_many(template, min_servers, max_servers) servers.each do |server| server.wait_for { ready? } server.setup(:key_data => [server.private_key]) end servers end def bootstrap(new_attributes = {}) bootstrap_many(1, 1, new_attributes).first end # Used to retrieve a server # # server_id is required to get the associated server information. # # You can run the following command to get the details: # AWS.servers.get("i-5c973972") # # ==== Returns # #>> AWS.servers.get("i-5c973972") # # def get(server_id) if server_id self.class.new(:service => service).all('instance-id' => server_id).first end rescue Fog::Errors::NotFound nil end # From a template, create between m-n servers (see the AWS RunInstances API) def save_many(template, min_servers = 1, max_servers = nil) max_servers ||= min_servers data = service.run_instances(template.image_id, min_servers, max_servers, template.run_instance_options) # For some reason, AWS sometimes returns empty results alongside the real ones. Thus the select data.body['instancesSet'].select { |instance_set| instance_set['instanceId'] }.map do |instance_set| server = template.dup server.merge_attributes(instance_set) # expect eventual consistency if (tags = server.tags) && tags.size > 0 Fog.wait_for { server.reload rescue nil } Fog.wait_for { begin service.create_tags(server.identity, tags) rescue Fog::AWS::Compute::NotFound false end } end server end end private def _setup_bootstrap(server) unless server.key_name # first or create fog_#{credential} keypair name = Fog.respond_to?(:credential) && Fog.credential || :default unless server.key_pair = service.key_pairs.get("fog_#{name}") server.key_pair = service.key_pairs.create( :name => "fog_#{name}", :public_key => server.public_key ) end end security_group = service.security_groups.get(server.groups.first) if security_group.nil? raise Fog::AWS::Compute::Error, "The security group" \ " #{server.groups.first} doesn't exist." 
end # make sure port 22 is open in the first security group authorized = security_group.ip_permissions.find do |ip_permission| ip_permission['ipRanges'].find { |ip_range| ip_range['cidrIp'] == '0.0.0.0/0' } && ip_permission['fromPort'] == 22 && ip_permission['ipProtocol'] == 'tcp' && ip_permission['toPort'] == 22 end unless authorized security_group.authorize_port_range(22..22) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/snapshot.rb000066400000000000000000000026031437344660100224150ustar00rootroot00000000000000module Fog module AWS class Compute class Snapshot < Fog::Model identity :id, :aliases => 'snapshotId' attribute :description attribute :encrypted attribute :progress attribute :created_at, :aliases => 'startTime' attribute :owner_id, :aliases => 'ownerId' attribute :state, :aliases => 'status' attribute :tags, :aliases => 'tagSet' attribute :volume_id, :aliases => 'volumeId' attribute :volume_size, :aliases => 'volumeSize' attribute :status_message, :aliases => 'statusMessage' def destroy requires :id service.delete_snapshot(id) true end def ready? state == 'completed' end def save raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if persisted? requires :volume_id data = service.create_snapshot(volume_id, description).body new_attributes = data.reject {|key,value| key == 'requestId'} merge_attributes(new_attributes) true end def volume requires :id service.describe_volumes(volume_id) end private def volume=(new_volume) self.volume_id = new_volume.volume_id end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/snapshots.rb000066400000000000000000000024651437344660100226060ustar00rootroot00000000000000require 'fog/aws/models/compute/snapshot' module Fog module AWS class Compute class Snapshots < Fog::Collection attribute :filters attribute :volume model Fog::AWS::Compute::Snapshot def initialize(attributes) self.filters ||= { 'RestorableBy' => 'self' } super end def all(filters_arg = filters, options = {}) unless filters_arg.is_a?(Hash) Fog::Logger.deprecation("all with #{filters_arg.class} param is deprecated, use all('snapshot-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'snapshot-id' => [*filters_arg]} end filters = filters_arg data = service.describe_snapshots(filters.merge!(options)).body load(data['snapshotSet']) if volume self.replace(self.select {|snapshot| snapshot.volume_id == volume.id}) end self end def get(snapshot_id) if snapshot_id self.class.new(:service => service).all('snapshot-id' => snapshot_id).first end end def new(attributes = {}) if volume super({ 'volumeId' => volume.id }.merge!(attributes)) else super end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/spot_request.rb000066400000000000000000000125761437344660100233250ustar00rootroot00000000000000require 'fog/compute/models/server' module Fog module AWS class Compute class SpotRequest < Fog::Compute::Server identity :id, :aliases => 'spotInstanceRequestId' attribute :price, :aliases => 'spotPrice' attribute :request_type, :aliases => 'type' attribute :created_at, :aliases => 'createTime' attribute :instance_count, :aliases => 'instanceCount' attribute :instance_id, :aliases => 'instanceId' attribute :state attribute :valid_from, :aliases => 'validFrom' attribute :valid_until, :aliases => 'validUntil' attribute :launch_group, :aliases => 'launchGroup' attribute :availability_zone_group, :aliases => 'availabilityZoneGroup' attribute :product_description, :aliases => 'productDescription' attribute :ebs_optimized, 
:aliases => 'LaunchSpecification.EbsOptimized' attribute :groups, :aliases => 'LaunchSpecification.SecurityGroup' attribute :security_group_ids, :aliases => 'LaunchSpecification.SecurityGroupId' attribute :key_name, :aliases => 'LaunchSpecification.KeyName' attribute :availability_zone, :aliases => 'LaunchSpecification.Placement.AvailabilityZone' attribute :flavor_id, :aliases => 'LaunchSpecification.InstanceType' attribute :image_id, :aliases => 'LaunchSpecification.ImageId' attribute :monitoring, :aliases => 'LaunchSpecification.Monitoring' attribute :block_device_mapping, :aliases => 'LaunchSpecification.BlockDeviceMapping' attribute :subnet_id, :aliases => 'LaunchSpecification.SubnetId' attribute :iam_instance_profile, :aliases => 'LaunchSpecification.IamInstanceProfile' attribute :tags, :aliases => 'tagSet' attribute :fault, :squash => 'message' attribute :user_data attr_writer :iam_instance_profile_name, :iam_instance_profile_arn def initialize(attributes={}) self.groups ||= self.security_group_ids || ["default"] self.flavor_id ||= 't1.micro' self.image_id ||= begin self.username ||= 'ubuntu' # Old 'connection' is renamed as service and should be used instead prepare_service_value(attributes) case @service.instance_variable_get(:@region) # Ubuntu 10.04 LTS 64bit (EBS) when 'ap-northeast-1' 'ami-5e0fa45f' when 'ap-southeast-1' 'ami-f092eca2' when 'eu-west-1' 'ami-3d1f2b49' when 'us-east-1' 'ami-3202f25b' when 'us-west-1' 'ami-f5bfefb0' end end super end def destroy requires :id service.cancel_spot_instance_requests(id) true end def key_pair requires :key_name service.key_pairs.all(key_name).first end def key_pair=(new_keypair) self.key_name = new_keypair && new_keypair.name end def ready? state == 'active' end def save requires :image_id, :flavor_id, :price options = { 'AvailabilityZoneGroup' => availability_zone_group, 'InstanceCount' => instance_count, 'LaunchGroup' => launch_group, 'LaunchSpecification.BlockDeviceMapping' => block_device_mapping, 'LaunchSpecification.KeyName' => key_name, 'LaunchSpecification.Monitoring.Enabled' => monitoring, 'LaunchSpecification.Placement.AvailabilityZone' => availability_zone, 'LaunchSpecification.SecurityGroupId' => security_group_ids || groups, 'LaunchSpecification.EbsOptimized' => ebs_optimized, 'LaunchSpecification.UserData' => user_data, 'LaunchSpecification.SubnetId' => subnet_id, 'LaunchSpecification.IamInstanceProfile.Arn' => @iam_instance_profile_arn, 'LaunchSpecification.IamInstanceProfile.Name' => @iam_instance_profile_name, 'Type' => request_type, 'ValidFrom' => valid_from, 'ValidUntil' => valid_until } options.delete_if {|key, value| value.nil?} data = service.request_spot_instances(image_id, flavor_id, price, options).body spot_instance_request = data['spotInstanceRequestSet'].first spot_instance_request['launchSpecification'].each do |name,value| spot_instance_request['LaunchSpecification.' 
+ name[0,1].upcase + name[1..-1]] = value end spot_instance_request.merge(:groups => spot_instance_request['LaunchSpecification.GroupSet']) spot_instance_request.merge(options) merge_attributes( spot_instance_request ) end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/spot_requests.rb000066400000000000000000000062451437344660100235040ustar00rootroot00000000000000require 'fog/aws/models/compute/spot_request' module Fog module AWS class Compute class SpotRequests < Fog::Collection attribute :filters model Fog::AWS::Compute::SpotRequest def initialize(attributes) self.filters ||= {} super end def all(filters = self.filters) unless filters.is_a?(Hash) Fog::Logger.deprecation("all with #{filters.class} param is deprecated, use all('spot-instance-request-id' => []) instead [light_black](#{caller.first})[/]") filters = {'spot-instance-request-id' => [*filters]} end self.filters = filters data = service.describe_spot_instance_requests(filters).body load( data['spotInstanceRequestSet'].map do |spot_instance_request| spot_instance_request['LaunchSpecification.Placement.AvailabilityZone'] = spot_instance_request['launchedAvailabilityZone'] spot_instance_request['launchSpecification'].each do |name,value| spot_instance_request['LaunchSpecification.' + name[0,1].upcase + name[1..-1]] = value end spot_instance_request.merge(:groups => spot_instance_request['LaunchSpecification.GroupSet']) spot_instance_request end.flatten ) end def bootstrap(new_attributes = {}) spot_request = service.spot_requests.new(new_attributes) unless new_attributes[:key_name] # first or create fog_#{credential} keypair name = Fog.respond_to?(:credential) && Fog.credential || :default unless spot_request.key_pair = service.key_pairs.get("fog_#{name}") spot_request.key_pair = service.key_pairs.create( :name => "fog_#{name}", :public_key => spot_request.public_key ) end end # make sure port 22 is open in the first security group security_group = service.security_groups.get(spot_request.groups.first) authorized = security_group.ip_permissions.find do |ip_permission| ip_permission['ipRanges'].first && ip_permission['ipRanges'].first['cidrIp'] == '0.0.0.0/0' && ip_permission['fromPort'] == 22 && ip_permission['ipProtocol'] == 'tcp' && ip_permission['toPort'] == 22 end unless authorized security_group.authorize_port_range(22..22) end spot_request.save Fog.wait_for { spot_request.reload.ready? rescue nil } server = service.servers.get(spot_request.instance_id) if spot_request.tags service.create_tags( spot_request.instance_id, spot_request.tags ) end server.wait_for { ready? } server.setup(:key_data => [spot_request.private_key]) server end def get(spot_request_id) if spot_request_id self.class.new(:service => service).all('spot-instance-request-id' => spot_request_id).first end rescue Fog::Errors::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/subnet.rb000066400000000000000000000035171437344660100220630ustar00rootroot00000000000000module Fog module AWS class Compute class Subnet < Fog::Model identity :subnet_id, :aliases => 'subnetId' attribute :state attribute :vpc_id, :aliases => 'vpcId' attribute :cidr_block, :aliases => 'cidrBlock' attribute :available_ip_address_count, :aliases => 'availableIpAddressCount' attribute :availability_zone, :aliases => 'availabilityZone' attribute :tag_set, :aliases => 'tagSet' attribute :map_public_ip_on_launch, :aliases => 'mapPublicIpOnLaunch' attribute :default_for_az, :aliases => 'defaultForAz' def ready? 
          requires :state

          state == 'available'
        end

        def network_interfaces
          service.network_interfaces.all('subnet-id' => [self.identity])
        end

        # Removes an existing subnet
        #
        # subnet.destroy
        #
        # ==== Returns
        #
        # True or false depending on the result
        #
        def destroy
          requires :subnet_id

          service.delete_subnet(subnet_id)
          true
        end

        # Create a subnet
        #
        # >> g = AWS.subnets.new(:vpc_id => "vpc-someId", :cidr_block => "10.0.0.0/24")
        # >> g.save
        #
        # == Returns:
        #
        # requestId and a subnet object
        #
        def save
          requires :vpc_id, :cidr_block
          options = {}
          options['AvailabilityZone'] = availability_zone if availability_zone
          data = service.create_subnet(vpc_id, cidr_block, options).body['subnet']
          new_attributes = data.reject {|key,value| key == 'requestId'}
          merge_attributes(new_attributes)
          true
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/models/compute/subnets.rb000066400000000000000000000046451437344660100222470ustar00rootroot00000000000000require 'fog/aws/models/compute/subnet'

module Fog
  module AWS
    class Compute
      class Subnets < Fog::Collection
        attribute :filters

        model Fog::AWS::Compute::Subnet

        # Creates a new subnet
        #
        # AWS.subnets.new
        #
        # ==== Returns
        #
        # Returns the details of the new Subnet
        #
        #>> AWS.subnets.new
        #
        #
        def initialize(attributes)
          self.filters ||= {}
          super
        end

        # Returns an array of all Subnets that have been created
        #
        # AWS.subnets.all
        #
        # ==== Returns
        #
        # Returns an array of all Subnets
        #
        #>> AWS.subnets.all
        #
        #
        def all(filters_arg = filters)
          unless filters_arg.is_a?(Hash)
            Fog::Logger.warning("all with #{filters_arg.class} param is deprecated, use all('subnet-id' => []) instead [light_black](#{caller.first})[/]")
            filters_arg = {'subnet-id' => [*filters_arg]}
          end
          filters = filters_arg
          data = service.describe_subnets(filters).body
          load(data['subnetSet'])
        end

        # Used to retrieve a Subnet
        # subnet-id is required to get the associated Subnet information.
# # You can run the following command to get the details: # AWS.subnets.get("subnet-12345678") # # ==== Returns # #>> AWS.subnets.get("subnet-12345678") # # def get(subnet_id) if subnet_id self.class.new(:service => service).all('subnet-id' => subnet_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/tag.rb000066400000000000000000000011741437344660100213330ustar00rootroot00000000000000module Fog module AWS class Compute class Tag < Fog::Model identity :key attribute :value attribute :resource_id, :aliases => 'resourceId' attribute :resource_type, :aliases => 'resourceType' def initialize(attributes = {}) super end def destroy requires :key, :resource_id service.delete_tags(resource_id, key => value) true end def save requires :key, :resource_id service.create_tags(resource_id, key => value) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/tags.rb000066400000000000000000000011311437344660100215070ustar00rootroot00000000000000require 'fog/aws/models/compute/tag' module Fog module AWS class Compute class Tags < Fog::Collection attribute :filters model Fog::AWS::Compute::Tag def initialize(attributes) self.filters ||= {} super end def all(filters_arg = filters) filters = filters_arg data = service.describe_tags(filters).body load(data['tagSet']) end def get(key) if key self.class.new(:service => service).all('key' => key) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/volume.rb000066400000000000000000000076301437344660100220720ustar00rootroot00000000000000module Fog module AWS class Compute class Volume < Fog::Model identity :id, :aliases => 'volumeId' attribute :attached_at, :aliases => 'attachTime' attribute :availability_zone, :aliases => 'availabilityZone' attribute :created_at, :aliases => 'createTime' attribute :delete_on_termination, :aliases => 'deleteOnTermination' attribute :device attribute :encrypted attribute :key_id, :aliases => ['KmsKeyId', 'kmsKeyId'] attribute :iops attribute :server_id, :aliases => 'instanceId' attribute :size attribute :snapshot_id, :aliases => 'snapshotId' attribute :state, :aliases => 'status' attribute :tags, :aliases => 'tagSet' attribute :type, :aliases => 'volumeType' def initialize(attributes = {}) # assign server first to prevent race condition with persisted? @server = attributes.delete(:server) super end def destroy requires :id service.delete_volume(id) true end def ready? state == 'available' end def modification_in_progress? modifications.any? { |m| m['modificationState'] != 'completed' } end def modifications requires :identity service.describe_volumes_modifications('volume-id' => self.identity).body['volumeModificationSet'] end def save if identity update_params = { 'Size' => self.size, 'Iops' => self.iops, 'VolumeType' => self.type } service.modify_volume(self.identity, update_params) true else requires :availability_zone requires_one :size, :snapshot_id requires :iops if type == 'io1' data = service.create_volume(availability_zone, size, create_params).body merge_attributes(data) if tags = self.tags # expect eventual consistency Fog.wait_for { service.volumes.get(identity) } service.create_tags(identity, tags) end attach(@server, device) if @server && device end true end def server requires :server_id service.servers.get(server_id) end def snapshots requires :id service.snapshots(:volume => self) end def snapshot(description) requires :id service.create_snapshot(id, description) end def force_detach detach(true) end def attach(new_server, new_device) if !persisted? 
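          # Volume has not been saved yet: remember the server and adopt its
          # availability zone, so that #save can perform the actual attachment later.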
@server = new_server self.availability_zone = new_server.availability_zone elsif new_server wait_for { ready? } @server = nil self.server_id = new_server.id service.attach_volume(server_id, id, new_device) reload end end def detach(force = false) @server = nil self.server_id = nil if persisted? service.detach_volume(id, 'Force' => force) reload end end def server=(_) raise NoMethodError, 'use Fog::AWS::Compute::Volume#attach(server, device)' end private def attachmentSet=(new_attachment_set) merge_attributes(new_attachment_set.first || {}) end def create_params { 'Encrypted' => encrypted, 'KmsKeyId' => key_id, 'Iops' => iops, 'SnapshotId' => snapshot_id, 'VolumeType' => type } end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/volumes.rb000066400000000000000000000065171437344660100222600ustar00rootroot00000000000000require 'fog/aws/models/compute/volume' module Fog module AWS class Compute class Volumes < Fog::Collection attribute :filters attribute :server model Fog::AWS::Compute::Volume # Used to create a volume. There are 3 arguments and availability_zone and size are required. You can generate a new key_pair as follows: # AWS.volumes.create(:availability_zone => 'us-east-1a', :size => 10) # # ==== Returns # # # # The volume can be retrieved by running AWS.volumes.get("vol-1e2028b9"). See get method below. # def initialize(attributes) self.filters ||= {} super end # Used to return all volumes. # AWS.volumes.all # # ==== Returns # #>>AWS.volumes.all # # # The volume can be retrieved by running AWS.volumes.get("vol-1e2028b9"). See get method below. # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.deprecation("all with #{filters_arg.class} param is deprecated, use all('volume-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'volume-id' => [*filters_arg]} end filters = filters_arg data = service.describe_volumes(filters).body load(data['volumeSet']) if server self.replace(self.select {|volume| volume.server_id == server.id}) end self end # Used to retrieve a volume # volume_id is required to get the associated volume information. # # You can run the following command to get the details: # AWS.volumes.get("vol-1e2028b9") # # ==== Returns # #>> AWS.volumes.get("vol-1e2028b9") # # def get(volume_id) if volume_id self.class.new(:service => service).all('volume-id' => volume_id).first end end def new(attributes = {}) if server super({ :server => server }.merge!(attributes)) else super end end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/vpc.rb000066400000000000000000000101151437344660100213430ustar00rootroot00000000000000module Fog module AWS class Compute class VPC < Fog::Model identity :id, :aliases => 'vpcId' attribute :state attribute :cidr_block, :aliases => 'cidrBlock' attribute :dhcp_options_id, :aliases => 'dhcpOptionsId' attribute :tags, :aliases => 'tagSet' attribute :tenancy, :aliases => 'instanceTenancy' attribute :is_default, :aliases => 'isDefault' attribute :cidr_block_association_set, :aliases => 'cidrBlockAssociationSet' attribute :ipv6_cidr_block_association_set, :aliases => 'ipv6CidrBlockAssociationSet' attribute :amazon_provided_ipv6_cidr_block, :aliases => 'amazonProvidedIpv6CidrBlock' # Backward compatibility. 
Please use ipv6_cidr_block_association_set alias_method :ipv_6_cidr_block_association_set, :ipv6_cidr_block_association_set alias_method :ipv_6_cidr_block_association_set=, :ipv6_cidr_block_association_set= alias_method :amazon_provided_ipv_6_cidr_block, :amazon_provided_ipv6_cidr_block alias_method :amazon_provided_ipv_6_cidr_block=, :amazon_provided_ipv6_cidr_block= def subnets service.subnets(:filters => {'vpcId' => self.identity}).all end def initialize(attributes={}) self.dhcp_options_id ||= "default" self.tenancy ||= "default" self.amazon_provided_ipv_6_cidr_block ||=false super end def ready? requires :state state == 'available' end def is_default? requires :is_default is_default end # Removes an existing vpc # # vpc.destroy # # ==== Returns # # True or false depending on the result # def destroy requires :id service.delete_vpc(id) true end def classic_link_enabled? requires :identity service.describe_vpc_classic_link(:vpc_ids => [self.identity]).body['vpcSet'].first['classicLinkEnabled'] rescue nil end def enable_classic_link requires :identity service.enable_vpc_classic_link(self.identity).body['return'] end def disable_classic_link requires :identity service.disable_vpc_classic_link(self.identity).body['return'] end def classic_link_dns_enabled? requires :identity service.describe_vpc_classic_link_dns_support(:vpc_ids => [self.identity]).body['vpcs'].first['classicLinkDnsSupported'] rescue nil end def enable_classic_link_dns requires :identity service.enable_vpc_classic_link_dns_support(self.identity).body['return'] end def disable_classic_link_dns requires :identity service.disable_vpc_classic_link_dns_support(self.identity).body['return'] end # Create a vpc # # >> g = AWS.vpcs.new(:cidr_block => "10.1.2.0/24") # >> g.save # # == Returns: # # True or an exception depending on the result. Keep in mind that this *creates* a new vpc. # As such, it yields an InvalidGroup.Duplicate exception if you attempt to save an existing vpc. # def save requires :cidr_block options = { 'AmazonProvidedIpv6CidrBlock' => amazon_provided_ipv_6_cidr_block, 'InstanceTenancy' => tenancy } data = service.create_vpc(cidr_block, options).body['vpcSet'].first new_attributes = data.reject {|key,value| key == 'requestId'} new_attributes = data.reject {|key,value| key == 'requestId' || key == 'tagSet' } merge_attributes(new_attributes) if tags = self.tags # expect eventual consistency Fog.wait_for { self.reload rescue nil } service.create_tags( self.identity, tags ) end true end end end end end fog-aws-3.18.0/lib/fog/aws/models/compute/vpcs.rb000066400000000000000000000040021437344660100215240ustar00rootroot00000000000000require 'fog/aws/models/compute/vpc' module Fog module AWS class Compute class Vpcs < Fog::Collection attribute :filters model Fog::AWS::Compute::VPC # Creates a new VPC # # AWS.vpcs.new # # ==== Returns # # Returns the details of the new VPC # #>> AWS.vpcs.new # # def initialize(attributes) self.filters ||= {} super end # Returns an array of all VPCs that have been created # # AWS.vpcs.all # # ==== Returns # # Returns an array of all VPCs # #>> AWS.vpcs.all # # ] # > # def all(filters_arg = filters) unless filters_arg.is_a?(Hash) Fog::Logger.warning("all with #{filters_arg.class} param is deprecated, use all('vpc-id' => []) instead [light_black](#{caller.first})[/]") filters_arg = {'vpc-id' => [*filters_arg]} end filters = filters_arg data = service.describe_vpcs(filters).body load(data['vpcSet']) end # Used to retrieve a VPC # vpc_id is required to get the associated VPC information. 
# # You can run the following command to get the details: # AWS.vpcs.get("vpc-12345678") # # ==== Returns # #>> AWS.vpcs.get("vpc-12345678") # # def get(vpc_id) if vpc_id self.class.new(:service => service).all('vpc-id' => vpc_id).first end end end end end end fog-aws-3.18.0/lib/fog/aws/models/data_pipeline/000077500000000000000000000000001437344660100213525ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/data_pipeline/pipeline.rb000066400000000000000000000026461437344660100235140ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Pipeline < Fog::Model identity :id, :aliases => 'pipelineId' attribute :name attribute :description attribute :tags attribute :user_id, :aliases => 'userId' attribute :account_id, :aliases => 'accountId' attribute :state, :aliases => 'pipelineState' attribute :unique_id, :aliases => 'uniqueId' def initialize(attributes={}) # Extract the 'fields' portion of a response to attributes if attributes.include?('fields') string_fields = attributes['fields'].select { |f| f.include?('stringValue') } field_attributes = Hash[string_fields.map { |f| [f['key'][/^@(.+)$/, 1], f['stringValue']] }] merge_attributes(field_attributes) end super end def save requires :name requires :unique_id data = service.create_pipeline(unique_id, name, nil, tags) merge_attributes(data) true end def activate requires :id service.activate_pipeline(id) true end def put(objects) requires :id service.put_pipeline_definition(id, objects) true end def destroy requires :id service.delete_pipeline(id) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/data_pipeline/pipelines.rb000066400000000000000000000016161437344660100236730ustar00rootroot00000000000000require 'fog/aws/models/data_pipeline/pipeline' module Fog module AWS class DataPipeline class Pipelines < Fog::Collection model Fog::AWS::DataPipeline::Pipeline def all ids = [] begin result = service.list_pipelines ids << result['pipelineIdList'].map { |id| id['id'] } end while (result['hasMoreResults'] && result['marker']) load(service.describe_pipelines(ids.flatten)['pipelineDescriptionList']) end def get(id) data = service.describe_pipelines([id])['pipelineDescriptionList'].first new(data) rescue Excon::Errors::BadRequest => error data = Fog::JSON.decode(error.response.body) raise unless data['__type'] == 'PipelineDeletedException' || data['__type'] == 'PipelineNotFoundException' nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/dns/000077500000000000000000000000001437344660100173405ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/dns/record.rb000066400000000000000000000067441437344660100211560ustar00rootroot00000000000000module Fog module AWS class DNS class Record < Fog::Model extend Fog::Deprecation deprecate :ip, :value deprecate :ip=, :value= identity :name, :aliases => ['Name'] attribute :value, :aliases => ['ResourceRecords'] attribute :ttl, :aliases => ['TTL'] attribute :type, :aliases => ['Type'] attribute :status, :aliases => ['Status'] attribute :created_at, :aliases => ['SubmittedAt'] attribute :alias_target, :aliases => ['AliasTarget'] attribute :change_id, :aliases => ['Id'] attribute :region, :aliases => ['Region'] attribute :weight, :aliases => ['Weight'] attribute :set_identifier, :aliases => ['SetIdentifier'] attribute :failover, :aliases => ['Failover'] attribute :geo_location, :aliases => ['GeoLocation'] attribute :health_check_id, :aliases => ['HealthCheckId'] def initialize(attributes={}) super end def destroy options = attributes_to_options('DELETE') 
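          # Deletion is submitted as a single change batch whose DELETE entry is
          # built from this record's current attributes.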
service.change_resource_record_sets(zone.id, [options]) true end def zone @zone end def save unless self.alias_target self.ttl ||= 3600 end options = attributes_to_options('CREATE') data = service.change_resource_record_sets(zone.id, [options]).body merge_attributes(data) true end def modify(new_attributes) options = [] # Delete the current attributes options << attributes_to_options('DELETE') # Create the new attributes merge_attributes(new_attributes) options << attributes_to_options('CREATE') data = service.change_resource_record_sets(zone.id, options).body merge_attributes(data) true end # Returns true if record is insync. May only be called for newly created or modified records that # have a change_id and status set. def ready? requires :change_id, :status status == 'INSYNC' end def reload # If we have a change_id (newly created or modified), then reload performs a get_change to update status. if change_id data = service.get_change(change_id).body merge_attributes(data) self else super end end private def zone=(new_zone) @zone = new_zone end def attributes_to_options(action) requires :name, :type, :zone requires_one :value, :alias_target options = { :action => action, :name => name, :resource_records => [*value], :alias_target => symbolize_keys(alias_target), :ttl => ttl, :type => type, :weight => weight, :set_identifier => set_identifier, :region => region, :failover => failover, :geo_location => geo_location, :health_check_id => health_check_id } unless self.alias_target requires :ttl options[:ttl] = ttl end options end end end end end fog-aws-3.18.0/lib/fog/aws/models/dns/records.rb000066400000000000000000000104221437344660100213250ustar00rootroot00000000000000require 'fog/aws/models/dns/record' module Fog module AWS class DNS class Records < Fog::Collection attribute :is_truncated, :aliases => ['IsTruncated'] attribute :max_items, :aliases => ['MaxItems'] attribute :name attribute :next_record_name, :aliases => ['NextRecordName'] attribute :next_record_type, :aliases => ['NextRecordType'] attribute :next_record_identifier, :aliases => ['NextRecordIdentifier'] attribute :type attribute :identifier attribute :zone model Fog::AWS::DNS::Record def all(options = {}) requires :zone options[:max_items] ||= max_items options[:name] ||= zone.domain options[:type] ||= type options[:identifier] ||= identifier options.delete_if {|key, value| value.nil?} data = service.list_resource_record_sets(zone.id, options).body # NextRecordIdentifier is completely absent instead of nil, so set to nil, or iteration breaks. data['NextRecordIdentifier'] = nil unless data.key?('NextRecordIdentifier') merge_attributes(data.reject {|key, value| !['IsTruncated', 'MaxItems', 'NextRecordName', 'NextRecordType', 'NextRecordIdentifier'].include?(key)}) load(data['ResourceRecordSets']) end # # Load all zone records into the collection. # def all! data = [] merge_attributes({'NextRecordName' => nil, 'NextRecordType' => nil, 'NextRecordIdentifier' => nil, 'IsTruncated' => nil}) begin options = { :name => next_record_name, :type => next_record_type, :identifier => next_record_identifier } options.delete_if {|key, value| value.nil?} batch = service.list_resource_record_sets(zone.id, options).body # NextRecordIdentifier is completely absent instead of nil, so set to nil, or iteration breaks. 
batch['NextRecordIdentifier'] = nil unless batch.key?('NextRecordIdentifier') merge_attributes(batch.reject {|key, value| !['IsTruncated', 'MaxItems', 'NextRecordName', 'NextRecordType', 'NextRecordIdentifier'].include?(key)}) data.concat(batch['ResourceRecordSets']) end while is_truncated load(data) end # # AWS Route 53 records are uniquely identified by a compound key of name, type, and identifier. # #get allows one to retrieve a record using one or more of those key components. # # ==== Parameters # * record_name - The name of the record to retrieve. # * record_type - The type of record to retrieve, if nil, then the first matching record is returned. # * record_identifier - The record set identifier to retrieve, if nil, then the first matching record is returned. # def get(record_name, record_type = nil, record_identifier = nil) requires :zone # Append a trailing period to the record_name if absent. record_name = record_name + "." unless record_name.end_with?(".") record_type = record_type.upcase unless record_type.nil? options = { :max_items => 1, :name => record_name, :type => record_type, :identifier => record_identifier } options.delete_if {|key, value| value.nil?} data = service.list_resource_record_sets(zone.id, options).body # look for an exact match in the records (data['ResourceRecordSets'] || []).map do |record_data| record = new(record_data) if (record.name.casecmp(record_name) == 0) && (record_type.nil? || (record.type == record_type)) && (record_identifier.nil? || (record.set_identifier == record_identifier)) record end end.compact.first rescue Fog::AWS::DNS::NotFound nil end def new(attributes = {}) requires :zone super({ :zone => zone }.merge!(attributes)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/dns/zone.rb000066400000000000000000000024041437344660100206400ustar00rootroot00000000000000# require 'fog/aws/models/dns/records' module Fog module AWS class DNS class Zone < Fog::Model identity :id, :aliases => 'Id' attribute :caller_reference, :aliases => 'CallerReference' attribute :change_info, :aliases => 'ChangeInfo' attribute :description, :aliases => 'Comment' attribute :domain, :aliases => 'Name' attribute :nameservers, :aliases => 'NameServers' def destroy requires :identity service.delete_hosted_zone(identity) true end def records @records ||= begin Fog::AWS::DNS::Records.new( :zone => self, :service => service ) end end def save requires :domain options = {} options[:caller_ref] = caller_reference if caller_reference options[:comment] = description if description data = service.create_hosted_zone(domain, options).body merge_attributes(data) true end private define_method(:HostedZone=) do |new_hosted_zone| merge_attributes(new_hosted_zone) end end end end end fog-aws-3.18.0/lib/fog/aws/models/dns/zones.rb000066400000000000000000000012761437344660100210310ustar00rootroot00000000000000require 'fog/aws/models/dns/zone' module Fog module AWS class DNS class Zones < Fog::Collection attribute :marker, :aliases => 'Marker' attribute :max_items, :aliases => 'MaxItems' model Fog::AWS::DNS::Zone def all(options = {}) options[:marker] ||= marker unless marker.nil? options[:maxitems] ||= max_items unless max_items.nil? 
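          # The marker and maxitems pagination options are passed straight through to
          # the underlying list_hosted_zones request.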
data = service.list_hosted_zones(options).body['HostedZones'] load(data) end def get(zone_id) data = service.get_hosted_zone(zone_id).body new(data) rescue Fog::AWS::DNS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/efs/000077500000000000000000000000001437344660100173315ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/efs/file_system.rb000066400000000000000000000031271437344660100222040ustar00rootroot00000000000000module Fog module AWS class EFS class FileSystem < Fog::Model identity :id, :aliases => 'FileSystemId' attribute :owner_id, :aliases => 'OwnerId' attribute :creation_token, :aliases => 'CreationToken' attribute :performance_mode, :aliases => 'PerformanceMode' attribute :encrypted, :aliases => 'Encrypted' attribute :kms_key_id, :aliases => 'KmsKeyId' attribute :creation_time, :aliases => 'CreationTime' attribute :state, :aliases => 'LifeCycleState' attribute :name, :aliases => 'Name' attribute :number_of_mount_targets, :aliases => 'NumberOfMountTargets' attribute :size_in_bytes, :aliases => 'SizeInBytes' def ready? state == 'available' end def mount_targets requires :identity service.mount_targets(:file_system_id => self.identity).all end def destroy requires :identity service.delete_file_system(self.identity) true end def save params = {} params.merge!(:performance_mode => self.performance_mode) if self.performance_mode params.merge!(:encrypted => self.encrypted) if self.encrypted params.merge!(:kms_key_id => self.kms_key_id) if self.kms_key_id merge_attributes(service.create_file_system(self.creation_token || Fog::Mock.random_hex(32), params).body) end end end end end fog-aws-3.18.0/lib/fog/aws/models/efs/file_systems.rb000066400000000000000000000007751437344660100223750ustar00rootroot00000000000000require 'fog/aws/models/efs/file_system' module Fog module AWS class EFS class FileSystems < Fog::Collection model Fog::AWS::EFS::FileSystem def all data = service.describe_file_systems.body["FileSystems"] load(data) end def get(identity) data = service.describe_file_systems(:id => identity).body["FileSystems"].first new(data) rescue Fog::AWS::EFS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/efs/mount_target.rb000066400000000000000000000034121437344660100223660ustar00rootroot00000000000000module Fog module AWS class EFS class MountTarget < Fog::Model identity :id, :aliases => "MountTargetId" attribute :file_system_id, :aliases => "FileSystemId" attribute :ip_address, :aliases => "IpAddress" attribute :state, :aliases => "LifeCycleState" attribute :network_interface_id, :aliases => "NetworkInterfaceId" attribute :owner_id, :aliases => "OwnerId" attribute :subnet_id, :aliases => "SubnetId" def ready? state == 'available' end def destroy requires :identity service.delete_mount_target(self.identity) true end def file_system requires :file_system_id service.file_systems.get(self.file_system_id) end def security_groups if persisted? requires :identity service.describe_mount_target_security_groups(self.identity).body["SecurityGroups"] else @security_groups || [] end end def security_groups=(security_groups) if persisted? 
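            # Already persisted: push the new security groups to the EFS API immediately.
            # For unsaved targets they are staged locally and applied when the mount
            # target is created in #save.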
requires :identity service.modify_mount_target_security_groups(self.identity, security_groups) else @security_groups = security_groups end security_groups end def save requires :file_system_id, :subnet_id params = {} params.merge!('IpAddress' => self.ip_address) if self.ip_address params.merge!('SecurityGroups' => @security_groups) if @security_groups merge_attributes(service.create_mount_target(self.file_system_id, self.subnet_id, params).body) end end end end end fog-aws-3.18.0/lib/fog/aws/models/efs/mount_targets.rb000066400000000000000000000011171437344660100225510ustar00rootroot00000000000000require 'fog/aws/models/efs/mount_target' module Fog module AWS class EFS class MountTargets < Fog::Collection attribute :file_system_id model Fog::AWS::EFS::MountTarget def all data = service.describe_mount_targets(:file_system_id => self.file_system_id).body["MountTargets"] load(data) end def get(identity) data = service.describe_mount_targets(:id => identity).body["MountTargets"].first new(data) rescue Fog::AWS::EFS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/000077500000000000000000000000001437344660100210215ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/elasticache/cluster.rb000066400000000000000000000057301437344660100230340ustar00rootroot00000000000000module Fog module AWS class Elasticache class Cluster < Fog::Model # simple attributes identity :id, :aliases => 'CacheClusterId' attribute :auto_upgrade, :aliases => 'AutoMinorVersionUpgrade' attribute :status, :aliases => 'CacheClusterStatus' attribute :node_type, :aliases => 'CacheNodeType' attribute :engine, :aliases => 'Engine' attribute :engine_version, :aliases => 'EngineVersion' attribute :num_nodes, :aliases => 'NumCacheNodes' attribute :zone, :aliases => 'PreferredAvailabilityZone' attribute :port, :aliases => 'Port' attribute :maintenance_window, :aliases => 'PreferredMaintenanceWindow' # complex attributes attribute :nodes, :aliases => 'CacheNodes', :type => :array attribute :parameter_group, :aliases => 'CacheParameterGroup' attribute :pending_values, :aliases => 'PendingModifiedValues' attribute :create_time, :aliases => 'CacheClusterCreateTime', :type => :timestamp attribute :cache_security_groups, :aliases => 'CacheSecurityGroups', :type => :array attribute :security_groups, :aliases => 'SecurityGroups', :type => :array attribute :notification_config, :aliases => 'NotificationConfiguration' attribute :cache_subnet_group_name, :aliases => 'CacheSubnetGroupName' attribute :vpc_security_groups, :aliases => 'VpcSecurityGroups', :type => :array attribute :s3_snapshot_location, :aliases => 'SnapshotArns', :type => :array attribute :configuration_endpoint, :aliases => 'ConfigurationEndpoint' attr_accessor :parameter_group_name def ready? 
status == 'available' end def destroy requires :id service.delete_cache_cluster(id) true end def save requires :id parameter_group ||= Hash.new notification_config ||= Hash.new service.create_cache_cluster( id, { :node_type => node_type, :security_group_names => security_groups, :num_nodes => num_nodes, :auto_minor_version_upgrade => auto_upgrade, :engine => engine, :engine_version => engine_version, :notification_topic_arn => notification_config['TopicArn'], :port => port, :preferred_availablility_zone => zone, :preferred_maintenance_window => maintenance_window, :s3_snapshot_location => s3_snapshot_location, :parameter_group_name => parameter_group_name || parameter_group['CacheParameterGroupName'], :cache_subnet_group_name => cache_subnet_group_name, :vpc_security_groups => vpc_security_groups, } ) end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/clusters.rb000066400000000000000000000012321437344660100232100ustar00rootroot00000000000000require 'fog/aws/models/elasticache/cluster' module Fog module AWS class Elasticache class Clusters < Fog::Collection model Fog::AWS::Elasticache::Cluster def all load( service.describe_cache_clusters( nil, :show_node_info => true ).body['CacheClusters'] ) end def get(identity, show_node_info = true) new( service.describe_cache_clusters( identity, :show_node_info => show_node_info ).body['CacheClusters'].first ) rescue Fog::AWS::Elasticache::NotFound end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/parameter_group.rb000066400000000000000000000011551437344660100245440ustar00rootroot00000000000000module Fog module AWS class Elasticache class ParameterGroup < Fog::Model identity :id, :aliases => 'CacheParameterGroupName' attribute :description, :aliases => 'Description' attribute :family, :aliases => 'CacheParameterGroupFamily' def destroy requires :id service.delete_cache_parameter_group(id) true end def save requires :id service.create_cache_parameter_group( id, description = id, family = 'memcached1.4' ) end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/parameter_groups.rb000066400000000000000000000011501437344660100247220ustar00rootroot00000000000000require 'fog/aws/models/elasticache/parameter_group' module Fog module AWS class Elasticache class ParameterGroups < Fog::Collection model Fog::AWS::Elasticache::ParameterGroup def all load( service.describe_cache_parameter_groups.body['CacheParameterGroups'] ) end def get(identity) new( service.describe_cache_parameter_groups( identity ).body['CacheParameterGroups'].first ) rescue Fog::AWS::Elasticache::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/security_group.rb000066400000000000000000000025711437344660100244360ustar00rootroot00000000000000module Fog module AWS class Elasticache class SecurityGroup < Fog::Model identity :id, :aliases => 'CacheSecurityGroupName' attribute :description, :aliases => 'Description' attribute :ec2_groups, :aliases => 'EC2SecurityGroups', :type => :array attribute :owner_id, :aliases => 'OwnerId' def ready? ec2_groups.all?{|ingress| ingress['Status'] == 'authorized'} end def destroy requires :id service.delete_cache_security_group(id) true end def save requires :id requires :description service.create_cache_security_group(id, description) end def authorize_ec2_group(group_name, group_owner_id=owner_id) requires :id requires :owner_id if group_owner_id.nil? 
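          # When no explicit EC2 group owner is passed, the security group's own
          # AWS account (owner_id) is used by default.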
data = service.authorize_cache_security_group_ingress( id, group_name, group_owner_id ) merge_attributes(data.body['CacheSecurityGroup']) end def revoke_ec2_group(group_name, group_owner_id=owner_id) requires :id requires :owner_id if group_owner_id.nil? data = service.revoke_cache_security_group_ingress( id, group_name, group_owner_id ) merge_attributes(data.body['CacheSecurityGroup']) end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/security_groups.rb000066400000000000000000000011411437344660100246110ustar00rootroot00000000000000require 'fog/aws/models/elasticache/security_group' module Fog module AWS class Elasticache class SecurityGroups < Fog::Collection model Fog::AWS::Elasticache::SecurityGroup def all load( service.describe_cache_security_groups.body['CacheSecurityGroups'] ) end def get(identity) new( service.describe_cache_security_groups( identity ).body['CacheSecurityGroups'].first ) rescue Fog::AWS::Elasticache::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/subnet_group.rb000066400000000000000000000014631437344660100240660ustar00rootroot00000000000000module Fog module AWS class Elasticache class SubnetGroup < Fog::Model identity :id, :aliases => ['CacheSubnetGroupName', :name] attribute :description, :aliases => 'CacheSubnetGroupDescription' attribute :vpc_id, :aliases => 'VpcId' attribute :subnet_ids, :aliases => 'Subnets' def ready? # Just returning true, as Elasticache subnet groups # seem to not have a status, unlike RDS subnet groups. true end def save requires :description, :id, :subnet_ids service.create_cache_subnet_group(id, subnet_ids, description) reload end def destroy requires :id service.delete_cache_subnet_group(id) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/elasticache/subnet_groups.rb000066400000000000000000000012741437344660100242510ustar00rootroot00000000000000require 'fog/aws/models/elasticache/subnet_group' module Fog module AWS class Elasticache class SubnetGroups < Fog::Collection model Fog::AWS::Elasticache::SubnetGroup def all data = service.describe_cache_subnet_groups.body['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'] load(data) # data is an array of attribute hashes end def get(identity) data = service.describe_cache_subnet_groups(identity).body['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'].first new(data) # data is an attribute hash rescue Fog::AWS::Elasticache::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/000077500000000000000000000000001437344660100173165ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/elb/backend_server_description.rb000066400000000000000000000003651437344660100252270ustar00rootroot00000000000000module Fog module AWS class ELB class BackendServerDescription < Fog::Model attribute :policy_names, :aliases => 'PolicyNames' attribute :instance_port, :aliases => 'InstancePort' end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/backend_server_descriptions.rb000066400000000000000000000006571437344660100254160ustar00rootroot00000000000000require 'fog/aws/models/elb/backend_server_description' module Fog module AWS class ELB class BackendServerDescriptions < Fog::Collection model Fog::AWS::ELB::BackendServerDescription attr_accessor :data, :load_balancer def all load(data) end def get(instance_port) all.find { |e| e.instance_port == instance_port } end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/listener.rb000066400000000000000000000033001437344660100214640ustar00rootroot00000000000000module Fog module 
AWS class ELB class Listener < Fog::Model attribute :policy_names, :aliases => 'PolicyNames' attribute :instance_port, :aliases => 'InstancePort' attribute :instance_protocol, :aliases => 'InstanceProtocol' attribute :lb_port, :aliases => 'LoadBalancerPort' attribute :protocol, :aliases => 'Protocol' attribute :ssl_id, :aliases => 'SSLCertificateId' def initialize(attributes={}) # set defaults, which may be overridden in super merge_attributes(:policy_names => [], :instance_port => 80, :instance_protocol => 'HTTP', :lb_port => 80, :protocol => 'HTTP') super end def save requires :load_balancer, :instance_port, :lb_port, :protocol, :instance_protocol service.create_load_balancer_listeners(load_balancer.id, [to_params]) reload end def destroy requires :load_balancer, :lb_port service.delete_load_balancer_listeners(load_balancer.id, [lb_port]) reload end # Return the policy associated with this load balancer def policy load_balancer.policies.get(policy_names.first) end def reload load_balancer.reload end def load_balancer collection.load_balancer end def to_params { 'InstancePort' => instance_port, 'InstanceProtocol' => instance_protocol, 'LoadBalancerPort' => lb_port, 'Protocol' => protocol, 'SSLCertificateId' => ssl_id } end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/listeners.rb000066400000000000000000000013251437344660100216540ustar00rootroot00000000000000require 'fog/aws/models/elb/listener' module Fog module AWS class ELB class Listeners < Fog::Collection model Fog::AWS::ELB::Listener attr_accessor :data, :load_balancer def all load(munged_data) end def get(lb_port) all.find { |listener| listener.lb_port == lb_port } end private # Munge an array of ListenerDescription hashes like: # {'Listener' => listener, 'PolicyNames' => []} # to an array of listeners with a PolicyNames key def munged_data data.map { |description| description['Listener'].merge('PolicyNames' => description['PolicyNames']) } end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/load_balancer.rb000066400000000000000000000217751437344660100224250ustar00rootroot00000000000000module Fog module AWS class ELB class LoadBalancer < Fog::Model identity :id, :aliases => 'LoadBalancerName' attribute :availability_zones, :aliases => 'AvailabilityZones' attribute :created_at, :aliases => 'CreatedTime' attribute :dns_name, :aliases => 'DNSName' attribute :health_check, :aliases => 'HealthCheck' attribute :instances, :aliases => 'Instances' attribute :source_group, :aliases => 'SourceSecurityGroup' attribute :hosted_zone_name, :aliases => 'CanonicalHostedZoneName' attribute :hosted_zone_name_id, :aliases => 'CanonicalHostedZoneNameID' attribute :subnet_ids, :aliases => 'Subnets' attribute :security_groups, :aliases => 'SecurityGroups' attribute :scheme, :aliases => 'Scheme' attribute :vpc_id, :aliases => 'VPCId' attribute :tags, :aliases => 'tagSet' def initialize(attributes={}) if attributes[:subnet_ids] ||= attributes['Subnets'] attributes[:availability_zones] ||= attributes['AvailabilityZones'] else attributes[:availability_zones] ||= attributes['AvailabilityZones'] || %w(us-east-1a us-east-1b us-east-1c us-east-1d) end unless attributes['ListenerDescriptions'] new_listener = Fog::AWS::ELB::Listener.new attributes['ListenerDescriptions'] = [{ 'Listener' => new_listener.to_params, 'PolicyNames' => new_listener.policy_names }] end super end def connection_draining? 
requires :id service.describe_load_balancer_attributes(id).body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['ConnectionDraining']['Enabled'] end def connection_draining_timeout requires :id service.describe_load_balancer_attributes(id).body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['ConnectionDraining']['Timeout'] end def set_connection_draining(enabled, timeout=nil) requires :id attrs = {'Enabled' => enabled} attrs['Timeout'] = timeout if timeout service.modify_load_balancer_attributes(id, 'ConnectionDraining' => attrs) end def cross_zone_load_balancing? requires :id service.describe_load_balancer_attributes(id).body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['CrossZoneLoadBalancing']['Enabled'] end def cross_zone_load_balancing= value requires :id service.modify_load_balancer_attributes(id, 'CrossZoneLoadBalancing' => {'Enabled' => value}) end def connection_settings_idle_timeout requires :id service.describe_load_balancer_attributes(id).body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['ConnectionSettings']['IdleTimeout'] end def set_connection_settings_idle_timeout(timeout=60) requires :id attrs = {'IdleTimeout' => timeout} service.modify_load_balancer_attributes(id,'ConnectionSettings' => attrs) end def register_instances(instances) requires :id data = service.register_instances_with_load_balancer(instances, id).body['RegisterInstancesWithLoadBalancerResult'] data['Instances'].map!{|h| h['InstanceId']} merge_attributes(data) end def deregister_instances(instances) requires :id data = service.deregister_instances_from_load_balancer(instances, id).body['DeregisterInstancesFromLoadBalancerResult'] data['Instances'].map!{|h| h['InstanceId']} merge_attributes(data) end def enable_availability_zones(zones) requires :id data = service.enable_availability_zones_for_load_balancer(zones, id).body['EnableAvailabilityZonesForLoadBalancerResult'] merge_attributes(data) end def disable_availability_zones(zones) requires :id data = service.disable_availability_zones_for_load_balancer(zones, id).body['DisableAvailabilityZonesForLoadBalancerResult'] merge_attributes(data) end def attach_subnets(subnet_ids) requires :id data = service.attach_load_balancer_to_subnets(subnet_ids, id).body['AttachLoadBalancerToSubnetsResult'] merge_attributes(data) end def detach_subnets(subnet_ids) requires :id data = service.detach_load_balancer_from_subnets(subnet_ids, id).body['DetachLoadBalancerFromSubnetsResult'] merge_attributes(data) end def apply_security_groups(security_groups) requires :id data = service.apply_security_groups_to_load_balancer(security_groups, id).body['ApplySecurityGroupsToLoadBalancerResult'] merge_attributes(data) end def instance_health requires :id @instance_health ||= service.describe_instance_health(id).body['DescribeInstanceHealthResult']['InstanceStates'] end def instances_in_service instance_health.select{|hash| hash['State'] == 'InService'}.map{|hash| hash['InstanceId']} end def instances_out_of_service instance_health.select{|hash| hash['State'] == 'OutOfService'}.map{|hash| hash['InstanceId']} end def configure_health_check(health_check) requires :id data = service.configure_health_check(id, health_check).body['ConfigureHealthCheckResult']['HealthCheck'] merge_attributes(:health_check => data) end def backend_server_descriptions Fog::AWS::ELB::BackendServerDescriptions.new({ :data => attributes['BackendServerDescriptions'], :service => service, :load_balancer => self }) end def listeners 
Fog::AWS::ELB::Listeners.new( :data => attributes['ListenerDescriptions'], :service => service, :load_balancer => self ) end def policies requires :id service.policies(:load_balancer_id => self.identity) end def policy_descriptions requires :id @policy_descriptions ||= service.describe_load_balancer_policies(id).body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"] end def set_listener_policy(port, policy_name) requires :id policy_name = [policy_name].flatten service.set_load_balancer_policies_of_listener(id, port, policy_name) reload end def set_listener_ssl_certificate(port, ssl_certificate_id) requires :id service.set_load_balancer_listener_ssl_certificate(id, port, ssl_certificate_id) reload end def unset_listener_policy(port) set_listener_policy(port, []) end def ready? # ELB requests are synchronous true end def tags requires :id service.describe_tags(id). body['DescribeTagsResult']["LoadBalancers"][0]["Tags"] end def add_tags(new_tags) requires :id service.add_tags(id, new_tags) tags end def remove_tags(tag_keys) requires :id service.remove_tags(id, tag_keys) tags end def save requires :id requires :listeners # with the VPC release, the ELB can have either availability zones or subnets # if both are specified, the availability zones have preference #requires :availability_zones if (availability_zones || subnet_ids) service.create_load_balancer(availability_zones, id, listeners.map{|l| l.to_params}) if availability_zones service.create_load_balancer(nil, id, listeners.map{|l| l.to_params}, {:subnet_ids => subnet_ids, :security_groups => security_groups, :scheme => scheme}) if subnet_ids && !availability_zones else throw Fog::Errors::Error.new("No availability zones or subnet ids specified") end # reload instead of merge attributes b/c some attrs (like HealthCheck) # may be set, but only the DNS name is returned in the create_load_balance # API call reload end def reload @instance_health = nil @policy_descriptions = nil super end def destroy requires :id service.delete_load_balancer(id) end protected def all_associations_and_attributes super.merge( 'ListenerDescriptions' => attributes['ListenerDescriptions'], 'BackendServerDescriptions' => attributes['BackendServerDescriptions'], ) end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/load_balancers.rb000066400000000000000000000020621437344660100225740ustar00rootroot00000000000000require 'fog/aws/models/elb/load_balancer' module Fog module AWS class ELB class LoadBalancers < Fog::Collection model Fog::AWS::ELB::LoadBalancer # Creates a new load balancer def initialize(attributes = {}) super end def all result = [] marker = nil finished = false until finished data = service.describe_load_balancers('Marker' => marker).body result.concat(data['DescribeLoadBalancersResult']['LoadBalancerDescriptions']) marker = data['DescribeLoadBalancersResult']['NextMarker'] finished = marker.nil? 
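            # Keep fetching pages until describe_load_balancers no longer returns a NextMarker.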
end load(result) # data is an array of attribute hashes end def get(identity) return unless identity data = service.describe_load_balancers('LoadBalancerNames' => identity).body['DescribeLoadBalancersResult']['LoadBalancerDescriptions'].first new(data) rescue Fog::AWS::ELB::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/policies.rb000066400000000000000000000037321437344660100214570ustar00rootroot00000000000000require 'fog/aws/models/elb/policy' module Fog module AWS class ELB class Policies < Fog::Collection attribute :load_balancer_id model Fog::AWS::ELB::Policy def all(options={}) merge_attributes(options) requires :load_balancer_id data = service.describe_load_balancer_policies(self.load_balancer_id). body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"] load(munge(data)) end def get(id) all.find { |policy| id == policy.id } end def new(attributes={}) super(self.attributes.merge(attributes)) end private def munge(data) data.reduce([]) { |m,e| policy_attribute_descriptions = e["PolicyAttributeDescriptions"] policy = { :id => e["PolicyName"], :type_name => e["PolicyTypeName"], :policy_attributes => policy_attributes(policy_attribute_descriptions), :load_balancer_id => self.load_balancer_id, } case e["PolicyTypeName"] when 'AppCookieStickinessPolicyType' cookie_name = policy_attribute_descriptions.find{|h| h['AttributeName'] == 'CookieName'}['AttributeValue'] policy['CookieName'] = cookie_name if cookie_name when 'LBCookieStickinessPolicyType' cookie_expiration_period = policy_attribute_descriptions.find{|h| h['AttributeName'] == 'CookieExpirationPeriod'}['AttributeValue'].to_i policy['CookieExpirationPeriod'] = cookie_expiration_period if cookie_expiration_period > 0 end m << policy m } end def policy_attributes(policy_attribute_descriptions) policy_attribute_descriptions.reduce({}){|m,e| m[e["AttributeName"]] = e["AttributeValue"] m } end end end end end fog-aws-3.18.0/lib/fog/aws/models/elb/policy.rb000066400000000000000000000030571437344660100211470ustar00rootroot00000000000000module Fog module AWS class ELB class Policy < Fog::Model identity :id, :aliases => 'PolicyName' attribute :cookie, :aliases => 'CookieName' attribute :expiration, :aliases => 'CookieExpirationPeriod' attribute :type_name attribute :policy_attributes attribute :load_balancer_id attr_accessor :cookie_stickiness # Either :app or :lb def save requires :id, :load_balancer_id args = [load_balancer_id, id] if cookie_stickiness case cookie_stickiness when :app requires :cookie method = :create_app_cookie_stickiness_policy args << cookie when :lb method = :create_lb_cookie_stickiness_policy args << expiration if expiration else raise ArgumentError.new('cookie_stickiness must be :app or :lb') end else requires :type_name, :policy_attributes method = :create_load_balancer_policy args << type_name args << policy_attributes end service.send(method, *args) reload end def destroy requires :identity, :load_balancer_id service.delete_load_balancer_policy(self.load_balancer_id, self.identity) reload end def load_balancer requires :load_balancer_id service.load_balancers.new(:identity => self.load_balancer_id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/000077500000000000000000000000001437344660100201625ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/glacier/archive.rb000066400000000000000000000034321437344660100221320ustar00rootroot00000000000000module Fog module AWS class Glacier class Archive < Fog::Model identity :id attribute :description attribute :body 
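        # A minimal usage sketch for this model (the vault name, file and chunk size
        # below are illustrative, and assume an already configured Glacier service):
        #
        #   vault   = service.vaults.get('my-vault')
        #   archive = vault.archives.new(:body => File.open('backup.tar'), :description => 'nightly backup')
        #   archive.multipart_chunk_size = 1024 * 1024 # 1MB parts, streamed via multipart upload
        #   archive.save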
attr_accessor :multipart_chunk_size #must be a power of 2 multiple of 1MB def vault @vault end def save requires :body, :vault if multipart_chunk_size && body.respond_to?(:read) self.id = multipart_save else data = service.create_archive(vault.id, body, 'description' => description) self.id = data.headers['x-amz-archive-id'] end true end def destroy requires :id service.delete_archive(vault.id,id) end private def vault=(new_vault) @vault = new_vault end def multipart_save # Initiate the upload res = service.initiate_multipart_upload vault.id, multipart_chunk_size, 'description' => description upload_id = res.headers["x-amz-multipart-upload-id"] hash = Fog::AWS::Glacier::TreeHash.new if body.respond_to?(:rewind) body.rewind rescue nil end offset = 0 while (chunk = body.read(multipart_chunk_size)) do part_hash = hash.add_part(chunk) part_upload = service.upload_part(vault.id, upload_id, chunk, offset, part_hash ) offset += chunk.bytesize end rescue # Abort the upload & reraise service.abort_multipart_upload(vault.id, upload_id) if upload_id raise else # Complete the upload service.complete_multipart_upload(vault.id, upload_id, offset, hash.hexdigest).headers['x-amz-archive-id'] end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/archives.rb000066400000000000000000000007521437344660100223170ustar00rootroot00000000000000require 'fog/aws/models/glacier/archive' module Fog module AWS class Glacier class Archives < Fog::Collection model Fog::AWS::Glacier::Archive attribute :vault #you can't list a vault's archives def all nil end def get(key) new(:id => key) end def new(attributes = {}) requires :vault super({ :vault => vault }.merge!(attributes)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/job.rb000066400000000000000000000042021437344660100212570ustar00rootroot00000000000000module Fog module AWS class Glacier class Job < Fog::Model ARCHIVE = 'archive-retrieval' INVENTORY = 'inventory-retrieval' identity :id, :aliases => "JobId" attribute :action, :aliases => "Action" attribute :archive_id, :aliases => "ArchiveId" attribute :archive_size, :aliases => "ArchiveSizeInBytes", :type => :integer attribute :completed, :aliases => "Completed", :type => :boolean attribute :completed_at, :aliases => "CompletionDate", :type => :time attribute :created_at, :aliases => "CreationDate", :type => :time attribute :inventory_size, :aliases => "InventorySizeInBytes", :type => :integer attribute :description, :aliases=> "JobDescription" attribute :tree_hash, :aliases=> "SHA256TreeHash" attribute :sns_topic, :aliases => "SNSTopic" attribute :status_code, :aliases=> "StatusCode" attribute :status_message, :aliases=> "StatusMessage" attribute :vault_arn, :aliases=> "VaultARN" attribute :format attribute :type def ready? 
completed end def save requires :vault, :type specification = {'Type' => type, 'ArchiveId' => archive_id, 'Format' => format, 'Description' => description, 'SNSTopic' => sns_topic}.reject{|k,v| v.nil?} data = service.initiate_job(vault.id, specification) self.id = data.headers['x-amz-job-id'] reload end def vault @vault end #pass :range => 1..1234 to only retrieve those bytes #pass :io => f to stream the response to that io def get_output(options={}) if io = options.delete(:io) options = options.merge :response_block => lambda {|chunk, remaining_bytes, total_bytes| io.write chunk} end options['Range'] = options.delete :range service.get_job_output(vault.id, id, options) end private def vault=(new_vault) @vault = new_vault end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/jobs.rb000066400000000000000000000016231437344660100214460ustar00rootroot00000000000000require 'fog/aws/models/glacier/job' module Fog module AWS class Glacier class Jobs < Fog::Collection model Fog::AWS::Glacier::Job attribute :vault attribute :filters def initialize(attributes) self.filters = {} super end # acceptable filters are: # statuscode InProgress/Failed/Succeeded # completed (true/false) def all(filters = self.filters) self.filters = filters data = service.list_jobs(vault.id, self.filters).body['JobList'] load(data) end def get(key) data = service.describe_job(vault.id, key).body new(data) rescue Excon::Errors::NotFound nil end def new(attributes = {}) requires :vault super({ :vault => vault }.merge!(attributes)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/vault.rb000066400000000000000000000027161437344660100216470ustar00rootroot00000000000000require 'fog/aws/models/glacier/archives' require 'fog/aws/models/glacier/jobs' module Fog module AWS class Glacier class Vault < Fog::Model identity :id, :aliases => 'VaultName' attribute :created_at, :aliases => 'CreationDate', :type => :time attribute :last_inventory_at, :aliases => 'LastInventoryDate', :type => :time attribute :number_of_archives, :aliases => 'NumberOfArchives', :type => :integer attribute :size_in_bytes, :aliases => 'SizeInBytes', :type => :integer attribute :arn, :aliases => 'VaultARN' def ready?
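        # Sketch of typical vault usage, assuming a Fog::AWS::Glacier connection
        # named glacier (the vault name and SNS topic ARN are hypothetical):
        #
        #   vault = glacier.vaults.create(:id => 'backups')   # calls create_vault
        #   vault.set_notification_configuration(topic_arn, ['ArchiveRetrievalCompleted'])
        #   vault.archives   # upload targets
        #   vault.jobs       # retrieval jobs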
# Glacier requests are synchronous true end def archives @archives ||= Fog::AWS::Glacier::Archives.new(:vault => self, :service => service) end def jobs(filters={}) Fog::AWS::Glacier::Jobs.new(:vault => self, :service => service, :filters => filters) end def set_notification_configuration(topic, events) service.set_vault_notification_configuration(id, topic, events) end def delete_notification_configuration service.delete_vault_notification_configuration(id) end def save requires :id service.create_vault(id) reload end def destroy requires :id service.delete_vault(id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/glacier/vaults.rb000066400000000000000000000007001437344660100220220ustar00rootroot00000000000000require 'fog/aws/models/glacier/vault' module Fog module AWS class Glacier class Vaults < Fog::Collection model Fog::AWS::Glacier::Vault def all data = service.list_vaults.body['VaultList'] load(data) end def get(key) data = service.describe_vault(key).body new(data) rescue Excon::Errors::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/000077500000000000000000000000001437344660100173225ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/iam/access_key.rb000066400000000000000000000016351437344660100217650ustar00rootroot00000000000000module Fog module AWS class IAM class AccessKey < Fog::Model identity :id, :aliases => 'AccessKeyId' attribute :username, :aliases => 'UserName' attribute :secret_access_key, :aliases => 'SecretAccessKey' attribute :status, :aliases => 'Status' def save requires :username if !persisted? data = service.create_access_key('UserName'=> username).body["AccessKey"] else data = service.update_access_key(id, status, "UserName" => username).body["AccessKey"] end merge_attributes(data) true end def destroy requires :id requires :username service.delete_access_key(id,'UserName'=> username) true end def user requires :username service.users.get(username) end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/access_keys.rb000066400000000000000000000012201437344660100221360ustar00rootroot00000000000000require 'fog/aws/models/iam/access_key' module Fog module AWS class IAM class AccessKeys < Fog::Collection model Fog::AWS::IAM::AccessKey def initialize(attributes = {}) @username = attributes[:username] super end def all data = service.list_access_keys('UserName'=> @username).body['AccessKeys'] load(data) end def get(identity) self.all.select {|access_key| access_key.id == identity}.first end def new(attributes = {}) super({ :username => @username }.merge!(attributes)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/group.rb000066400000000000000000000046551437344660100210150ustar00rootroot00000000000000module Fog module AWS class IAM class Group < Fog::Model identity :id, :aliases => 'GroupId' attribute :arn, :aliases => 'Arn' attribute :name, :aliases => 'GroupName' attribute :path, :aliases => 'Path' attribute :users, :aliases => 'Users', :type => :array def add_user(user_or_name) requires :name user = if user_or_name.is_a?(Fog::AWS::IAM::User) user_or_name else service.users.new(:id => user_or_name) end service.add_user_to_group(self.name, user.identity) merge_attributes(:users => self.users + [user]) end def attach(policy_or_arn) requires :name arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.attach_group_policy(self.name, arn) end def attached_policies requires :name service.managed_policies(:group_name => self.name) end def destroy requires :name 
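          # Illustrative lifecycle of a group around this model (the IAM
          # connection, group name, user name and policy ARN are hypothetical):
          #
          #   group = iam.groups.create(:name => 'developers', :path => '/')
          #   group.add_user('alice')                                  # add_user_to_group
          #   group.attach('arn:aws:iam::aws:policy/ReadOnlyAccess')   # attach_group_policy
          #   group.destroy                                            # delete_group, below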
service.delete_group(self.name) true end def detach(policy_or_arn) requires :name arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.detach_group_policy(self.name, arn) end def save if !persisted? requires :name merge_attributes( service.create_group(self.name, self.path).body["Group"] ) else params = {} if self.name params['NewGroupName'] = self.name end if self.path params['NewPath'] = self.path end service.update_group(self.name, params) true end end def policies requires :name service.policies(:group_name => self.name) end def reload requires :name data = begin collection.get(self.name) rescue Excon::Errors::SocketError nil end return unless data merge_attributes(data.attributes) self end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/groups.rb000066400000000000000000000020421437344660100211640ustar00rootroot00000000000000require 'fog/aws/models/iam/group' require 'fog/aws/iam/paged_collection' module Fog module AWS class IAM class Groups < Fog::AWS::IAM::PagedCollection attribute :username model Fog::AWS::IAM::Group def all(options = {}) data, records = if self.username response = service.list_groups_for_user(self.username, options) [response.body, response.body['GroupsForUser']] else response = service.list_groups(options) [response.body, response.body['Groups']] end merge_attributes(data) load(records) end def get(identity) data = service.get_group(identity) group = data.body['Group'] users = data.body['Users'].map { |u| service.users.new(u) } new(group.merge(:users => users)) rescue Fog::AWS::IAM::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/instance_profile.rb000066400000000000000000000021641437344660100231760ustar00rootroot00000000000000module Fog module AWS class IAM class InstanceProfile < Fog::Model identity :name, :aliases => 'InstanceProfileName' attribute :id, :aliases => 'InstanceProfileId' attribute :roles, :aliases => 'Roles', :type => :array attribute :arn, :aliases => 'Arn' attribute :path, :aliases => 'Path' attribute :create_date, :aliases => 'CreateDate', :type => :time def add_role(role_name) requires :identity service.add_role_to_instance_profile(role_name, self.name) true end def remove_role(role_name) requires :identity service.remove_role_from_instance_profile(role_name, self.name) true end def destroy requires :identity service.delete_instance_profile(self.identity) true end def save requires :identity data = service.create_instance_profile(self.name, self.path).body['InstanceProfile'] merge_attributes(data) end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/instance_profiles.rb000066400000000000000000000011231437344660100233530ustar00rootroot00000000000000require 'fog/aws/models/iam/instance_profile' module Fog module AWS class IAM class InstanceProfiles < Fog::AWS::IAM::PagedCollection model Fog::AWS::IAM::InstanceProfile def all(options={}) body = service.list_instance_profiles(page_params(options)).body merge_attributes(body) load(body["InstanceProfiles"]) end def get(identity) new(service.get_instance_profile(identity).body["Role"]) rescue Excon::Errors::NotFound, Fog::AWS::IAM::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/managed_policies.rb000066400000000000000000000040351437344660100231340ustar00rootroot00000000000000require 'fog/aws/models/iam/managed_policy' require 'fog/aws/iam/paged_collection' module Fog module AWS class IAM class ManagedPolicies < Fog::AWS::IAM::PagedCollection attribute :username attribute :group_name attribute :role_name 
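        # This collection can be scoped to a user, group or role, mirroring the
        # attributes above; a minimal sketch (the names are hypothetical):
        #
        #   iam.managed_policies                                # every managed policy
        #   iam.managed_policies(:username => 'alice')          # attached to a user
        #   iam.managed_policies(:group_name => 'developers')   # attached to a group
        #   iam.managed_policies(:role_name => 'app-role')      # attached to a role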
model Fog::AWS::IAM::ManagedPolicy def all(options={}) data = if self.username all_by_user(self.username, options) elsif self.group_name all_by_group(self.group_name, options) elsif self.role_name all_by_role(self.role_name, options) else all_policies(options) end load(data) end def get(identity) response = service.get_policy(identity) new(response.body['Policy']) rescue Fog::AWS::IAM::NotFound nil end protected def all_by_user(username, options={}) body = service.list_attached_user_policies(username, page_params(options)).body merge_attributes(body) body['Policies'].map do |policy| service.get_policy(policy['PolicyArn']).body['Policy'] end end def all_by_group(group_name, options={}) body = service.list_attached_group_policies(group_name, page_params(options)).body merge_attributes(body) body['Policies'].map do |policy| service.get_policy(policy['PolicyArn']).body['Policy'] end end def all_by_role(role_name, options={}) body = service.list_attached_role_policies(role_name, page_params(options)).body merge_attributes(body) body['Policies'].map do |policy| service.get_policy(policy['PolicyArn']).body['Policy'] end end def all_policies(options={}) body = service.list_policies(page_params(options)).body merge_attributes(body) body['Policies'] end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/managed_policy.rb000066400000000000000000000033141437344660100226230ustar00rootroot00000000000000module Fog module AWS class IAM class ManagedPolicy < Fog::Model identity :id, :aliases => 'PolicyId' attribute :arn, :aliases => 'Arn' attribute :attachable, :aliases => 'IsAttachable', :type => :boolean attribute :attachments, :aliases => 'AttachmentCount', :type => :integer attribute :created_at, :aliases => 'CreateDate', :type => :time attribute :default_version, :aliases => 'DefaultVersionId' attribute :description, :aliases => 'Description' attribute :name, :aliases => 'PolicyName' attribute :path, :aliases => 'Path' attribute :updated_at, :aliases => 'UpdateDate', :type => :time attr_accessor :policy_document def attach(user_or_username) requires :arn username = if user_or_username.respond_to?(:identity) user_or_username.identity else user_or_username end service.attach_user_policy(username, self.arn) end def document requires :arn, :default_version service.get_policy_version(self.arn, self.default_version). 
body['PolicyVersion']['Document'] end def reload service.managed_policies.get(self.arn) end def save requires :name, :policy_document merge_attributes(service.create_policy(self.name, self.policy_document, self.path, self.description).body["Policy"]) end def destroy requires :arn service.delete_policy(self.arn) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/policies.rb000066400000000000000000000041461437344660100214630ustar00rootroot00000000000000require 'fog/aws/models/iam/policy' require 'fog/aws/iam/paged_collection' module Fog module AWS class IAM class Policies < Fog::AWS::IAM::PagedCollection model Fog::AWS::IAM::Policy attribute :username attribute :group_name def all(options={}) requires_one :username, :group_name policies = if self.username all_by_user(self.username, options) else self.group_name all_by_group(self.group_name, options) end load(policies) # data is an array of attribute hashes end def get(identity) requires_one :username, :group_name response = if self.username service.get_user_policy(identity, self.username) else self.group_name service.get_group_policy(identity, self.group_name) end new(response.body['Policy']) rescue Fog::AWS::IAM::NotFound nil end def new(attributes = {}) super(self.attributes.merge(attributes)) end private # AWS method get_user_policy and list_group_policies only returns an array of policy names, this is kind of useless, # that's why it has to loop through the list to get the details of each element. I don't like it because it makes this method slow def all_by_group(group_name, options={}) response = service.list_group_policies(group_name, page_params(options)) merge_attributes(response.body) response.body['PolicyNames'].map do |policy_name| service.get_group_policy(policy_name, group_name).body['Policy'] end end def all_by_user(username, options={}) response = service.list_user_policies(username, page_params(options)) merge_attributes(response.body) response.body['PolicyNames'].map do |policy_name| service.get_user_policy(policy_name, username).body['Policy'] end end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/policy.rb000066400000000000000000000016511437344660100211510ustar00rootroot00000000000000module Fog module AWS class IAM class Policy < Fog::Model identity :id, :aliases => 'PolicyName' attribute :username, :aliases => 'UserName' attribute :document, :aliases => 'PolicyDocument' attr_accessor :group_name def save requires :id requires_one :username, :group_name requires :document data = if username service.put_user_policy(username, id, document).body else service.put_group_policy(group_name, id, document).body end merge_attributes(data) true end def destroy requires :id requires :username service.delete_user_policy(username, id) true end def user requires :username service.users.get(username) end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/role.rb000066400000000000000000000034541437344660100206160ustar00rootroot00000000000000module Fog module AWS class IAM class Role < Fog::Model identity :id, :aliases => 'RoleId' attribute :rolename, :aliases => 'RoleName' attribute :create_date, :aliases => 'CreateDate', :type => :time attribute :assume_role_policy_document, :aliases => 'AssumeRolePolicyDocument' attribute :arn, :aliases => 'Arn' attribute :path, :aliases => 'Path' def save raise Fog::Errors::Error.new('Resaving an existing object may create a duplicate') if persisted? 
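          # Sketch of creating a role and attaching a managed policy (the role
          # name and policy ARN are hypothetical; Roles#new supplies a default
          # EC2 assume-role policy document when none is given):
          #
          #   role = iam.roles.create(:rolename => 'app-role', :path => '/')
          #   role.attach('arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess')
          #   role.instance_profiles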
requires :rolename requires :assume_role_policy_document data = service.create_role(rolename, assume_role_policy_document, path).body["Role"] merge_attributes(data) true end def attach(policy_or_arn) requires :rolename arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.attach_role_policy(self.rolename, arn) end def detach(policy_or_arn) requires :rolename arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.detach_role_policy(self.rolename, arn) end def attached_policies requires :rolename service.managed_policies(:role_name => self.rolename) end def instance_profiles requires :rolename service.instance_profiles.load(service.list_instance_profiles_for_role(self.rolename).body["InstanceProfiles"]) end def destroy requires :rolename service.delete_role(rolename) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/roles.rb000066400000000000000000000013751437344660100210010ustar00rootroot00000000000000require 'fog/aws/models/iam/role' module Fog module AWS class IAM class Roles < Fog::AWS::IAM::PagedCollection model Fog::AWS::IAM::Role def all(options={}) body = service.list_roles(page_params(options)).body merge_attributes(body) load(body["Roles"]) end def get(identity) new(service.get_role(identity).body["Role"]) rescue Excon::Errors::NotFound, Fog::AWS::IAM::NotFound nil end def new(attributes = {}) unless attributes.key?(:assume_role_policy_document) attributes[:assume_role_policy_document] = Fog::AWS::IAM::EC2_ASSUME_ROLE_POLICY.to_s end super end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/user.rb000066400000000000000000000044631437344660100206340ustar00rootroot00000000000000module Fog module AWS class IAM class User < Fog::Model identity :id, :aliases => 'UserName' attribute :path, :aliases => 'Path' attribute :arn, :aliases => 'Arn' attribute :user_id, :aliases => 'UserId' attribute :created_at, :aliases => 'CreateDate', :type => :time def access_keys requires :id service.access_keys(:username => id) end def attach(policy_or_arn) requires :identity arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.attach_user_policy(self.identity, arn) end def detach(policy_or_arn) requires :identity arn = if policy_or_arn.respond_to?(:arn) policy_or_arn.arn else policy_or_arn end service.detach_user_policy(self.identity, arn) end def attached_policies requires :identity service.managed_policies(:username => self.identity) end def destroy requires :id service.delete_user(id) true end def groups requires :identity service.groups(:username => self.identity) end def policies requires :identity service.policies(:username => self.identity) end def password=(password) requires :identity has_password = !!self.password_created_at if has_password && password.nil? service.delete_login_profile(self.identity) elsif has_password service.update_login_profile(self.identity, password) elsif !password.nil? 
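            # no login profile exists yet, so create one with the supplied password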
service.create_login_profile(self.identity, password) end end def password_created_at requires :identity service.get_login_profile(self.identity).body["LoginProfile"]["CreateDate"] rescue Fog::AWS::IAM::NotFound nil end def save requires :id data = service.create_user(id, path || '/').body['User'] merge_attributes(data) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/iam/users.rb000066400000000000000000000024251437344660100210130ustar00rootroot00000000000000require 'fog/aws/models/iam/user' module Fog module AWS class IAM class Users < Fog::Collection attribute :is_truncated, :aliases => 'IsTruncated' attribute :marker, :aliases => 'Marker' model Fog::AWS::IAM::User def all(options = {}) merge_attributes(options) data = service.list_users(options).body merge_attributes('IsTruncated' => data['IsTruncated'], 'Marker' => data['Marker']) load(data['Users']) # data is an array of attribute hashes end def current new(service.get_user.body['User']) end def get(identity) data = service.get_user(identity).body['User'] new(data) # data is an attribute hash rescue Fog::AWS::IAM::NotFound nil end alias_method :each_user_this_page, :each def each if !block_given? self else subset = dup.all subset.each_user_this_page {|f| yield f} while subset.is_truncated subset = subset.all('Marker' => subset.marker, 'MaxItems' => 1000) subset.each_user_this_page {|f| yield f} end self end end end end end end fog-aws-3.18.0/lib/fog/aws/models/kms/000077500000000000000000000000001437344660100173465ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/kms/key.rb000066400000000000000000000015701437344660100204660ustar00rootroot00000000000000module Fog module AWS class KMS class Key < Fog::Model identity :id, :aliases => 'KeyId' attribute :account_id, :aliases => 'AWSAccountId' attribute :arn, :aliases => 'KeyArn' attribute :created_at, :aliases => 'CreationDate', :type => :time attribute :description, :aliases => 'Description' attribute :enabled, :aliases => 'Enabled', :type => :boolean attribute :usage, :aliases => 'KeyUsage' attr_writer :policy def reload requires :identity data = service.describe_key(self.identity) merge_attributes(data.body['KeyMetadata']) self end def save data = service.create_key(@policy, description, usage) merge_attributes(data.body['KeyMetadata']) true end end end end end fog-aws-3.18.0/lib/fog/aws/models/kms/keys.rb000066400000000000000000000012271437344660100206500ustar00rootroot00000000000000require 'fog/aws/models/kms/key' module Fog module AWS class KMS class Keys < Fog::PagedCollection attribute :filters attribute :truncated model Fog::AWS::KMS::Key def initialize(attributes) self.filters ||= {} super end # This method deliberately returns only a single page of results def all(filters_arg = filters) filters.merge!(filters_arg) result = service.list_keys(filters).body filters[:marker] = result['Marker'] self.truncated = result['Truncated'] load(result['Keys']) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/000077500000000000000000000000001437344660100173445ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/rds/cluster.rb000066400000000000000000000104561437344660100213600ustar00rootroot00000000000000module Fog module AWS class RDS class Cluster < Fog::Model identity :id, :aliases => 'DBClusterIdentifier' attribute :allocated_storage, :aliases => 'AllocatedStorage', :type => :integer attribute :backup_retention_period, :aliases => 'BackupRetentionPeriod', :type => :integer attribute :db_cluster_members, :aliases => 'DBClusterMembers', :type => 
:array attribute :db_cluster_parameter_group, :aliases => 'DBClusterParameterGroup' attribute :db_subnet_group, :aliases => 'DBSubnetGroupName' attribute :endpoint, :aliases => 'Endpoint' attribute :engine, :aliases => 'Engine' attribute :engine_version, :aliases => 'EngineVersion' attribute :password, :aliases => 'MasterUserPassword' attribute :master_username, :aliases => 'MasterUsername' attribute :port, :aliases => 'Port', :type => :integer attribute :preferred_backup_window, :aliases => 'PreferredBackupWindow' attribute :preferred_maintenance_window, :aliases => 'PreferredMaintenanceWindow' attribute :state, :aliases => 'Status' attribute :vpc_security_groups, :aliases => 'VpcSecurityGroups' attr_accessor :storage_encrypted #not in the response def ready? # [2019.01] I don't think this is going to work, at least not with Aurora # clusters. In my testing, the state reported by Fog for an Aurora cluster # is "active" as soon as the cluster is retrievable from AWS, and the # value doesn't change after that. Contrast that with the AWS Console UI, # which reports the cluster as "Creating" while it's being created. I don't # know where Fog is getting the state value from, but I don't think it's # correct, at least not for the purpose of knowing if the Cluster is ready # to have individual instances added to it. state == 'available' || state == 'active' end def snapshots requires :id service.cluster_snapshots(:cluster => self) end def servers(set=db_cluster_members) set.map do |member| service.servers.get(member['DBInstanceIdentifier']) end end def master db_cluster_members.detect { |member| member["master"] } end def replicas servers(db_cluster_members.select { |member| !member["master"] }) end def has_replica?(replica_name) replicas.detect { |replica| replica.id == replica_name } end def destroy(snapshot_identifier=nil) requires :id service.delete_db_cluster(id, snapshot_identifier, snapshot_identifier.nil?) true end def save requires :id requires :engine requires :master_username requires :password data = service.create_db_cluster(id, attributes_to_params) merge_attributes(data.body['CreateDBClusterResult']['DBCluster']) true end def attributes_to_params options = { 'AllocatedStorage' => allocated_storage, 'BackupRetentionPeriod' => backup_retention_period, 'DBClusterIdentifier' => identity, 'DBClusterParameterGroup' => db_cluster_parameter_group, 'DBSubnetGroupName' => db_subnet_group, 'Endpoint' => endpoint, 'Engine' => engine, 'EngineVersion' => engine_version, 'MasterUserPassword' => password, 'MasterUsername' => master_username, 'PreferredBackupWindow' => preferred_backup_window, 'PreferredMaintenanceWindow' => preferred_maintenance_window, 'Status' => state, 'StorageEncrypted' => storage_encrypted, 'VpcSecurityGroups' => vpc_security_groups, } options.delete_if { |key,value| value.nil? 
} end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/cluster_snapshots.rb000066400000000000000000000024111437344660100234520ustar00rootroot00000000000000require 'fog/aws/models/rds/snapshot' module Fog module AWS class RDS class ClusterSnapshots < Fog::Collection attribute :cluster attribute :filters model Fog::AWS::RDS::Snapshot def initialize(attributes) self.filters ||= {} if attributes[:cluster] filters[:identifier] = attributes[:cluster].id end if attributes[:type] filters[:type] = attributes[:type] end super end def all(filters_arg = filters) filters.merge!(filters_arg) page = service.describe_db_cluster_snapshots(filters).body['DescribeDBClusterSnapshotsResult'] filters[:marker] = page['Marker'] load(page['DBClusterSnapshots']) end def get(identity) data = service.describe_db_cluster_snapshots(:snapshot_id => identity).body['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end def create(params={}) if cluster super(params.merge(:cluster_id => cluster.id)) else super(params) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/clusters.rb000066400000000000000000000010401437344660100215300ustar00rootroot00000000000000require 'fog/aws/models/rds/cluster' module Fog module AWS class RDS class Clusters < Fog::Collection model Fog::AWS::RDS::Cluster def all data = service.describe_db_clusters.body['DescribeDBClustersResult']['DBClusters'] load(data) end def get(identity) data = service.describe_db_clusters(identity).body['DescribeDBClustersResult']['DBClusters'].first new(data) rescue Fog::AWS::RDS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/event_subscription.rb000066400000000000000000000023141437344660100236160ustar00rootroot00000000000000module Fog module AWS class RDS class EventSubscription < Fog::Model identity :id, :aliases => 'CustSubscriptionId' attribute :event_categories, :aliases => 'EventCategories', :type => :array attribute :source_type, :aliases => 'SourceType' attribute :enabled, :aliases => 'Enabled' attribute :status, :aliases => 'Status' attribute :creation_time, :aliases => 'SubscriptionCreationTime' attribute :sns_topic_arn, :aliases => 'SnsTopicArn' def ready? ! 
['deleting', 'creating'].include?(status) end def destroy service.delete_event_subscription(id) reload end def save requires :id, :sns_topic_arn data = service.create_event_subscription( 'EventCategories' => event_categories, 'SourceType' => source_type, 'Enabled' => enabled || true, 'SubscriptionName' => id, 'SnsTopicArn' => sns_topic_arn ).body["CreateEventSubscriptionResult"]["EventSubscription"] merge_attributes(data) self end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/event_subscriptions.rb000066400000000000000000000012151437344660100240000ustar00rootroot00000000000000require 'fog/aws/models/rds/event_subscription' module Fog module AWS class RDS class EventSubscriptions < Fog::Collection model Fog::AWS::RDS::EventSubscription def all data = service.describe_event_subscriptions.body['DescribeEventSubscriptionsResult']['EventSubscriptionsList'] load(data) end def get(identity) data = service.describe_event_subscriptions('SubscriptionName' => identity).body['DescribeEventSubscriptionsResult']['EventSubscriptionsList'] new(data.first) rescue Fog::AWS::RDS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/instance_option.rb000066400000000000000000000023571437344660100230740ustar00rootroot00000000000000module Fog module AWS class RDS class InstanceOption < Fog::Model attribute :multi_az_capable, :aliases => 'MultiAZCapable', :type => :boolean attribute :engine, :aliases => 'Engine' attribute :license_model, :aliases => 'LicenseModel' attribute :read_replica_capable, :aliases => 'ReadReplicaCapable', :type => :boolean attribute :engine_version, :aliases => 'EngineVersion' attribute :availability_zones, :aliases => 'AvailabilityZones', :type => :array attribute :db_instance_class, :aliases => 'DBInstanceClass' attribute :vpc, :aliases => 'Vpc', :type => :boolean attribute :supports_iops, :aliases => 'SupportsIops', :type => :boolean attribute :supports_enhanced_monitoring, :aliases => 'SupportsEnhancedMonitoring', :type => :boolean attribute :supports_iam_database_authentication, :aliases => 'SupportsIAMDatabaseAuthentication', :type => :boolean attribute :supports_performance_insights, :aliases => 'SupportsPerformanceInsights', :type => :boolean attribute :supports_storage_encryption, :aliases => 'SupportsStorageEncryption', :type => :boolean attribute :storage_type, :aliases => 'StorageType' end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/instance_options.rb000066400000000000000000000013551437344660100232540ustar00rootroot00000000000000require 'fog/aws/models/rds/instance_option' module Fog module AWS class RDS class InstanceOptions < Fog::PagedCollection attribute :filters attribute :engine model Fog::AWS::RDS::InstanceOption def initialize(attributes) self.filters ||= {} super end # This method deliberately returns only a single page of results def all(filters_arg = filters) filters.merge!(filters_arg) result = service.describe_orderable_db_instance_options(engine, filters).body['DescribeOrderableDBInstanceOptionsResult'] filters[:marker] = result['Marker'] load(result['OrderableDBInstanceOptions']) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/log_file.rb000066400000000000000000000014111437344660100214460ustar00rootroot00000000000000module Fog module AWS class RDS class LogFile < Fog::Model attribute :rds_id, :aliases => 'DBInstanceIdentifier' attribute :name, :aliases => 'LogFileName' attribute :size, :aliases => 'Size', :type => :integer attribute :last_written, :aliases => 'LastWritten', :type => :time attribute :content, :aliases => 
'LogFileData' attribute :marker, :aliases => 'Marker' attribute :more_content_available, :aliases => 'AdditionalDataPending', :type => :boolean def content_excerpt(marker=nil) result = service.download_db_logfile_portion(self.rds_id, self.name, {:marker => marker}) merge_attributes(result.body['DownloadDBLogFilePortionResult']) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/log_files.rb000066400000000000000000000025741437344660100216440ustar00rootroot00000000000000require 'fog/aws/models/rds/log_file' module Fog module AWS class RDS class LogFiles < Fog::Collection attribute :filters attribute :rds_id model Fog::AWS::RDS::LogFile def initialize(attributes) self.filters ||= {} super end # This method deliberately returns only a single page of results def all(filters_arg = filters) filters.merge!(filters_arg) result = service.describe_db_log_files(rds_id, filters).body['DescribeDBLogFilesResult'] filters[:marker] = result['Marker'] load(result['DBLogFiles']) end def each(filters_arg = filters) if block_given? begin page = self.all(filters_arg) # We need to explicitly use the base 'each' method here on the page, otherwise we get infinite recursion base_each = Fog::Collection.instance_method(:each) base_each.bind(page).call { |log_file| yield log_file } end while self.filters[:marker] end self end def get(file_name=nil) if file_name matches = self.select {|log_file| log_file.name.upcase == file_name.upcase} return matches.first unless matches.empty? end rescue Fog::AWS::RDS::NotFound end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/parameter.rb000066400000000000000000000010521437344660100216470ustar00rootroot00000000000000module Fog module AWS class RDS class Parameter < Fog::Model attribute :name, :aliases => ['ParameterName'] attribute :data_type, :aliases => 'DataType' attribute :description, :aliases => 'Description' attribute :allowed_values, :aliases => 'AllowedValues' attribute :source, :aliases => 'Source' attribute :modifiable, :aliases => 'IsModifiable' attribute :apply_type, :aliases => 'ApplyType' attribute :value, :aliases => 'ParameterValue' end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/parameter_group.rb000066400000000000000000000016161437344660100230710ustar00rootroot00000000000000module Fog module AWS class RDS class ParameterGroup < Fog::Model identity :id, :aliases => ['DBParameterGroupName', :name] attribute :family, :aliases => 'DBParameterGroupFamily' attribute :description, :aliases => 'Description' def save requires :family requires :description requires :id service.create_db_parameter_group(id, family, description) end def modify(changes) service.modify_db_parameter_group id, changes.map {|c| {'ParameterName' => c[:name], 'ParameterValue' => c[:value], 'ApplyMethod' => c[:apply_method]}} end def destroy requires :id service.delete_db_parameter_group(id) true end def parameters(filters={}) service.parameters({:group => self}.merge(filters)) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/parameter_groups.rb000066400000000000000000000012451437344660100232520ustar00rootroot00000000000000require 'fog/aws/models/rds/parameter_group' module Fog module AWS class RDS class ParameterGroups < Fog::Collection model Fog::AWS::RDS::ParameterGroup def all data = service.describe_db_parameter_groups.body['DescribeDBParameterGroupsResult']['DBParameterGroups'] load(data) # data is an array of attribute hashes end def get(identity) data = 
service.describe_db_parameter_groups(identity).body['DescribeDBParameterGroupsResult']['DBParameterGroups'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/parameters.rb000066400000000000000000000027341437344660100220420ustar00rootroot00000000000000require 'fog/aws/models/rds/parameter' module Fog module AWS class RDS class Parameters < Fog::Collection attribute :group attribute :filters model Fog::AWS::RDS::Parameter def initialize(attributes) self.filters ||= {} if attributes[:source] filters[:source] = attributes[:source] end super end def all(filters_arg = filters) filters = filters_arg result = [] marker = nil finished = false while !finished data = service.describe_db_parameters(group.id, filters.merge(:marker => marker)).body result.concat(data['DescribeDBParametersResult']['Parameters']) marker = data['DescribeDBParametersResult']['Marker'] finished = marker.nil? end load(result) # data is an array of attribute hashes end def defaults(family) page1 = service.describe_engine_default_parameters(family).body['DescribeEngineDefaultParametersResult'] marker = page1['Marker'] parameters = page1['Parameters'] until marker.nil? body = service.describe_engine_default_parameters(family, 'Marker' => marker).body['DescribeEngineDefaultParametersResult'] marker = body['Marker'] parameters += body['Parameters'] end load(parameters) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/security_group.rb000066400000000000000000000051361437344660100227610ustar00rootroot00000000000000module Fog module AWS class RDS class SecurityGroup < Fog::Model identity :id, :aliases => ['DBSecurityGroupName'] attribute :description, :aliases => 'DBSecurityGroupDescription' attribute :ec2_security_groups, :aliases => 'EC2SecurityGroups', :type => :array attribute :ip_ranges, :aliases => 'IPRanges', :type => :array attribute :owner_id, :aliases => 'OwnerId' def ready? (ec2_security_groups + ip_ranges).all?{|ingress| ingress['Status'] == 'authorized'} end def destroy requires :id service.delete_db_security_group(id) true end def save requires :id requires :description data = service.create_db_security_group(id, description).body['CreateDBSecurityGroupResult']['DBSecurityGroup'] merge_attributes(data) true end # group_owner_id defaults to the current owner_id def authorize_ec2_security_group(group_name, group_owner_id=owner_id) key = group_name.match(/^sg-/) ? 'EC2SecurityGroupId' : 'EC2SecurityGroupName' authorize_ingress({ key => group_name, 'EC2SecurityGroupOwnerId' => group_owner_id }) end def authorize_cidrip(cidrip) authorize_ingress({'CIDRIP' => cidrip}) end # Add the current machine to the RDS security group. def authorize_me authorize_ip_address(Fog::CurrentMachine.ip_address) end # Add the ip address to the RDS security group. def authorize_ip_address(ip) authorize_cidrip("#{ip}/32") end def authorize_ingress(opts) data = service.authorize_db_security_group_ingress(id, opts).body['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup'] merge_attributes(data) end # group_owner_id defaults to the current owner_id def revoke_ec2_security_group(group_name, group_owner_id=owner_id) key = group_name.match(/^sg-/) ? 
'EC2SecurityGroupId' : 'EC2SecurityGroupName' revoke_ingress({ key => group_name, 'EC2SecurityGroupOwnerId' => group_owner_id }) end def revoke_cidrip(cidrip) revoke_ingress({'CIDRIP' => cidrip}) end def revoke_ingress(opts) data = service.revoke_db_security_group_ingress(id, opts).body['RevokeDBSecurityGroupIngressResult']['DBSecurityGroup'] merge_attributes(data) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/security_groups.rb000066400000000000000000000021661437344660100231440ustar00rootroot00000000000000require 'fog/aws/models/rds/security_group' module Fog module AWS class RDS class SecurityGroups < Fog::Collection attribute :server attribute :filters model Fog::AWS::RDS::SecurityGroup def initialize(attributes={}) self.filters ||= {} if attributes[:server] filters[:identifier] = attributes[:server].id end super end def all(filters_arg = filters) filters = filters_arg data = service.describe_db_security_groups(filters).body['DescribeDBSecurityGroupsResult']['DBSecurityGroups'] load(data) # data is an array of attribute hashes end # Example: # get('my_db_security_group') # => model for my_db_security_group def get(identity) data = service.describe_db_security_groups(identity).body['DescribeDBSecurityGroupsResult']['DBSecurityGroups'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end def new(attributes = {}) super end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/server.rb000066400000000000000000000203421437344660100212000ustar00rootroot00000000000000module Fog module AWS class RDS class Server < Fog::Model identity :id, :aliases => 'DBInstanceIdentifier' attribute :allocated_storage, :aliases => 'AllocatedStorage', :type => :integer attribute :auto_minor_version_upgrade, :aliases => 'AutoMinorVersionUpgrade' attribute :availability_zone, :aliases => 'AvailabilityZone' attribute :backup_retention_period, :aliases => 'BackupRetentionPeriod', :type => :integer attribute :ca_certificate_id, :aliases => 'CACertificateIdentifier' attribute :character_set_name, :aliases => 'CharacterSetName' attribute :cluster_id, :aliases => 'DBClusterIdentifier' attribute :created_at, :aliases => 'InstanceCreateTime', :type => :time attribute :db_name, :aliases => 'DBName' attribute :db_parameter_groups, :aliases => 'DBParameterGroups' attribute :db_security_groups, :aliases => 'DBSecurityGroups', :type => :array attribute :db_subnet_group_name, :aliases => 'DBSubnetGroupName' attribute :dbi_resource_id, :aliases => 'DbiResourceId' attribute :enable_iam_database_authentication, :aliases => 'EnableIAMDatabaseAuthentication', :type => :boolean attribute :endpoint, :aliases => 'Endpoint' attribute :engine, :aliases => 'Engine' attribute :engine_version, :aliases => 'EngineVersion' attribute :flavor_id, :aliases => 'DBInstanceClass' attribute :iops, :aliases => 'Iops', :type => :integer attribute :kms_key_id, :aliases => 'KmsKeyId' attribute :last_restorable_time, :aliases => 'LatestRestorableTime', :type => :time attribute :license_model, :aliases => 'LicenseModel' attribute :master_username, :aliases => 'MasterUsername' attribute :multi_az, :aliases => 'MultiAZ', :type => :boolean attribute :pending_modified_values, :aliases => 'PendingModifiedValues' attribute :preferred_backup_window, :aliases => 'PreferredBackupWindow' attribute :preferred_maintenance_window, :aliases => 'PreferredMaintenanceWindow' attribute :publicly_accessible, :aliases => 'PubliclyAccessible' attribute :read_replica_identifiers, :aliases => 'ReadReplicaDBInstanceIdentifiers', :type => 
:array attribute :read_replica_source, :aliases => 'ReadReplicaSourceDBInstanceIdentifier' attribute :state, :aliases => 'DBInstanceStatus' attribute :storage_encrypted, :aliases => 'StorageEncrypted', :type => :boolean attribute :storage_type, :aliases => 'StorageType' attribute :tde_credential_arn, :aliases => 'TdeCredentialArn' attribute :vpc_security_groups, :aliases => 'VpcSecurityGroups', :type => :array attribute :db_subnet_group, :aliases => 'DBSubnetGroup' attr_accessor :password, :parameter_group_name, :security_group_names, :port, :source_snapshot_id def cluster return nil unless cluster_id service.clusters.get(cluster_id) end def create_read_replica(replica_id, options={}) options[:security_group_names] ||= options['DBSecurityGroups'] params = self.class.new(options).attributes_to_params service.create_db_instance_read_replica(replica_id, id, params) service.servers.get(replica_id) end def ready? state == 'available' end def destroy(snapshot_identifier=nil) requires :id service.delete_db_instance(id, snapshot_identifier, snapshot_identifier.nil?) true end def reboot service.reboot_db_instance(id) true end def snapshots requires :id service.snapshots(:server => self) end def tags requires :id service.list_tags_for_resource(id). body['ListTagsForResourceResult']['TagList'] end def add_tags(new_tags) requires :id service.add_tags_to_resource(id, new_tags) tags end def remove_tags(tag_keys) requires :id service.remove_tags_from_resource(id, tag_keys) tags end def promote_read_replica requires :id data = service.promote_read_replica(id).body["PromoteReadReplicaResult"]["DBInstance"] merge_attributes(data) end alias promote promote_read_replica def modify(immediately, options) options[:security_group_names] ||= options['DBSecurityGroups'] params = self.class.new(options).attributes_to_params data = service.modify_db_instance(id, immediately, params) merge_attributes(data.body['ModifyDBInstanceResult']['DBInstance']) true end def save if source_snapshot_id requires :id data = service.restore_db_instance_from_db_snapshot(source_snapshot_id, id, attributes_to_params) merge_attributes(data.body['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']) else requires :engine if engine.start_with?('aurora') requires :cluster_id self.flavor_id ||= 'db.r4.large' else requires :master_username requires :password requires :allocated_storage self.flavor_id ||= 'db.m4.large' end data = service.create_db_instance(id, attributes_to_params) merge_attributes(data.body['CreateDBInstanceResult']['DBInstance']) true end end # Converts attributes to a parameter hash suitable for requests def attributes_to_params options = { 'AllocatedStorage' => allocated_storage, 'AutoMinorVersionUpgrade' => auto_minor_version_upgrade, 'AvailabilityZone' => availability_zone, 'BackupRetentionPeriod' => backup_retention_period, 'DBClusterIdentifier' => cluster_id, 'DBInstanceClass' => flavor_id, 'DBInstanceIdentifier' => id, 'DBName' => db_name, 'DBParameterGroupName' => parameter_group_name || attributes['DBParameterGroupName'], 'DBSecurityGroups' => security_group_names, 'DBSubnetGroupName' => db_subnet_group_name, 'EnableIAMDatabaseAuthentication' => enable_iam_database_authentication, 'Engine' => engine, 'EngineVersion' => engine_version, 'Iops' => iops, 'KmsKeyId' => kms_key_id, 'LicenseModel' => license_model, 'MasterUserPassword' => password || attributes['MasterUserPassword'], 'MasterUsername' => master_username, 'MultiAZ' => multi_az, 'Port' => port || attributes['Port'], 'PreferredBackupWindow' => 
preferred_backup_window, 'PreferredMaintenanceWindow' => preferred_maintenance_window, 'PubliclyAccessible' => publicly_accessible, 'StorageEncrypted' => storage_encrypted, 'StorageType' => storage_type, 'VpcSecurityGroups' => vpc_security_groups, } options.delete_if {|key, value| value.nil?} end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/servers.rb000066400000000000000000000012521437344660100213620ustar00rootroot00000000000000require 'fog/aws/models/rds/server' module Fog module AWS class RDS class Servers < Fog::Collection model Fog::AWS::RDS::Server def all data = service.describe_db_instances.body['DescribeDBInstancesResult']['DBInstances'] load(data) # data is an array of attribute hashes end def get(identity) data = service.describe_db_instances(identity).body['DescribeDBInstancesResult']['DBInstances'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end def restore(options) create(options) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/snapshot.rb000066400000000000000000000047311437344660100215350ustar00rootroot00000000000000module Fog module AWS class RDS class Snapshot < Fog::Model identity :id, :aliases => ['DBSnapshotIdentifier', 'DBClusterSnapshotIdentifier', :name] attribute :allocated_storage, :aliases => 'AllocatedStorage', :type => :integer attribute :availability_zone, :aliases => 'AvailabilityZone' attribute :cluster_created_at, :aliases => 'ClusterCreateTime', :type => :time attribute :cluster_id, :aliases => 'DBClusterIdentifier' attribute :created_at, :aliases => 'SnapshotCreateTime', :type => :time attribute :engine, :aliases => 'Engine' attribute :engine_version, :aliases => 'EngineVersion' attribute :instance_created_at, :aliases => 'InstanceCreateTime', :type => :time attribute :instance_id, :aliases => 'DBInstanceIdentifier' attribute :iops, :aliases => 'Iops', :type => :integer attribute :license_model, :aliases => 'LicenseModel' attribute :master_username, :aliases => 'MasterUsername' attribute :port, :aliases => 'Port', :type => :integer attribute :publicly_accessible, :aliases => 'PubliclyAccessible' attribute :state, :aliases => 'Status' attribute :storage_type, :aliases => 'StorageType' attribute :type, :aliases => 'SnapshotType' def ready? state == 'available' end def destroy requires :id requires_one :instance_id, :cluster_id if instance_id service.delete_db_snapshot(id) else service.delete_db_cluster_snapshot(id) end true end def save requires_one :instance_id, :cluster_id requires :id data = if instance_id service.create_db_snapshot(instance_id, id).body['CreateDBSnapshotResult']['DBSnapshot'] elsif cluster_id service.create_db_cluster_snapshot(cluster_id, id).body['CreateDBClusterSnapshotResult']['DBClusterSnapshot'] end merge_attributes(data) true end def server requires :instance_id service.servers.get(instance_id) end def cluster requires :cluster_id service.clusters.get(cluster_id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/snapshots.rb000066400000000000000000000047141437344660100217210ustar00rootroot00000000000000require 'fog/aws/models/rds/snapshot' module Fog module AWS class RDS class Snapshots < Fog::Collection attribute :server attribute :filters model Fog::AWS::RDS::Snapshot def initialize(attributes) self.filters ||= {} if attributes[:server] filters[:identifier] = attributes[:server].id end if attributes[:type] filters[:type] = attributes[:type] end super end # This method does NOT return all snapshots. 
Its implementation deliberately returns a single page # of results for any one call. It will return a single page based on the current or provided filters, # updating the filters with the marker for the next page. Calling this repeatedly will iterate # through pages. See the implementation of each for an example of such iteration. # # It is arguably incorrect for the method not to return all snapshots, particularly considering the # implementation in the corresponding 'elb' files. But this implementation has been released, and # backwards-compatibility requires leaving it as implemented. def all(filters_arg = filters) filters.merge!(filters_arg) page = service.describe_db_snapshots(filters).body['DescribeDBSnapshotsResult'] filters[:marker] = page['Marker'] load(page['DBSnapshots']) end # This will execute a block for each snapshot, fetching new pages of snapshots as required. def each(filters_arg = filters) if block_given? begin page = self.all(filters_arg) # We need to explicitly use the base 'each' method here on the page, otherwise we get infinite recursion base_each = Fog::Collection.instance_method(:each) base_each.bind(page).call { |snapshot| yield snapshot } end while self.filters[:marker] end self end def get(identity) data = service.describe_db_snapshots(:snapshot_id => identity).body['DescribeDBSnapshotsResult']['DBSnapshots'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end def new(attributes = {}) if server super({ :instance_id => server.id }.merge!(attributes)) else super end end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/subnet_group.rb000066400000000000000000000013701437344660100224060ustar00rootroot00000000000000module Fog module AWS class RDS class SubnetGroup < Fog::Model identity :id, :aliases => ['DBSubnetGroupName', :name] attribute :description, :aliases => 'DBSubnetGroupDescription' attribute :status, :aliases => 'SubnetGroupStatus' attribute :vpc_id, :aliases => 'VpcId' attribute :subnet_ids, :aliases => 'Subnets' def ready? 
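        # Sketch of creating a DB subnet group with this model (the RDS
        # connection, name, description and subnet ids are hypothetical):
        #
        #   rds = Fog::AWS::RDS.new(:aws_access_key_id => '...', :aws_secret_access_key => '...')
        #   rds.subnet_groups.create(
        #     :id          => 'my-subnets',
        #     :description => 'subnets for my app',
        #     :subnet_ids  => ['subnet-aaaa1111', 'subnet-bbbb2222']
        #   )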
requires :status status == 'Complete' end def save requires :description, :id, :subnet_ids service.create_db_subnet_group(id, subnet_ids, description) reload end def destroy requires :id service.delete_db_subnet_group(id) end end end end end fog-aws-3.18.0/lib/fog/aws/models/rds/subnet_groups.rb000066400000000000000000000012121437344660100225640ustar00rootroot00000000000000require 'fog/aws/models/rds/subnet_group' module Fog module AWS class RDS class SubnetGroups < Fog::Collection model Fog::AWS::RDS::SubnetGroup def all data = service.describe_db_subnet_groups.body['DescribeDBSubnetGroupsResult']['DBSubnetGroups'] load(data) # data is an array of attribute hashes end def get(identity) data = service.describe_db_subnet_groups(identity).body['DescribeDBSubnetGroupsResult']['DBSubnetGroups'].first new(data) # data is an attribute hash rescue Fog::AWS::RDS::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/sns/000077500000000000000000000000001437344660100173575ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/sns/subscription.rb000066400000000000000000000000001437344660100224260ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/sns/subscriptions.rb000066400000000000000000000004671437344660100226200ustar00rootroot00000000000000require 'fog/aws/models/sns/subscription' module Fog module AWS class SNS class Subscriptions < Fog::Collection model Fog::AWS::SNS::Subscription def all data = service.list_subscriptions.body["Subscriptions"] load(data) end end end end end fog-aws-3.18.0/lib/fog/aws/models/sns/topic.rb000066400000000000000000000025011437344660100210200ustar00rootroot00000000000000module Fog module AWS class SNS class Topic < Fog::Model identity :id, :aliases => "TopicArn" attribute :owner, :aliases => "Owner" attribute :policy, :aliases => "Policy" attribute :display_name, :aliases => "DisplayName" attribute :subscriptions_pending, :aliases => "SubscriptionsPending" attribute :subscriptions_confirmed, :aliases => "SubscriptionsConfirmed" attribute :subscriptions_deleted, :aliases => "SubscriptionsDeleted" attribute :delivery_policy, :aliases => "DeliveryPolicy" attribute :effective_delivery_policy, :aliases => "EffectiveDeliveryPolicy" def ready?
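        # Sketch of creating and inspecting a topic (the topic name is
        # hypothetical; #save passes the name to create_topic and stores the
        # returned ARN as the identity):
        #
        #   sns   = Fog::AWS::SNS.new(:aws_access_key_id => '...', :aws_secret_access_key => '...')
        #   topic = sns.topics.create(:id => 'my-alerts')
        #   topic.id # => the TopicArn returned by AWS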
display_name end def update_topic_attribute(attribute, new_value) requires :id service.set_topic_attributes(id, attribute, new_value).body reload end def destroy requires :id service.delete_topic(id) true end def save requires :id data = service.create_topic(id).body["TopicArn"] if data data = {"id" => data} merge_attributes(data) true else false end end end end end end fog-aws-3.18.0/lib/fog/aws/models/sns/topics.rb000066400000000000000000000007751437344660100212160ustar00rootroot00000000000000require 'fog/aws/models/sns/topic' module Fog module AWS class SNS class Topics < Fog::Collection model Fog::AWS::SNS::Topic def all data = service.list_topics.body["Topics"].map { |t| {"id" => t} } #This is an array, but it needs to be an array of hashes for #load load(data) end def get(id) if data = service.get_topic_attributes(id).body["Attributes"] new(data) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/000077500000000000000000000000001437344660100202205ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/storage/directories.rb000066400000000000000000000023151437344660100230620ustar00rootroot00000000000000require 'fog/aws/models/storage/directory' module Fog module AWS class Storage class Directories < Fog::Collection model Fog::AWS::Storage::Directory def all data = service.get_service.body['Buckets'] load(data) end # Warning! This retrieves and caches meta data for the first 10,000 objects in the bucket, which can be very expensive. When possible use directories.new def get(key, options = {}) remap_attributes(options, { :delimiter => 'delimiter', :marker => 'marker', :max_keys => 'max-keys', :prefix => 'prefix' }) data = service.get_bucket(key, options).body directory = new(:key => data['Name'], :is_persisted => true) options = {} for k, v in data if ['CommonPrefixes', 'Delimiter', 'IsTruncated', 'Marker', 'MaxKeys', 'Prefix'].include?(k) options[k] = v end end directory.files.merge_attributes(options) directory.files.load(data['Contents']) directory rescue Excon::Errors::NotFound nil end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/directory.rb000066400000000000000000000110451437344660100225520ustar00rootroot00000000000000require 'fog/aws/models/storage/files' require 'fog/aws/models/storage/versions' module Fog module AWS class Storage class Directory < Fog::Model VALID_ACLS = ['private', 'public-read', 'public-read-write', 'authenticated-read'] attr_reader :acl identity :key, :aliases => ['Name', 'name'] attribute :creation_date, :aliases => 'CreationDate', :type => 'time' def acl=(new_acl) unless VALID_ACLS.include?(new_acl) raise ArgumentError.new("acl must be one of [#{VALID_ACLS.join(', ')}]") else @acl = new_acl end end def destroy requires :key service.delete_bucket(key) true rescue Excon::Errors::NotFound false end # @param options [Hash] (defaults to: {}) — a customizable set of options. # Consider tuning this values for big buckets. # @option options timeout [Integer] — default: Fog.timeout — Maximum number of # seconds to wait for the bucket to be empty. # @option options interval [Proc|Integer] — default: Fog.interval — Seconds to wait before # retrying to check if the bucket is empty. def destroy!(options = {}) requires :key options = { timeout: Fog.timeout, interval: Fog.interval, }.merge(options) attempts = 0 begin clear! 
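            # clear! issued the bulk delete above; wait, using the :timeout and
            # :interval options documented above, until the bucket reports no
            # remaining objects before deleting the bucket itself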
Fog.wait_for(options[:timeout], options[:interval]) { objects_keys.size == 0 } service.delete_bucket(key) true rescue Excon::Errors::HTTPStatusError false end end def location @location ||= (bucket_location || Storage::DEFAULT_REGION) end # NOTE: you can't change the region once the bucket is created def location=(new_location) @location = new_location end def files @files ||= Fog::AWS::Storage::Files.new(:directory => self, :service => service) end def payer requires :key data = service.get_request_payment(key) data.body['Payer'] end def payer=(new_payer) requires :key service.put_request_payment(key, new_payer) @payer = new_payer end def versioning? requires :key data = service.get_bucket_versioning(key) data.body['VersioningConfiguration']['Status'] == 'Enabled' end def versioning=(new_versioning) requires :key service.put_bucket_versioning(key, new_versioning ? 'Enabled' : 'Suspended') end def versions @versions ||= Fog::AWS::Storage::Versions.new(:directory => self, :service => service) end def public=(new_public) self.acl = new_public ? 'public-read' : 'private' new_public end def public_url requires :key if service.get_bucket_acl(key).body['AccessControlList'].find {|grant| grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' && grant['Permission'] == 'READ'} service.request_url( :bucket_name => key ) else nil end end def save requires :key options = {} options['x-amz-acl'] = acl if acl # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html # Ignore the default region us-east-1 if !persisted? && location != DEFAULT_REGION options['LocationConstraint'] = location end service.put_bucket(key, options) attributes[:is_persisted] = true true end def persisted? # is_persisted is true in case of directories.get or after #save # creation_date is set in case of directories.all attributes[:is_persisted] || !!attributes[:creation_date] end private def bucket_location requires :key return nil unless persisted? data = service.get_bucket_location(key) data.body['LocationConstraint'] end def objects_keys requires :key bucket_query = service.get_bucket(key) bucket_query.body["Contents"].map {|c| c["Key"]} end def clear! 
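          # issues a single multi-object delete for the keys currently listed in
          # the bucket (no-op when the bucket is already empty)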
requires :key service.delete_multiple_objects(key, objects_keys) if objects_keys.size > 0 end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/file.rb000066400000000000000000000432111437344660100214650ustar00rootroot00000000000000require 'fog/aws/models/storage/versions' module Fog module AWS class Storage class File < Fog::Model # @deprecated use {Fog::AWS::Storage::MIN_MULTIPART_CHUNK_SIZE} instead MIN_MULTIPART_CHUNK_SIZE = Fog::AWS::Storage::MIN_MULTIPART_CHUNK_SIZE # @deprecated use {Fog::AWS::Storage::MAX_SINGLE_PUT_SIZE} instead MAX_SINGLE_PUT_SIZE = Fog::AWS::Storage::MAX_SINGLE_PUT_SIZE # @deprecated not used for anything MULTIPART_COPY_THRESHOLD = 15728640 # @see AWS Object docs http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectOps.html identity :key, :aliases => 'Key' attr_writer :body attribute :cache_control, :aliases => 'Cache-Control' attribute :content_disposition, :aliases => 'Content-Disposition' attribute :content_encoding, :aliases => 'Content-Encoding' attribute :content_length, :aliases => ['Content-Length', 'Size'], :type => :integer attribute :content_md5, :aliases => 'Content-MD5' attribute :content_type, :aliases => 'Content-Type' attribute :etag, :aliases => ['Etag', 'ETag'] attribute :expires, :aliases => 'Expires' attribute :last_modified, :aliases => ['Last-Modified', 'LastModified'] attribute :metadata attribute :owner, :aliases => 'Owner' attribute :storage_class, :aliases => ['x-amz-storage-class', 'StorageClass'] attribute :encryption, :aliases => 'x-amz-server-side-encryption' attribute :encryption_key, :aliases => 'x-amz-server-side-encryption-customer-key' attribute :version, :aliases => 'x-amz-version-id' attribute :kms_key_id, :aliases => 'x-amz-server-side-encryption-aws-kms-key-id' attribute :tags, :aliases => 'x-amz-tagging' attribute :website_redirect_location, :aliases => 'x-amz-website-redirect-location' UploadPartData = Struct.new(:part_number, :upload_options, :etag) class PartList def initialize(parts = []) @parts = parts @mutex = Mutex.new end def push(part) @mutex.synchronize { @parts.push(part) } end def shift @mutex.synchronize { @parts.shift } end def clear! @mutex.synchronize { @parts.clear } end def size @mutex.synchronize { @parts.size } end def to_a @mutex.synchronize { @parts.dup } end end # @note Chunk size to use for multipart uploads. # Use small chunk sizes to minimize memory. E.g. 5242880 = 5mb attr_reader :multipart_chunk_size def multipart_chunk_size=(mp_chunk_size) service.validate_chunk_size(mp_chunk_size, 'multipart_chunk_size') @multipart_chunk_size = mp_chunk_size end # @note Number of threads used to copy files. def concurrency=(concurrency) raise ArgumentError.new('minimum concurrency is 1') if concurrency.to_i < 1 @concurrency = concurrency.to_i end def concurrency @concurrency || 1 end def acl requires :directory, :key service.get_object_acl(directory.key, key).body['AccessControlList'] end # Set file's access control list (ACL). # # valid acls: private, public-read, public-read-write, authenticated-read, bucket-owner-read, bucket-owner-full-control # # @param [String] new_acl one of valid options # @return [String] @acl # def acl=(new_acl) valid_acls = ['private', 'public-read', 'public-read-write', 'authenticated-read', 'bucket-owner-read', 'bucket-owner-full-control'] unless valid_acls.include?(new_acl) raise ArgumentError.new("acl must be one of [#{valid_acls.join(', ')}]") end @acl = new_acl end # Get file's body if exists, else ''. 
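# Downloads the object from S3 via the collection on first access and caches the
# result in the model's attributes; an empty string is returned when the object
# no longer exists.
#
# @example Illustrative usage (bucket and key names are assumptions)
#   file = directory.files.get('some_key')
#   file.body # => object contents, fetched once and then memoized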
# # @return [File] # def body return attributes[:body] if attributes.key?(:body) file = collection.get(identity) attributes[:body] = if file file.body else '' end end # Set body attribute. # # @param [File] new_body # @return [File] attributes[:body] # def body=(new_body) attributes[:body] = new_body end # Get the file instance's directory. # # @return [Fog::AWS::Storage::Directory] # def directory @directory end # Copy object from one bucket to other bucket. # # required attributes: directory, key # # @param target_directory_key [String] # @param target_file_key [String] # @param options [Hash] options for copy_object method # @return [String] Fog::AWS::Files#head status of directory contents # def copy(target_directory_key, target_file_key, options = {}) requires :directory, :key self.multipart_chunk_size = service.max_copy_chunk_size if multipart_chunk_size.nil? if multipart_chunk_size > 0 && self.content_length.to_i >= multipart_chunk_size upload_part_options = options.select { |key, _| ALLOWED_UPLOAD_PART_OPTIONS.include?(key.to_sym) } upload_part_options = upload_part_options.merge({ 'x-amz-copy-source' => "#{directory.key}/#{key}" }) multipart_copy(options, upload_part_options, target_directory_key, target_file_key) else service.copy_object(directory.key, key, target_directory_key, target_file_key, options) end target_directory = service.directories.new(:key => target_directory_key) target_directory.files.head(target_file_key) end # Destroy file via http DELETE. # # required attributes: directory, key # # @param options [Hash] # @option options versionId [] # @return [Boolean] true if successful # def destroy(options = {}) requires :directory, :key attributes[:body] = nil if options['versionId'] == version service.delete_object(directory.key, key, options) true end remove_method :metadata def metadata attributes.reject {|key, value| !(key.to_s =~ /^x-amz-/)} end remove_method :metadata= def metadata=(new_metadata) merge_attributes(new_metadata) end remove_method :owner= def owner=(new_owner) if new_owner attributes[:owner] = { :display_name => new_owner['DisplayName'] || new_owner[:display_name], :id => new_owner['ID'] || new_owner[:id] } end end def public? acl.any? {|grant| grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' && grant['Permission'] == 'READ'} end # Set Access-Control-List permissions. # # valid new_publics: public_read, private # # @param [String] new_public # @return [String] new_public # def public=(new_public) if new_public @acl = 'public-read' else @acl = 'private' end new_public end # Get publicly accessible url via http GET. # Checks permissions before creating. # Defaults to s3 subdomain or compliant bucket name # # required attributes: directory, key # # @return [String] public url # def public_url requires :directory, :key if public? service.request_url( :bucket_name => directory.key, :object_name => key ) else nil end end # Save file with body as contents to directory.key with name key via http PUT # # required attributes: body, directory, key # # @param [Hash] options # @option options [String] acl sets x-amz-acl HTTP header. Valid values include, private | public-read | public-read-write | authenticated-read | bucket-owner-read | bucket-owner-full-control # @option options [String] cache_control sets Cache-Control header. For example, 'No-cache' # @option options [String] content_disposition sets Content-Disposition HTTP header. 
For exampple, 'attachment; filename=testing.txt' # @option options [String] content_encoding sets Content-Encoding HTTP header. For example, 'x-gzip' # @option options [String] content_md5 sets Content-MD5. For example, '79054025255fb1a26e4bc422aef54eb4' # @option options [String] content_type Content-Type. For example, 'text/plain' # @option options [String] expires sets number of seconds before AWS Object expires. # @option options [String] storage_class sets x-amz-storage-class HTTP header. Defaults to 'STANDARD'. Or, 'REDUCED_REDUNDANCY' # @option options [String] encryption sets HTTP encryption header. Set to 'AES256' to encrypt files at rest on S3 # @option options [String] tags sets x-amz-tagging HTTP header. For example, 'Org-Id=1' or 'Org-Id=1&Service=MyService' # @option options [String] website_redirect_location sets x-amz-website-redirect-location HTTP header. For example, 'website_redirect_location=http://www.rubydoc.info/github/fog/fog-aws' # @return [Boolean] true if no errors # def save(options = {}) requires :body, :directory, :key if options != {} Fog::Logger.deprecation("options param is deprecated, use acl= instead [light_black](#{caller.first})[/]") end options['x-amz-acl'] ||= @acl if @acl options['Cache-Control'] = cache_control if cache_control options['Content-Disposition'] = content_disposition if content_disposition options['Content-Encoding'] = content_encoding if content_encoding options['Content-MD5'] = content_md5 if content_md5 options['Content-Type'] = content_type if content_type options['Expires'] = expires if expires options.merge!(metadata) options['x-amz-storage-class'] = storage_class if storage_class options['x-amz-tagging'] = tags if tags options['x-amz-website-redirect-location'] = website_redirect_location if website_redirect_location options.merge!(encryption_headers) self.multipart_chunk_size = service.max_put_chunk_size if multipart_chunk_size.nil? if multipart_chunk_size > 0 && Fog::Storage.get_body_size(body) >= multipart_chunk_size && body.respond_to?(:read) data = multipart_save(options) merge_attributes(data.body) else data = service.put_object(directory.key, key, body, options) merge_attributes(data.headers.reject {|key, value| ['Content-Length', 'Content-Type'].include?(key)}) end self.etag = self.etag.gsub('"','') if self.etag self.content_length = Fog::Storage.get_body_size(body) self.content_type ||= Fog::Storage.get_content_type(body) true end # Get a url for file. # # required attributes: key # # @param expires [String] number of seconds (since 1970-01-01 00:00) before url expires # @param options [Hash] # @return [String] url # def url(expires, options = {}) requires :key collection.get_url(key, expires, options) end # File version if exists or creates new version. 
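# The versions collection is built lazily and is only meaningful when versioning is enabled on the bucket.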
# @return [Fog::AWS::Storage::Version] # def versions @versions ||= begin Fog::AWS::Storage::Versions.new( :file => self, :service => service ) end end private def directory=(new_directory) @directory = new_directory end def multipart_save(options) # Initiate the upload res = service.initiate_multipart_upload(directory.key, key, options) upload_id = res.body["UploadId"] # Store ETags of upload parts part_tags = [] # Upload each part # TODO: optionally upload chunks in parallel using threads # (may cause network performance problems with many small chunks) # TODO: Support large chunk sizes without reading the chunk into memory if body.respond_to?(:rewind) body.rewind rescue nil end while (chunk = body.read(multipart_chunk_size)) do part_upload = service.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk, part_headers(chunk)) part_tags << part_upload.headers["ETag"] end if part_tags.empty? #it is an error to have a multipart upload with no parts part_upload = service.upload_part(directory.key, key, upload_id, 1, '', part_headers('')) part_tags << part_upload.headers["ETag"] end rescue # Abort the upload & reraise service.abort_multipart_upload(directory.key, key, upload_id) if upload_id raise else # Complete the upload service.complete_multipart_upload(directory.key, key, upload_id, part_tags) end def multipart_copy(options, upload_part_options, target_directory_key, target_file_key) # Initiate the upload res = service.initiate_multipart_upload(target_directory_key, target_file_key, options) upload_id = res.body["UploadId"] # Store ETags of upload parts part_tags = [] pending = PartList.new(create_part_list(upload_part_options)) thread_count = self.concurrency completed = PartList.new errors = upload_in_threads(target_directory_key, target_file_key, upload_id, pending, completed, thread_count) raise errors.first if errors.any? part_tags = completed.to_a.sort_by { |part| part.part_number }.map(&:etag) rescue => e # Abort the upload & reraise service.abort_multipart_upload(target_directory_key, target_file_key, upload_id) if upload_id raise else # Complete the upload service.complete_multipart_upload(target_directory_key, target_file_key, upload_id, part_tags) end def encryption_headers if encryption && encryption_key encryption_customer_key_headers elsif encryption { 'x-amz-server-side-encryption' => encryption, 'x-amz-server-side-encryption-aws-kms-key-id' => kms_key_id }.reject {|_, value| value.nil?} else {} end end def part_headers(chunk) base_headers = part_checksum_headers(chunk) # Only SSE-C headers needed in the UploadPart request. [1] # x-amz-server-side-encryption and # x-amz-server-side-encryption-aws-kms-key-id are only needed # in the CreateMultipartUpload request. [2] # [1] https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html # [2] https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html base_headers.merge!(encryption_customer_key_headers) if encryption && encryption_key base_headers end def encryption_customer_key_headers { 'x-amz-server-side-encryption-customer-algorithm' => encryption, 'x-amz-server-side-encryption-customer-key' => Base64.encode64(encryption_key.to_s).chomp!, 'x-amz-server-side-encryption-customer-key-md5' => Base64.encode64(OpenSSL::Digest::MD5.digest(encryption_key.to_s)).chomp! 
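# the key's MD5 is included so S3 can verify the customer-provided key arrived intact (SSE-C)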
} end def part_checksum_headers(chunk) if service.disable_content_md5_validation {} else { 'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(chunk)).strip } end end def create_part_list(upload_part_options) current_pos = 0 count = 0 pending = [] while current_pos < self.content_length do start_pos = current_pos end_pos = [current_pos + self.multipart_chunk_size, self.content_length - 1].min range = "bytes=#{start_pos}-#{end_pos}" part_options = upload_part_options.dup part_options['x-amz-copy-source-range'] = range pending << UploadPartData.new(count + 1, part_options, nil) count += 1 current_pos = end_pos + 1 end pending end def upload_in_threads(target_directory_key, target_file_key, upload_id, pending, completed, thread_count) threads = [] thread_count.times do thread = Thread.new do begin while part = pending.shift part_upload = service.upload_part_copy(target_directory_key, target_file_key, upload_id, part.part_number, part.upload_options) part.etag = part_upload.body['ETag'] completed.push(part) end rescue => error pending.clear! error end end thread.abort_on_exception = true threads << thread end threads.map(&:value).compact end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/files.rb000066400000000000000000000106441437344660100216540ustar00rootroot00000000000000require 'fog/aws/models/storage/file' module Fog module AWS class Storage class Files < Fog::Collection extend Fog::Deprecation deprecate :get_url, :get_https_url attribute :common_prefixes, :aliases => 'CommonPrefixes' attribute :delimiter, :aliases => 'Delimiter' attribute :directory attribute :is_truncated, :aliases => 'IsTruncated' attribute :marker, :aliases => 'Marker' attribute :max_keys, :aliases => ['MaxKeys', 'max-keys'] attribute :prefix, :aliases => 'Prefix' model Fog::AWS::Storage::File DASHED_HEADERS = %w( Cache-Control Content-Disposition Content-Encoding Content-Length Content-MD5 Content-Type ).freeze def all(options = {}) requires :directory options = { 'delimiter' => delimiter, 'marker' => marker, 'max-keys' => max_keys, 'prefix' => prefix }.merge!(options) options = options.reject {|key,value| value.nil? || value.to_s.empty?} merge_attributes(options) parent = directory.collection.get( directory.key, options ) if parent merge_attributes(parent.files.attributes) load(parent.files.map {|file| file.attributes}) else nil end end alias_method :each_file_this_page, :each def each if !block_given? 
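# Without a block, mirror Enumerable#each and return the collection itself; the else branch below walks every page of the bucket listing (resuming from the marker of the last key), not just the first page.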
self else subset = dup.all subset.each_file_this_page {|f| yield f} while subset.is_truncated subset = subset.all(:marker => subset.last.key) subset.each_file_this_page {|f| yield f} end self end end def get(key, options = {}, &block) requires :directory data = service.get_object(directory.key, key, options, &block) normalize_headers(data) file_data = data.headers.merge({ :body => data.body, :key => key }) new(file_data) rescue Excon::Errors::NotFound => error case error.response.body when /NoSuchKey<\/Code>/ nil when /NoSuchBucket<\/Code>/ raise(Fog::AWS::Storage::NotFound.new("Directory #{directory.identity} does not exist.")) else raise(error) end end def get_url(key, expires, options = {}) requires :directory service.get_object_url(directory.key, key, expires, options) end def get_http_url(key, expires, options = {}) requires :directory service.get_object_http_url(directory.key, key, expires, options) end def get_https_url(key, expires, options = {}) requires :directory service.get_object_https_url(directory.key, key, expires, options) end def head_url(key, expires, options = {}) requires :directory service.head_object_url(directory.key, key, expires, options) end def head(key, options = {}) requires :directory data = service.head_object(directory.key, key, options) normalize_headers(data) file_data = data.headers.merge({ :key => key }) new(file_data) rescue Excon::Errors::NotFound nil end def new(attributes = {}) requires :directory super({ :directory => directory }.merge!(attributes)) end def normalize_headers(data) data.headers['Last-Modified'] = Time.parse(fetch_and_delete_header(data, 'Last-Modified')) etag = fetch_and_delete_header(data, 'ETag').gsub('"','') data.headers['ETag'] = etag DASHED_HEADERS.each do |header| value = fetch_and_delete_header(data, header) data.headers[header] = value if value end end private def fetch_and_delete_header(response, header) value = response.get_header(header) return unless value response.headers.keys.each do |key| response.headers.delete(key) if key.downcase == header.downcase end value end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/version.rb000066400000000000000000000021051437344660100222300ustar00rootroot00000000000000module Fog module AWS class Storage class Version < Fog::Model identity :version, :aliases => 'VersionId' attribute :key, :aliases => 'Key' attribute :last_modified, :aliases => ['Last-Modified', 'LastModified'] attribute :latest, :aliases => 'IsLatest', :type => :boolean attribute :content_length, :aliases => ['Content-Length', 'Size'], :type => :integer attribute :delete_marker, :type => :boolean def file @file ||= if collection.file collection.file.directory.files.get(key, 'versionId' => version) else collection.directory.files.get(key, 'versionId' => version) end end def destroy if collection.file collection.service.delete_object(collection.file.directory.key, key, 'versionId' => version) else collection.service.delete_object(collection.directory.key, key, 'versionId' => version) end end end end end end fog-aws-3.18.0/lib/fog/aws/models/storage/versions.rb000066400000000000000000000014361437344660100224210ustar00rootroot00000000000000require 'fog/aws/models/storage/version' module Fog module AWS class Storage class Versions < Fog::Collection attribute :file attribute :directory model Fog::AWS::Storage::Version def all(options = {}) data = if file service.get_bucket_object_versions(file.directory.key, options.merge('prefix' => file.key)).body['Versions'] else service.get_bucket_object_versions(directory.key, 
options).body['Versions'] end load(data) end def new(attributes = {}) version_type = attributes.keys.first model = super(attributes[version_type]) model.delete_marker = version_type == 'DeleteMarker' model end end end end end fog-aws-3.18.0/lib/fog/aws/models/support/000077500000000000000000000000001437344660100202705ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/models/support/flagged_resource.rb000066400000000000000000000005101437344660100241110ustar00rootroot00000000000000module Fog module AWS class Support class FlaggedResource < Fog::Model identity :resource_id, :aliases => "resourceId" attribute :is_suppressed, :aliases => "isSuppressed", :type => :boolean attribute :metadata attribute :region attribute :status end end end end fog-aws-3.18.0/lib/fog/aws/models/support/flagged_resources.rb000066400000000000000000000003311437344660100242750ustar00rootroot00000000000000require 'fog/aws/models/support/flagged_resource' module Fog module AWS class Support class FlaggedResources < Fog::Collection model Fog::AWS::Support::FlaggedResource end end end end fog-aws-3.18.0/lib/fog/aws/models/support/trusted_advisor_check.rb000066400000000000000000000040631437344660100251760ustar00rootroot00000000000000module Fog module AWS class Support class TrustedAdvisorCheck < Fog::Model identity :id, :aliases => 'checkId' attribute :name attribute :category attribute :description attribute :metadata attribute :flagged_resources, :aliases => 'flaggedResources' attribute :resources_summary, :aliases => 'resourcesSummary' attribute :status attribute :timestamp attribute :category_specific_summary, :aliases => 'categorySpecificSummary' def populate_extended_attributes(lazy=false) return if lazy == true data = service.describe_trusted_advisor_check_result(:id => self.identity).body["result"] merge_attributes(data) end def flagged_resources(lazy=true) if attributes[:flagged_resources].nil? populate_extended_attributes(lazy) if attributes[:flagged_resources] map_flagged_resources! service.flagged_resources.load(attributes[:flagged_resources]) else nil end else if attributes[:flagged_resources].first['metadata'].is_a?(Array) map_flagged_resources! end service.flagged_resources.load(attributes[:flagged_resources]) end end def category_specific_summary(lazy=true) populate_extended_attributes(lazy) if attributes[:category_specific_summary].nil? attributes[:category_stecific_summary] end def resources_summary(lazy=true) populate_extended_attributes(lazy) if attributes[:resources_summary].nil? attributes[:resources_summary] end private def map_flagged_resources! attributes[:flagged_resources].map! 
do |fr| fr['metadata'] = fr['metadata'].each_with_index.inject({}) do |hash,(data,index)| hash[self.metadata[index]] = data hash end fr end end end end end end fog-aws-3.18.0/lib/fog/aws/models/support/trusted_advisor_checks.rb000066400000000000000000000010131437344660100253510ustar00rootroot00000000000000require 'fog/aws/models/support/trusted_advisor_check' module Fog module AWS class Support class TrustedAdvisorChecks < Fog::Collection model Fog::AWS::Support::TrustedAdvisorCheck def all data = service.describe_trusted_advisor_checks.body['checks'] load(data) end def get(id) data = service.describe_trusted_advisor_check_result(:id => id).body['result'] new(data).populate_extended_attributes end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/000077500000000000000000000000001437344660100167505ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/000077500000000000000000000000001437344660100214205ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/basic.rb000066400000000000000000000007471437344660100230360ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class Basic < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_adjustment_types.rb000066400000000000000000000024151437344660100272110ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeAdjustmentTypes < Fog::Parsers::Base def reset reset_adjustment_type @results = { 'AdjustmentTypes' => [] } @response = { 'DescribeAdjustmentTypesResult' => {}, 'ResponseMetadata' => {} } end def reset_adjustment_type @adjustment_type = {} end def start_element(name, attrs = []) super case name when 'AdjustmentTypes' @in_adjustment_types = true end end def end_element(name) case name when 'member' if @in_adjustment_types @results['AdjustmentTypes'] << @adjustment_type reset_adjustment_type end when 'AdjustmentType' @adjustment_type[name] = value when 'AdjustmentTypes' @in_adjustment_types = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeAdjustmentTypesResponse' @response['DescribeAdjustmentTypesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_auto_scaling_groups.rb000066400000000000000000000124111437344660100276530ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeAutoScalingGroups < Fog::Parsers::Base def reset reset_auto_scaling_group reset_enabled_metric reset_instance reset_suspended_process reset_tag @results = { 'AutoScalingGroups' => [] } @response = { 'DescribeAutoScalingGroupsResult' => {}, 'ResponseMetadata' => {} } end def reset_auto_scaling_group @auto_scaling_group = { 'AvailabilityZones' => [], 'EnabledMetrics' => [], 'Instances' => [], 'LoadBalancerNames' => [], 'SuspendedProcesses' => [], 'Tags' => [], 'TargetGroupARNs' => [], 'TerminationPolicies' => [] } end def reset_enabled_metric @enabled_metric = {} end def reset_instance @instance = {} end def reset_suspended_process @suspended_process = {} end def reset_tag @tag = {} end def start_element(name, attrs = []) super case name when 'member' when 'AvailabilityZones' @in_availability_zones = true when 'EnabledMetrics' @in_enabled_metrics = true when 'Instances' @in_instances = 
true when 'LoadBalancerNames' @in_load_balancer_names = true when 'SuspendedProcesses' @in_suspended_processes = true when 'Tags' @in_tags = true when 'TargetGroupARNs' @in_target_groups = true when 'TerminationPolicies' @in_termination_policies = true end end def end_element(name) case name when 'member' if @in_availability_zones @auto_scaling_group['AvailabilityZones'] << value elsif @in_enabled_metrics @auto_scaling_group['EnabledMetrics'] << @enabled_metric reset_enabled_metric elsif @in_instances @auto_scaling_group['Instances'] << @instance reset_instance elsif @in_load_balancer_names @auto_scaling_group['LoadBalancerNames'] << value elsif @in_suspended_processes @auto_scaling_group['SuspendedProcesses'] << @suspended_process reset_suspended_process elsif @in_tags @auto_scaling_group['Tags'] << @tag reset_tag elsif @in_target_groups @auto_scaling_group['TargetGroupARNs'] << value elsif @in_termination_policies @auto_scaling_group['TerminationPolicies'] << value else @results['AutoScalingGroups'] << @auto_scaling_group reset_auto_scaling_group end when 'AvailabilityZones' @in_availability_zones = false when 'Granularity', 'Metric' @enabled_metric[name] = value when 'EnabledMetrics' @in_enabled_metrics = false when 'AvailabilityZone', 'HealthStatus', 'InstanceId', 'LifecycleState' @instance[name] = value when 'Instances' @in_instances = false when 'LoadBalancerNames' @in_load_balancer_names = false when 'ProcessName', 'SuspensionReason' @suspended_process[name] = value when 'SuspendedProcesses' @in_suspended_processes = false when 'Key', 'ResourceId', 'ResourceType', 'Value' @tag[name] = value when 'PropagateAtLaunch' @tag[name] = (value == 'true') when 'Tags' @in_tags = false when 'TargetGroupARNs' @in_target_groups = false when 'TerminationPolicies' @in_termination_policies = false when 'LaunchConfigurationName' if @in_instances @instance[name] = value else @auto_scaling_group[name] = value end when 'AutoScalingGroupARN', 'AutoScalingGroupName' @auto_scaling_group[name] = value when 'CreatedTime' @auto_scaling_group[name] = Time.parse(value) when 'DefaultCooldown', 'DesiredCapacity', 'HealthCheckGracePeriod' @auto_scaling_group[name] = value.to_i when 'HealthCheckType' @auto_scaling_group[name] = value when 'MaxSize', 'MinSize' @auto_scaling_group[name] = value.to_i when 'PlacementGroup', 'VPCZoneIdentifier' @auto_scaling_group[name] = value when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeAutoScalingGroupsResponse' @response['DescribeAutoScalingGroupsResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_auto_scaling_instances.rb000066400000000000000000000022731437344660100303300ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeAutoScalingInstances < Fog::Parsers::Base def reset reset_auto_scaling_instance @results = { 'AutoScalingInstances' => [] } @response = { 'DescribeAutoScalingInstancesResult' => {}, 'ResponseMetadata' => {} } end def reset_auto_scaling_instance @auto_scaling_instance = {} end def end_element(name) case name when 'member' @results['AutoScalingInstances'] << @auto_scaling_instance reset_auto_scaling_instance when 'AutoScalingGroupName', 'AvailabilityZone', 'HealthStatus', 'InstanceId', 'LaunchConfigurationName', 'LifecycleState' @auto_scaling_instance[name] = value when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 
'DescribeAutoScalingInstancesResponse' @response['DescribeAutoScalingInstancesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_auto_scaling_notification_types.rb000066400000000000000000000022521437344660100322500ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeAutoScalingNotificationTypes < Fog::Parsers::Base def reset @results = { 'AutoScalingNotificationTypes' => [] } @response = { 'DescribeAutoScalingNotificationTypesResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super case name when 'AutoScalingNotificationTypes' @in_auto_scaling_notification_types = true end end def end_element(name) case name when 'member' if @in_auto_scaling_notification_types @results['AutoScalingNotificationTypes'] << value end when 'AutoScalingNotificationTypes' @in_auto_scaling_notification_types = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeAutoScalingNotificationTypesResponse' @response['DescribeAutoScalingNotificationTypesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_launch_configurations.rb000066400000000000000000000072321437344660100301750ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeLaunchConfigurations < Fog::Parsers::Base def reset reset_launch_configuration reset_block_device_mapping reset_ebs @results = { 'LaunchConfigurations' => [] } @response = { 'DescribeLaunchConfigurationsResult' => {}, 'ResponseMetadata' => {} } end def reset_launch_configuration @launch_configuration = { 'BlockDeviceMappings' => [], 'InstanceMonitoring' => {}, 'SecurityGroups' => [], 'ClassicLinkVPCSecurityGroups' => []} end def reset_block_device_mapping @block_device_mapping = {} end def reset_ebs @ebs = {} end def start_element(name, attrs = []) super case name when 'BlockDeviceMappings' @in_block_device_mappings = true when 'SecurityGroups' @in_security_groups = true when 'ClassicLinkVPCSecurityGroups' @in_classic_link_security_groups = true end end def end_element(name) case name when 'member' if @in_block_device_mappings @launch_configuration['BlockDeviceMappings'] << @block_device_mapping reset_block_device_mapping elsif @in_security_groups @launch_configuration['SecurityGroups'] << value elsif @in_classic_link_security_groups @launch_configuration['ClassicLinkVPCSecurityGroups'] << value else @results['LaunchConfigurations'] << @launch_configuration reset_launch_configuration end when 'DeviceName', 'VirtualName' @block_device_mapping[name] = value when 'SnapshotId', 'VolumeSize', 'VolumeType', 'Iops' @ebs[name] = value when 'Ebs' @block_device_mapping[name] = @ebs reset_ebs when 'EbsOptimized' @launch_configuration[name] = value == 'true' when 'Enabled' @launch_configuration['InstanceMonitoring'][name] = (value == 'true') when 'CreatedTime' @launch_configuration[name] = Time.parse(value) when 'ImageId', 'InstanceType', 'KeyName' @launch_configuration[name] = value when 'LaunchConfigurationARN', 'LaunchConfigurationName', 'ClassicLinkVPCId' @launch_configuration[name] = value when 'KernelId', 'RamdiskId', 'UserData' @launch_configuration[name] = value when 'IamInstanceProfile', 'PlacementTenancy' @launch_configuration[name] = value when 'SpotPrice' @launch_configuration[name] = value.to_f when 'AssociatePublicIpAddress' @in_associate_public_ip = false when 'BlockDeviceMappings' @in_block_device_mappings = false when 
'LaunchConfigurations' @in_launch_configurations = false when 'SecurityGroups' @in_security_groups = false when 'ClassicLinkVPCSecurityGroups' @in_classic_link_security_groups = false when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeLaunchConfigurationsResponse' @response['DescribeLaunchConfigurationsResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_metric_collection_types.rb000066400000000000000000000031531437344660100305310ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeMetricCollectionTypes < Fog::Parsers::Base def reset reset_granularity reset_metric @results = { 'Granularities' => [], 'Metrics' => [] } @response = { 'DescribeMetricCollectionTypesResult' => {}, 'ResponseMetadata' => {} } end def reset_granularity @granularity = {} end def reset_metric @metric = {} end def start_element(name, attrs = []) super case name when 'Granularities' @in_granularities = true when 'Metrics' @in_metrics = true end end def end_element(name) case name when 'member' if @in_granularities @results['Granularities'] << @granularity reset_granularity elsif @in_metrics @results['Metrics'] << @metric reset_metric end when 'Granularity' @granularity[name] = value when 'Granularities' @in_granularities = false when 'Metric' @metric[name] = value when 'Metrics' @in_metrics = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeMetricCollectionTypesResult' @response[name] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_notification_configurations.rb000066400000000000000000000022541437344660100314100ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeNotificationConfigurations < Fog::Parsers::Base def reset reset_notification_configuration @results = { 'NotificationConfigurations' => [] } @response = { 'DescribeNotificationConfigurationsResult' => {}, 'ResponseMetadata' => {} } end def reset_notification_configuration @notification_configuration = {} end def end_element(name) case name when 'member' @results['NotificationConfigurations'] << @notification_configuration reset_notification_configuration when 'AutoScalingGroupName','NotificationType', 'TopicARN' @notification_configuration[name] = value when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeNotificationConfigurationsResponse' @response['DescribeNotificationConfigurationsResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_policies.rb000066400000000000000000000033371437344660100254220ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribePolicies < Fog::Parsers::Base def reset reset_scaling_policy reset_alarm @results = { 'ScalingPolicies' => [] } @response = { 'DescribePoliciesResult' => {}, 'ResponseMetadata' => {} } @in_alarms = false end def reset_scaling_policy @scaling_policy = { 'Alarms' => [] } end def reset_alarm @alarm = {} end def start_element(name, attrs = []) super case name when 'Alarms' @in_alarms = true end end def end_element(name) case name when 'AlarmARN', 'AlarmName' @alarm[name] = value when 'AdjustmentType', 'AutoScalingGroupName', 'PolicyARN', 'PolicyName' @scaling_policy[name] = value when 'Cooldown', 'MinAdjustmentStep', 'ScalingAdjustment' @scaling_policy[name] = value.to_i when 
'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribePoliciesResponse' @response['DescribePoliciesResult'] = @results when 'Alarms' @in_alarms = false when 'member' if @in_alarms @scaling_policy['Alarms'] << @alarm reset_alarm else @results['ScalingPolicies'] << @scaling_policy reset_scaling_policy end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_scaling_activities.rb000066400000000000000000000023241437344660100274520ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeScalingActivities < Fog::Parsers::Base def reset reset_activity @results = { 'Activities' => [] } @response = { 'DescribeScalingActivitiesResult' => {}, 'ResponseMetadata' => {} } end def reset_activity @activity = {} end def end_element(name) case name when 'member' @results['Activities'] << @activity reset_activity when 'ActivityId', 'AutoScalingGroupName', 'Cause', 'Description', 'StatusCode', 'StatusMessage' @activity[name] = value when 'EndTime', 'StartTime' @activity[name] = Time.parse(value) when 'Progress' @activity[name] = value.to_i when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeScalingActivitiesResponse' @response['DescribeScalingActivitiesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_scaling_process_types.rb000066400000000000000000000023331437344660100302100ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeScalingProcessTypes < Fog::Parsers::Base def reset reset_process_type @results = { 'Processes' => [] } @response = { 'DescribeScalingProcessTypesResult' => {}, 'ResponseMetadata' => {} } end def reset_process_type @process_type = {} end def start_element(name, attrs = []) super case name when 'Processes' @in_processes = true end end def end_element(name) case name when 'member' if @in_processes @results['Processes'] << @process_type reset_process_type end when 'ProcessName' @process_type[name] = value when 'Processes' @in_processes = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeScalingProcessTypesResponse' @response['DescribeScalingProcessTypesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_scheduled_actions.rb000066400000000000000000000026441437344660100272730ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeScheduledActions < Fog::Parsers::Base def reset reset_scheduled_update_group_action @results = { 'ScheduledUpdateGroupActions' => [] } @response = { 'DescribeScheduledActionsResult' => {}, 'ResponseMetadata' => {} } end def reset_scheduled_update_group_action @scheduled_update_group_action = {} end def end_element(name) case name when 'member' @results['ScheduledUpdateGroupActions'] << @scheduled_update_group_action reset_scheduled_update_group_action when 'AutoScalingGroupName', 'ScheduledActionARN', 'ScheduledActionName', 'Recurrence' @scheduled_update_group_action[name] = value when 'DesiredCapacity', 'MaxSize', 'MinSize' @scheduled_update_group_action[name] = value.to_i when 'Time', 'StartTime', 'EndTime' @scheduled_update_group_action[name] = Time.parse(value) when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeScheduledActionsResponse' @response['DescribeScheduledActionsResult'] = 
@results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_tags.rb000066400000000000000000000017501437344660100245460ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeTags < Fog::Parsers::Base def reset reset_tag @results = { 'Tags' => [] } @response = { 'DescribeTagsResult' => {}, 'ResponseMetadata' => {} } end def reset_tag @tag = {} end def end_element(name) case name when 'member' @results['Tags'] << @tag reset_tag when 'Key', 'ResourceId', 'ResourceType', 'Value' @tag[name] = value when 'PropagateAtLaunch' @tag[name] = (value == 'true') when 'NextToken' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeTagsResponse' @response['DescribeTagsResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/describe_termination_policy_types.rb000066400000000000000000000021451437344660100307430ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class DescribeTerminationPolicyTypes < Fog::Parsers::Base def reset @results = { 'TerminationPolicyTypes' => [] } @response = { 'DescribeTerminationPolicyTypesResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super case name when 'TerminationPolicyTypes' @in_termination_policy_types = true end end def end_element(name) case name when 'member' if @in_termination_policy_types @results['TerminationPolicyTypes'] << value end when 'TerminationPolicyTypes' @in_termination_policy_types = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeTerminationPolicyTypesResponse' @response['DescribeTerminationPolicyTypesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/put_notification_configuration.rb000066400000000000000000000007761437344660100302640ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class PutNotificationConfiguration < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/put_scaling_policy.rb000066400000000000000000000012121437344660100256300ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class PutScalingPolicy < Fog::Parsers::Base def reset @results = {} @response = { 'PutScalingPolicyResult' => {}, 'ResponseMetadata' => {} } end def end_element(name) case name when 'PolicyARN' @results[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'PutScalingPolicyResponse' @response['PutScalingPolicyResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/auto_scaling/terminate_instance_in_auto_scaling_group.rb000066400000000000000000000020141437344660100322500ustar00rootroot00000000000000module Fog module Parsers module AWS module AutoScaling class TerminateInstanceInAutoScalingGroup < Fog::Parsers::Base def reset @results = { 'Activity' => {} } @response = { 'TerminateInstanceInAutoScalingGroupResult' => {}, 'ResponseMetadata' => {} } end def end_element(name) case name when 'ActivityId', 'AutoScalingGroupName', 'Cause', 'Description', 'StatusCode', 'StatusMessage' @results['Activity'][name] = value when 'EndTime', 'StartTime' @results['Activity'][name] = Time.parse(value) when 'Progress' 
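# Progress is reported by the Auto Scaling API as an integer percentage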
@results['Activity'][name] = value.to_i when 'RequestId' @response['ResponseMetadata'][name] = value when 'TerminateInstanceInAutoScalingGroupResponse' @response['TerminateInstanceInAutoScalingGroupResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/000077500000000000000000000000001437344660100207145ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/check_dns_availability.rb000066400000000000000000000006451437344660100257210ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CheckDNSAvailability < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CheckDNSAvailabilityResult") tag 'FullyQualifiedCNAME', :string tag 'Available', :boolean end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/create_application.rb000066400000000000000000000011711437344660100250670ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CreateApplication < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CreateApplicationResult") tag 'Application', :object tag 'Versions', :string, :list tag 'ConfigurationTemplates', :string, :list tag 'ApplicationName', :string tag 'Description', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/create_application_version.rb000066400000000000000000000012771437344660100266430ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CreateApplicationVersion < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CreateApplicationVersionResult") tag 'ApplicationVersion', :object tag 'ApplicationName', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'SourceBundle', :object tag 'S3Bucket', :string tag 'S3Key', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/create_configuration_template.rb000066400000000000000000000015131437344660100273260ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CreateConfigurationTemplate < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CreateConfigurationTemplateResult") tag 'ApplicationName', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'DeploymentStatus', :string tag 'Description', :string tag 'EnvironmentName', :string tag 'OptionSettings', :object, :list tag 'Namespace', :string tag 'OptionName', :string tag 'Value', :string tag 'SolutionStackName', :string tag 'TemplateName', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/create_environment.rb000066400000000000000000000021271437344660100251320ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CreateEnvironment < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CreateEnvironmentResult") tag 'ApplicationName', :string tag 'CNAME', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'EndpointURL', :string tag 'EnvironmentId', :string tag 'EnvironmentName', :string tag 'Health', :string tag 'Resources', :object tag 
'LoadBalancer', :object tag 'Domain', :string tag 'LoadBalancerName', :string tag 'Listeners', :object, :list tag 'Port', :integer tag 'Protocol', :string tag 'SolutionStackName', :string tag 'Status', :string tag 'TemplateName', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/create_storage_location.rb000066400000000000000000000005661437344660100261270ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class CreateStorageLocation < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("CreateStorageLocationResult") tag 'S3Bucket', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_application_versions.rb000066400000000000000000000013151437344660100273340ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeApplicationVersions < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeApplicationVersionsResult") tag 'ApplicationVersions', :object, :list tag 'ApplicationName', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'SourceBundle', :object tag 'S3Bucket', :string tag 'S3Key', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_applications.rb000066400000000000000000000012071437344660100255670ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeApplications < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeApplicationsResult") tag 'Applications', :object, :list tag 'Versions', :string, :list tag 'ConfigurationTemplates', :string, :list tag 'ApplicationName', :string tag 'Description', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_configuration_options.rb000066400000000000000000000016371437344660100275320ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeConfigurationOptions < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeConfigurationOptionsResult") tag 'SolutionStackName', :string tag 'Options', :object, :list tag 'ChangeSeverity', :string tag 'DefaultValue', :string tag 'MaxLength', :integer tag 'MaxValue', :integer tag 'MinValue', :integer tag 'Name', :string tag 'Namespace', :string tag 'Regex', :object tag 'Label', :string tag 'Pattern', :string tag 'UserDefined', :boolean tag 'ValueOptions', :string, :list tag 'ValueType', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_configuration_settings.rb000066400000000000000000000016071437344660100276740ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeConfigurationSettings < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeConfigurationSettingsResult") tag 'ConfigurationSettings', :object, :list tag 'ApplicationName', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'DeploymentStatus', :string tag 'Description', :string tag 'EnvironmentName', :string tag 'OptionSettings', :object, :list tag 'Namespace', 
:string tag 'OptionName', :string tag 'Value', :string tag 'SolutionStackName', :string tag 'TemplateName', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_environment_resources.rb000066400000000000000000000022721437344660100275420ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeEnvironmentResources < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeEnvironmentResourcesResult") tag 'EnvironmentResources', :object tag 'AutoScalingGroups', :object, :list tag 'Name', :string tag 'EnvironmentName', :string tag 'Instances', :object, :list tag 'Id', :string tag 'LaunchConfigurations', :object, :list tag 'LoadBalancers', :object, :list tag 'Resources', :object, :list tag 'Description', :string tag 'LogicalResourceId', :string tag 'PhysicalResourceId', :string tag 'Type', :string tag 'Properties', :object, :list tag 'RuntimeSources', :object, :list tag 'Parameter', :string tag 'Versions', :object, :list tag 'ApplicationName', :string tag 'VersionLabel', :string tag 'Triggers', :object, :list end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_environments.rb000066400000000000000000000022141437344660100256270ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeEnvironments < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeEnvironmentsResult") tag 'Environments', :object, :list tag 'ApplicationName', :string tag 'CNAME', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'EndpointURL', :string tag 'EnvironmentId', :string tag 'EnvironmentName', :string tag 'Health', :string tag 'Resources', :object tag 'LoadBalancer', :object tag 'Domain', :string tag 'LoadBalancerName', :string tag 'Listeners', :object, :list tag 'Port', :integer tag 'Protocol', :string tag 'SolutionStackName', :string tag 'Status', :string tag 'TemplateName', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/describe_events.rb000066400000000000000000000013131437344660100244030ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class DescribeEvents < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("DescribeEventsResult") tag 'Events', :object, :list tag 'ApplicationName', :string tag 'EnvironmentName', :string tag 'EventDate', :datetime tag 'Message', :string tag 'RequestId', :string tag 'Severity', :string tag 'TemplateName', :string tag 'VersionLabel', :string tag 'NextToken', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/empty.rb000066400000000000000000000006411437344660100224000ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk class Empty < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/list_available_solution_stacks.rb000066400000000000000000000010501437344660100275140ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class ListAvailableSolutionStacks < 
Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("ListAvailableSolutionStacksResult") tag 'SolutionStackDetails', :object, :list tag 'PermittedFileTypes', :string, :list tag 'SolutionStackName', :string tag 'SolutionStacks', :string, :list end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/parser.rb000066400000000000000000000051771437344660100225470ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk class BaseParser < Fog::Parsers::Base def initialize(result_name) @result_name = result_name # Set before super, since super calls reset super() @tags = {} @list_tags = {} end def reset @response = { @result_name => {}, 'ResponseMetadata' => {} } # Push root object to top of stack @parse_stack = [ { :type => :object, :value => @response[@result_name]} ] end def tag name, *traits if traits.delete(:list) @list_tags[name] = true end if traits.length == 1 @tags[name] = traits.last else raise "Too many traits specified, only specify :list or a type" end end def start_element(name, attrs = []) super if name == 'member' if @parse_stack.last[:type] == :object @parse_stack.last[:value] << {} # Push any empty object end elsif @list_tags.key?(name) set_value(name, [], :array) # Set an empty array @parse_stack.push({ :type => @tags[name], :value => get_parent[name] }) elsif @tags[name] == :object set_value(name, {}, :object) @parse_stack.push({ :type => @tags[name], :value => get_parent[name] }) end end def end_element(name) case name when 'member' if @parse_stack.last[:type] != :object @parse_stack.last[:value] << value end when 'RequestId' @response['ResponseMetadata'][name] = value else if @list_tags.key?(name) || @tags[name] == :object @parse_stack.pop() elsif @tags.key?(name) set_value(name, value, @tags[name]) end end end def get_parent parent = @parse_stack.last[:value] parent.is_a?(Array) ? 
parent.last : parent end def set_value(name, value, type) case type when :datetime get_parent[name] = Time.parse value when :boolean get_parent[name] = value == "true" # True only if value is true when :integer get_parent[name] = value.to_i else get_parent[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/retrieve_environment_info.rb000066400000000000000000000010451437344660100265250ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class RetrieveEnvironmentInfo < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("RetrieveEnvironmentInfoResult") tag 'EnvironmentInfo', :object, :list tag 'Ec2InstanceId', :string tag 'InfoType', :string tag 'Message', :string tag 'SampleTimestamp', :datetime end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/terminate_environment.rb000066400000000000000000000021351437344660100256560ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class TerminateEnvironment < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("TerminateEnvironmentResult") tag 'ApplicationName', :string tag 'CNAME', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'EndpointURL', :string tag 'EnvironmentId', :string tag 'EnvironmentName', :string tag 'Health', :string tag 'Resources', :object tag 'LoadBalancer', :object tag 'Domain', :string tag 'LoadBalancerName', :string tag 'Listeners', :object, :list tag 'Port', :integer tag 'Protocol', :string tag 'SolutionStackName', :string tag 'Status', :string tag 'TemplateName', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/update_application.rb000066400000000000000000000011711437344660100251060ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class UpdateApplication < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("UpdateApplicationResult") tag 'Application', :object tag 'Versions', :string, :list tag 'ConfigurationTemplates', :string, :list tag 'ApplicationName', :string tag 'Description', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/update_application_version.rb000066400000000000000000000012771437344660100266620ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class UpdateApplicationVersion < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("UpdateApplicationVersionResult") tag 'ApplicationVersion', :object tag 'ApplicationName', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'SourceBundle', :object tag 'S3Bucket', :string tag 'S3Key', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/update_configuration_template.rb000066400000000000000000000015131437344660100273450ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class UpdateConfigurationTemplate < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("UpdateConfigurationTemplateResult") tag 'ApplicationName', :string tag 'DateCreated', :datetime 
tag 'DateUpdated', :datetime tag 'DeploymentStatus', :string tag 'Description', :string tag 'EnvironmentName', :string tag 'OptionSettings', :object, :list tag 'Namespace', :string tag 'OptionName', :string tag 'Value', :string tag 'SolutionStackName', :string tag 'TemplateName', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/update_environment.rb000066400000000000000000000021271437344660100251510ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class UpdateEnvironment < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("UpdateEnvironmentResult") tag 'ApplicationName', :string tag 'CNAME', :string tag 'DateCreated', :datetime tag 'DateUpdated', :datetime tag 'Description', :string tag 'EndpointURL', :string tag 'EnvironmentId', :string tag 'EnvironmentName', :string tag 'Health', :string tag 'Resources', :object tag 'LoadBalancer', :object tag 'Domain', :string tag 'LoadBalancerName', :string tag 'Listeners', :object, :list tag 'Port', :integer tag 'Protocol', :string tag 'SolutionStackName', :string tag 'Status', :string tag 'TemplateName', :string tag 'VersionLabel', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/beanstalk/validate_configuration_settings.rb000066400000000000000000000010371437344660100277020ustar00rootroot00000000000000module Fog module Parsers module AWS module ElasticBeanstalk require 'fog/aws/parsers/beanstalk/parser' class ValidateConfigurationSettings < Fog::Parsers::AWS::ElasticBeanstalk::BaseParser def initialize super("ValidateConfigurationSettingsResult") tag 'Messages', :object, :list tag 'Message', :string tag 'Namespace', :string tag 'OptionName', :string tag 'Severity', :string end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/000077500000000000000000000000001437344660100175145ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/cdn/distribution.rb000066400000000000000000000037631437344660100225710ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class Distribution < Fog::Parsers::Base def reset @response = { 'DistributionConfig' => { 'CNAME' => [], 'Logging' => {}, 'TrustedSigners' => [] } } end def start_element(name, attrs = []) super case name when 'CustomOrigin', 'S3Origin' @origin = name @response['DistributionConfig'][@origin] = {} end end def end_element(name) case name when 'AwsAccountNumber' @response['DistributionConfig']['TrustedSigners'] << value when 'Bucket', 'Prefix' @response['DistributionConfig']['Logging'][name] = value when 'CNAME' @response['DistributionConfig']['CNAME'] << value when 'DNSName', 'OriginAccessIdentity', 'OriginProtocolPolicy' @response['DistributionConfig'][@origin][name] = value when 'DomainName', 'Id', 'Status' @response[name] = value when 'CallerReference', 'Comment', 'DefaultRootObject', 'Origin', 'OriginAccessIdentity' @response['DistributionConfig'][name] = value when 'Enabled' if value == 'true' @response['DistributionConfig'][name] = true else @response['DistributionConfig'][name] = false end when 'HTTPPort', 'HTTPSPort' @response['DistributionConfig'][@origin][name] = value.to_i when 'InProgressInvalidationBatches' @response[name] = value.to_i when 'LastModifiedTime' @response[name] = Time.parse(value) when 'Protocol' @response['DistributionConfig']['RequireProtocols'] = value when 'Self' @response['DistributionConfig']['TrustedSigners'] << 'Self' end end end end end end end 
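The CDN parsers above are plain SAX handlers, so they can be exercised on their own, outside a live request. The sketch below is illustrative only and is not part of the gem: it assumes that Fog::Parsers::Base, supplied by the fog-xml dependency, behaves as a Nokogiri::XML::SAX::Document exposing a response reader (which is how fog-aws drives these classes internally), and the XML body is a trimmed, hypothetical CloudFront response.

require 'nokogiri'
require 'fog/aws'                            # pulls in fog-core / fog-xml (Fog::Parsers::Base)
require 'fog/aws/parsers/cdn/distribution'   # the Distribution parser defined above

# Hypothetical, heavily trimmed response body for illustration only.
xml = "<Distribution><Id>EDFDVBD6EXAMPLE</Id><Status>Deployed</Status>" \
      "<DistributionConfig><CNAME>assets.example.com</CNAME><Enabled>true</Enabled>" \
      "</DistributionConfig></Distribution>"

parser = Fog::Parsers::AWS::CDN::Distribution.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)

parser.response
# => { "DistributionConfig" => { "CNAME"          => ["assets.example.com"],
#                                "Logging"        => {},
#                                "TrustedSigners" => [],
#                                "Enabled"        => true },
#      "Id"     => "EDFDVBD6EXAMPLE",
#      "Status" => "Deployed" }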
fog-aws-3.18.0/lib/fog/aws/parsers/cdn/get_distribution_list.rb000066400000000000000000000035521437344660100244570ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class GetDistributionList < Fog::Parsers::Base def reset @distribution_summary = { 'CNAME' => [], 'TrustedSigners' => [] } @response = { 'DistributionSummary' => [] } end def start_element(name, attrs = []) super case name when 'CustomOrigin', 'S3Origin' @origin = name @distribution_summary[@origin] = {} end end def end_element(name) case name when 'DistributionSummary' @response['DistributionSummary'] << @distribution_summary @distribution_summary = { 'CNAME' => [], 'TrustedSigners' => [] } when 'Comment', 'DomainName', 'Id', 'Origin', 'Status' @distribution_summary[name] = value when 'CNAME' @distribution_summary[name] << value when 'DNSName', 'OriginAccessIdentity', 'OriginProtocolPolicy' @distribution_summary[@origin][name] = value when 'Enabled' if value == 'true' @distribution_summary[name] = true else @distribution_summary[name] = false end when 'HTTPPort', 'HTTPSPort' @distribution_summary[@origin][name] = value.to_i when 'LastModifiedTime' @distribution_summary[name] = Time.parse(value) when 'IsTruncated' if value == 'true' @response[name] = true else @response[name] = false end when 'Marker', 'NextMarker' @response[name] = value when 'MaxItems' @response[name] = value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/get_invalidation.rb000066400000000000000000000012471437344660100233650ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class GetInvalidation < Fog::Parsers::Base def reset @response = { 'InvalidationBatch' => { 'Path' => [] } } end def start_element(name, attrs = []) super end def end_element(name) case name when 'Path' @response['InvalidationBatch'][name] << value when 'Id', 'Status', 'CreateTime' @response[name] = value when 'CallerReference' @response['InvalidationBatch'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/get_invalidation_list.rb000066400000000000000000000017701437344660100244210ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class GetInvalidationList < Fog::Parsers::Base def reset @invalidation_summary = { } @response = { 'InvalidationSummary' => [] } end def start_element(name, attrs = []) super end def end_element(name) case name when 'InvalidationSummary' @response['InvalidationSummary'] << @invalidation_summary @invalidation_summary = {} when 'Id', 'Status' @invalidation_summary[name] = @value when 'IsTruncated' if @value == 'true' @response[name] = true else @response[name] = false end when 'Marker', 'NextMarker' @response[name] = @value when 'MaxItems' @response[name] = @value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/get_streaming_distribution_list.rb000066400000000000000000000033731437344660100265310ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class GetStreamingDistributionList < Fog::Parsers::Base def reset @distribution_summary = { 'CNAME' => [], 'TrustedSigners' => [] } @response = { 'StreamingDistributionSummary' => [] } end def start_element(name, attrs = []) super case name when 'S3Origin' @origin = name @distribution_summary[@origin] = {} end end def end_element(name) case name when 'StreamingDistributionSummary' @response['StreamingDistributionSummary'] << @distribution_summary @distribution_summary = { 'CNAME' => [], 'TrustedSigners' => [] } when 'Comment', 'DomainName', 
'Id', 'Status' @distribution_summary[name] = @value when 'CNAME' @distribution_summary[name] << @value when 'DNSName', 'OriginAccessIdentity' @distribution_summary[@origin][name] = @value when 'Enabled' if @value == 'true' @distribution_summary[name] = true else @distribution_summary[name] = false end when 'LastModifiedTime' @distribution_summary[name] = Time.parse(@value) when 'IsTruncated' if @value == 'true' @response[name] = true else @response[name] = false end when 'Marker', 'NextMarker' @response[name] = @value when 'MaxItems' @response[name] = @value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/post_invalidation.rb000066400000000000000000000011311437344660100235630ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class PostInvalidation < Fog::Parsers::Base def reset @response = { 'InvalidationBatch' => { 'Path' => [] } } end def end_element(name) case name when 'CallerReference' @response['InvalidationBatch'][name] = value when 'CreateTime', 'Id', 'Status' @response[name] = value when 'Path' @response['InvalidationBatch'][name] << value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cdn/streaming_distribution.rb000066400000000000000000000041641437344660100246360ustar00rootroot00000000000000module Fog module Parsers module AWS module CDN class StreamingDistribution < Fog::Parsers::Base def reset @response = { 'StreamingDistributionConfig' => { 'CNAME' => [], 'Logging' => {}, 'TrustedSigners' => [] } } end def start_element(name, attrs = []) super case name when 'CustomOrigin', 'S3Origin' @origin = name @response['StreamingDistributionConfig'][@origin] = {} end end def end_element(name) case name when 'AwsAccountNumber' @response['StreamingDistributionConfig']['TrustedSigners'] << @value when 'Bucket', 'Prefix' @response['StreamingDistributionConfig']['Logging'][name] = @value when 'CNAME' @response['StreamingDistributionConfig']['CNAME'] << @value when 'DNSName', 'OriginAccessIdentity', 'OriginProtocolPolicy' @response['StreamingDistributionConfig'][@origin][name] = @value when 'DomainName', 'Id', 'Status' @response[name] = @value when 'CallerReference', 'Comment', 'DefaultRootObject', 'Origin', 'OriginAccessIdentity' @response['StreamingDistributionConfig'][name] = @value when 'Enabled' if @value == 'true' @response['StreamingDistributionConfig'][name] = true else @response['StreamingDistributionConfig'][name] = false end when 'HTTPPort', 'HTTPSPort' @response['StreamingDistributionConfig'][@origin][name] = @value.to_i when 'InProgressInvalidationBatches' @response[name] = @value.to_i when 'LastModifiedTime' @response[name] = Time.parse(@value) when 'Protocol' @response['StreamingDistributionConfig']['RequireProtocols'] = @value when 'Self' @response['StreamingDistributionConfig']['TrustedSigners'] << 'Self' end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/000077500000000000000000000000001437344660100221345ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/basic.rb000066400000000000000000000005621437344660100235450ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class Basic < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value when 'NextToken' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/create_change_set.rb000066400000000000000000000004771437344660100261140ustar00rootroot00000000000000module Fog module Parsers module 
AWS module CloudFormation class CreateChangeSet < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'Id' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/create_stack.rb000066400000000000000000000005001437344660100251040ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class CreateStack < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'StackId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_account_limits.rb000066400000000000000000000011271437344660100273370ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeAccountLimits < Fog::Parsers::Base def reset @limit = {} @response = { 'AccountLimits' => [] } end def end_element(name) case name when 'Name', 'Value' @limit[name] = value when 'member' @response['AccountLimits'] << @limit @limit = {} when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_change_set.rb000066400000000000000000000104671437344660100264310ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeChangeSet < Fog::Parsers::Base def reset @response = fresh_change_set reset_parameter reset_change reset_resource_change reset_resource_change_detail reset_resource_target_definition end def reset_parameter @parameter = {} end def reset_change @change = {} end def reset_resource_change @resource_change = {'Details' => [], 'Scope' => [] } end def reset_resource_change_detail @resource_change_detail = {} end def reset_resource_target_definition @resource_target_definition = {} end def fresh_change_set {'Capabilities' => [], 'Changes' => [], 'NotificationARNs' => [], 'Parameters' => [], 'Tags' => []} end def start_element(name, attrs=[]) super case name when 'Capabilities' @in_capabilities = true when 'Changes' @in_changes = true when 'ResourceChange' @in_resource_change = true when 'Scope' @in_scope = true when 'Details' @in_details = true when 'Target' @in_target = true when 'NotificationARNs' @in_notification_arns = true when 'Parameters' @in_parameters = true when 'Tags' @in_tags = true end end def end_element(name) case name when 'ChangeSetId', 'ChangeSetName', 'Description', 'ExecutionStatus', 'StackId', 'StackName', 'StatusReason', 'Status' @response[name] = value when 'CreationTime' @response[name] = Time.parse(value) when 'member' if @in_capabilities @response['Capabilities'] << value elsif @in_scope @resource_change['Scope'] << value elsif @in_notification_arns @response['NotificationARNs'] << value elsif @in_parameters @response['Parameters'] << @parameter reset_parameter elsif @in_tags @response['Tags'] << @tag reset_tag elsif @in_details @resource_change['Details'] << @resource_change_detail reset_resource_change_detail elsif @in_changes @response['Changes'] << @change reset_change end when 'ParameterValue', 'ParameterKey' @parameter[name] = value if @in_parameters when 'Parameters' @in_parameters = false when 'Value', 'Key' @tag[name] = value if @in_tags when 'Tags' @in_tags = false when 'Capabilities' @in_capabilities = false when 'Scope' @in_scope = false when 'NotificationARNs' @in_notification_arns = false when 'Type' @change[name] = value if @in_changes when 'Changes' @in_changes = false when 'ResourceChange' if @in_resource_change @change[name] = @resource_change @in_resource_change = false end when 
'Action','LogicalResourceId','PhysicalResourceId','Replacement','ResourceType' @resource_change[name] = value if @in_resource_change when 'Details' @in_details = false when 'CausingEntity','ChangeSource','Evaluation' if @in_details @resource_change_detail[name] = value end when 'Attribute','Name','RequiresRecreation' if @in_target @resource_target_definition[name] = value end when 'Target' if @in_target @resource_change_detail[name] = @resource_target_definition @in_target = false end end end end end end end endfog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_stack_events.rb000066400000000000000000000014521437344660100270140ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeStackEvents < Fog::Parsers::Base def reset @event = {} @response = { 'StackEvents' => [] } end def end_element(name) case name when 'EventId', 'LogicalResourceId', 'PhysicalResourceId', 'ResourceProperties', 'ResourceStatus', 'ResourceStatusReason', 'ResourceType', 'StackId', 'StackName' @event[name] = value when 'member' @response['StackEvents'] << @event @event = {} when 'RequestId' @response[name] = value when 'Timestamp' @event[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_stack_resource.rb000066400000000000000000000015341437344660100273400ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeStackResource < Fog::Parsers::Base def reset @resource = {} @response = { 'StackResourceDetail' => {} } end def end_element(name) case name when 'Description','LogicalResourceId', 'Metadata', 'PhysicalResourceId', 'ResourceStatus', 'ResourceStatusReason', 'ResourceType', 'StackId', 'StackName' @resource[name] = value when 'StackResourceDetail' @response['StackResourceDetail'] = @resource @resource = {} when 'RequestId' @response[name] = value when 'LastUpdatedTimestamp' @resource[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_stack_resources.rb000066400000000000000000000014111437344660100275150ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeStackResources < Fog::Parsers::Base def reset @resource = {} @response = { 'StackResources' => [] } end def end_element(name) case name when 'StackId', 'StackName', 'LogicalResourceId', 'PhysicalResourceId', 'ResourceType', 'ResourceStatus' @resource[name] = value when 'member' @response['StackResources'] << @resource @resource = {} when 'RequestId' @response[name] = value when 'Timestamp' @resource[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/describe_stacks.rb000066400000000000000000000052661437344660100256220ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class DescribeStacks < Fog::Parsers::Base def reset @stack = { 'Outputs' => [], 'Parameters' => [], 'Capabilities' => [], 'Tags' => [] } @output = {} @parameter = {} @tag = {} @response = { 'Stacks' => [] } end def start_element(name, attrs = []) super case name when 'Outputs' @in_outputs = true when 'Parameters' @in_parameters = true when 'Capabilities' @in_capabilities = true when 'Tags' @in_tags = true end end def end_element(name) if @in_outputs case name when 'OutputKey', 'OutputValue', 'Description' @output[name] = value when 'member' @stack['Outputs'] << @output @output = {} when 'Outputs' @in_outputs = false end elsif @in_parameters case 
name when 'ParameterKey', 'ParameterValue' @parameter[name] = value when 'member' @stack['Parameters'] << @parameter @parameter = {} when 'Parameters' @in_parameters = false end elsif @in_tags case name when 'Key', 'Value' @tag[name] = value when 'member' @stack['Tags'] << @tag @tag = {} when 'Tags' @in_tags = false end elsif @in_capabilities case name when 'member' @stack['Capabilities'] << value when 'Capabilities' @in_capabilities = false end else case name when 'member' @response['Stacks'] << @stack @stack = { 'Outputs' => [], 'Parameters' => [], 'Capabilities' => [], 'Tags' => []} when 'RequestId' @response[name] = value when 'CreationTime' @stack[name] = Time.parse(value) when 'DisableRollback' case value when 'false' @stack[name] = false when 'true' @stack[name] = true end when 'StackName', 'StackId', 'StackStatus' @stack[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/estimate_template_cost.rb000066400000000000000000000005051437344660100272170ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class EstimateTemplateCost < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'Url' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/get_stack_policy.rb000066400000000000000000000005131437344660100260030ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class GetStackPolicy < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'StackPolicyBody' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/get_template.rb000066400000000000000000000005051437344660100251330ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class GetTemplate < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'TemplateBody' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/get_template_summary.rb000066400000000000000000000035301437344660100267110ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class GetTemplateSummary < Fog::Parsers::Base def reset reset_parameter @response = {'Capabilities' => [],'ResourceTypes' => '','Parameters' => [] } end def reset_parameter @parameter = {'AllowedValues' => []} end def start_element(name, attrs=[]) super case name when 'Capabilities' @in_capabilities = true when 'Parameters' @in_parameters = true when 'ResourceTypes' @in_resource_types = true end end def end_element(name) case name when 'member' if @in_capabilities @response['Capabilities'] << value elsif @in_resource_types @response['ResourceTypes'] << value elsif @in_parameters @response['Parameters'] << @parameter reset_parameter end when 'DefaultValue', 'NoEcho', 'ParameterKey', 'ParameterType', 'ParameterType' @parameter[name] = value if @in_parameters when 'Description' if @in_parameters @parameter[name] = value else @response[name] = value end when 'ParameterConstraints' @parameter['AllowedValues'] << value if @in_parameters when 'RequestId' @response[name] = value when 'Parameters' @in_parameters = false when 'ResourceTypes' @in_resource_types = false when 'Capabilities' @in_capabilities = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/list_change_sets.rb000066400000000000000000000015251437344660100260020ustar00rootroot00000000000000module Fog module Parsers module AWS module 
CloudFormation class ListChangeSets < Fog::Parsers::Base def reset @change_set = {} @response = { 'Summaries' => [] } end def end_element(name) case name when 'ChangeSetId', 'ChangeSetName', 'Description', 'ExecutionStatus', 'StackId', 'StackName', 'Status', 'StackReason' @change_set[name] = value when 'member' @response['Summaries'] << @change_set @change_set = {} when 'RequestId' @response[name] = value when 'CreationTime' @change_set[name] = Time.parse(value) when 'NextToken' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/list_stack_resources.rb000066400000000000000000000015131437344660100267130ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class ListStackResources < Fog::Parsers::Base def reset @resource = {} @response = { 'StackResourceSummaries' => [] } end def end_element(name) case name when 'ResourceStatus', 'LogicalResourceId', 'PhysicalResourceId', 'ResourceType' @resource[name] = value when 'member' @response['StackResourceSummaries'] << @resource @resource = {} when 'LastUpdatedTimestamp' @resource[name] = Time.parse(value) when 'RequestId' @response[name] = value when 'NextToken' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/list_stacks.rb000066400000000000000000000015341437344660100250070ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class ListStacks < Fog::Parsers::Base def reset @stack = {} @response = { 'StackSummaries' => [] } end def end_element(name) case name when 'StackId', 'StackStatus', 'StackName', 'TemplateDescription' @stack[name] = value when 'member' @response['StackSummaries'] << @stack @stack = {} when 'RequestId' @response[name] = value when 'CreationTime' @stack[name] = Time.parse(value) when 'DeletionTime' @stack[name] = Time.parse(value) when 'NextToken' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/update_stack.rb000066400000000000000000000005001437344660100251230ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class UpdateStack < Fog::Parsers::Base def end_element(name) case name when 'RequestId', 'StackId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_formation/validate_template.rb000066400000000000000000000023451437344660100261510ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudFormation class ValidateTemplate < Fog::Parsers::Base def reset @parameter = {} @response = { 'Parameters' => [] } end def start_element(name, attrs = []) super case name when 'Parameters' @in_parameters = true end end def end_element(name) case name when 'DefaultValue', 'ParameterKey' @parameter[name] = value when 'Description' if @in_parameters @parameter[name] = value else @response[name] = value end when 'RequestId' @response[name] = value when 'member' @response['Parameters'] << @parameter @parameter = {} when 'NoEcho' case value when 'false' @parameter[name] = false when 'true' @parameter[name] = true end when 'Parameters' @in_parameters = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/000077500000000000000000000000001437344660100212445ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/delete_alarms.rb000066400000000000000000000007561437344660100244020ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class DeleteAlarms 
< Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/describe_alarm_history.rb000066400000000000000000000021671437344660100263140ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class DescribeAlarmHistory < Fog::Parsers::Base def reset @response = { 'DescribeAlarmHistoryResult' => {'AlarmHistoryItems' => []}, 'ResponseMetadata' => {} } reset_alarm_history_item end def reset_alarm_history_item @alarm_history_item = {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'AlarmName', 'HistoryItemType', 'HistorySummary' @alarm_history_item[name] = value when 'Timestamp' @alarm_history_item[name] = Time.parse value when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextToken' @response['ResponseMetadata'][name] = value when 'member' @response['DescribeAlarmHistoryResult']['AlarmHistoryItems'] << @alarm_history_item reset_alarm_history_item end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/describe_alarms.rb000066400000000000000000000071571437344660100247220ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class DescribeAlarms < Fog::Parsers::Base def reset @response = { 'DescribeAlarmsResult' => {'MetricAlarms' => []}, 'ResponseMetadata' => {} } reset_metric_alarms end def reset_metric_alarms @metric_alarms = { 'Dimensions' => [], 'AlarmActions' => [], 'OKActions' => [], 'InsufficientDataActions' => [] } end def reset_dimension @dimension = {} end def reset_alarm_actions @alarm_actions = {} end def reset_ok_actions @ok_actions = {} end def reset_insufficient_data_actions @insufficient_data_actions = {} end def start_element(name, attrs = []) super case name when 'Dimensions' @in_dimensions = true when 'AlarmActions' @in_alarm_actions = true when 'OKActions' @in_ok_actions = true when 'InsufficientDataActions' @in_insufficient_data_actions = true when 'member' reset_dimension if @in_dimensions reset_alarm_actions if @in_alarm_actions reset_ok_actions if @in_ok_actions reset_insufficient_data_actions if @in_insufficient_data_actions end end def end_element(name) case name when 'Name', 'Value' @dimension[name] = value when 'AlarmConfigurationUpdatedTimestamp', 'StateUpdatedTimestamp' @metric_alarms[name] = Time.parse value when 'Period', 'EvaluationPeriods' @metric_alarms[name] = value.to_i when 'Threshold' @metric_alarms[name] = value.to_f when 'AlarmActions' @in_alarm_actions = false when 'OKActions' @in_ok_actions = false when 'InsufficientDataActions' @in_insufficient_data_actions = false when 'AlarmName', 'Namespace', 'MetricName', 'AlarmDescription', 'AlarmArn', 'Unit', 'StateValue', 'Statistic', 'ComparisonOperator', 'StateReason', 'ActionsEnabled' @metric_alarms[name] = value when 'StateUpdatedTimestamp', 'AlarmConfigurationUpdatedTimestamp' @metric_alarms[name] = Time.parse value when 'Dimensions' @in_dimensions = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextToken' @response['ResponseMetadata'][name] = value when 'member' if @in_dimensions @metric_alarms['Dimensions'] << @dimension elsif @in_alarm_actions @metric_alarms['AlarmActions'] << value.to_s.strip elsif @in_ok_actions @metric_alarms['OKActions'] << value.to_s.strip elsif @in_insufficient_data_actions 
@metric_alarms['InsufficientDataActions'] << value.to_s.strip elsif @metric_alarms.key?('AlarmName') @response['DescribeAlarmsResult']['MetricAlarms'] << @metric_alarms reset_metric_alarms elsif @response['DescribeAlarmsResult']['MetricAlarms'].last != nil @response['DescribeAlarmsResult']['MetricAlarms'].last.merge!( @metric_alarms) end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/describe_alarms_for_metric.rb000066400000000000000000000046011437344660100271220ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class DescribeAlarmsForMetric < Fog::Parsers::Base def reset @response = { 'DescribeAlarmsForMetricResult' => {'MetricAlarms' => []}, 'ResponseMetadata' => {} } reset_metric_alarms end def reset_metric_alarms @metric_alarms = {'Dimensions' => []} end def reset_dimension @dimension = {} end def start_element(name, attrs = []) super case name when 'Dimensions' @in_dimensions = true when 'member' if @in_dimensions reset_dimension end end end def end_element(name) case name when 'Name', 'Value' @dimension[name] = value when 'Period', 'EvaluationPeriods' @metric_alarms[name] = value.to_i when 'Threshold' @metric_alarms[name] = value.to_f when 'AlarmActions', 'OKActions', 'InsufficientDataActions' @metric_alarms[name] = value.to_s.strip when 'AlarmName', 'Namespace', 'MetricName', 'AlarmDescription', 'AlarmArn', 'Unit', 'StateValue', 'Statistic', 'ComparisonOperator', 'StateReason', 'ActionsEnabled' @metric_alarms[name] = value when 'StateUpdatedTimestamp', 'AlarmConfigurationUpdatedTimestamp' @metric_alarms[name] = Time.parse value when 'Dimensions' @in_dimensions = false when 'NextToken' @response['ResponseMetadata'][name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'member' if !@in_dimensions if @metric_alarms.key?('AlarmName') @response['DescribeAlarmsForMetricResult']['MetricAlarms'] << @metric_alarms reset_metric_alarms elsif @response['DescribeAlarmsForMetricResult']['MetricAlarms'].last != nil @response['DescribeAlarmsForMetricResult']['MetricAlarms'].last.merge!( @metric_alarms) end else @metric_alarms['Dimensions'] << @dimension end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/disable_alarm_actions.rb000066400000000000000000000007641437344660100260770ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class DisableAlarmActions < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/enable_alarm_actions.rb000066400000000000000000000007631437344660100257210ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class EnableAlarmActions < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/get_metric_statistics.rb000066400000000000000000000021641437344660100261700ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class GetMetricStatistics < Fog::Parsers::Base def reset @response = { 'GetMetricStatisticsResult' => {'Datapoints' => []}, 'ResponseMetadata' => {} } reset_datapoint end def reset_datapoint 
@datapoint = {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'Average', 'Maximum', 'Minimum', 'SampleCount', 'Sum' @datapoint[name] = value.to_f when 'Unit' @datapoint[name] = value when 'Timestamp' @datapoint[name] = Time.parse value when 'member' @response['GetMetricStatisticsResult']['Datapoints'] << @datapoint reset_datapoint when 'Label' @response['GetMetricStatisticsResult'][name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/list_metrics.rb000066400000000000000000000027271437344660100243020ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class ListMetrics < Fog::Parsers::Base def reset @response = { 'ListMetricsResult' => {'Metrics' => []}, 'ResponseMetadata' => {} } reset_metric end def reset_metric @metric = {'Dimensions' => []} end def reset_dimension @dimension = {} end def start_element(name, attrs = []) super case name when 'Dimensions' @in_dimensions = true when 'member' if @in_dimensions reset_dimension end end end def end_element(name) case name when 'Name', 'Value' @dimension[name] = value when 'Namespace', 'MetricName' @metric[name] = value when 'Dimensions' @in_dimensions = false when 'NextMarker', 'NextToken' @response['ListMetricsResult'][name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'member' if !@in_dimensions @response['ListMetricsResult']['Metrics'] << @metric reset_metric else @metric['Dimensions'] << @dimension end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/put_metric_alarm.rb000066400000000000000000000007651437344660100251300ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class PutMetricAlarm < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/put_metric_data.rb000066400000000000000000000007571437344660100247460ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class PutMetricData < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/cloud_watch/set_alarm_state.rb000066400000000000000000000007561437344660100247500ustar00rootroot00000000000000module Fog module Parsers module AWS module CloudWatch class SetAlarmState < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/000077500000000000000000000000001437344660100204245ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/compute/allocate_address.rb000066400000000000000000000005301437344660100242400ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AllocateAddress < Fog::Parsers::Base def end_element(name) case name when 'publicIp', 'requestId', 'domain', 'allocationId' @response[name] = value end end end end end end end 
fog-aws-3.18.0/lib/fog/aws/parsers/compute/assign_private_ip_addresses.rb000066400000000000000000000007521437344660100265200ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AssignPrivateIpAddresses < Fog::Parsers::Base def end_element(name) case name when 'requestId' @response[name] = value when 'return' if value == 'true' @response[name] = true else @response[name] = false end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/associate_address.rb000066400000000000000000000007631437344660100244370ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AssociateAddress < Fog::Parsers::Base def end_element(name) case name when 'requestId', 'associationId' @response[name] = value when 'return' if value == 'true' @response[name] = true else @response[name] = false end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/associate_route_table.rb000066400000000000000000000005071437344660100253130ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AssociateRouteTable < Fog::Parsers::Base def end_element(name) case name when 'requestId', 'associationId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/attach_network_interface.rb000066400000000000000000000005111437344660100260030ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AttachNetworkInterface < Fog::Parsers::Base def end_element(name) case name when 'requestId', 'attachmentId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/attach_volume.rb000066400000000000000000000006551437344660100236120ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class AttachVolume < Fog::Parsers::Base def end_element(name) case name when 'attachTime' @response[name] = Time.parse(value) when 'device', 'instanceId', 'requestId', 'status', 'volumeId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/basic.rb000066400000000000000000000007271437344660100220400ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class Basic < Fog::Parsers::Base def end_element(name) case name when 'requestId' @response[name] = value when 'return' if value == 'true' @response[name] = true else @response[name] = false end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/cancel_spot_instance_requests.rb000066400000000000000000000012661437344660100270670ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CancelSpotInstanceRequests < Fog::Parsers::Base def reset @spot_instance_request = {} @response = { 'spotInstanceRequestSet' => [] } end def end_element(name) case name when 'item' @response['spotInstanceRequestSet'] << @spot_instance_request @spot_instance_request = {} when 'requestId' @response[name] = value when 'spotInstanceRequestId', 'state' @spot_instance_request[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/copy_image.rb000066400000000000000000000005551437344660100230720ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CopyImage < Fog::Parsers::Base def end_element(name) case name when 'imageId' @response[name] = value when 'requestId' @response[name] = value end end end end end end end 
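Several of the simpler Compute requests share the Basic parser a few entries above, whose only job is to capture requestId and coerce the return flag into a real boolean. A minimal, purely illustrative sketch, under the same fog-xml assumptions noted earlier and with a hypothetical response body:

require 'nokogiri'
require 'fog/aws'
require 'fog/aws/parsers/compute/basic'

# Hypothetical body; any Compute action answering with <return>true</return> fits this shape.
xml = "<DeleteVolumeResponse><requestId>9d5ed0bc-example</requestId>" \
      "<return>true</return></DeleteVolumeResponse>"

parser = Fog::Parsers::AWS::Compute::Basic.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)

parser.response  # => { "requestId" => "9d5ed0bc-example", "return" => true }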
fog-aws-3.18.0/lib/fog/aws/parsers/compute/copy_snapshot.rb000066400000000000000000000005631437344660100236460ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CopySnapshot < Fog::Parsers::Base def end_element(name) case name when 'snapshotId' @response[name] = value when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_dhcp_options.rb000066400000000000000000000043261437344660100247720ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateDhcpOptions < Fog::Parsers::Base def reset @dhcp_options = { 'dhcpConfigurationSet' => {}, 'tagSet' => {} } @response = { 'dhcpOptionsSet' => [] } @tag = {} @value_set = [] @dhcp_configuration = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'dhcpConfigurationSet' @in_dhcp_configuration_set = true when 'valueSet' @in_value_set = true end end def end_element(name) if @in_tag_set case name when 'item' @dhcp_options['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_dhcp_configuration_set case name when 'item' unless @in_value_set @dhcp_options['dhcpConfigurationSet'][@dhcp_configuration['key']] = @value_set @value_set=[] @dhcp_configuration = {} end when 'key', 'value' if !@in_value_set @dhcp_configuration[name] = value else @value_set << value end when 'valueSet' @in_value_set = false when 'dhcpConfigurationSet' @in_dhcp_configuration_set = false end else case name when 'dhcpOptionsId' @dhcp_options[name] = value when 'dhcpOptions' @response['dhcpOptionsSet'] << @dhcp_options @dhcp_options = { 'tagSet' => {} } @dhcp_options = { 'dhcpOptionsSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_image.rb000066400000000000000000000005521437344660100233600ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateImage < Fog::Parsers::Base def end_element(name) case name when 'instanceId', 'requestId', 'name', 'description', 'noReboot', 'imageId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_internet_gateway.rb000066400000000000000000000034771437344660100256600ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateInternetGateway < Fog::Parsers::Base def reset @internet_gateway = { 'attachmentSet' => {}, 'tagSet' => {} } @response = { 'internetGatewaySet' => [] } @tag = {} @attachment = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'attachmentSet' @in_attachment_set = true end end def end_element(name) if @in_tag_set case name when 'item' @vpc['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_attachment_set case name when 'item' @internet_gateway['attachmentSet'][@attachment['key']] = @attachment['value'] @attachment = {} when 'key', 'value' @attachment[name] = value when 'attachmentSet' @in_attachment_set = false end else case name when 'internetGatewayId' @internet_gateway[name] = value when 'internetGateway' @response['internetGatewaySet'] << @internet_gateway @internet_gateway = { 'tagSet' => {} } @internet_gateway = { 'attachmentSet' => {} } when 'requestId' @response[name] = value end end end end end end end end 
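CreateDhcpOptions above is one of the more involved parsers in this group: each dhcpConfigurationSet item carries a key plus a valueSet, which the handler folds into a hash of arrays keyed by option name. An illustrative sketch of the resulting shape, again under the fog-xml assumptions stated earlier and with a hypothetical response body:

require 'nokogiri'
require 'fog/aws'
require 'fog/aws/parsers/compute/create_dhcp_options'

# Hypothetical body with a single domain-name option.
xml = "<CreateDhcpOptionsResponse><requestId>7a62c49f-example</requestId>" \
      "<dhcpOptions><dhcpOptionsId>dopt-0abc1234</dhcpOptionsId>" \
      "<dhcpConfigurationSet><item><key>domain-name</key>" \
      "<valueSet><item><value>example.com</value></item></valueSet>" \
      "</item></dhcpConfigurationSet></dhcpOptions></CreateDhcpOptionsResponse>"

parser = Fog::Parsers::AWS::Compute::CreateDhcpOptions.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)

parser.response
# => { "dhcpOptionsSet" => [ { "dhcpConfigurationSet" => { "domain-name" => ["example.com"] },
#                              "tagSet"               => {},
#                              "dhcpOptionsId"        => "dopt-0abc1234" } ],
#      "requestId"      => "7a62c49f-example" }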
fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_key_pair.rb000066400000000000000000000005341437344660100241010ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateKeyPair < Fog::Parsers::Base def end_element(name) case name when 'keyFingerprint', 'keyMaterial', 'keyName', 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_network_acl.rb000066400000000000000000000011441437344660100246040ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute require 'fog/aws/parsers/compute/network_acl_parser' class CreateNetworkAcl < NetworkAclParser def reset super @response = { 'networkAcl' => {} } end def end_element(name) case name when 'requestId' @response[name] = value when 'networkAcl' @response['networkAcl'] = @network_acl reset_nacl else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_network_interface.rb000066400000000000000000000011771437344660100260130ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute require 'fog/aws/parsers/compute/network_interface_parser' class CreateNetworkInterface < NetworkInterfaceParser def reset super @response = { 'networkInterface' => {} } end def end_element(name) case name when 'requestId' @response[name] = value when 'networkInterface' @response['networkInterface'] = @nic reset_nic else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_route_table.rb000066400000000000000000000042221437344660100246010ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateRouteTable < Fog::Parsers::Base def reset @in_route_set = false @in_association_set = false @route = {} @association = {} @route_table = { 'routeSet' => [], 'tagSet' => {}, 'associationSet' => [] } @response = { 'routeTable' => [] } @tag = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'routeSet' @in_route_set = true when 'associationSet' @in_association_set = true end end def end_element(name) if @in_tag_set case name when 'item' @route_table['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'tagSet' @in_tag_set = false end elsif @in_route_set case name when 'routeSet' @in_route_set = false when 'destinationCidrBlock', 'gatewayId', 'state' @route[name] = value when 'item' @route_table['routeSet'] << @route @route = {} end elsif @in_association_set case name when 'routeTableAssociationId', 'routeTableId', 'main' @association[name] = value when 'associationSet' @route_table['associationSet'] << @association @in_association_set = false end else case name when 'routeTableId', 'vpcId' @route_table[name] = value when 'routeTable' @response['routeTable'] << @route_table @route_table = { 'routeSet' => {}, 'tagSet' => {}, 'associationSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_security_group.rb000066400000000000000000000007601437344660100253620ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateSecurityGroup < Fog::Parsers::Base def end_element(name) case name when 'return' if value == 'true' @response[name] = true else @response[name] = false end when 'requestId', 'groupId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_snapshot.rb000066400000000000000000000011321437344660100241300ustar00rootroot00000000000000module Fog 
module Parsers module AWS module Compute class CreateSnapshot < Fog::Parsers::Base def end_element(name) case name when 'description', 'ownerId', 'progress', 'snapshotId', 'status', 'volumeId', 'statusMessage' @response[name] = value when 'requestId' @response[name] = value when 'startTime' @response[name] = Time.parse(value) when 'volumeSize' @response[name] = value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_subnet.rb000066400000000000000000000047051437344660100236020ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateSubnet < Fog::Parsers::Base def reset @subnet = { 'tagSet' => {} } @response = { 'subnet' => [] } @tag = {} @ipv6_cidr_block_association = {} @in_tag_set = false @in_ipv6_cidr_block_association_set = false @in_cidr_block_state = false end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'ipv6CidrBlockAssociationSet' @in_ipv6_cidr_block_association_set = true when 'ipv6CidrBlockState' @in_cidr_block_state = true end end def end_element(name) if @in_tag_set case name when 'item' @subnet['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_ipv6_cidr_block_association_set if @in_cidr_block_state case name when 'state' @ipv6_cidr_block_association['ipv6CidrBlockState'] = { name => value } when 'ipv6CidrBlockState' @in_cidr_block_state = false end else case name when 'item' @subnet['ipv6CidrBlockAssociationSet'] = @ipv6_cidr_block_association @ipv6_cidr_block_association = {} when 'ipv6CidrBlock', 'associationId' @ipv6_cidr_block_association[name] = value when 'ipv6CidrBlockAssociationSet' @in_ipv6_cidr_block_association_set = false end end else case name when 'subnetId', 'state', 'vpcId', 'cidrBlock', 'availableIpAddressCount', 'availabilityZone' @subnet[name] = value when 'mapPublicIpOnLaunch', 'defaultForAz' @subnet[name] = value == 'true' ? 
true : false when 'subnet' @response['subnet'] = @subnet when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_volume.rb000066400000000000000000000011531437344660100236030ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateVolume < Fog::Parsers::Base def end_element(name) case name when 'availabilityZone', 'requestId', 'snapshotId', 'status', 'volumeId', 'volumeType', 'kmsKeyId' @response[name] = value when 'createTime' @response[name] = Time.parse(value) when 'size', 'iops' @response[name] = value.to_i when 'encrypted' @response[name] = (value == 'true') end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/create_vpc.rb000066400000000000000000000022531437344660100230660ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class CreateVpc < Fog::Parsers::Base def reset @vpc = { 'tagSet' => {} } @response = { 'vpcSet' => [] } @tag = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @vpc['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end else case name when 'vpcId', 'state', 'cidrBlock', 'dhcpOptionsId' @vpc[name] = value when 'vpc' @response['vpcSet'] << @vpc @vpc = { 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/deregister_image.rb000066400000000000000000000005071437344660100242520ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DeregisterImage < Fog::Parsers::Base def end_element(name) case name when 'return', 'requestId', 'imageId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_account_attributes.rb000066400000000000000000000021751437344660100265200ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeAccountAttributes < Fog::Parsers::Base def reset @attribute = { 'values' => []} @account_attributes = [] @response = { 'accountAttributeSet' => [] } end def start_element(name, attrs = []) super case name when 'attributeValueSet' @in_attribute_value_set = true end end def end_element(name) case name when 'attributeName' @attribute[name] = value when 'attributeValue' @attribute['values'] << value when['requestId'] @response[name] = value when 'item' @response['accountAttributeSet'] << @attribute @attribute = { 'values' => []} unless @in_attribute_value_set when 'attributeValueSet' @in_attribute_value_set = false else end @response['accountAttributeSet'].uniq! 
end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_addresses.rb000066400000000000000000000024171437344660100245720ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeAddresses < Fog::Parsers::Base def reset @response = { 'addressesSet' => [] } @address = {'tagSet' => {}} @tag = {} end def start_element(name, attrs = []) super if name == 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @address['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end else case name when 'instanceId', 'publicIp', 'domain', 'allocationId', 'associationId', 'networkInterfaceId', 'networkInterfaceOwnerId', 'privateIpAddress' @address[name] = value when 'item' @response['addressesSet'] << @address @address = { 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_availability_zones.rb000066400000000000000000000021301437344660100264750ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeAvailabilityZones < Fog::Parsers::Base def start_element(name, attrs = []) case name when 'messageSet' @in_message_set = true end super end def reset @availability_zone = { 'messageSet' => [] } @response = { 'availabilityZoneInfo' => [] } end def end_element(name) case name when 'item' unless @in_message_set @response['availabilityZoneInfo'] << @availability_zone @availability_zone = { 'messageSet' => [] } end when 'message' @availability_zone['messageSet'] << value when 'regionName', 'zoneName', 'zoneState' @availability_zone[name] = value when 'requestId' @response[name] = value when 'messageSet' @in_message_set = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_classic_link_instances.rb000066400000000000000000000032021437344660100273130ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeClassicLinkInstances < Fog::Parsers::Base def reset @instance = { 'tagSet' => {}, 'groups' => [] } @response = { 'instancesSet' => [] } @tag = {} @group = {} end def start_element(name, attrs = []) super case name when 'groupSet' @in_group_set = true when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @instance['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_group_set case name when 'item' @instance['groups'] << @group @group = {} when 'groupId', 'groupName' @group[name] = value when 'groupSet' @in_group_set = false end else case name when 'vpcId', 'instanceId' @instance[name] = value when 'item' @response['instancesSet'] << @instance @instance = { 'tagSet' => {}, 'groups' => [] } when 'requestId', 'nextToken' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_dhcp_options.rb000066400000000000000000000042641437344660100253100ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeDhcpOptions < Fog::Parsers::Base def reset @dhcp_options = { 'dhcpConfigurationSet' => {}, 'tagSet' => {} } @response = { 'dhcpOptionsSet' => [] } @tag = {} @value_set = [] @dhcp_configuration = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'dhcpConfigurationSet' @in_dhcp_configuration_set = true when 
'valueSet' @in_value_set = true end end def end_element(name) if @in_tag_set case name when 'item' @dhcp_options['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_dhcp_configuration_set case name when 'item' unless @in_value_set @dhcp_options['dhcpConfigurationSet'][@dhcp_configuration['key']] = @value_set @value_set=[] @dhcp_configuration = {} end when 'key', 'value' if !@in_value_set @dhcp_configuration[name] = value else @value_set << value end when 'valueSet' @in_value_set = false when 'dhcpConfigurationSet' @in_dhcp_configuration_set = false end else case name when 'dhcpOptionsId' @dhcp_options[name] = value when 'item' @response['dhcpOptionsSet'] << @dhcp_options @dhcp_options = { 'dhcpConfigurationSet' => {}, 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_image_attribute.rb000066400000000000000000000101341437344660100257550ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeImageAttribute < Fog::Parsers::Base def reset @response = { } @in_description = false @in_kernelId = false @in_ramdiskId = false @in_launchPermission = false @in_productCodes = false @in_blockDeviceMapping = false @in_sriovNetSupport = false end def start_element(name, attrs = []) super case name when 'description' @in_description = true when 'kernel' @in_kernel = true when 'ramdisk' @in_ramdisk = true when 'launchPermission' @in_launchPermission= true unless @response.key?('launchPermission') @response['launchPermission'] = [] end when 'productCodes' @in_productCodes = true @product_codes = {} unless @response.key?('productCodes') @response['productCodes'] = [] end when 'blockDeviceMapping' @in_blockDeviceMapping = true @block_device_mapping = {} unless @response.key?('blockDeviceMapping') @response['blockDeviceMapping'] = [] end when 'sriovNetSupport' unless @response.key?('sriovNetSupport') @response['sriovNetSupport'] = 'false' end @in_sriovNetSupport = true end end def end_element(name) if @in_description case name when 'value' @response['description'] = value when 'description' @in_description= false end elsif @in_kernel case name when 'value' @response['kernelId'] = value when 'kernel' @in_kernelId = false end elsif @in_ramdisk case name when 'value' @response['ramdiskId'] = value when 'ramdisk' @in_ramdiskId = false end elsif @in_launchPermission case name when 'group', 'userId' @response['launchPermission'] << value when 'launchPermission' @in_launchPermission = false end elsif @in_blockDeviceMapping case name when 'item' @response["blockDeviceMapping"] << @block_device_mapping @block_device_mapping = {} when 'volumeId', 'status', 'deviceName' @block_device_mapping[name] = value when 'attachTime' @block_device_mapping['attachTime'] = Time.parse(value) when 'deleteOnTermination' @block_device_mapping['deleteOnTermination'] = (value == 'true') when 'blockDeviceMapping' @in_blockDeviceMapping = false end elsif @in_productCodes case name when 'item' @response['productCodes'] << @product_codes @product_codes = {} when 'productCode', 'type' @product_codes[name] = value when 'productCodes' @in_productCodes = false end elsif @in_sriovNetSupport case name when 'value' @response["sriovNetSupport"] = value when "sriovNetSupport" @in_sriovNetSupport = false end else case name when 'requestId', 'imageId' @response[name] = value end end end end end end end end 
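DescribeImageAttribute above flattens each requested attribute into a single top-level key, so a launchPermission query comes back as a plain array mixing groups and user IDs. Purely illustrative, with a hypothetical body and the same fog-xml assumptions as the earlier sketches:

require 'nokogiri'
require 'fog/aws'
require 'fog/aws/parsers/compute/describe_image_attribute'

# Hypothetical body for a launchPermission attribute query.
xml = "<DescribeImageAttributeResponse><requestId>ab12cd34-example</requestId>" \
      "<imageId>ami-0abc1234</imageId><launchPermission>" \
      "<item><group>all</group></item><item><userId>123456789012</userId></item>" \
      "</launchPermission></DescribeImageAttributeResponse>"

parser = Fog::Parsers::AWS::Compute::DescribeImageAttribute.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)

parser.response
# => { "requestId"        => "ab12cd34-example",
#      "imageId"          => "ami-0abc1234",
#      "launchPermission" => ["all", "123456789012"] }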
fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_images.rb000066400000000000000000000063251437344660100240640ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeImages < Fog::Parsers::Base def reset @block_device_mapping = {} @image = { 'blockDeviceMapping' => [], 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} } @response = { 'imagesSet' => [] } @state_reason = {} @tag = {} end def start_element(name, attrs = []) super case name when 'blockDeviceMapping' @in_block_device_mapping = true when 'productCodes' @in_product_codes = true when 'stateReason' @in_state_reason = true when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_block_device_mapping case name when 'blockDeviceMapping' @in_block_device_mapping = false when 'deviceName', 'virtualName', 'snapshotId', 'deleteOnTermination', 'volumeType', 'encrypted' @block_device_mapping[name] = value when 'volumeSize' @block_device_mapping[name] = value.to_i when 'item' @image['blockDeviceMapping'] << @block_device_mapping @block_device_mapping = {} end elsif @in_product_codes case name when 'productCode' @image['productCodes'] << value when 'productCodes' @in_product_codes = false end elsif @in_tag_set case name when 'item' @image['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_state_reason case name when 'code', 'message' @state_reason[name] = value when 'stateReason' @image['stateReason'] = @state_reason @state_reason = {} @in_state_reason = false end else case name when 'architecture', 'description', 'hypervisor', 'imageId', 'imageLocation', 'imageOwnerAlias', 'imageOwnerId', 'imageState', 'imageType', 'kernelId', 'name', 'platform', 'ramdiskId', 'rootDeviceType','rootDeviceName','virtualizationType' @image[name] = value when 'isPublic','enaSupport' if value == 'true' @image[name] = true else @image[name] = false end when 'creationDate' @image[name] = Time.parse(value) if value && !value.empty? 
when 'item' @response['imagesSet'] << @image @image = { 'blockDeviceMapping' => [], 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_instance_attribute.rb000066400000000000000000000142321437344660100265020ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeInstanceAttribute < Fog::Parsers::Base def reset @response = { } @in_instanceType = false @in_kernelId = false @in_ramdiskId = false @in_userData = false @in_disableApiTermination = false @in_instanceInitiatedShutdownBehavior = false @in_rootDeviceName = false @in_blockDeviceMapping = false @in_productCodes = false @in_ebsOptimized = false @in_sriovNetSupport = false @in_sourceDestCheck = false @in_groupSet = false end def start_element(name, attrs = []) super case name when 'instanceType' @in_instanceType = true when 'kernel' @in_kernel = true when 'ramdisk' @in_ramdisk = true when 'userData' @in_userData = true when 'disableApiTermination' @in_disableApiTermination = true when 'instanceInitiatedShutdownBehavior' @in_instanceInitiatedShutdownBehavior = true when 'rootDeviceName' @in_rootDeviceName = true when 'blockDeviceMapping' @in_blockDeviceMapping = true @block_device_mapping = {} unless @response.key?('blockDeviceMapping') @response['blockDeviceMapping'] = [] end when 'productCodes' @in_productCodes = true unless @response.key?('productCodes') @response['productCodes'] = [] end when 'ebsOptimized' @in_ebsOptimized = true when 'sriovNetSupport' @in_sriovNetSupport = true when 'sourceDestCheck' @in_sourceDestCheck = true when 'groupSet' @in_groupSet = true @group = {} unless @response.key?('groupSet') @response['groupSet'] = [] end end end def end_element(name) if @in_instanceType case name when 'value' @response['instanceType'] = value when 'instanceType' @in_instanceType = false end elsif @in_kernel case name when 'value' @response['kernelId'] = value when 'kernel' @in_kernelId = false end elsif @in_ramdisk case name when 'value' @response['ramdiskId'] = value when 'ramdisk' @in_ramdiskId = false end elsif @in_userData case name when 'value' @response['userData'] = value when 'userData' @in_userData = false end elsif @in_disableApiTermination case name when 'value' @response['disableApiTermination'] = (value == 'true') when 'disableApiTermination' @in_disableApiTermination = false end elsif @in_instanceInitiatedShutdownBehavior case name when 'value' @response['instanceInitiatedShutdownBehavior'] = value when 'instanceInitiatedShutdownBehavior' @in_instanceInitiatedShutdownBehavior = false end elsif @in_rootDeviceName case name when 'value' @response['rootDeviceName'] = value when 'rootDeviceName' @in_rootDeviceName = false end elsif @in_blockDeviceMapping case name when 'item' @response["blockDeviceMapping"] << @block_device_mapping @block_device_mapping = {} when 'volumeId', 'status', 'deviceName' @block_device_mapping[name] = value when 'attachTime' @block_device_mapping['attachTime'] = Time.parse(value) when 'deleteOnTermination' @block_device_mapping['deleteOnTermination'] = (value == 'true') when 'blockDeviceMapping' @in_blockDeviceMapping = false end elsif @in_productCodes @response['productCodes'] << value case name when 'productCodes' @in_productCodes = false end elsif @in_ebsOptimized case name when 'value' @response['ebsOptimized'] = (value == 'true') when 'ebsOptimized' @in_ebsOptimized = false end elsif @in_sriovNetSupport case name when 
'value' @response["sriovNetSupport"] = value when "sriovNetSupport" @in_sriovNetSupport = false end elsif @in_sourceDestCheck case name when 'value' @response['sourceDestCheck'] = (value == 'true') when 'sourceDestCheck' @in_sourceDestCheck = false end elsif @in_groupSet case name when 'item' @response['groupSet'] << @group @group = {} when 'groupId' @group["groupId"] = value when 'groupSet' @in_groupSet = false end else case name when 'requestId', 'instanceId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_instance_status.rb000066400000000000000000000042471437344660100260270ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeInstanceStatus < Fog::Parsers::Base def new_instance! @instance = { 'instanceState' => {}, 'systemStatus' => { 'details' => [] }, 'instanceStatus' => { 'details' => [] }, 'eventsSet' => [] } end def new_item! @item = {} end def reset @response = { 'instanceStatusSet' => [] } @inside = nil end def start_element(name, attrs=[]) super case name when 'item' if @inside new_item! else new_instance! end when 'systemStatus' @inside = :systemStatus when 'instanceState' @inside = :instanceState when 'instanceStatus' @inside = :instanceStatus when 'eventsSet' @inside = :eventsSet end end def end_element(name) case name #Simple closers when 'instanceId', 'availabilityZone' @instance[name] = value when 'nextToken', 'requestId' @response[name] = value when 'systemStatus', 'instanceState', 'instanceStatus', 'eventsSet' @inside = nil when 'item' case @inside when :eventsSet @instance['eventsSet'] << @item when :systemStatus, :instanceStatus @instance[@inside.to_s]['details'] << @item when nil @response['instanceStatusSet'] << @instance end @item = nil when 'code' case @inside when :eventsSet @item[name] = value when :instanceState @instance[@inside.to_s][name] = value.to_i end when 'description', 'notBefore', 'notAfter', 'name', 'status' @item.nil? ? 
(@instance[@inside.to_s][name] = value) : (@item[name] = value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_instances.rb000066400000000000000000000124441437344660100246050ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeInstances < Fog::Parsers::Base def reset @block_device_mapping = {} @network_interface = {} @context = [] @contexts = ['blockDevices', 'blockDeviceMapping', 'groupSet', 'iamInstanceProfile', 'instancesSet', 'instanceState', 'networkInterfaceSet', 'placement', 'productCodes', 'stateReason', 'tagSet'] @instance = { 'blockDeviceMapping' => [], 'networkInterfaces' => [], 'iamInstanceProfile' => {}, 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} } @reservation = { 'groupIds' => [], 'groupSet' => [], 'instancesSet' => [] } @response = { 'reservationSet' => [] } @tag = {} end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) case name when 'amiLaunchIndex' @instance[name] = value.to_i when 'arn' @instance[@context.last][name] = value when 'availabilityZone', 'tenancy' @instance['placement'][name] = value when 'architecture', 'clientToken', 'dnsName', 'hypervisor', 'imageId', 'instanceId', 'instanceType', 'ipAddress', 'kernelId', 'keyName', 'instanceLifecycle', 'platform', 'privateDnsName', 'privateIpAddress', 'ramdiskId', 'reason', 'requesterId', 'rootDeviceName', 'rootDeviceType', 'spotInstanceRequestId', 'virtualizationType' @instance[name] = value when 'attachTime' @block_device_mapping[name] = Time.parse(value) when *@contexts @context.pop when 'code' @instance[@context.last][name] = @context.last == 'stateReason' ? value : value.to_i when 'message' @instance[@context.last][name] = value when 'deleteOnTermination' @block_device_mapping[name] = (value == 'true') when 'deviceName', 'status', 'volumeId' @block_device_mapping[name] = value when 'subnetId', 'vpcId', 'ownerId', 'networkInterfaceId', 'attachmentId' @network_interface[name] = value @instance[name] = value when 'groupId', 'groupName' case @context.last when 'groupSet' (name == 'groupName') ? 
current_key = 'groupSet' : current_key = 'groupIds' case @context[-2] when 'instancesSet' @reservation[current_key] << value when 'networkInterfaceSet' @network_interface[current_key] ||= [] @network_interface[current_key] << value end when 'placement' @instance['placement'][name] = value end when 'id' @instance[@context.last][name] = value when 'item' case @context.last when 'blockDeviceMapping' @instance['blockDeviceMapping'] << @block_device_mapping @block_device_mapping = {} when 'networkInterfaceSet' @instance['networkInterfaces'] << @network_interface @network_interface = {} when 'instancesSet' @reservation['instancesSet'] << @instance @instance = { 'blockDeviceMapping' => [], 'networkInterfaces' => [], 'iamInstanceProfile' => {}, 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [], 'stateReason' => {}, 'tagSet' => {} } when 'tagSet' @instance['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'blockDevices' # Ignore this one (Eucalyptus specific) when nil @response['reservationSet'] << @reservation @reservation = { 'groupIds' => [], 'groupSet' => [], 'instancesSet' => [] } end when 'key', 'value' @tag[name] = value when 'launchTime' @instance[name] = Time.parse(value) when 'name' @instance[@context.last][name] = value when 'ownerId', 'reservationId' @reservation[name] = value when 'requestId', 'nextToken' @response[name] = value when 'productCode' @instance['productCodes'] << value when 'state' @instance['monitoring'][name] = (value == 'enabled') when 'ebsOptimized' @instance['ebsOptimized'] = (value == 'true') when 'sourceDestCheck' if value == 'true' @instance[name] = true else @instance[name] = false end # Eucalyptus passes status in schema non conforming way when 'stateCode' @instance['instanceState']['code'] = value when 'stateName' @instance['instanceState']['name'] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_internet_gateways.rb000066400000000000000000000034001437344660100263420ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeInternetGateways < Fog::Parsers::Base def reset @internet_gateway = { 'attachmentSet' => {}, 'tagSet' => {} } @response = { 'internetGatewaySet' => [] } @tag = {} @attachment = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'attachmentSet' @in_attachment_set = true end end def end_element(name) if @in_tag_set case name when 'item' @internet_gateway['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_attachment_set case name when 'item' @internet_gateway['attachmentSet']=@attachment @attachment = {} when 'vpcId', 'state' @attachment[name] = value when 'attachmentSet' @in_attachment_set = false end else case name when 'internetGatewayId' @internet_gateway[name] = value when 'item' @response['internetGatewaySet'] << @internet_gateway @internet_gateway = { 'attachmentSet' => {}, 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_key_pairs.rb000066400000000000000000000010771437344660100246040ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeKeyPairs < Fog::Parsers::Base def reset @key = {} @response = { 'keySet' => [] } end def end_element(name) case name when 'item' @response['keySet'] << @key @key = {} when 'keyFingerprint', 'keyName' @key[name] = value when 'requestId' 
@response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_network_acls.rb000066400000000000000000000016601437344660100253070ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute require 'fog/aws/parsers/compute/network_acl_parser' class DescribeNetworkAcls < NetworkAclParser def reset super @response = { 'networkAclSet' => [] } @item_level = 0 end def start_element(name, attrs = []) super case name when 'item' @item_level += 1 end end def end_element(name) case name when 'requestId' @response[name] = value when 'item' @item_level -= 1 if @item_level == 0 @response['networkAclSet'] << @network_acl reset_nacl else super end else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_network_interface_attribute.rb000066400000000000000000000046231437344660100304120ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeNetworkInterfaceAttribute < NetworkInterfaceParser def reset @response = { } @in_description = false @in_group_set = false @in_source_dest_check = false @in_attachment = false end def start_element(name, attrs = []) super case name when 'description' @in_description = true when 'groupSet' @in_group_set = true @group = {} unless @response.key?('groupSet') @response['groupSet'] = {} end when 'sourceDestCheck' @in_source_dest_check = true when 'attachment' @in_attachment = true @attachment = {} end end def end_element(name) if @in_description case name when 'value' @response['description'] = value when 'description' @in_description = false end elsif @in_group_set case name when 'item' @response['groupSet'][@group['groupId']] = @group['groupName'] @group = {} when 'groupId', 'groupName' @group[name] = value when 'groupSet' @in_group_set = false end elsif @in_source_dest_check case name when 'value' @response['sourceDestCheck'] = (value == 'true') when 'sourceDestCheck' @in_source_dest_check = false end elsif @in_attachment case name when 'attachmentId', 'instanceId', 'instanceOwnerId', 'deviceIndex', 'status', 'attachTime', 'deleteOnTermination' @attachment[name] = value when 'attachment' @response['attachment'] = @attachment @in_attachment = false end else case name when 'requestId', 'networkInterfaceId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_network_interfaces.rb000066400000000000000000000017051437344660100265100ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute require 'fog/aws/parsers/compute/network_interface_parser' class DescribeNetworkInterfaces < NetworkInterfaceParser def reset super @response = { 'networkInterfaceSet' => [] } @item_level = 0 end def start_element(name, attrs = []) super case name when 'item' @item_level += 1 end end def end_element(name) case name when 'requestId' @response[name] = value when 'item' @item_level -= 1 if @item_level == 0 @response['networkInterfaceSet'] << @nic reset_nic else super end else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_placement_groups.rb000066400000000000000000000012211437344660100261540ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribePlacementGroups < Fog::Parsers::Base def reset @placement_group = {} @response = { 'placementGroupSet' => [] } end def end_element(name) case name when 'item' @response['placementGroupSet'] << @placement_group @placement_group = {} when 'groupName', 'state', 'strategy' 
@placement_group[name] = value when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_regions.rb000066400000000000000000000011251437344660100242560ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeRegions < Fog::Parsers::Base def reset @region = {} @response = { 'regionInfo' => [] } end def end_element(name) case name when 'item' @response['regionInfo'] << @region @region = {} when 'regionEndpoint', 'regionName' @region[name] = value when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_reserved_instances.rb000066400000000000000000000044761437344660100265120ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeReservedInstances < Fog::Parsers::Base def get_default_item {'tagSet' => {}, 'recurringCharges' => []} end def reset @context = [] @contexts = ['reservedInstancesSet', 'recurringCharges', 'tagSet'] @reserved_instance = get_default_item @response = { 'reservedInstancesSet' => [] } @charge = {} @tag = {} end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) case name when 'availabilityZone', 'instanceTenancy', 'instanceType', 'offeringType', 'productDescription', 'reservedInstancesId', 'scope', 'state' @reserved_instance[name] = value when 'duration', 'instanceCount' @reserved_instance[name] = value.to_i when 'fixedPrice', 'usagePrice' @reserved_instance[name] = value.to_f when *@contexts @context.pop when 'item' case @context.last when 'reservedInstancesSet' @response['reservedInstancesSet'] << @reserved_instance @reserved_instance = get_default_item when 'recurringCharges' @reserved_instance['recurringCharges'] << { 'frequency' => @charge['frequency'], 'amount' => @charge['amount'] } @charge = {} when 'tagSet' @reserved_instance['tagSet'][@tag['key']] = @tag['value'] @tag = {} end when 'amount' case @context.last when 'reservedInstancesSet' @reserved_instance[name] = value.to_f when 'recurringCharges' @charge[name] = value.to_f end when 'frequency' @charge[name] = value when 'key', 'value' @tag[name] = value when 'requestId' @response[name] = value when 'start','end' @reserved_instance[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_reserved_instances_offerings.rb000066400000000000000000000021121437344660100305350ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeReservedInstancesOfferings < Fog::Parsers::Base def reset @reserved_instances_offering = {} @response = { 'reservedInstancesOfferingsSet' => [] } end def end_element(name) case name when 'availabilityZone', 'currencyCode', 'instanceType', 'offeringType', 'instanceTenancy', 'productDescription', 'reservedInstancesOfferingId' @reserved_instances_offering[name] = value when 'duration' @reserved_instances_offering[name] = value.to_i when 'fixedPrice', 'usagePrice' @reserved_instances_offering[name] = value.to_f when 'item' @response['reservedInstancesOfferingsSet'] << @reserved_instances_offering unless @reserved_instances_offering.empty? 
@reserved_instances_offering = {} when 'requestId', 'nextToken' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_route_tables.rb000066400000000000000000000062441437344660100253070ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeRouteTables < Fog::Parsers::Base def reset @association = { 'routeTableAssociationId' => nil, 'routeTableId' => nil, 'subnetId' => nil, 'main' => false } @in_association_set = false @in_route_set = false @route = { 'destinationCidrBlock' => nil, 'gatewayId' => nil, 'instanceId' => nil, 'instanceOwnerId' => nil, 'networkInterfaceId' => nil, 'vpcPeeringConnectionId' => nil, 'natGatewayId' => nil, 'state' => nil, 'origin' => nil } @response = { 'routeTableSet' => [] } @tag = {} @route_table = { 'associationSet' => [], 'tagSet' => {}, 'routeSet' => [] } end def start_element(name, attrs = []) super case name when 'associationSet' @in_association_set = true when 'tagSet' @in_tag_set = true when 'routeSet' @in_route_set = true end end def end_element(name) if @in_association_set case name when 'associationSet' @in_association_set = false when 'routeTableAssociationId', 'routeTableId', 'subnetId' @association[name] = value when 'main' if value == 'true' @association[name] = true else @association[name] = false end when 'item' @route_table['associationSet'] << @association @association = { 'routeTableAssociationId' => nil, 'routeTableId' => nil, 'subnetId' => nil, 'main' => false } end elsif @in_tag_set case name when 'key', 'value' @tag[name] = value when 'item' @route_table['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'tagSet' @in_tag_set = false end elsif @in_route_set case name when 'destinationCidrBlock', 'gatewayId', 'instanceId', 'instanceOwnerId', 'networkInterfaceId', 'vpcPeeringConnectionId', 'natGatewayId', 'state', 'origin' @route[name] = value when 'item' @route_table['routeSet'] << @route @route = { 'destinationCidrBlock' => nil, 'gatewayId' => nil, 'instanceId' => nil, 'instanceOwnerId' => nil, 'networkInterfaceId' => nil, 'vpcPeeringConnectionId' => nil, 'natGatewayId' => nil, 'state' => nil, 'origin' => nil } when 'routeSet' @in_route_set = false end else case name when 'routeTableId', 'vpcId' @route_table[name] = value when 'item' @response['routeTableSet'] << @route_table @route_table = { 'associationSet' => [], 'tagSet' => {}, 'routeSet' => [] } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_security_groups.rb000066400000000000000000000107221437344660100260610ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeSecurityGroups < Fog::Parsers::Base def reset @group = {} @ip_permission = { 'groups' => [], 'ipRanges' => [], 'ipv6Ranges' => []} @ip_permission_egress = { 'groups' => [], 'ipRanges' => [], 'ipv6Ranges' => []} @ip_range = {} @ipv6_range = {} @security_group = { 'ipPermissions' => [], 'ipPermissionsEgress' => [], 'tagSet' => {} } @response = { 'securityGroupInfo' => [] } @tag = {} end def start_element(name, attrs = []) super case name when 'groups' @in_groups = true when 'ipPermissions' @in_ip_permissions = true when 'ipPermissionsEgress' @in_ip_permissions_egress = true when 'ipRanges' @in_ip_ranges = true when 'ipv6Ranges' @in_ipv6_ranges = true when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @security_group['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 
'value' @tag[name] = value when 'tagSet' @in_tag_set = false end else case name when 'cidrIp' @ip_range[name] = value when 'cidrIpv6' @ipv6_range[name] = value when 'fromPort', 'toPort' if @in_ip_permissions_egress @ip_permission_egress[name] = value.to_i else @ip_permission[name] = value.to_i end when 'groups' @in_groups = false when 'groupDescription', 'ownerId', 'vpcId' @security_group[name] = value when 'groupId','groupName' if @in_groups @group[name] = value else @security_group[name] = value end when 'ipPermissions' @in_ip_permissions = false when 'ipPermissionsEgress' @in_ip_permissions_egress = false when 'ipProtocol' if @in_ip_permissions_egress @ip_permission_egress[name] = value else @ip_permission[name] = value end when 'ipRanges' @in_ip_ranges = false when 'ipv6Ranges' @in_ipv6_ranges = false when 'item' if @in_groups if @in_ip_permissions_egress @ip_permission_egress['groups'] << @group else @ip_permission['groups'] << @group end @group = {} elsif @in_ip_ranges if @in_ip_permissions_egress @ip_permission_egress['ipRanges'] << @ip_range else @ip_permission['ipRanges'] << @ip_range end @ip_range = {} elsif @in_ipv6_ranges if @in_ip_permissions_egress @ip_permission_egress['ipv6Ranges'] << @ipv6_range else @ip_permission['ipv6Ranges'] << @ipv6_range end @ipv6_range = {} elsif @in_ip_permissions @security_group['ipPermissions'] << @ip_permission @ip_permission = { 'groups' => [], 'ipRanges' => [], 'ipv6Ranges' => []} elsif @in_ip_permissions_egress @security_group['ipPermissionsEgress'] << @ip_permission_egress @ip_permission_egress = { 'groups' => [], 'ipRanges' => [], 'ipv6Ranges' => []} else @response['securityGroupInfo'] << @security_group @security_group = { 'ipPermissions' => [], 'ipPermissionsEgress' => [], 'tagSet' => {} } end when 'requestId', 'nextToken' @response[name] = value when 'userId' @group[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_snapshots.rb000066400000000000000000000027101437344660100246330ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeSnapshots < Fog::Parsers::Base def reset @response = { 'snapshotSet' => [] } @snapshot = { 'tagSet' => {} } @tag = {} end def start_element(name, attrs = []) super if name == 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @snapshot['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end else case name when 'item' @response['snapshotSet'] << @snapshot @snapshot = { 'tagSet' => {} } when 'description', 'ownerId', 'progress', 'snapshotId', 'status', 'volumeId' @snapshot[name] ||= value when 'requestId' @response[name] = value when 'startTime' @snapshot[name] = Time.parse(value) when 'volumeSize' @snapshot[name] = value.to_i when 'encrypted' @snapshot[name] = (value == 'true') end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_spot_price_history.rb000066400000000000000000000015021437344660100265370ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeSpotPriceHistory < Fog::Parsers::Base def reset @spot_price = {} @response = { 'spotPriceHistorySet' => [] } end def end_element(name) case name when 'availabilityZone', 'instanceType', 'productDescription' @spot_price[name] = value when 'item' @response['spotPriceHistorySet'] << @spot_price @spot_price = {} when 'requestId', 'nextToken' @response[name] = value when 'spotPrice' 
@spot_price[name] = value.to_f when 'timestamp' @spot_price[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_subnets.rb000066400000000000000000000047721437344660100243060ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeSubnets < Fog::Parsers::Base def reset @subnet = { 'tagSet' => {} } @response = { 'subnetSet' => [] } @tag = {} @ipv6_cidr_block_association = {} @in_tag_set = false @in_ipv6_cidr_block_association_set = false @in_cidr_block_state = false end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true when 'ipv6CidrBlockAssociationSet' @in_ipv6_cidr_block_association_set = true when 'ipv6CidrBlockState' @in_cidr_block_state = true end end def end_element(name) if @in_tag_set case name when 'item' @subnet['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_ipv6_cidr_block_association_set if @in_cidr_block_state case name when 'state' @ipv6_cidr_block_association['ipv6CidrBlockState'] = { name => value } when 'ipv6CidrBlockState' @in_cidr_block_state = false end else case name when 'item' @subnet['ipv6CidrBlockAssociationSet'] = @ipv6_cidr_block_association @ipv6_cidr_block_association = {} when 'ipv6CidrBlock', 'associationId' @ipv6_cidr_block_association[name] = value when 'ipv6CidrBlockAssociationSet' @in_ipv6_cidr_block_association_set = false end end else case name when 'subnetId', 'state', 'vpcId', 'cidrBlock', 'availableIpAddressCount', 'availabilityZone' @subnet[name] = value when 'mapPublicIpOnLaunch', 'defaultForAz' @subnet[name] = value == 'true' ? true : false when 'item' @response['subnetSet'] << @subnet @subnet = { 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_tags.rb000066400000000000000000000011141437344660100235440ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeTags < Fog::Parsers::Base def reset @tag = {} @response = { 'tagSet' => [] } end def end_element(name) case name when 'resourceId', 'resourceType', 'key', 'value' @tag[name] = value when 'item' @response['tagSet'] << @tag @tag = {} when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_volume_status.rb000066400000000000000000000052771437344660100255360ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVolumeStatus < Fog::Parsers::Base def reset @action_set = {} @detail = {} @event_set = {} @volume_status = { 'details' => [] } @volume = { 'actionsSet' => [], 'eventsSet' => [] } @response = { 'volumeStatusSet' => [] } end def start_element(name, attrs=[]) super case name when 'actionsSet' @in_actions_set = true when 'details' @in_details = true when 'eventsSet' @in_events_set = true when 'volumeStatus' @in_volume_status = true end end def end_element(name) if @in_actions_set case name when 'actionsSet' @in_actions_set = false when 'code', 'eventId', 'eventType', 'description' @action_set[name] = value.strip when 'item' @volume['actionsSet'] << @action_set @action_set = {} end elsif @in_details case name when 'details' @in_details = false when 'name', 'status' @detail[name] = value when 'item' @volume_status['details'] << @detail @detail = {} end elsif @in_events_set case name when 'eventsSet' @in_events_set = false when 'code', 'eventId', 
'eventType', 'description' @event_set[name] = value.strip when 'notAfter', 'notBefore' @event_set[name] = Time.parse(value) when 'item' @volume['eventsSet'] << @event_set @event_set = {} end elsif @in_volume_status case name when 'volumeStatus' @volume['volumeStatus'] = @volume_status @volume_status = { 'details' => [] } @in_volume_status = false when 'status' @volume_status[name] = value end else case name when 'volumeId', 'availabilityZone' @volume[name] = value when 'nextToken', 'requestId' @response[name] = value when 'item' @response['volumeStatusSet'] << @volume @volume = { 'actionsSet' => [], 'eventsSet' => [] } end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_volumes.rb000066400000000000000000000042631437344660100243100ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVolumes < Fog::Parsers::Base def reset @attachment = {} @in_attachment_set = false @response = { 'volumeSet' => [] } @tag = {} @volume = { 'attachmentSet' => [], 'tagSet' => {} } end def start_element(name, attrs = []) super case name when 'attachmentSet' @in_attachment_set = true when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_attachment_set case name when 'attachmentSet' @in_attachment_set = false when 'attachTime' @attachment[name] = Time.parse(value) when 'deleteOnTermination' @attachment[name] = value == 'true' when 'device', 'instanceId', 'status', 'volumeId', 'kmsKeyId' @attachment[name] = value when 'item' @volume['attachmentSet'] << @attachment @attachment = {} end elsif @in_tag_set case name when 'key', 'value' @tag[name] = value when 'item' @volume['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'tagSet' @in_tag_set = false end else case name when 'availabilityZone', 'snapshotId', 'status', 'volumeId', 'volumeType' @volume[name] = value when 'createTime' @volume[name] = Time.parse(value) when 'item' @response['volumeSet'] << @volume @volume = { 'attachmentSet' => [], 'tagSet' => {} } when 'requestId' @response[name] = value when 'size', 'iops' @volume[name] = value.to_i when 'encrypted' @volume[name] = (value == 'true') end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_volumes_modifications.rb000066400000000000000000000016711437344660100272200ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVolumesModifications < Fog::Parsers::Base def reset @response = { 'volumeModificationSet' => [] } @modification = {} end def end_element(name) case name when 'modificationState', 'originalVolumeType', 'statusMessage', 'targetVolumeType', 'volumeId' @modification[name] = value when 'startTime', 'endTime' @modification[name] = Time.parse(value) when 'originalIops', 'originalSize', 'progress', 'targetIops', 'targetSize' @modification[name] = value.to_i when 'requestId' @response[name] = value when 'item' @response['volumeModificationSet'] << @modification.dup @modification = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_vpc_attribute.rb000066400000000000000000000024731437344660100254720ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVpcAttribute < Fog::Parsers::Base def reset @response = { } @in_enable_dns_support = false @in_enable_dns_hostnames = false end def start_element(name, attrs = []) super case name when 'enableDnsSupport' @in_enable_dns_support = true when 'enableDnsHostnames' @in_enable_dns_hostnames = true end end def end_element(name) if 
@in_enable_dns_support case name when 'value' @response['enableDnsSupport'] = (value == 'true') when 'enableDnsSupport' @in_enable_dns_support = false end elsif @in_enable_dns_hostnames case name when 'value' @response['enableDnsHostnames'] = (value == 'true') when 'enableDnsHostnames' @in_enable_dns_hostnames = false end else case name when 'requestId', 'vpcId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_vpc_classic_link.rb000066400000000000000000000023471437344660100261250ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVpcClassicLink < Fog::Parsers::Base def reset @vpc = { 'tagSet' => {} } @response = { 'vpcSet' => [] } @tag = {} end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true end end def end_element(name) if @in_tag_set case name when 'item' @vpc['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end else case name when 'vpcId' @vpc[name] = value when 'classicLinkEnabled' @vpc[name] = value == 'true' when 'item' @response['vpcSet'] << @vpc @vpc = { 'tagSet' => {} } when 'requestId' @response[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_vpc_classic_link_dns_support.rb000066400000000000000000000011171437344660100305570ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVpcClassicLinkDnsSupport < Fog::Parsers::Base def reset @vpc = {} @response = { 'vpcs' => [] } end def end_element(name) case name when 'vpcId' @vpc[name] = value when 'classicLinkDnsSupported' @vpc[name] = value == 'true' when 'item' @response['vpcs'] << @vpc @vpc = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/describe_vpcs.rb000066400000000000000000000057741437344660100236010ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DescribeVpcs < Fog::Parsers::Base def reset @response = { 'vpcSet' => [] } @context = [] end def start_element(name, attrs = []) super @context.push(name) case @context[1 .. -1].join('.') when 'vpcSet.item' @current_vpc = { 'tagSet' => {}, 'cidrBlockAssociationSet' => [], 'ipv6CidrBlockAssociationSet' => [] } when 'vpcSet.item.tagSet.item' @current_tag_key = @current_tag_value = nil when 'vpcSet.item.cidrBlockAssociationSet.item' @current_cidr_block = {} when 'vpcSet.item.ipv6CidrBlockAssociationSet.item' @current_ipv6_block = {} end end def end_element(name) case @context[1 .. 
-1].join('.') # tagSet when 'vpcSet.item.tagSet.item' @current_vpc['tagSet'][@current_tag_key] = @current_tag_value @current_tag_key = @current_tag_value = nil when 'vpcSet.item.tagSet.item.key' @current_tag_key = value when 'vpcSet.item.tagSet.item.value' @current_tag_value = value # cidrBlockAssociationSet when 'vpcSet.item.cidrBlockAssociationSet.item.cidrBlock', 'vpcSet.item.cidrBlockAssociationSet.item.associationId' @current_cidr_block[name] = value when 'vpcSet.item.cidrBlockAssociationSet.item.cidrBlockState' @current_cidr_block['state'] = value.strip when 'vpcSet.item.cidrBlockAssociationSet.item' @current_vpc['cidrBlockAssociationSet'] << @current_cidr_block # ipv6CidrBlockAssociationSet when 'vpcSet.item.ipv6CidrBlockAssociationSet.item.ipv6CidrBlock', 'vpcSet.item.ipv6CidrBlockAssociationSet.item.associationId' @current_ipv6_block[name] = value when 'vpcSet.item.ipv6CidrBlockAssociationSet.item.ipv6CidrBlockState' @current_ipv6_block['state'] = value.strip when 'vpcSet.item.ipv6CidrBlockAssociationSet.item' @current_vpc['ipv6CidrBlockAssociationSet'] << @current_ipv6_block # vpc when 'vpcSet.item.vpcId', 'vpcSet.item.state', 'vpcSet.item.cidrBlock', 'vpcSet.item.dhcpOptionsId', 'vpcSet.item.instanceTenancy' @current_vpc[name] = value when 'vpcSet.item.isDefault' @current_vpc['isDefault'] = value == 'true' when 'vpcSet.item' @response['vpcSet'] << @current_vpc # root when 'requestId' @response[name] = value end @context.pop end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/detach_volume.rb000066400000000000000000000006551437344660100235760ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class DetachVolume < Fog::Parsers::Base def end_element(name) case name when 'attachTime' @response[name] = Time.parse(value) when 'device', 'instanceId', 'requestId', 'status', 'volumeId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/get_console_output.rb000066400000000000000000000010501437344660100246660ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class GetConsoleOutput < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'instanceId', 'requestId' @response[name] = value when 'output' @response[name] = value && Base64.decode64(value) when 'timestamp' @response[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/get_password_data.rb000066400000000000000000000007371437344660100244520ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class GetPasswordData < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'instanceId', 'requestId', 'passwordData' @response[name] = @value when 'timestamp' @response[name] = Time.parse(@value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/import_key_pair.rb000066400000000000000000000005151437344660100241470ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class ImportKeyPair < Fog::Parsers::Base def end_element(name) case name when 'keyFingerprint', 'keyName', 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/modify_subnet_attribute.rb000066400000000000000000000007351437344660100257100ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class ModifySubnetAttribute < Fog::Parsers::Base def reset @response = { } end def end_element(name) case name when 
'return' @response[name] = value == 'true' ? true : false when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/modify_volume.rb000066400000000000000000000014701437344660100236310ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class ModifyVolume < Fog::Parsers::Base def reset @response = {'volumeModification' => {}} end def end_element(name) case name when 'modificationState', 'originalVolumeType', 'statusMessage', 'targetVolumeType', 'volumeId' @response['volumeModification'][name] = value when 'startTime', 'endTime' @response['volumeModification'][name] = Time.parse(value) when 'originalIops', 'originalSize', 'progress', 'targetIops', 'targetSize' @response['volumeModification'][name] = value.to_i when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/monitor_unmonitor_instances.rb000066400000000000000000000015001437344660100266150ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class MonitorUnmonitorInstances < Fog::Parsers::Base def reset @response = {} @instance_set = [] @current_instance_set = {} end def end_element(name) case name when 'requestId' @response['requestId'] = value when 'instanceId' @current_instance_set['instanceId'] = value when 'item' @instance_set << @current_instance_set @current_instance_set = {} when 'state' @current_instance_set['monitoring'] = value when 'instancesSet' @response['instancesSet'] = @instance_set end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/move_address_to_vpc.rb000066400000000000000000000005151437344660100247770ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class MoveAddressToVpc < Fog::Parsers::Base def end_element(name) case name when 'requestId', 'allocationId', 'status' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/network_acl_parser.rb000066400000000000000000000062241437344660100246410ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class NetworkAclParser < Fog::Parsers::Base def reset_nacl @network_acl = { 'associationSet' => [], 'entrySet' => [], 'tagSet' => {} } @association = {} @entry = { 'icmpTypeCode' => {}, 'portRange' => {} } @tag = {} @in_entry_set = false @in_association_set = false @in_tag_set = false @in_port_range = false @in_icmp_type_code = false end def reset reset_nacl end def start_element(name, attrs = []) super case name when 'entrySet' @in_entry_set = true when 'associationSet' @in_association_set = true when 'tagSet' @in_tag_set = true when 'portRange' @in_port_range = true when 'icmpTypeCode' @in_icmp_type_code = true end end def end_element(name) if @in_entry_set if @in_port_range case name when 'portRange' @in_port_range = false when 'from', 'to' @entry['portRange'][name] = value.to_i end elsif @in_icmp_type_code case name when 'icmpTypeCode' @in_icmp_type_code = false when 'code', 'type' @entry['icmpTypeCode'][name] = value.to_i end else case name when 'entrySet' @in_entry_set = false when 'item' @network_acl['entrySet'] << @entry @entry = { 'icmpTypeCode' => {}, 'portRange' => {} } when 'ruleNumber', 'protocol' @entry[name] = value.to_i when 'ruleAction', 'cidrBlock' @entry[name] = value when 'egress' @entry[name] = value == 'true' end end elsif @in_association_set case name when 'associationSet' @in_association_set = false when 'item' @network_acl['associationSet'] << @association @association = {} 
when 'networkAclAssociationId', 'networkAclId', 'subnetId' @association[name] = value end elsif @in_tag_set case name when 'tagSet' @in_tag_set = false when 'item' @network_acl['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value end else case name when 'networkAclId', 'vpcId' @network_acl[name] = value when 'default' @network_acl[name] = value == 'true' end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/network_interface_parser.rb000066400000000000000000000072211437344660100260400ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class NetworkInterfaceParser < Fog::Parsers::Base def reset_nic @nic = { 'groupSet' => {}, 'attachment' => {}, 'association' => {}, 'tagSet' => {}, 'privateIpAddressesSet' => [] } @in_tag_set = false @in_group_set = false @in_attachment = false @in_association = false @in_private_ip_addresses = false end def reset reset_nic end def start_element(name, attrs = []) super case name when 'tagSet' @in_tag_set = true @tag = {} when 'groupSet' @in_group_set = true @group = {} when 'attachment' @in_attachment = true @attachment = {} when 'association' @in_association = true @association = {} when 'privateIpAddressesSet' @in_private_ip_addresses = true @private_ip_addresses = [] @private_ip_address = {} end end def end_element(name) if @in_tag_set case name when 'item' @nic['tagSet'][@tag['key']] = @tag['value'] @tag = {} when 'key', 'value' @tag[name] = value when 'tagSet' @in_tag_set = false end elsif @in_group_set case name when 'item' @nic['groupSet'][@group['groupId']] = @group['groupName'] @group = {} when 'groupId', 'groupName' @group[name] = value when 'groupSet' @in_group_set = false end elsif @in_attachment case name when 'attachmentId', 'instanceId', 'instanceOwnerId', 'deviceIndex', 'status', 'attachTime', 'deleteOnTermination' @attachment[name] = value when 'attachment' @nic['attachment'] = @attachment @in_attachment = false end elsif @in_association case name when 'associationId', 'publicIp', 'ipOwnerId' @association[name] = value when 'association' @nic['association'] = @association @in_association = false end elsif @in_private_ip_addresses case name when 'item' if value @private_ip_address['item'] = value.strip else @private_ip_address['item'] = value end @private_ip_addresses << @private_ip_address @private_ip_address = {} when 'privateIpAddress', 'privateDnsName', 'primary' @private_ip_address[name] = value when 'privateIpAddressesSet' @nic['privateIpAddresses'] = @private_ip_addresses @in_private_ip_address = false end else case name when 'networkInterfaceId', 'subnetId', 'vpcId', 'availabilityZone', 'description', 'ownerId', 'requesterId', 'requesterManaged', 'status', 'macAddress', 'privateIpAddress', 'privateDnsName' @nic[name] = value when 'sourceDestCheck' @nic['sourceDestCheck'] = (value == 'true') end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/purchase_reserved_instances_offering.rb000066400000000000000000000006311437344660100304100ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class PurchaseReservedInstancesOffering < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'reservedInstancesId', 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/register_image.rb000066400000000000000000000004731437344660100237430ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class RegisterImage < 
Fog::Parsers::Base def end_element(name) case name when 'requestId', 'imageId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/replace_network_acl_association.rb000066400000000000000000000006211437344660100273470ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class ReplaceNetworkAclAssociation < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'requestId', 'newAssociationId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/restore_address_to_classic.rb000066400000000000000000000005201437344660100263410ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class RestoreAddressToClassic < Fog::Parsers::Base def end_element(name) case name when 'requestId', 'publicIp', 'status' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/run_instances.rb000066400000000000000000000071711437344660100236320ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class RunInstances < Fog::Parsers::Base def reset @block_device_mapping = {} @network_interfaces = {} @context = [] @contexts = ['networkInterfaces', 'blockDeviceMapping', 'groupSet', 'placement', 'productCodes'] @instance = { 'networkInterfaces' => [], 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] } @response = { 'groupSet' => [], 'instancesSet' => [] } end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) case name when 'amiLaunchIndex' @instance[name] = value.to_i when 'architecture', 'clientToken', 'dnsName', 'hypervisor', 'imageId', 'instanceId', 'instanceType', 'ipAddress', 'kernelId', 'keyName', 'instanceLifecycle', 'privateDnsName', 'privateIpAddress', 'ramdiskId', 'reason', 'requesterId', 'rootDeviceType', 'sourceDestCheck', 'spotInstanceRequestId', 'virtualizationType' @instance[name] = value when 'availabilityZone', 'tenancy' @instance['placement'][name] = value when 'attachTime' @block_device_mapping[name] = Time.parse(value) when *@contexts @context.pop when 'code' @instance['instanceState'][name] = value.to_i when 'deleteOnTermination' @block_device_mapping[name] = (value == 'true') @network_interfaces[name] = (value == 'true') when 'deviceName', 'status', 'volumeId' @block_device_mapping[name] = value when 'networkInterfaceId' @network_interfaces[name] = value when 'groupId' @response['groupSet'] << value when 'groupName' case @context.last when 'groupSet' @response['groupSet'] << value when 'placement' @instance['placement'][name] = value end when 'item' case @context.last when 'blockDeviceMapping' @instance['blockDeviceMapping'] << @block_device_mapping @block_device_mapping = {} when 'networkInterfaces' @instance['networkInterfaces'] << @network_interfaces @network_interfaces = {} when nil @response['instancesSet'] << @instance @instance = { 'networkInterfaces' => [], 'blockDeviceMapping' => [], 'instanceState' => {}, 'monitoring' => {}, 'placement' => {}, 'productCodes' => [] } end when 'launchTime' @instance[name] = Time.parse(value) when 'name' @instance['instanceState'][name] = value when 'ownerId', 'requestId', 'reservationId' @response[name] = value when 'product_code' @instance['productCodes'] << value when 'state' @instance['monitoring'][name] = (value == 'true') when 'subnetId' @response[name] = value when 'ebsOptimized' 
@instance['ebsOptimized'] = (value == 'true') when 'associatePublicIP' @instance['associatePublicIP'] = (value == 'true') end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/spot_datafeed_subscription.rb000066400000000000000000000012761437344660100263650ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class SpotDatafeedSubscription < Fog::Parsers::Base def reset @response = { 'spotDatafeedSubscription' => {} } end def end_element(name) case name when 'bucket', 'ownerId', 'prefix', 'state' @response['spotDatafeedSubscription'][name] = value when 'code', 'message' @response['spotDatafeedSubscription']['fault'] ||= {} @response['spotDatafeedSubscription'][name] = value when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/spot_instance_requests.rb000066400000000000000000000055751437344660100255710ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class SpotInstanceRequests < Fog::Parsers::Base def reset @block_device_mapping = {} @context = [] @contexts = ['blockDeviceMapping', 'groupSet', 'iamInstanceProfile', 'networkInterfaceSet'] @spot_instance_request = { 'launchSpecification' => { 'iamInstanceProfile' => {}, 'blockDeviceMapping' => [], 'groupSet' => [] } } @response = { 'spotInstanceRequestSet' => [] } end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) case name when 'attachTime' @block_device_mapping[name] = Time.parse(value) when *@contexts @context.pop when 'code', 'message' @spot_instance_request['fault'] ||= {} @spot_instance_request['fault'][name] = value when 'createTime' @spot_instance_request[name] = Time.parse(value) when 'deleteOnTermination' @block_device_mapping[name] = (value == 'true') when 'deviceName', 'status', 'volumeId' @block_device_mapping[name] = value when 'groupId' if !@context.include?('networkInterfaceSet') @spot_instance_request['launchSpecification']['groupSet'] << value end when 'arn', 'name' @spot_instance_request['launchSpecification']['iamInstanceProfile'][name] = value when 'instanceId', 'launchedAvailabilityZone', 'productDescription', 'spotInstanceRequestId', 'state', 'type' @spot_instance_request[name] = value when 'item' case @context.last when 'blockDeviceMapping' @spot_instance_request['launchSpecification']['blockDeviceMapping'] << @block_device_mapping @block_device_mapping = {} when nil @response['spotInstanceRequestSet'] << @spot_instance_request @spot_instance_request = { 'launchSpecification' => { 'iamInstanceProfile' => {}, 'blockDeviceMapping' => [], 'groupSet' => [] } } end when 'imageId', 'instanceType', 'keyname', 'subnetId' @spot_instance_request['launchSpecification'][name] = value when 'ebsOptimized' @spot_instance_request['launchSpecification'][name] = value == 'true' when 'enabled' @spot_instance_request['launchSpecification']['monitoring'] = (value == 'true') when 'requestId' @response[name] = value when 'spotPrice' @spot_instance_request[name] = value.to_f end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/start_stop_instances.rb000066400000000000000000000017251437344660100252270ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class StartStopInstances < Fog::Parsers::Base def reset @instance = { 'currentState' => {}, 'previousState' => {} } @response = { 'instancesSet' => [] } @state = nil end def start_element(name, attrs = []) super case name when 'currentState', 
'previousState' @state = name end end def end_element(name) case name when 'code' @instance[@state][name] = value.to_s when 'instanceId' @instance[name] = value when 'item' @response['instancesSet'] << @instance @instance = { 'currentState' => {}, 'previousState' => {} } when 'name' @instance[@state][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/compute/terminate_instances.rb000066400000000000000000000030201437344660100250030ustar00rootroot00000000000000module Fog module Parsers module AWS module Compute class TerminateInstances < Fog::Parsers::Base def reset @instance = { 'previousState' => {}, 'currentState' => {} } @response = { 'instancesSet' => [] } end def start_element(name, attrs = []) super if name == 'previousState' @in_previous_state = true elsif name == 'currentState' @in_current_state = true end end def end_element(name) case name when 'instanceId' @instance[name] = value when 'item' @response['instancesSet'] << @instance @instance = { 'previousState' => {}, 'currentState' => {} } when 'code' if @in_previous_state @instance['previousState'][name] = value.to_i elsif @in_current_state @instance['currentState'][name] = value.to_i end when 'name' if @in_previous_state @instance['previousState'][name] = value elsif @in_current_state @instance['currentState'][name] = value end when 'previousState' @in_previous_state = false when 'requestId' @response[name] = value when 'currentState' @in_current_state = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/000077500000000000000000000000001437344660100175345ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/dns/change_resource_record_sets.rb000066400000000000000000000007211437344660100256110ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class ChangeResourceRecordSets < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'Id' @response[name] = value.sub('/change/', '') when 'Status', 'SubmittedAt' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/create_health_check.rb000066400000000000000000000000001437344660100237740ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/dns/create_hosted_zone.rb000066400000000000000000000030171437344660100237260ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class CreateHostedZone < Fog::Parsers::Base def reset @hosted_zone = {} @change_info = {} @name_servers = [] @response = {} @section = :hosted_zone end def end_element(name) if @section == :hosted_zone case name when 'Id' @hosted_zone[name] = value.sub('/hostedzone/', '') when 'Name', 'CallerReference', 'Comment', 'PrivateZone' @hosted_zone[name]= value when 'HostedZone' @response['HostedZone'] = @hosted_zone @hosted_zone = {} @section = :change_info end elsif @section == :change_info case name when 'Id' @change_info[name]= value.sub('/change/', '') when 'Status', 'SubmittedAt' @change_info[name] = value when 'ChangeInfo' @response['ChangeInfo'] = @change_info @change_info = {} @section = :name_servers end elsif @section == :name_servers case name when 'NameServer' @name_servers << value when 'NameServers' @response['NameServers'] = @name_servers @name_servers = {} end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/delete_hosted_zone.rb000066400000000000000000000006661437344660100237340ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class DeleteHostedZone < Fog::Parsers::Base def reset @response 
= {} @response['ChangeInfo'] = {} end def end_element(name) case name when 'Id', 'Status', 'SubmittedAt' @response['ChangeInfo'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/get_change.rb000066400000000000000000000007021437344660100221440ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class GetChange < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'Id' @response[name] = value.sub('/change/', '') when 'Status', 'SubmittedAt' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/get_hosted_zone.rb000066400000000000000000000030341437344660100232410ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class GetHostedZone < Fog::Parsers::Base def reset @hosted_zone = {} @name_servers = [] @response = {} @section = :hosted_zone @vpcs = [] @vpc = {} end def end_element(name) if @section == :hosted_zone case name when 'Id' @hosted_zone[name]= value.sub('/hostedzone/', '') when 'Name', 'CallerReference', 'Comment', 'PrivateZone', 'Config' @hosted_zone[name]= value when 'ResourceRecordSetCount' @hosted_zone['ResourceRecordSetCount'] = value.to_i when 'HostedZone' @response['HostedZone'] = @hosted_zone @hosted_zone = {} @section = :name_servers end elsif @section == :name_servers case name when 'NameServer' @name_servers << value when 'NameServers' @response['NameServers'] = @name_servers @name_servers = {} when 'VPCId', 'VPCRegion' @vpc[name] = value when 'VPC' @vpcs << @vpc @vpc = {} when 'VPCs' @response['HostedZone']['VPCs'] = @vpcs @vpcs = {} @section = :vpcs end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/health_check.rb000066400000000000000000000017541437344660100224720ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class HealthCheck < Fog::Parsers::Base def reset @health_check = {} @health_check_config = {} @response = {} end def end_element(name) case name when 'HealthCheck' @response[name] = @health_check when 'HealthCheckConfig' @health_check[name] = @health_check_config @health_check_config = {} when 'Id', 'CallerReference' @health_check[name] = value when 'HealthCheckVersion' @health_check[name] = value.to_i when 'IPAddress', 'Port', 'Type', 'ResourcePath', 'FullyQualifiedDomainName', 'SearchString', 'FailureThreshold' @health_check_config[name] = value when 'RequestInterval' @health_check_config[name] = value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/list_health_checks.rb000066400000000000000000000024571437344660100237110ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class ListHealthChecks < Fog::Parsers::Base def reset @health_checks = [] @health_check = {} @health_check_config = {} @response = {} end def end_element(name) case name when 'HealthChecks' @response['HealthChecks'] = @health_checks when 'HealthCheck' @health_checks << @health_check @health_check = {} when 'HealthCheckConfig' @health_check[name] = @health_check_config @health_check_config = {} when 'Id', 'CallerReference' @health_check[name] = value when 'HealthCheckVersion' @health_check[name] = value.to_i when 'IPAddress', 'Port', 'Type', 'ResourcePath', 'FullyQualifiedDomainName', 'SearchString', 'FailureThreshold' @health_check_config[name] = value when 'RequestInterval' @health_check_config[name] = value.to_i when 'MaxItems' @response[name] = value.to_i when 'IsTruncated', 'Marker', 'NextMarker' @response[name] = value end end end end end end end 
fog-aws-3.18.0/lib/fog/aws/parsers/dns/list_hosted_zones.rb000066400000000000000000000017211437344660100236210ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class ListHostedZones < Fog::Parsers::Base def reset @hosted_zones = [] @zone = {} @response = {} end def end_element(name) case name when 'Id' @zone[name] = value.sub('/hostedzone/', '') when 'Name', 'CallerReference', 'Comment', 'PrivateZone' @zone[name] = value when 'ResourceRecordSetCount' @zone['ResourceRecordSetCount'] = value.to_i when 'HostedZone' @hosted_zones << @zone @zone = {} when 'HostedZones' @response['HostedZones'] = @hosted_zones when 'MaxItems' @response[name] = value.to_i when 'IsTruncated', 'Marker', 'NextMarker' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/dns/list_resource_record_sets.rb000066400000000000000000000041031437344660100253350ustar00rootroot00000000000000module Fog module Parsers module AWS module DNS class ListResourceRecordSets < Fog::Parsers::Base def reset @resource_record = [] @resource_record_set = {} @resource_record_set['ResourceRecords'] = [] @alias_target = {} @geo_location = {} @response = {} @response['ResourceRecordSets'] = [] @section = :resource_record_set end def end_element(name) if @section == :resource_record_set case name when 'Type', 'TTL', 'SetIdentifier', 'Weight', 'Region', 'HealthCheckId', 'Failover' @resource_record_set[name] = value when 'Name' @resource_record_set[name] = value.gsub('\\052', '*') when 'Value' @resource_record_set['ResourceRecords'] << value when 'AliasTarget' @resource_record_set[name] = @alias_target @alias_target = {} when 'HostedZoneId', 'DNSName', 'EvaluateTargetHealth' @alias_target[name] = value when 'GeoLocation' @resource_record_set[name] = @geo_location @geo_location = {} when 'ContinentCode', 'CountryCode', 'SubdivisionCode' @geo_location[name] = value when 'ResourceRecordSet' @response['ResourceRecordSets'] << @resource_record_set @resource_record_set = {} @resource_record_set['ResourceRecords'] = [] when 'ResourceRecordSets' @section = :main end elsif @section == :main case name when 'MaxItems' @response[name] = value.to_i when 'NextRecordName', 'NextRecordType', 'NextRecordIdentifier' @response[name] = value when 'IsTruncated' @response[name] = value == 'true' end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/000077500000000000000000000000001437344660100175225ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/ecs/base.rb000066400000000000000000000011111437344660100207530ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS # Base parser for ResponseMetadata, RequestId class Base < Fog::Parsers::Base def reset super @response = {'ResponseMetadata' => {}} end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/container_instance.rb000066400000000000000000000051051437344660100237160ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ContainerInstance < Fog::Parsers::AWS::ECS::Base def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) super case name when 'stringSetValue' @context.pop case @context.last when 'remainingResources' @remaining_resource[name] = @string_set when 'registeredResources' @registered_resource[name] = 
@string_set end @string_set = [] when *@contexts @context.pop when 'member' case @context.last when 'remainingResources' @container_instance['remainingResources'] ||= [] @container_instance['remainingResources'] << @remaining_resource @remaining_resource = {} when 'registeredResources' @container_instance['registeredResources'] ||= [] @container_instance['registeredResources'] << @registered_resource @registered_resource = {} when 'stringSetValue' @string_set << value.to_i end when 'longValue', 'integerValue' case @context.last when 'remainingResources' @remaining_resource[name] = value.to_i when 'registeredResources' @registered_resource[name] = value.to_i end when 'doubleValue' case @context.last when 'remainingResources' @remaining_resource[name] = value.to_f when 'registeredResources' @registered_resource[name] = value.to_f end when 'name', 'type' case @context.last when 'remainingResources' @remaining_resource[name] = value when 'registeredResources' @registered_resource[name] = value end when 'agentConnected' @container_instance[name] = value == 'true' when 'runningTasksCount', 'pendingTasksCount' @container_instance[name] = value.to_i when 'status', 'containerInstanceArn', 'ec2InstanceId' @container_instance[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/create_cluster.rb000066400000000000000000000014071437344660100230550ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class CreateCluster < Fog::Parsers::AWS::ECS::Base def reset super @result = 'CreateClusterResult' @response[@result] = {} @cluster = {} end def end_element(name) super case name when 'clusterName', 'clusterArn', 'status' @cluster[name] = value when 'registeredContainerInstancesCount', 'runningTasksCount', 'pendingTasksCount' @cluster[name] = value.to_i when 'cluster' @response[@result]['cluster'] = @cluster end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/create_service.rb000066400000000000000000000013631437344660100230350ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/service' class CreateService < Fog::Parsers::AWS::ECS::Service def reset super @result = 'CreateServiceResult' @response[@result] = {} @contexts = %w(service loadBalancers events deployments) @service = {} @context = [] @deployment = {} @load_balancer = {} @event = {} end def end_element(name) super case name when 'service' @response[@result]['service'] = @service end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/delete_cluster.rb000066400000000000000000000014071437344660100230540ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class DeleteCluster < Fog::Parsers::AWS::ECS::Base def reset super @result = 'DeleteClusterResult' @response[@result] = {} @cluster = {} end def end_element(name) super case name when 'clusterName', 'clusterArn', 'status' @cluster[name] = value when 'registeredContainerInstancesCount', 'runningTasksCount', 'pendingTasksCount' @cluster[name] = value.to_i when 'cluster' @response[@result]['cluster'] = @cluster end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/delete_service.rb000066400000000000000000000013631437344660100230340ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/service' class DeleteService < Fog::Parsers::AWS::ECS::Service def reset super @result = 'DeleteServiceResult' @response[@result] = {} 
@contexts = %w(service loadBalancers events deployments) @service = {} @context = [] @deployment = {} @load_balancer = {} @event = {} end def end_element(name) super case name when 'service' @response[@result]['service'] = @service end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/deregister_container_instance.rb000066400000000000000000000015121437344660100261310ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/container_instance' class DeregisterContainerInstance < Fog::Parsers::AWS::ECS::ContainerInstance def reset super @result = 'DeregisterContainerInstanceResult' @response[@result] = {} @contexts = %w(registeredResources remainingResources stringSetValue) @context = [] @container_instance = {} @registered_resource = {} @remaining_resource = {} @string_set = [] end def end_element(name) super case name when 'containerInstance' @response[@result][name] = @container_instance end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/deregister_task_definition.rb000066400000000000000000000016701437344660100254420ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task_definition' class DeregisterTaskDefinition < Fog::Parsers::AWS::ECS::TaskDefinition def reset @response = {} @result = 'DeregisterTaskDefinitionResult' @definition = 'taskDefinition' @response[@result] = { @definition => { 'volumes' => [], 'containerDefinitions' => [] } } @contexts = %w(volumes containerDefinitions command entryPoint environment links mountPoints portMappings volumesFrom) @context = [] @volume = {} @host = {} @container = {} @environment = {} @mountpoint = {} @portmapping = {} @volume_from = {} end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/describe_clusters.rb000066400000000000000000000027171437344660100235620ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class DescribeClusters < Fog::Parsers::AWS::ECS::Base def reset super @result = 'DescribeClustersResult' @response[@result] = {} @contexts = %w(failures clusters) @context = [] @clusters = [] @failures = [] @cluster = {} @failure = {} end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) super case name when 'clusterName', 'clusterArn', 'status' @cluster[name] = value when 'arn', 'reason' @failure[name] = value when 'member' case @context.last when 'clusters' @clusters << @cluster unless @cluster.empty? @cluster = {} when 'failures' @failures << @failure unless @failure.empty? 
@failure = {} end when 'clusters' @response[@result][name] = @clusters @context.pop when 'failures' @response[@result][name] = @failures @context.pop end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/describe_container_instances.rb000066400000000000000000000020751437344660100257440ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/container_instance' class DescribeContainerInstances < Fog::Parsers::AWS::ECS::ContainerInstance def reset super @result = 'DescribeContainerInstancesResult' @response[@result] = { 'containerInstances' => [], 'failures' => [] } @contexts = %w(containerInstances registeredResources remainingResources stringSetValue) @context = [] @container_instance = {} @registered_resource = {} @remaining_resource = {} @string_set = [] end def end_element(name) super case name when 'member' case @context.last when 'containerInstances' @response[@result]['containerInstances'] << @container_instance @container_instance = {} end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/describe_services.rb000066400000000000000000000011611437344660100235310ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/service' class DescribeServices < Fog::Parsers::AWS::ECS::Service def reset super @result = 'DescribeServicesResult' @response[@result] = { 'services' => [], 'failures' => [] } @service = {} @failure = {} @contexts = %w(failures services loadBalancers events deployments) @context = [] @deployment = {} @load_balancer = {} @event = {} end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/describe_task_definition.rb000066400000000000000000000016531437344660100250660ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task_definition' class DescribeTaskDefinition < Fog::Parsers::AWS::ECS::TaskDefinition def reset super @result = 'DescribeTaskDefinitionResult' @definition = 'taskDefinition' @response[@result] = { @definition => { 'volumes' => [], 'containerDefinitions' => [] } } @contexts = %w(volumes containerDefinitions command entryPoint environment links mountPoints portMappings volumesFrom) @context = [] @volume = {} @host = {} @container = {} @environment = {} @mountpoint = {} @portmapping = {} @volume_from = {} end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/describe_tasks.rb000066400000000000000000000012651437344660100230400ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task' class DescribeTasks < Fog::Parsers::AWS::ECS::Task def reset super @result = 'DescribeTasksResult' @response[@result] = {'failures' => [], 'tasks' => []} @contexts = %w(failures tasks containers overrides networkBindings containerOverrides) @context = [] @task = {} @failure = {} @container = {} @net_binding = {} @container_overrides = [] end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_clusters.rb000066400000000000000000000011461437344660100227500ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListClusters < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListClustersResult' @response[@result] = {'clusterArns' => []} end def end_element(name) super case name when 'member' @response[@result]['clusterArns'] << value when 'NextToken' @response[@result][name] = value end end end end end end end 
fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_container_instances.rb000066400000000000000000000012161437344660100251330ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListContainerInstances < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListContainerInstancesResult' @response[@result] = {'containerInstanceArns' => []} end def end_element(name) super case name when 'member' @response[@result]['containerInstanceArns'] << value when 'NextToken' @response[@result][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_services.rb000066400000000000000000000011461437344660100227270ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListServices < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListServicesResult' @response[@result] = {'serviceArns' => []} end def end_element(name) super case name when 'member' @response[@result]['serviceArns'] << value when 'NextToken' @response[@result][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_task_definition_families.rb000066400000000000000000000011741437344660100261300ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListTaskDefinitionFamilies < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListTaskDefinitionFamiliesResult' @response[@result] = {'families' => []} end def end_element(name) super case name when 'member' @response[@result]['families'] << value when 'NextToken' @response[@result][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_task_definitions.rb000066400000000000000000000012021437344660100244320ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListTaskDefinitions < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListTaskDefinitionsResult' @response[@result] = {'taskDefinitionArns' => []} end def end_element(name) super case name when 'member' @response[@result]['taskDefinitionArns'] << value when 'NextToken' @response[@result][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/list_tasks.rb000066400000000000000000000011321437344660100222240ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class ListTasks < Fog::Parsers::AWS::ECS::Base def reset super @result = 'ListTasksResult' @response[@result] = {'taskArns' => []} end def end_element(name) super case name when 'member' @response[@result]['taskArns'] << value when 'NextToken' @response[@result][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/register_task_definition.rb000066400000000000000000000016531437344660100251320ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task_definition' class RegisterTaskDefinition < Fog::Parsers::AWS::ECS::TaskDefinition def reset super @result = 'RegisterTaskDefinitionResult' @definition = 'taskDefinition' @response[@result] = { @definition => { 'volumes' => [], 'containerDefinitions' => [] } } @contexts = %w(volumes containerDefinitions command entryPoint environment links mountPoints portMappings volumesFrom) @context = [] @volume = {} @host = {} @container = {} @environment = {} @mountpoint = {} @portmapping = {} @volume_from = {} end end end end end end 
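The three task-definition parsers above (DeregisterTaskDefinition, DescribeTaskDefinition and RegisterTaskDefinition) differ only in the result key and scratch state they set up in #reset; the element handling lives in the shared TaskDefinition base class that appears later in this archive. A hypothetical parser for another action returning a task definition would follow the same shape. The class name and result key below are invented for illustration and are not part of the gem:

module Fog
  module Parsers
    module AWS
      module ECS
        require 'fog/aws/parsers/ecs/task_definition'

        # Hypothetical example only: reuse the shared TaskDefinition element handling
        # and change nothing but the result key and the per-parse scratch state.
        class HypotheticalTaskDefinitionAction < Fog::Parsers::AWS::ECS::TaskDefinition
          def reset
            super
            @result = 'HypotheticalTaskDefinitionActionResult'
            @definition = 'taskDefinition'
            @response[@result] = {
              @definition => {
                'volumes'              => [],
                'containerDefinitions' => []
              }
            }
            @contexts = %w(volumes containerDefinitions command entryPoint environment links mountPoints portMappings volumesFrom)
            @context = []
            @volume = {}
            @host = {}
            @container = {}
            @environment = {}
            @mountpoint = {}
            @portmapping = {}
            @volume_from = {}
          end
        end
      end
    end
  end
end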
fog-aws-3.18.0/lib/fog/aws/parsers/ecs/run_task.rb000066400000000000000000000012511437344660100216740ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task' class RunTask < Fog::Parsers::AWS::ECS::Task def reset super @result = 'RunTaskResult' @response[@result] = {'failures' => [], 'tasks' => []} @contexts = %w(failures tasks containers overrides networkBindings containerOverrides) @context = [] @task = {} @failure = {} @container = {} @net_binding = {} @container_overrides = [] end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/service.rb000066400000000000000000000051001437344660100215030ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class Service < Fog::Parsers::AWS::ECS::Base def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) super case name when *@contexts @context.pop when 'member' case @context.last when 'services' @response[@result]['services'] << @service @service = {} when 'loadBalancers' @service['loadBalancers'] ||= [] @service['loadBalancers'] << @load_balancer @load_balancer = {} when 'events' @service['events'] ||= [] @service['events'] << @event @event = {} when 'deployments' @service['deployments'] ||= [] @service['deployments'] << @deployment @deployment = {} end when 'clusterArn', 'roleArn', 'serviceArn', 'serviceName' @service[name] = value when 'taskDefinition', 'status' case @context.last when 'service', 'services' @service[name] = value when 'deployments' @deployment[name] = value end when 'desiredCount', 'pendingCount', 'runningCount' case @context.last when 'service', 'services' @service[name] = value.to_i when 'deployments' @deployment[name] = value.to_i end when 'loadBalancerName', 'containerName' @load_balancer[name] = value when 'containerPort' @load_balancer[name] = value.to_i when 'createdAt' case @context.last when 'events' @event[name] = Time.parse(value) when 'deployments' @deployment[name] = Time.parse(value) end when 'id' case @context.last when 'events' @event[name] = value when 'deployments' @deployment[name] = value end when 'message' @event[name] = value when 'updatedAt' @deployment[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/start_task.rb000066400000000000000000000012551437344660100222310ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task' class StartTask < Fog::Parsers::AWS::ECS::Task def reset super @result = 'StartTaskResult' @response[@result] = {'failures' => [], 'tasks' => []} @contexts = %w(failures tasks containers overrides networkBindings containerOverrides) @context = [] @task = {} @failure = {} @container = {} @net_binding = {} @container_overrides = [] end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/stop_task.rb000066400000000000000000000011501437344660100220530ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/task' class StopTask < Fog::Parsers::AWS::ECS::Task def reset super @result = 'StopTaskResult' @response[@result] = {'task' => {}} @contexts = %w(task containers overrides networkBindings containerOverrides) @context = [] @task = {} @container = {} @net_binding = {} @container_overrides = [] end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/task.rb000066400000000000000000000046321437344660100210160ustar00rootroot00000000000000module Fog module 
Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class Task < Fog::Parsers::AWS::ECS::Base def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) super case name when 'containerOverrides' @task['overrides'] ||= {} @task['overrides'][name] = @container_overrides @context.pop when 'task' @response[@result][name] = @task when *@contexts @context.pop when 'member' case @context.last when 'tasks' @response[@result]['tasks'] << @task @task = {} when 'containers' @task['containers'] ||= [] @task['containers'] << @container @container = {} when 'networkBindings' @container['networkBindings'] ||= [] @container['networkBindings'] << @net_binding @net_binding = {} when 'failures' @response[@result]['failures'] << @failure @failure = {} end when 'clusterArn', 'desiredStatus', 'startedBy', 'containerInstanceArn', 'taskDefinitionArn' @task[name] = value when 'taskArn', 'lastStatus' case @context.last when 'tasks' @task[name] = value when 'containers' @container[name] = value end when 'containerArn' @container[name] = value when 'exitCode' @container[name] = value.to_i when 'name' case @context.last when 'containers' @container[name] = value when 'containerOverrides' @container_overrides << value end when 'networkBindings' @container[name] = @net_bindings when 'bindIP' @net_binding[name] = value when 'hostPort', 'containerPort' @net_binding[name] = value.to_i when 'arn', 'reason' @failure[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/task_definition.rb000066400000000000000000000062171437344660100232270ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/base' class TaskDefinition < Fog::Parsers::AWS::ECS::Base def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) super case name when 'taskDefinitionArn' @response[@result][@definition][name] = value when 'revision' @response[@result][@definition][name] = value.to_i when *@contexts @context.pop when 'member' case @context.last when 'volumes' @response[@result][@definition]['volumes'] << @volume @volume = {} when 'containerDefinitions' @response[@result][@definition]['containerDefinitions'] << @container @container = {} when 'command' @container['command'] ||= [] @container['command'] << value when 'entryPoint' @container['entryPoint'] ||= [] @container['entryPoint'] << value when 'links' @container['links'] ||= [] @container['links'] << value when 'environment' @container['environment'] ||= [] @container['environment'] << @environment @environment = {} when 'mountPoints' @container['mountPoints'] ||= [] @container['mountPoints'] << @mountpoint @mountpoint = {} when 'portMappings' @container['portMappings'] ||= [] @container['portMappings'] << @portmapping @portmapping = {} end when 'name' case @context.last when 'volumes' @volume[name] = value when 'containerDefinitions' @container[name] = value when 'environment' @environment[name] = value end when 'host' @volume[name] = @host @host = {} when 'sourcePath' @host[name] = value when 'cpu', 'memory' @container[name] = value.to_i when 'essential' @container[name] = value == 'true' when 'image' @container[name] = value when 'value' @environment[name] = value when 'readOnly' case @context.last when 'mountPoints' @mountpoint[name] = value == 'true' when 'volumesFrom' @volume_from[name] = value == 'true' end when 'containerPath', 'sourceVolume' @mountpoint[name] = value when 
'containerPort', 'hostPort' @portmapping[name] = value.to_i when 'sourceContainer' @volume_from[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ecs/update_service.rb000066400000000000000000000013631437344660100230540ustar00rootroot00000000000000module Fog module Parsers module AWS module ECS require 'fog/aws/parsers/ecs/service' class UpdateService < Fog::Parsers::AWS::ECS::Service def reset super @result = 'UpdateServiceResult' @response[@result] = {} @contexts = %w(service loadBalancers events deployments) @service = {} @context = [] @deployment = {} @load_balancer = {} @event = {} end def end_element(name) super case name when 'service' @response[@result]['service'] = @service end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/000077500000000000000000000000001437344660100212155ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/authorize_cache_security_group_ingress.rb000066400000000000000000000010371437344660100315750ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/security_group_parser' class AuthorizeCacheSecurityGroupIngress < Fog::Parsers::AWS::Elasticache::SecurityGroupParser def end_element(name) case name when 'CacheSecurityGroup' then @response['CacheSecurityGroup'] = @security_group reset_security_group else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/base.rb000066400000000000000000000011231437344660100224510ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache # Base parser for ResponseMetadata, RequestId class Base < Fog::Parsers::Base def reset super @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/cache_cluster_parser.rb000066400000000000000000000103601437344660100257220ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/base' class CacheClusterParser < Base def reset super reset_cache_cluster end def reset_cache_cluster @cache_cluster = { 'CacheSecurityGroups' => [], 'CacheNodes' => [], 'CacheParameterGroup' => {}, 'ConfigurationEndpoint' => {}, 'SecurityGroups' => [] } end def start_element(name, attrs = []) super case name when 'CacheSecurityGroup'; then @cache_security_group = {} when 'CacheNode'; then @cache_node = {} when 'PendingModifiedValues'; then @pending_values = {} when 'ConfigurationEndpoint'; then @configuration_endpoint = {} when 'SecurityGroups' @in_security_groups = true @security_group_members = [] when 'member' if @in_security_groups @in_security_group_member = true @security_group_member = {} end end end def end_element(name) case name when 'AutoMinorVersionUpgrade', 'CacheClusterId', 'CacheClusterStatus', 'CacheNodeType', 'Engine', 'PreferredAvailabilityZone', 'PreferredMaintenanceWindow' @cache_cluster[name] = value when 'EngineVersion', 'CacheNodeIdsToRemoves' if @pending_values @pending_values[name] = value ? 
value.strip : name else @cache_cluster[name] = value end when 'NumCacheNodes' if @pending_values @pending_values[name] = value.to_i else @cache_cluster[name] = value.to_i end when 'CacheClusterCreateTime' @cache_cluster[name] = DateTime.parse(value) when 'CacheSecurityGroup' @cache_cluster["#{name}s"] << @cache_security_group unless @cache_security_group.empty? when 'ConfigurationEndpoint' @cache_cluster['ConfigurationEndpoint'] = @configuration_endpoint when 'CacheSecurityGroupName', 'CacheSubnetGroupName' @cache_cluster[name] = value when 'Status' if @in_security_group_member @security_group_member[name] = value else @cache_cluster[name] = value end when 'CacheNode' @cache_cluster["#{name}s"] << @cache_node unless @cache_node.empty? @cache_node = nil when'PendingModifiedValues' @cache_cluster[name] = @pending_values @pending_values = nil when 'Port', 'Address' if @cache_node @cache_node[name] = value ? value.strip : name elsif @pending_values @pending_values[name] = value ? value.strip : name elsif @configuration_endpoint @configuration_endpoint[name] = value ? value.strip : name end when 'CacheNodeCreateTime', 'CacheNodeStatus', 'ParameterGroupStatus', 'CacheNodeId' if @cache_node @cache_node[name] = value ? value.strip : name elsif @pending_values @pending_values[name] = value ? value.strip : name end when 'CacheNodeIdsToReboots', 'CacheParameterGroupName', 'ParameterApplyStatus' @cache_cluster['CacheParameterGroup'][name] = value when 'SecurityGroups' @in_security_groups = false @cache_cluster['SecurityGroups'] = @security_group_members when 'SecurityGroupId' @security_group_member[name] = value if @in_security_group_member when 'member' if @in_security_groups @in_security_group_member = false @security_group_members << @security_group_member end else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/create_cache_subnet_group.rb000066400000000000000000000015661437344660100267340ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/subnet_group_parser' class CreateCacheSubnetGroup < Fog::Parsers::AWS::Elasticache::SubnetGroupParser def reset @response = { 'CreateCacheSubnetGroupResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSubnetGroup' then @response['CreateCacheSubnetGroupResult']['CacheSubnetGroup'] = @cache_subnet_group @cache_subnet_group = fresh_subnet_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_cache_clusters.rb000066400000000000000000000010731437344660100263720ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/cache_cluster_parser' class DescribeCacheClusters < CacheClusterParser def reset super @response['CacheClusters'] = [] end def end_element(name) case name when 'CacheCluster' @response["#{name}s"] << @cache_cluster reset_cache_cluster else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_cache_parameters.rb000066400000000000000000000007561437344660100267000ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/engine_defaults_parser' class DescribeCacheParameters < EngineDefaultsParser def end_element(name) case name when 'DescribeCacheParametersResult' @response[name] = @engine_defaults 
reset_engine_defaults else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_cache_subnet_groups.rb000066400000000000000000000017771437344660100274400ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/subnet_group_parser' class DescribeCacheSubnetGroups < Fog::Parsers::AWS::Elasticache::SubnetGroupParser def reset @response = { 'DescribeCacheSubnetGroupsResult' => {'CacheSubnetGroups' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'CacheSubnetGroup' @response['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'] << @cache_subnet_group @cache_subnet_group = fresh_subnet_group when 'Marker' @response['DescribeCacheSubnetGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_engine_default_parameters.rb000066400000000000000000000007471437344660100306060ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/engine_defaults_parser' class DescribeEngineDefaultParameters < EngineDefaultsParser def end_element(name) case name when 'EngineDefaults' @response[name] = @engine_defaults reset_engine_defaults else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_parameter_groups.rb000066400000000000000000000011231437344660100267560ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/parameter_group_parser' class DescribeParameterGroups < ParameterGroupParser def reset super @response['CacheParameterGroups'] = [] end def end_element(name) case name when 'CacheParameterGroup' @response["#{name}s"] << @parameter_group reset_parameter_group else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_reserved_cache_nodes.rb000066400000000000000000000017641437344660100275440ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache class DescribeReservedCacheNodes < Fog::Parsers::Base def reset @reserved_node = {} @response = { 'ReservedCacheNodes' => [] } end def end_element(name) case name when 'ReservedCacheNodeId', 'ReservedCacheNodesOfferingId', 'CacheNodeType', 'ProductDescription', 'State' @reserved_node[name] = @value when 'Duration', 'CacheNodeCount' @reserved_node[name] = @value.to_i when 'FixedPrice', 'UsagePrice' @reserved_node[name] = @value.to_f when 'ReservedCacheNode' @response['ReservedCacheNodes'] << @reserved_node @reserved_node = {} when 'Marker' @response[name] = @value when 'StartTime' @reserved_node[name] = Time.parse(@value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/describe_security_groups.rb000066400000000000000000000011141437344660100266450ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/security_group_parser' class DescribeSecurityGroups < SecurityGroupParser def reset super @response['CacheSecurityGroups'] = [] end def end_element(name) case name when 'CacheSecurityGroup' @response["#{name}s"] << @security_group reset_security_group else super end end end end end end end 
fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/engine_defaults_parser.rb000066400000000000000000000033261437344660100262560ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/base' class EngineDefaultsParser < Base def reset super reset_engine_defaults end def reset_engine_defaults @engine_defaults = { 'CacheNodeTypeSpecificParameters' => [], 'Parameters' => [], } end def start_element(name, attrs = []) case name when 'CacheNodeTypeSpecificParameter', 'Parameter' @parameter = {} when 'CacheNodeTypeSpecificValues' @parameter[name] = [] when 'CacheNodeTypeSpecificValue' @node_specific_value = {} else super end end def end_element(name) case name when 'CacheParameterGroupFamily' @engine_defaults[name] = value when 'CacheNodeTypeSpecificParameter', 'Parameter' if not @parameter.empty? @engine_defaults["#{name}s"] << @parameter end when 'AllowedValues', 'DataType', 'Description', 'IsModifiable', 'MinimumEngineVersion', 'ParameterName', 'ParameterValue', 'Source' @parameter[name] = value when 'CacheNodeType', 'Value' @node_specific_value[name] = value when 'CacheNodeTypeSpecificValue' if not @node_specific_value.empty? @parameter["#{name}s"] << @node_specific_value end else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/event_list.rb000066400000000000000000000016651437344660100237260ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/base' class EventListParser < Base def reset super @response['Events'] = [] end def start_element(name, attrs = []) super case name when 'Event'; then @event = {} end end def end_element(name) case name when 'Date' @event[name] = DateTime.parse(value.strip) when 'Message', 'SourceIdentifier', 'SourceType' @event[name] = value ? value.strip : name when 'Event' @response['Events'] << @event unless @event.empty? 
when 'IsTruncated', 'Marker', 'NextMarker' @response[name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/modify_parameter_group.rb000066400000000000000000000011401437344660100263010ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/parameter_group_parser' class ModifyParameterGroup < ParameterGroupParser def reset super @response['ModifyCacheParameterGroupResult'] = [] end def end_element(name) case name when 'ModifyCacheParameterGroupResult' @response[name] = @parameter_group reset_parameter_group else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/parameter_group_parser.rb000066400000000000000000000011631437344660100263130ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/base' class ParameterGroupParser < Base def reset super reset_parameter_group end def reset_parameter_group @parameter_group = {} end def end_element(name) case name when 'Description', 'CacheParameterGroupName', 'CacheParameterGroupFamily' @parameter_group[name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/reset_parameter_group.rb000066400000000000000000000011351437344660100261400ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/parameter_group_parser' class ResetParameterGroup < ParameterGroupParser def reset super @response['ResetCacheParameterGroupResult'] = [] end def end_element(name) case name when 'ResetCacheParameterGroupResult' @response[name] = @parameter_group reset_parameter_group else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/security_group_parser.rb000066400000000000000000000017651437344660100262120ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/base' class SecurityGroupParser < Fog::Parsers::Base def reset super reset_security_group end def reset_security_group @security_group = {'EC2SecurityGroups' => []} end def start_element(name, attrs = []) super case name when 'EC2SecurityGroup'; then @ec2_group = {} end end def end_element(name) case name when 'Description', 'CacheSecurityGroupName', 'OwnerId' @security_group[name] = value when 'EC2SecurityGroup' @security_group["#{name}s"] << @ec2_group unless @ec2_group.empty? 
when 'EC2SecurityGroupName', 'EC2SecurityGroupOwnerId', 'Status' @ec2_group[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/single_cache_cluster.rb000066400000000000000000000007201437344660100257060ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/cache_cluster_parser' class SingleCacheCluster < CacheClusterParser def end_element(name) case name when 'CacheCluster' @response[name] = @cache_cluster reset_cache_cluster else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/single_parameter_group.rb000066400000000000000000000007411437344660100263010ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/parameter_group_parser' class SingleParameterGroup < ParameterGroupParser def end_element(name) case name when 'CacheParameterGroup' @response[name] = @parameter_group reset_parameter_group else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/single_security_group.rb000066400000000000000000000013541437344660100261710ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache require 'fog/aws/parsers/elasticache/security_group_parser' class SingleSecurityGroup < SecurityGroupParser def reset super @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'CacheSecurityGroup' @response[name] = @security_group reset_security_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elasticache/subnet_group_parser.rb000066400000000000000000000021471437344660100256360ustar00rootroot00000000000000module Fog module Parsers module AWS module Elasticache class SubnetGroupParser < Fog::Parsers::Base def reset @cache_subnet_group = fresh_subnet_group end def start_element(name, attrs = []) super end def end_element(name) case name when 'VpcId' then @cache_subnet_group['VpcId'] = value when 'SubnetGroupStatus' then @cache_subnet_group['SubnetGroupStatus'] = value when 'CacheSubnetGroupDescription' then @cache_subnet_group['CacheSubnetGroupDescription'] = value when 'CacheSubnetGroupName' then @cache_subnet_group['CacheSubnetGroupName'] = value when 'SubnetIdentifier' then @cache_subnet_group['Subnets'] << value when 'Marker' @response['DescribeCacheSubnetGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end def fresh_subnet_group {'Subnets' => []} end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/000077500000000000000000000000001437344660100175125ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/elb/apply_security_groups_to_load_balancer.rb000066400000000000000000000011551437344660100300440ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class ApplySecurityGroupsToLoadBalancer < Fog::Parsers::Base def reset @response = { 'ApplySecurityGroupsToLoadBalancerResult' => { 'SecurityGroups' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['ApplySecurityGroupsToLoadBalancerResult']['SecurityGroups'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/attach_load_balancer_to_subnets.rb000066400000000000000000000011151437344660100263740ustar00rootroot00000000000000module 
Fog module Parsers module AWS module ELB class AttachLoadBalancerToSubnets < Fog::Parsers::Base def reset @response = { 'AttachLoadBalancerToSubnetsResult' => { 'Subnets' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['AttachLoadBalancerToSubnetsResult']['Subnets'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/configure_health_check.rb000066400000000000000000000015421437344660100245040ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class ConfigureHealthCheck < Fog::Parsers::Base def reset @health_check = {} @response = { 'ConfigureHealthCheckResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'Target' @health_check[name] = value when 'Interval', 'Timeout', 'UnhealthyThreshold', 'HealthyThreshold' @health_check[name] = value.to_i when 'HealthCheck' @response['ConfigureHealthCheckResult'][name] = @health_check when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/create_load_balancer.rb000066400000000000000000000010341437344660100241260ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class CreateLoadBalancer < Fog::Parsers::Base def reset @response = { 'CreateLoadBalancerResult' => {}, 'ResponseMetadata' => {} } end def end_element(name) case name when 'DNSName' @response['CreateLoadBalancerResult'][name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/delete_load_balancer.rb000066400000000000000000000007001437344660100241240ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DeleteLoadBalancer < Fog::Parsers::Base def reset @response = { 'DeleteLoadBalancerResult' => nil, 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/deregister_instances_from_load_balancer.rb000066400000000000000000000011671437344660100301210ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DeregisterInstancesFromLoadBalancer < Fog::Parsers::Base def reset @response = { 'DeregisterInstancesFromLoadBalancerResult' => { 'Instances' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'InstanceId' @response['DeregisterInstancesFromLoadBalancerResult']['Instances'] << {name => value} when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_instance_health.rb000066400000000000000000000014121437344660100250260ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DescribeInstanceHealth < Fog::Parsers::Base def reset @response = { 'DescribeInstanceHealthResult' => { 'InstanceStates' => [] }, 'ResponseMetadata' => {} } @instance_state = {} end def end_element(name) case name when 'Description', 'State', 'InstanceId', 'ReasonCode' @instance_state[name] = value when 'member' @response['DescribeInstanceHealthResult']['InstanceStates'] << @instance_state @instance_state = {} when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end 
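The ELB parsers above follow the same SAX pattern but accumulate repeated <member> elements into arrays. As with the Route 53 sketch earlier, such a parser can be driven by hand; the XML body below is an invented sample rather than captured API output, and it again assumes fog-xml's base parser behaves as a Nokogiri SAX document.

# Illustrative sketch: hand-feeding ELB XML to the DescribeInstanceHealth parser above.
require 'nokogiri'
require 'fog/aws'
require 'fog/aws/parsers/elb/describe_instance_health'

sample_xml = <<-XML
<DescribeInstanceHealthResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
  <DescribeInstanceHealthResult>
    <InstanceStates>
      <member>
        <InstanceId>i-0123456789abcdef0</InstanceId>
        <State>InService</State>
        <ReasonCode>N/A</ReasonCode>
        <Description>N/A</Description>
      </member>
    </InstanceStates>
  </DescribeInstanceHealthResult>
  <ResponseMetadata>
    <RequestId>example-request-id</RequestId>
  </ResponseMetadata>
</DescribeInstanceHealthResponse>
XML

parser = Fog::Parsers::AWS::ELB::DescribeInstanceHealth.new
parser.reset
Nokogiri::XML::SAX::Parser.new(parser).parse(sample_xml)
parser.response['DescribeInstanceHealthResult']['InstanceStates']
# => one hash per <member>, with 'InstanceId', 'State', 'ReasonCode' and 'Description'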
fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_load_balancer_attributes.rb000066400000000000000000000040461437344660100267170ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DescribeLoadBalancerAttributes < Fog::Parsers::Base def reset @response = { 'DescribeLoadBalancerAttributesResult' => { 'LoadBalancerAttributes' => {} }, 'ResponseMetadata' => {} } @stack = [] end def start_element(name, attrs = []) super case name when 'ConnectionDraining' @connection_draining = {} when 'CrossZoneLoadBalancing' @cross_zone_load_balancing = {} when 'ConnectionSettings' @connection_settings = {} end end def end_element(name) case name when 'Enabled' if @cross_zone_load_balancing @cross_zone_load_balancing['Enabled'] = value == 'true' ? true : false elsif @connection_draining @connection_draining['Enabled'] = value == 'true' ? true : false end when 'IdleTimeout' @connection_settings['IdleTimeout'] = value.to_i when 'Timeout' if @connection_draining @connection_draining['Timeout'] = value.to_i end when 'ConnectionDraining' @response['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['ConnectionDraining'] = @connection_draining @connection_draining = nil when 'CrossZoneLoadBalancing' @response['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['CrossZoneLoadBalancing'] = @cross_zone_load_balancing @cross_zone_load_balancing = nil when 'ConnectionSettings' @response['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']['ConnectionSettings'] = @connection_settings @connection_settings = nil when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_load_balancer_policies.rb000066400000000000000000000035241437344660100263400ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DescribeLoadBalancerPolicies < Fog::Parsers::Base def reset reset_policy reset_policy_attribute_description @results = { 'PolicyDescriptions' => [] } @response = { 'DescribeLoadBalancerPoliciesResult' => {}, 'ResponseMetadata' => {} } end def reset_policy @policy = { 'PolicyAttributeDescriptions' => [], 'PolicyName' => '', 'PolicyTypeName' => '' } end def reset_policy_attribute_description @policy_attribute_description = { 'AttributeName' => '', 'AttributeValue' => '' } end def start_element(name, attrs = []) super case name when 'PolicyAttributeDescriptions' @in_policy_attributes = true end end def end_element(name) case name when 'member' if @in_policy_attributes @policy['PolicyAttributeDescriptions'] << @policy_attribute_description reset_policy_attribute_description elsif !@in_policy_attributes @results['PolicyDescriptions'] << @policy reset_policy end when 'PolicyName', 'PolicyTypeName' @policy[name] = value when 'PolicyAttributeDescriptions' @in_policy_attributes = false when 'AttributeName', 'AttributeValue' @policy_attribute_description[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeLoadBalancerPoliciesResponse' @response['DescribeLoadBalancerPoliciesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_load_balancer_policy_types.rb000066400000000000000000000043711437344660100272550ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DescribeLoadBalancerPolicyTypes < Fog::Parsers::Base def reset reset_policy_type reset_policy_attribute_type_description @results = { 'PolicyTypeDescriptions' => [] } @response = { 
'DescribeLoadBalancerPolicyTypesResult' => {}, 'ResponseMetadata' => {} } end def reset_policy_type @policy_type = { 'Description' => '', 'PolicyAttributeTypeDescriptions' => [], 'PolicyTypeName' => '' } end def reset_policy_attribute_type_description @policy_attribute_type_description = { 'AttributeName' => '', 'AttributeType' => '', 'Cardinality' => '', 'DefaultValue' => '', 'Description' => '' } end def start_element(name, attrs = []) super case name when 'PolicyAttributeTypeDescriptions' @in_policy_attribute_types = true end end def end_element(name) case name when 'member' if @in_policy_attribute_types @policy_type['PolicyAttributeTypeDescriptions'] << @policy_attribute_type_description reset_policy_attribute_type_description elsif !@in_policy_attribute_types @results['PolicyTypeDescriptions'] << @policy_type reset_policy_type end when 'Description' if @in_policy_attribute_types @policy_attribute_type_description[name] = value else @policy_type[name] = value end when 'PolicyTypeName' @policy_type[name] = value when 'PolicyAttributeTypeDescriptions' @in_policy_attribute_types = false when 'AttributeName', 'AttributeType', 'Cardinality', 'DefaultValue' @policy_attribute_type_description[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'DescribeLoadBalancerPolicyTypesResponse' @response['DescribeLoadBalancerPolicyTypesResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_load_balancers.rb000066400000000000000000000147431437344660100246410ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DescribeLoadBalancers < Fog::Parsers::Base def reset reset_load_balancer reset_listener_description reset_stickiness_policy reset_backend_server_description @results = { 'LoadBalancerDescriptions' => [] } @response = { 'DescribeLoadBalancersResult' => {}, 'ResponseMetadata' => {} } end def reset_load_balancer @load_balancer = { 'Subnets' => [], 'SecurityGroups' => [], 'ListenerDescriptions' => [], 'Instances' => [], 'AvailabilityZones' => [], 'Policies' => {'AppCookieStickinessPolicies' => [], 'LBCookieStickinessPolicies' => [], 'OtherPolicies' => []}, 'HealthCheck' => {}, 'SourceSecurityGroup' => {}, 'BackendServerDescriptions' => [] } end def reset_listener_description @listener_description = { 'PolicyNames' => [], 'Listener' => {} } end def reset_backend_server_description @backend_server_description = {} end def reset_stickiness_policy @stickiness_policy = {} end def start_element(name, attrs = []) super case name when 'ListenerDescriptions' @in_listeners = true when 'Instances' @in_instances = true when 'AvailabilityZones' @in_availability_zones = true when 'SecurityGroups' @in_security_groups = true when 'Subnets' @in_subnets = true when 'PolicyNames' @in_policy_names = true when 'Policies' @in_policies = true when 'LBCookieStickinessPolicies' @in_lb_cookies = true when 'AppCookieStickinessPolicies' @in_app_cookies = true when 'AppCookieStickinessPolicies' @in_app_cookies = true when 'OtherPolicies' @in_other_policies = true when 'BackendServerDescriptions' @in_backend_server_descriptions = true end end def end_element(name) case name when 'member' if @in_policy_names && @in_listeners @listener_description['PolicyNames'] << value elsif @in_availability_zones @load_balancer['AvailabilityZones'] << value elsif @in_security_groups @load_balancer['SecurityGroups'] << value elsif @in_subnets @load_balancer['Subnets'] << value elsif @in_listeners @load_balancer['ListenerDescriptions'] << 
@listener_description reset_listener_description elsif @in_app_cookies @load_balancer['Policies']['AppCookieStickinessPolicies'] << @stickiness_policy reset_stickiness_policy elsif @in_lb_cookies @load_balancer['Policies']['LBCookieStickinessPolicies'] << @stickiness_policy reset_stickiness_policy elsif @in_other_policies @load_balancer['Policies']['OtherPolicies'] << value elsif @in_backend_server_descriptions && @in_policy_names @backend_server_description['PolicyNames'] ||= [] @backend_server_description['PolicyNames'] << value elsif @in_backend_server_descriptions && !@in_policy_names @load_balancer['BackendServerDescriptions'] << @backend_server_description reset_backend_server_description elsif !@in_instances && !@in_policies && !@in_backend_server_descriptions @results['LoadBalancerDescriptions'] << @load_balancer reset_load_balancer end when 'BackendServerDescriptions' @in_backend_server_descriptions = false when 'InstancePort' if @in_backend_server_descriptions @backend_server_description[name] = value.to_i elsif @in_listeners @listener_description['Listener'][name] = value.to_i end when 'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'LoadBalancerName', 'DNSName', 'Scheme', 'Type', 'State', 'LoadBalancerArn', 'IpAddressType', 'CanonicalHostedZoneId' @load_balancer[name] = value when 'CreatedTime' @load_balancer[name] = Time.parse(value) when 'ListenerDescriptions' @in_listeners = false when 'PolicyNames' @in_policy_names = false when 'Protocol', 'SSLCertificateId', 'InstanceProtocol' @listener_description['Listener'][name] = value when 'LoadBalancerPort' @listener_description['Listener'][name] = value.to_i when 'Instances' @in_instances = false when 'InstanceId' @load_balancer['Instances'] << value when 'VPCId', 'VpcId' @load_balancer[name] = value when 'AvailabilityZones' @in_availability_zones = false when 'SecurityGroups' @in_security_groups = false when 'Subnets' @in_subnets = false when 'Policies' @in_policies = false when 'AppCookieStickinessPolicies' @in_app_cookies = false when 'LBCookieStickinessPolicies' @in_lb_cookies = false when 'OtherPolicies' @in_other_policies = false when 'OwnerAlias', 'GroupName' @load_balancer['SourceSecurityGroup'][name] = value when 'Interval', 'HealthyThreshold', 'Timeout', 'UnhealthyThreshold' @load_balancer['HealthCheck'][name] = value.to_i when 'Target' @load_balancer['HealthCheck'][name] = value when 'PolicyName', 'CookieName' @stickiness_policy[name] = value when 'CookieExpirationPeriod' @stickiness_policy[name] = value.to_i when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextMarker' @results['NextMarker'] = value when 'DescribeLoadBalancersResponse' @response['DescribeLoadBalancersResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/describe_tags.rb000066400000000000000000000011141437344660100226320ustar00rootroot00000000000000module Fog module Parsers module Compute module AWS class DescribeTags < Fog::Parsers::Base def reset @tag = {} @response = { 'tagSet' => [] } end def end_element(name) case name when 'resourceId', 'resourceType', 'key', 'value' @tag[name] = value when 'item' @response['tagSet'] << @tag @tag = {} when 'requestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/detach_load_balancer_from_subnets.rb000066400000000000000000000011231437344660100267000ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DetachLoadBalancerFromSubnets < Fog::Parsers::Base def reset @response = { 
'DetachLoadBalancerFromSubnetsResult' => { 'Subnets' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['DetachLoadBalancerFromSubnetsResult']['Subnets'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/disable_availability_zones_for_load_balancer.rb000066400000000000000000000012051437344660100311040ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class DisableAvailabilityZonesForLoadBalancer < Fog::Parsers::Base def reset @response = { 'DisableAvailabilityZonesForLoadBalancerResult' => { 'AvailabilityZones' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['DisableAvailabilityZonesForLoadBalancerResult']['AvailabilityZones'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/empty.rb000066400000000000000000000007371437344660100212040ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class Empty < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/enable_availability_zones_for_load_balancer.rb000066400000000000000000000012021437344660100307240ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class EnableAvailabilityZonesForLoadBalancer < Fog::Parsers::Base def reset @response = { 'EnableAvailabilityZonesForLoadBalancerResult' => { 'AvailabilityZones' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['EnableAvailabilityZonesForLoadBalancerResult']['AvailabilityZones'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/register_instances_with_load_balancer.rb000066400000000000000000000011611437344660100276120ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB class RegisterInstancesWithLoadBalancer < Fog::Parsers::Base def reset @response = { 'RegisterInstancesWithLoadBalancerResult' => { 'Instances' => [] }, 'ResponseMetadata' => {} } end def end_element(name) case name when 'InstanceId' @response['RegisterInstancesWithLoadBalancerResult']['Instances'] << {name => value} when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elb/tag_list_parser.rb000066400000000000000000000031771437344660100232310ustar00rootroot00000000000000module Fog module Parsers module AWS module ELB # parses an XML-formatted list of resource tags from AWS class TagListParser < Fog::Parsers::Base # each tag is modeled as a String pair (2-element Array) def reset @this_key = nil @this_value = nil @tags = Hash.new @response = { 'DescribeTagsResult' => { 'LoadBalancers' => [] }, 'ResponseMetadata' => {} } @in_tags = false end def start_element(name, attrs = []) super case name when 'member' unless @in_tags @load_balancer_name = nil @tags = {} end when 'Tags' @in_tags = true end end def end_element(name) super case name when 'member' if @in_tags @tags[@this_key] = @this_value @this_key, @this_value = nil, nil else @response['DescribeTagsResult']['LoadBalancers'] << { 'Tags' => @tags, 'LoadBalancerName' => @load_balancer_name } end when 
'Key' @this_key = value when 'Value' @this_value = value when 'LoadBalancerName' @load_balancer_name = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'Tags' @in_tags = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/000077500000000000000000000000001437344660100177625ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/create_load_balancer.rb000066400000000000000000000057151437344660100244100ustar00rootroot00000000000000module Fog module Parsers module AWS module ELBV2 class CreateLoadBalancer < Fog::Parsers::Base def reset reset_load_balancer reset_availability_zone @load_balancer_addresses = {} @state = {} @results = { 'LoadBalancers' => [] } @response = { 'CreateLoadBalancerResult' => {}, 'ResponseMetadata' => {} } end def reset_load_balancer @load_balancer = { 'SecurityGroups' => [], 'AvailabilityZones' => [] } end def reset_availability_zone @availability_zone = { 'LoadBalancerAddresses' => [] } end def start_element(name, attrs = []) super case name when 'AvailabilityZones' @in_availability_zones = true when 'LoadBalancerAddresses' @in_load_balancer_addresses = true when 'SecurityGroups' @in_security_groups = true when 'State' @in_state = true end end def end_element(name) case name when 'member' if @in_availability_zones && @in_load_balancer_addresses @availability_zone['LoadBalancerAddresses'] << @load_balancer_addresses elsif @in_availability_zones @load_balancer['AvailabilityZones'] << @availability_zone reset_availability_zone elsif @in_security_groups @load_balancer['SecurityGroups'] << value else @results['LoadBalancers'] << @load_balancer reset_load_balancer end when 'SubnetId', 'ZoneName' @availability_zone[name] = value when 'IpAddress', 'AllocationId' @load_balancer_addresses[name] = value when 'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'LoadBalancerName', 'DNSName', 'Scheme', 'Type', 'LoadBalancerArn', 'IpAddressType', 'CanonicalHostedZoneId', 'VpcId' @load_balancer[name] = value when 'CreatedTime' @load_balancer[name] = Time.parse(value) when 'LoadBalancerAddresses' @in_load_balancer_addresses = false when 'AvailabilityZones' @in_availability_zones = false when 'SecurityGroups' @in_security_groups = false when 'State' @in_state = false @load_balancer[name] = @state @state = {} when 'Code' @state[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextMarker' @results['NextMarker'] = value when 'CreateLoadBalancerResponse' @response['CreateLoadBalancerResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/describe_listeners.rb000066400000000000000000000073621437344660100241670ustar00rootroot00000000000000module Fog module Parsers module AWS module ELBV2 class DescribeListeners < Fog::Parsers::Base def reset reset_listener @default_action = {} @certificate = {} @config = {} @target_groups = [] @target_group = {} @target_group_stickiness_config = {} @results = { 'Listeners' => [] } @response = { 'DescribeListenersResult' => {}, 'ResponseMetadata' => {} } end def reset_listener @listener= { 'DefaultActions' => [], 'Certificates' => [] } end def start_element(name, attrs = []) super case name when 'DefaultActions' @in_default_actions = true when 'Certificates' @in_certificates = true when 'TargetGroups' @in_target_groups = true when 'TargetGroupStickinessConfig' @in_target_group_stickiness_config = true end end def end_element(name) if @in_default_actions case name when 'member' if @in_target_groups @target_groups << @target_group 
@target_group = {} else @listener['DefaultActions'] << @default_action @default_action = {} end when 'TargetGroupArn' if @in_target_groups @target_group[name] = value else @default_action[name] = value end when 'Weight' @target_group[name] = value when 'Type', 'Order' @default_action[name] = value when 'Path', 'Protocol', 'Port', 'Query', 'Host', 'StatusCode', 'ContentType', 'MessageBody', 'StatusCode' @config[name] = value when 'RedirectConfig', 'ForwardConfig', 'FixedResponseConfig' @default_action[name] = @config @config = {} when 'DurationSeconds', 'Enabled' @target_group_stickiness_config[name] = value when 'DefaultActions' @in_default_actions = false when 'TargetGroupStickinessConfig' if @in_target_group_stickiness_config @config['TargetGroupStickinessConfig'] = @target_group_stickiness_config @in_target_group_stickiness_config = false @target_group_stickiness_config = {} end when 'TargetGroups' @config['TargetGroups'] = @target_groups @in_target_groups = false @target_groups = [] end else case name when 'member' if @in_certificates @listener['Certificates'] << @certificate @certificate = {} else @results['Listeners'] << @listener reset_listener end when 'LoadBalancerArn', 'Protocol', 'Port', 'ListenerArn', 'SslPolicy' @listener[name] = value when 'CertificateArn' @certificate[name] = value when 'Certificates' @in_certificates = false when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextMarker' @results['NextMarker'] = value when 'DescribeListenersResponse' @response['DescribeListenersResult'] = @results end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/describe_load_balancers.rb000066400000000000000000000057311437344660100251060ustar00rootroot00000000000000module Fog module Parsers module AWS module ELBV2 class DescribeLoadBalancers < Fog::Parsers::Base def reset reset_load_balancer reset_availability_zone @load_balancer_addresses = {} @state = {} @results = { 'LoadBalancers' => [] } @response = { 'DescribeLoadBalancersResult' => {}, 'ResponseMetadata' => {} } end def reset_load_balancer @load_balancer = { 'SecurityGroups' => [], 'AvailabilityZones' => [] } end def reset_availability_zone @availability_zone = { 'LoadBalancerAddresses' => [] } end def start_element(name, attrs = []) super case name when 'AvailabilityZones' @in_availability_zones = true when 'LoadBalancerAddresses' @in_load_balancer_addresses = true when 'SecurityGroups' @in_security_groups = true when 'State' @in_state = true end end def end_element(name) case name when 'member' if @in_availability_zones && @in_load_balancer_addresses @availability_zone['LoadBalancerAddresses'] << @load_balancer_addresses elsif @in_availability_zones @load_balancer['AvailabilityZones'] << @availability_zone reset_availability_zone elsif @in_security_groups @load_balancer['SecurityGroups'] << value else @results['LoadBalancers'] << @load_balancer reset_load_balancer end when 'SubnetId', 'ZoneName' @availability_zone[name] = value when 'IpAddress', 'AllocationId' @load_balancer_addresses[name] = value when 'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'LoadBalancerName', 'DNSName', 'Scheme', 'Type', 'LoadBalancerArn', 'IpAddressType', 'CanonicalHostedZoneId', 'VpcId' @load_balancer[name] = value when 'CreatedTime' @load_balancer[name] = Time.parse(value) when 'LoadBalancerAddresses' @in_load_balancer_addresses = false when 'AvailabilityZones' @in_availability_zones = false when 'SecurityGroups' @in_security_groups = false when 'State' @in_state = false @load_balancer[name] = @state 
@state = {} when 'Code' @state[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'NextMarker' @results['NextMarker'] = value when 'DescribeLoadBalancersResponse' @response['DescribeLoadBalancersResult'] = @results end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/describe_tags.rb000066400000000000000000000027421437344660100231120ustar00rootroot00000000000000module Fog module Parsers module AWS module ELBV2 class DescribeTags < Fog::Parsers::Base def reset @this_key = nil @this_value = nil @tags = Hash.new @response = { 'DescribeTagsResult' => { 'TagDescriptions' => [] }, 'ResponseMetadata' => {} } @in_tags = false end def start_element(name, attrs = []) super case name when 'member' unless @in_tags @resource_arn = nil @tags = {} end when 'Tags' @in_tags = true end end def end_element(name) super case name when 'member' if @in_tags @tags[@this_key] = @this_value @this_key, @this_value = nil, nil else @response['DescribeTagsResult']['TagDescriptions'] << { 'Tags' => @tags, 'ResourceArn' => @resource_arn } end when 'Key' @this_key = value when 'Value' @this_value = value when 'ResourceArn' @resource_arn = value when 'RequestId' @response['ResponseMetadata'][name] = value when 'Tags' @in_tags = false end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/elbv2/empty.rb000066400000000000000000000002071437344660100214440ustar00rootroot00000000000000module Fog module Parsers module AWS module ELBV2 class Empty < ELB::Empty end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/000077500000000000000000000000001437344660100175335ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/emr/add_instance_groups.rb000066400000000000000000000011111437344660100240650ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class AddInstanceGroups < Fog::Parsers::Base def start_element(name, attrs = []) super case name when 'InstanceGroupIds' @response['InstanceGroupIds'] = [] end end def end_element(name) case name when 'JobFlowId' @response[name] = value when 'member' @response['InstanceGroupIds'] << value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/add_job_flow_steps.rb000066400000000000000000000004561437344660100237140ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class AddJobFlowSteps < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/describe_job_flows.rb000066400000000000000000000114641437344660100237120ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class DescribeJobFlows < Fog::Parsers::Base def reset @context = [] @contexts = ['BootstrapActions', 'ExecutionStatusDetail', 'Instances', 'Steps', 'InstanceGroups', 'Args'] @response = { 'JobFlows' => [] } @bootstrap_actions = {'ScriptBootstrapActionConfig' => {'Args' => []}} @instance = { 'InstanceGroups' => [], 'Placement' => {}} @step = { 'ExecutionStatusDetail' => {}, 'StepConfig' => { 'HadoopJarStepConfig' => { 'Args' => [], 'Properties' => [] } } } @flow = {'Instances' => [], 'ExecutionStatusDetail' => {}, 'BootstrapActions' => [], 'Steps' => []} @instance_group_detail = {} @execution_status_detail = {} end def start_element(name, attrs = []) super if @contexts.include?(name) @context.push(name) end end def end_element(name) if @context.last == 'BootstrapActions' case name when 'Name' @bootstrap_actions[name] = value when 'Path' 
@bootstrap_actions['ScriptBootstrapActionConfig'][name] = value when 'BootstrapActions' @flow['BootstrapActions'] = @bootstrap_actions @bootstrap_actions = {'ScriptBootstrapActionConfig' => {'Args' => []}} end end if @context.last == 'ExecutionStatusDetail' case name when 'CreationDateTime', 'EndDateTime', 'LastStateChangeReason', 'ReadyDateTime', 'StartDateTime', 'State' @execution_status_detail[name] = value when 'ExecutionStatusDetail' if @context.include?('Steps') @step['ExecutionStatusDetail'] = @execution_status_detail else @flow['ExecutionStatusDetail'] = @execution_status_detail end @execution_status_detail = {} end end if @context.last == 'Instances' case name when 'AvailabilityZone' @instance['Placement'][name] = value when 'Ec2KeyName', 'HadoopVersion', 'InstanceCount', 'KeepJobFlowAliveWhenNoSteps', 'MasterInstanceId', 'MasterInstanceType', 'MasterPublicDnsName', 'NormalizedInstanceHours', 'SlaveInstanceType', 'TerminationProtected' @instance[name] = value when 'member' @instance['InstanceGroups'] << @instance_group_detail @instance_group_detail = {} when 'Instances' @flow['Instances'] = @instance @instance = { 'InstanceGroups' => [], 'Placement' => {}} end end if @context.last == 'InstanceGroups' case name when 'member' @instance['InstanceGroups'] << @instance_group_detail @instance_group_detail = {} else @instance_group_detail[name] = value end end if @context.last == 'Args' if name == 'member' if @context.include?('Steps') @step['StepConfig']['HadoopJarStepConfig']['Args'] << value.strip else @bootstrap_actions['ScriptBootstrapActionConfig']['Args'] << value end end end if @context.last == 'Steps' case name when 'ActionOnFailure', 'Name' @step[name] = value when 'Jar', 'MainClass' @step['StepConfig']['HadoopJarStepConfig'][name] = value when 'member' @flow['Steps'] << @step @step = { 'ExecutionStatusDetail' => {}, 'StepConfig' => { 'HadoopJarStepConfig' => { 'Args' => [], 'Properties' => [] } } } end end if @context.empty? 
case name when 'AmiVersion', 'JobFlowId', 'LogUri', 'Name' @flow[name] = value when 'member' @response['JobFlows'] << @flow @flow = {'Instances' => [], 'ExecutionStatusDetail' => {}, 'BootstrapActions' => [], 'Steps' => []} end end if @context.last == name @context.pop end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/modify_instance_groups.rb000066400000000000000000000004631437344660100246350ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class ModifyInstanceGroups < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/run_job_flow.rb000066400000000000000000000005541437344660100225510ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class RunJobFlow < Fog::Parsers::Base def end_element(name) case name when 'JobFlowId' @response[name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/set_termination_protection.rb000066400000000000000000000004671437344660100255410ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class SetTerminationProtection < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/emr/terminate_job_flows.rb000066400000000000000000000004601437344660100241140ustar00rootroot00000000000000module Fog module Parsers module AWS module EMR class TerminateJobFlows < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/000077500000000000000000000000001437344660100175165ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/iam/base_instance_profile.rb000066400000000000000000000042511437344660100243630ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class BaseInstanceProfile < Fog::Parsers::Base def reset super @stack = [] end def start_element(name,attrs = []) super case name when 'InstanceProfile' @instance_profile = {'Roles' =>[]} when 'InstanceProfiles' @stack << 'InstanceProfiles' when 'Roles' @stack << 'Role' when 'member' case @stack.last when 'InstanceProfiles' @instance_profile = {'Roles' =>[]} when 'Roles' if @instance_profile @role = {} end end end end def end_element(name) if @instance_profile case name when 'Arn', 'Path' if @role @role[name] = value else @instance_profile[name] = value end when 'AssumeRolePolicyDocument', 'RoleId','RoleName' @role[name] = value if @role when 'CreateDate' if @role @role[name] = Time.parse(value) else @instance_profile[name] = Time.parse(value) end when 'member' case @stack.last when 'InstanceProfiles' finished_instance_profile(@instance_profile) @instance_profile = nil when 'Roles' if @instance_profile @instance_profile['Roles'] << @role @role = nil end end when 'InstanceProfiles', 'Roles' @stack.pop when 'InstanceProfile' finished_instance_profile(@instance_profile) @instance_profile = nil when 'InstanceProfileName', 'InstanceProfileId' @instance_profile[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/basic.rb000066400000000000000000000004441437344660100211260ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class Basic < Fog::Parsers::Base def end_element(name) case name when 'RequestId' @response[name] = value end end end end end end end 
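# A minimal, hand-driven usage sketch (illustrative only; the request name and
# XML string below are hypothetical). These parsers are SAX handlers built on
# Fog::Parsers::Base — assumed here to be the Nokogiri::XML::SAX::Document
# subclass shipped via fog-core/fog-xml — so one can be exercised directly:
#
#   require 'fog/aws'
#   require 'nokogiri'
#
#   parser = Fog::Parsers::AWS::IAM::Basic.new
#   xml = '<DeleteUserResponse>' \
#         '<ResponseMetadata><RequestId>abc-123</RequestId></ResponseMetadata>' \
#         '</DeleteUserResponse>'
#   Nokogiri::XML::SAX::Parser.new(parser).parse(xml)
#   parser.response # => { 'RequestId' => 'abc-123' }
#
# In normal use the request methods are expected to hand the parser to the HTTP
# layer, which streams the response body through it and surfaces the
# accumulated hash as response.body.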
fog-aws-3.18.0/lib/fog/aws/parsers/iam/create_access_key.rb000066400000000000000000000007721437344660100235050ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class CreateAccessKey < Fog::Parsers::Base def reset @response = { 'AccessKey' => {} } end def end_element(name) case name when 'AccessKeyId', 'UserName', 'SecretAccessKey', 'Status' @response['AccessKey'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/create_group.rb000066400000000000000000000007351437344660100225270ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class CreateGroup < Fog::Parsers::Base def reset @response = { 'Group' => {} } end def end_element(name) case name when 'Arn', 'GroupId', 'GroupName', 'Path' @response['Group'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/create_user.rb000066400000000000000000000007301437344660100223440ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class CreateUser < Fog::Parsers::Base def reset @response = { 'User' => {} } end def end_element(name) case name when 'Arn', 'UserId', 'UserName', 'Path' @response['User'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_account_password_policy.rb000066400000000000000000000015351437344660100256430ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetAccountPolicyPolicy < Fog::Parsers::Base def reset @response = {'AccountPasswordPolicy' => {}} end def end_element(name) case name when 'MinimumPasswordLength', 'MaxPasswordAge','PasswordReusePrevention' #boolean values @response['AccountPasswordPolicy'][name] = !!value when 'RequireSymbols','RequireNumbers','RequireUppercaseCharacters','RequireLowercaseCharacters','AllowUsersToChangePassword','HardExpiry','ExpirePasswords' #integer values @response['AccountPasswordPolicy'][name] = value.to_i when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_account_summary.rb000066400000000000000000000017271437344660100241220ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetAccountSummary < Fog::Parsers::Base def reset super @stack = [] @response = {'Summary' => {}} end def start_element(name, attrs = []) super case name when 'SummaryMap' @stack << name end end def end_element(name) case name when 'SummaryMap' @stack.pop when 'key' if @stack.last == 'SummaryMap' @key = value end when 'value' if @stack.last == 'SummaryMap' @response['Summary'][@key] = value.strip.to_i end when 'RequestId' if @stack.empty? 
@response['RequestId'] = value.strip end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_group.rb000066400000000000000000000025501437344660100220400ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetGroup < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_GetGroup.html def reset @user = {} @response = { 'Group' => {}, 'Users' => [] } end def start_element(name, attrs = []) super case name when 'Group' @in_group = true when 'Users' @in_users = true end end def end_element(name) case name when 'Arn', 'Path' if @in_group @response['Group'][name] = value elsif @in_users @user[name] = value end when 'Group' @in_group = false when 'GroupName', 'GroupId' @response['Group'][name] = value when 'Users' @in_users = false when 'UserId', 'UserName' @user[name] = value when 'member' @response['Users'] << @user @user = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_group_policy.rb000066400000000000000000000015571437344660100234250ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetGroupPolicy < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetGroupPolicy.html def reset @response = { 'Policy' => {} } end def end_element(name) case name when 'GroupName', 'PolicyName' @response[name] = value when 'PolicyDocument' @response['Policy'][name] = if decoded_string = URI.decode_www_form_component(value) Fog::JSON.decode(decoded_string) rescue value else value end when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_role_policy.rb000066400000000000000000000014301437344660100232200ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetRolePolicy < Fog::Parsers::Base def reset @response = {'Policy' => {}} end def end_element(name) case name when 'RoleName', 'PolicyName' @response['Policy'][name] = value when 'PolicyDocument' @response['Policy'][name] = if decoded_string = URI.decode_www_form_component(value) Fog::JSON.decode(decoded_string) rescue value else value end when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_user.rb000066400000000000000000000012031437344660100216540ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetUser < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetUser.html def reset @response = { 'User' => {} } end def end_element(name) case name when 'Arn', 'UserId', 'UserName', 'Path' @response['User'][name] = value when 'CreateDate' @response['User'][name] = Time.parse(value) when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/get_user_policy.rb000066400000000000000000000015661437344660100232470ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class GetUserPolicy < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetUserPolicy.html def reset @response = { 'Policy' => {} } end def end_element(name) case name when 'UserName', 'PolicyName' @response['Policy'][name] = value when 'PolicyDocument' @response['Policy'][name] = if decoded_string = URI.decode_www_form_component(value) Fog::JSON.decode(decoded_string) rescue value else value end when 'RequestId' 
@response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/instance_profile.rb000066400000000000000000000011311437344660100233630ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM require 'fog/aws/parsers/iam/base_instance_profile' class InstanceProfile < Fog::Parsers::AWS::IAM::BaseInstanceProfile def reset super @response = {} end def finished_instance_profile(profile) @response['InstanceProfile'] = profile end def end_element(name) case name when 'RequestId' @response[name] = value end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_access_keys.rb000066400000000000000000000013001437344660100233640ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListAccessKeys < Fog::Parsers::Base def reset @access_key = {} @response = { 'AccessKeys' => [] } end def end_element(name) case name when 'AccessKeyId', 'Status', 'UserName' @access_key[name] = value when 'member' @response['AccessKeys'] << @access_key @access_key = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_account_aliases.rb000066400000000000000000000010601437344660100242300ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListAccountAliases < Fog::Parsers::Base def reset @response = { 'AccountAliases' => [] } end def end_element(name) case name when 'member' @response['AccountAliases'] << @value when 'IsTruncated' response[name] = (@value == 'true') when 'Marker', 'RequestId' response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_groups.rb000066400000000000000000000012421437344660100224140ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListGroups < Fog::Parsers::Base def reset @group = {} @response = { 'Groups' => [] } end def end_element(name) case name when 'Arn', 'GroupId', 'GroupName', 'Path' @group[name] = value when 'member' @response['Groups'] << @group @group = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_groups_for_user.rb000066400000000000000000000013331437344660100243210ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListGroupsForUser < Fog::Parsers::Base def reset @group_for_user = {} @response = { 'GroupsForUser' => [] } end def end_element(name) case name when 'Path', 'GroupName', 'GroupId', 'Arn' @group_for_user[name] = value when 'member' @response['GroupsForUser'] << @group_for_user @group_for_user = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_instance_profiles.rb000066400000000000000000000013221437344660100246030ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM require 'fog/aws/parsers/iam/base_instance_profile' class ListInstanceProfiles < Fog::Parsers::AWS::IAM::BaseInstanceProfile def reset super @response = {'InstanceProfiles' => []} end def finished_instance_profile(profile) @response['InstanceProfiles'] << profile end def end_element(name) case name when 'RequestId', 'Marker' @response[name] = value when 'IsTruncated' @response[name] = (value == 'true') end super end end end end end end 
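# Illustrative note: together with BaseInstanceProfile (above), this parser
# builds a response hash roughly shaped like the following — field values are
# hypothetical, keys come from the handlers themselves:
#
#   {
#     'InstanceProfiles' => [
#       {
#         'InstanceProfileName' => 'my-profile',
#         'InstanceProfileId'   => 'AIPAEXAMPLEID',
#         'Arn'                 => 'arn:aws:iam::123456789012:instance-profile/my-profile',
#         'Path'                => '/',
#         'CreateDate'          => Time.parse('2023-01-01T00:00:00Z'),
#         'Roles'               => [{ 'RoleName' => 'my-role', 'RoleId' => 'AROAEXAMPLEID' }]
#       }
#     ],
#     'IsTruncated' => false,
#     'RequestId'   => 'abc-123'
#   }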
fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_managed_policies.rb000066400000000000000000000027041437344660100243640ustar00rootroot00000000000000require 'fog/aws/parsers/iam/policy_parser' module Fog module Parsers module AWS module IAM class ListManagedPolicies < Fog::Parsers::AWS::IAM::PolicyParser def reset super @response = { 'Policies' => [] , 'Marker' => '', 'IsTruncated' => false} end def finished_policy(policy) @response['Policies'] << policy end def start_element(name,attrs = []) case name when 'AttachedPolicies' @stack << name when 'AttachedPolicy' @policy = fresh_policy when 'member' if @stack.last == 'AttachedPolicies' @policy = fresh_policy end end super end def end_element(name) case name when 'RequestId', 'Marker' @response[name] = value when 'IsTruncated' @response[name] = (value == 'true') when 'PolicyArn', 'PolicyName' @policy[name] = value when 'AttachedPolicies' if @stack.last == 'AttachedPolicies' @stack.pop end when 'member' if @stack.last == 'AttachedPolicies' finished_policy(@policy) @policy = nil end end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_mfa_devices.rb000066400000000000000000000014111437344660100233400ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListMFADevices < Fog::Parsers::Base def reset @mfa_device = {} @response = { 'MFADevices' => [] } end def end_element(name) case name when 'SerialNumber', 'UserName' @mfa_device[name] = value when 'EnableDate' @mfa_device[name] = Time.parse(value) when 'member' @response['MFADevices'] << @mfa_device @mfa_device = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_policies.rb000066400000000000000000000010411437344660100227010ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListPolicies < Fog::Parsers::Base def reset @response = { 'PolicyNames' => [] } end def end_element(name) case name when 'member' @response['PolicyNames'] << value when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_policy_versions.rb000066400000000000000000000032171437344660100243300ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListPolicyVersions < Fog::Parsers::Base def reset super @stack = [] @response = { 'Versions' => [], 'Marker' => '', 'IsTruncated' => false } end def start_element(name,attrs = []) case name when 'Versions' @stack << name when 'member' if @stack.last == 'Versions' @version = {} end end super end def end_element(name) case name when 'member' @response['Versions'] << @version @version = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' @response[name] = value end super end def end_element(name) case name when 'VersionId' @version[name] = value when 'CreateDate' @version[name] = Time.parse(value) when 'IsDefaultVersion' @version[name] = (value == 'true') when 'Versions' if @stack.last == 'Versions' @stack.pop end when 'member' if @stack.last == 'Versions' finished_version(@version) @version = nil end end end def finished_version(version) @response['Versions'] << version end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_roles.rb000066400000000000000000000012161437344660100222220ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM require 'fog/aws/parsers/iam/role_parser' 
class ListRoles < Fog::Parsers::AWS::IAM::RoleParser def reset super @response = { 'Roles' => [] } end def finished_role(role) @response['Roles'] << role end def end_element(name) case name when 'RequestId', 'Marker' @response[name] = value when 'IsTruncated' @response[name] = (value == 'true') end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_server_certificates.rb000066400000000000000000000015661437344660100251410ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListServerCertificates < Fog::Parsers::Base def reset @response = { 'Certificates' => [] } reset_certificate end def reset_certificate @certificate = {} end def end_element(name) case name when 'Arn', 'Path', 'ServerCertificateId', 'ServerCertificateName' @certificate[name] = value when 'UploadDate' @certificate[name] = Time.parse(value) when 'member' @response['Certificates'] << @certificate reset_certificate when 'IsTrunctated' @response[name] = !!value when 'Marker' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_signing_certificates.rb000066400000000000000000000014241437344660100252620ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListSigningCertificates < Fog::Parsers::Base def reset @signing_certificate = {} @response = { 'SigningCertificates' => [] } end def end_element(name) case name when 'UserName', 'CertificateId', 'CertificateBody', 'Status' @signing_certificate[name] = value when 'member' @response['SigningCertificates'] << @signing_certificate @signing_certificate = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/list_users.rb000066400000000000000000000013451437344660100222420ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class ListUsers < Fog::Parsers::Base def reset @user = {} @response = { 'Users' => [] } end def end_element(name) case name when 'Arn', 'UserId', 'UserName', 'Path' @user[name] = value when 'CreateDate' @user[name] = Time.parse(value) when 'member' @response['Users'] << @user @user = {} when 'IsTruncated' response[name] = (value == 'true') when 'Marker', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/login_profile.rb000066400000000000000000000010771437344660100227000ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class LoginProfile < Fog::Parsers::Base def reset @response = { 'LoginProfile' => {} } end def end_element(name) case name when 'UserName' @response['LoginProfile']['UserName'] = value when 'CreateDate' @response['LoginProfile']['CreateDate'] = Time.parse(value) when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/policy_parser.rb000066400000000000000000000027731437344660100227270ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class PolicyParser < Fog::Parsers::Base def reset @policy = fresh_policy @stack = [] end def start_element(name,attrs = []) case name when 'Policies' @stack << name when 'Policy' @policy = fresh_policy when 'member' if @stack.last == 'Policies' @policy = fresh_policy end end super end def fresh_policy {'AttachmentCount' => 0, 'Description' => ''} end def end_element(name) case name when 'Arn', 'DefaultVersionId', 'Description', 'Path', 'PolicyName', 'PolicyId' @policy[name] = value when 'CreateDate', 
'UpdateDate' @policy[name] = Time.parse(value) when 'IsAttachable' @policy[name] = (value == 'true') when 'AttachmentCount' @policy[name] = value.to_i when 'Policy' finished_policy(@policy) @policy = nil when 'Policies' if @stack.last == 'Policies' @stack.pop end when 'member' if @stack.last == 'Policies' finished_policy(@policy) @policy = nil end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/policy_version.rb000066400000000000000000000016151437344660100231120ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class PolicyVersion < Fog::Parsers::Base def reset super @version = {} @response = { 'PolicyVersion' => @version } end def end_element(name) case name when 'RequestId' @response[name] = value when 'VersionId' @version[name] = value when 'IsDefaultVersion' @version[name] = (value == 'true') when 'Document' @version[name] = if decoded_string = URI.decode_www_form_component(value) Fog::JSON.decode(decoded_string) rescue value else value end end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/role_parser.rb000066400000000000000000000022521437344660100223610ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class RoleParser < Fog::Parsers::Base def reset @role = {} @stack = [] end def start_element(name,attrs = []) case name when 'Roles' @stack << name when 'Role' @role = {} when 'member' if @stack.last == 'Roles' @role = {} end end super end def end_element(name) case name when 'Arn', 'AssumeRolePolicyDocument', 'Path', 'RoleId','RoleName' @role[name] = value if @role when 'CreateDate' @role[name] = Time.parse(value) if @role when 'Role' finished_role(@role) @role = nil when 'Roles' if @stack.last == 'Roles' @stack.pop end when 'member' if @stack.last == 'Roles' finished_role(@role) @role = nil end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/single_policy.rb000066400000000000000000000011011437344660100226740ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM require 'fog/aws/parsers/iam/policy_parser' class SinglePolicy < Fog::Parsers::AWS::IAM::PolicyParser def reset super @response = { 'Policy' => {} } end def finished_policy(policy) @response['Policy'] = policy end def end_element(name) case name when 'RequestId' @response[name] = value end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/single_role.rb000066400000000000000000000010611437344660100223430ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM require 'fog/aws/parsers/iam/role_parser' class SingleRole < Fog::Parsers::AWS::IAM::RoleParser def reset super @response = { 'Role' => {} } end def finished_role(role) @response['Role'] = role end def end_element(name) case name when 'RequestId' @response[name] = value end super end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/update_group.rb000066400000000000000000000011011437344660100225320ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class UpdateGroup < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateGroup.html def reset @response = { 'Group' => {} } end def end_element(name) case name when 'Arn', 'GroupId', 'GroupName', 'Path' @response['Group'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/update_user.rb000066400000000000000000000010741437344660100223650ustar00rootroot00000000000000module Fog module Parsers module AWS module 
IAM class UpdateUser < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateUser.html def reset @response = { 'User' => {} } end def end_element(name) case name when 'Arn', 'UserId', 'UserName', 'Path' @response['User'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/upload_server_certificate.rb000066400000000000000000000012231437344660100252550ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class UploadServerCertificate < Fog::Parsers::Base def reset @response = { 'Certificate' => {} } end def end_element(name) case name when 'Arn', 'Path', 'ServerCertificateId', 'ServerCertificateName', 'CertificateBody', 'CertificateChain' @response['Certificate'][name] = value when 'UploadDate' @response['Certificate'][name] = Time.parse(value) when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/iam/upload_signing_certificate.rb000066400000000000000000000010111437344660100254000ustar00rootroot00000000000000module Fog module Parsers module AWS module IAM class UploadSigningCertificate < Fog::Parsers::Base def reset @response = { 'Certificate' => {} } end def end_element(name) case name when 'CertificateId', 'UserName', 'CertificateBody', 'Status' @response['Certificate'][name] = value when 'RequestId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/kms/000077500000000000000000000000001437344660100175425ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/kms/describe_key.rb000066400000000000000000000014641437344660100225240ustar00rootroot00000000000000module Fog module Parsers module AWS module KMS class DescribeKey < Fog::Parsers::Base def reset @response = { 'KeyMetadata' => {} } end def start_element(name, attrs = []) super case name when 'KeyMetadata' @key = {} end end def end_element(name) case name when 'KeyUsage', 'AWSAccountId', 'Description', 'KeyId', 'Arn' @key[name] = value when 'CreationDate' @key[name] = Time.parse(value) when 'Enabled' @key[name] = (value == 'true') when 'KeyMetadata' @response['KeyMetadata'] = @key end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/kms/list_keys.rb000066400000000000000000000015401437344660100220750ustar00rootroot00000000000000module Fog module Parsers module AWS module KMS class ListKeys < Fog::Parsers::Base def reset @response = { 'Keys' => [] } end def start_element(name, attrs = []) super case name when 'Keys' @keys = [] when 'member' @key = {} end end def end_element(name) case name when 'KeyId', 'KeyArn' @key[name] = value when 'member' @keys << @key when 'Keys' @response['Keys'] = @keys when 'Truncated' @response['Truncated'] = (value == 'true') when 'NextMarker' @response['Marker'] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/lambda/000077500000000000000000000000001437344660100201705ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/lambda/base.rb000066400000000000000000000015751437344660100214370ustar00rootroot00000000000000module Fog module AWS module Parsers module Lambda class Base def process(body) body.inject({}) { |h, (k, v)| h[k] = rules(k, v); h } end private def rules(key, value) case value when Hash process(value) when Array value.map { |i| process(i) } else case key when 'LastModified' Time.parse(value) when 'Policy', 'Statement' begin Fog::JSON.decode(value) rescue Fog::JSON::DecodeError => e Fog::Logger.warning("Error parsing response 
json - #{e}") {} end else value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/000077500000000000000000000000001437344660100175405ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/rds/authorize_db_security_group_ingress.rb000066400000000000000000000015701437344660100274440ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/security_group_parser' class AuthorizeDBSecurityGroupIngress < Fog::Parsers::AWS::RDS::SecurityGroupParser def reset @response = { 'AuthorizeDBSecurityGroupIngressResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSecurityGroup' then @response['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup'] = @security_group @security_group = fresh_security_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/base.rb000066400000000000000000000011131437344660100207730ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS # Base parser for ResponseMetadata, RequestId class Base < Fog::Parsers::Base def reset super @response = { 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/copy_db_snapshot.rb000066400000000000000000000014441437344660100234260ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class CopyDBSnapshot < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'CopyDBSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSnapshot' then @response['CopyDBSnapshotResult']['DBSnapshot'] = @db_snapshot @db_snapshot = fresh_snapshot when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_cluster.rb000066400000000000000000000014401437344660100235350ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_parser' class CreateDBCluster < Fog::Parsers::AWS::RDS::DbClusterParser def reset @response = { 'CreateDBClusterResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBCluster' @response['CreateDBClusterResult']['DBCluster'] = @db_cluster @db_cluster = fresh_cluster when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_cluster_snapshot.rb000066400000000000000000000015551437344660100254630ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_snapshot_parser' class CreateDBClusterSnapshot < Fog::Parsers::AWS::RDS::DBClusterSnapshotParser def reset @response = { 'CreateDBClusterSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBClusterSnapshot' @response['CreateDBClusterSnapshotResult']['DBClusterSnapshot'] = @db_cluster_snapshot @db_cluster_snapshot = fresh_snapshot when 'RequestId' @response['ResponseMetadata'][name] = value else super end end 
end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_instance.rb000066400000000000000000000014311437344660100236600ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class CreateDBInstance < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'CreateDBInstanceResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['CreateDBInstanceResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_instance_read_replica.rb000066400000000000000000000014721437344660100263570ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class CreateDBInstanceReadReplica < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'CreateDBInstanceReadReplicaResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['CreateDBInstanceReadReplicaResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_parameter_group.rb000066400000000000000000000017601437344660100252550ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class CreateDbParameterGroup < Fog::Parsers::Base def reset @response = { 'CreateDBParameterGroupResult' => {}, 'ResponseMetadata' => {} } @db_parameter_group = {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBParameterGroupFamily' @db_parameter_group['DBParameterGroupFamily'] = value when 'Description' @db_parameter_group['Description'] = value when 'DBParameterGroupName' @db_parameter_group['DBParameterGroupName'] = value when 'DBParameterGroup' @response['CreateDBParameterGroupResult']['DBParameterGroup']= @db_parameter_group when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_security_group.rb000066400000000000000000000015321437344660100251410ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/security_group_parser' class CreateDBSecurityGroup < Fog::Parsers::AWS::RDS::SecurityGroupParser def reset @response = { 'CreateDBSecurityGroupResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSecurityGroup' then @response['CreateDBSecurityGroupResult']['DBSecurityGroup'] = @security_group @security_group = fresh_security_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_snapshot.rb000066400000000000000000000014521437344660100237160ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class CreateDBSnapshot < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'CreateDBSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSnapshot' then @response['CreateDBSnapshotResult']['DBSnapshot'] = 
@db_snapshot @db_snapshot = fresh_snapshot when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_db_subnet_group.rb000066400000000000000000000015141437344660100245720ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/subnet_group_parser' class CreateDBSubnetGroup < Fog::Parsers::AWS::RDS::SubnetGroupParser def reset @response = { 'CreateDBSubnetGroupResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSubnetGroup' then @response['CreateDBSubnetGroupResult']['DBSubnetGroup'] = @db_subnet_group @db_subnet_group = fresh_subnet_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/create_event_subscription.rb000066400000000000000000000015331437344660100253370ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/event_subscription_parser' class CreateEventSubscription < Fog::Parsers::AWS::RDS::EventSubscriptionParser def reset @response = { 'CreateEventSubscriptionResult' => {}, 'ResponseMetadata' => {} } @event_subscription = {} super end def start_element(name, attrs = []) super end def end_element(name) case name when 'EventSubscription' @response['CreateEventSubscriptionResult']['EventSubscription'] = @event_subscription when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/db_cluster_parser.rb000066400000000000000000000050251437344660100235710ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DbClusterParser < Fog::Parsers::Base def reset @db_cluster = fresh_cluster end def fresh_cluster {'AvailabilityZones' => [], 'VpcSecurityGroups' => []} end def start_element(name, attrs=[]) super case name when 'AvailabilityZones' @in_availability_zones = true when 'DBClusterMembers' @in_db_cluster_members = true @db_cluster_members = [] when 'DBClusterMember' @db_cluster_member = {} when 'VpcSecurityGroupMembership' @vpc_security_group = {} when 'VpcSecurityGroups' @in_vpc_security_groups = true @vpc_security_groups = [] end end def end_element(name) case name when 'Port', 'Engine', 'Status', 'BackupRetentionPeriod', 'DBSubnetGroup', 'EngineVersion', 'Endpoint', 'DBClusterParameterGroup', 'DBClusterIdentifier', 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'AllocatedStorage', 'MasterUsername' @db_cluster[name] = value when 'VpcSecurityGroups' @in_vpc_security_groups = false @db_cluster['VpcSecurityGroups'] = @vpc_security_groups when 'VpcSecurityGroupMembership' @vpc_security_groups << @vpc_security_group @vpc_security_group = {} when 'VpcSecurityGroupId' @vpc_security_group[name] = value when 'Status' # Unfortunately, status is used in VpcSecurityGroupMemebership and # DBSecurityGroups if @in_db_security_groups @db_security_group[name]=value end if @in_vpc_security_groups @vpc_security_group[name] = value end when 'DBClusterMembers' @in_db_cluster_members = false @db_cluster['DBClusterMembers'] = @db_cluster_members when 'DBClusterMember' @db_cluster_members << @db_cluster_member @db_cluster_member = {} when 'IsClusterWriter' @db_cluster_member['master'] = value == "true" when 'DBInstanceIdentifier' @db_cluster_member[name] = value when 'DBCluster' @db_cluster = fresh_cluster end end end end end end end 
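# Illustrative note: for each <DBCluster> element, the parser above accumulates
# a hash roughly shaped like the following — values are hypothetical, keys are
# taken from the end_element handlers:
#
#   {
#     'DBClusterIdentifier' => 'my-cluster',
#     'Engine'              => 'aurora-mysql',
#     'Status'              => 'available',
#     'AvailabilityZones'   => [],
#     'VpcSecurityGroups'   => [{ 'VpcSecurityGroupId' => 'sg-12345678', 'Status' => 'active' }],
#     'DBClusterMembers'    => [{ 'DBInstanceIdentifier' => 'my-instance-1', 'master' => true }]
#   }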
fog-aws-3.18.0/lib/fog/aws/parsers/rds/db_cluster_snapshot_parser.rb000066400000000000000000000013771437344660100255160ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DBClusterSnapshotParser < Fog::Parsers::Base def reset @db_cluster_snapshot = fresh_snapshot end def fresh_snapshot {} end def start_element(name, attrs=[]) super end def end_element(name) case name when 'Port', 'PercentProgress', 'AllocatedStorage' @db_cluster_snapshot[name] = value.to_i when 'SnapshotCreateTime', 'ClusterCreateTime' @db_cluster_snapshot[name] = Time.parse(value) else @db_cluster_snapshot[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/db_engine_version_parser.rb000066400000000000000000000020161437344660100251170ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DBEngineVersionParser < Fog::Parsers::Base def reset @db_engine_version = fresh_engine_version end def fresh_engine_version {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBEngineDescription' then @db_engine_version['DBEngineDescription'] = @value when 'DBEngineVersionDescription' then @db_engine_version['DBEngineVersionDescription'] = @value when 'DBParameterGroupFamily' then @db_engine_version['DBParameterGroupFamily'] = @value when 'DBEngineVersionIdentifier' then @db_engine_version['DBEngineVersionIdentifier'] = @value when 'Engine' then @db_engine_version['Engine'] = @value when 'EngineVersion' then @db_engine_version['EngineVersion'] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/db_parameter_parser.rb000066400000000000000000000011311437344660100240620ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DBParameterParser < Fog::Parsers::Base def reset @db_parameter = new_db_parameter end def new_db_parameter {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'IsModifiable' @value == "true" ? 
true : false else @db_parameter[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/db_parser.rb000066400000000000000000000150331437344660100220300ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DbParser < Fog::Parsers::Base def reset @db_instance = fresh_instance end def fresh_instance {'PendingModifiedValues' => [], 'DBSecurityGroups' => [], 'ReadReplicaDBInstanceIdentifiers' => [], 'Endpoint' => {}, 'DBSubnetGroup' => {}} end def start_element(name, attrs = []) super case name when 'PendingModifiedValues' @in_pending_modified_values = true @pending_modified_values = {} when 'DBSecurityGroups' @in_db_security_groups = true @db_security_groups = [] when 'DBSecurityGroup' @db_security_group = {} when 'Endpoint' @in_endpoint = true @endpoint = {} when 'DBParameterGroup' @db_parameter_group = {} when 'DBParameterGroups' @in_db_parameter_groups = true @db_parameter_groups = [] when 'VpcSecurityGroupMembership' @vpc_security_group = {} when 'VpcSecurityGroups' @in_vpc_security_groups = true @vpc_security_groups = [] when 'DBSubnetGroup' @in_db_subnet_group = true @db_subnet_group = {} when 'Subnets' @in_subnets = true @subnets = [] when 'Subnet' @subnet = {} when 'SubnetAvailabilityZone' @in_subnet_availability_zone = true @subnet_availability_zone = {} end end def end_element(name) case name when 'LatestRestorableTime', 'InstanceCreateTime' @db_instance[name] = Time.parse value when 'Engine', 'DBInstanceStatus', 'DBInstanceIdentifier', 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'AvailabilityZone', 'MasterUsername', 'DBName', 'LicenseModel', 'DBSubnetGroupName', 'StorageType', 'KmsKeyId', 'TdeCredentialArn', 'SecondaryAvailabilityZone', 'DbiResourceId', 'CACertificateIdentifier', 'CharacterSetName', 'DbiResourceId', 'LicenseModel', 'KmsKeyId', 'DBClusterIdentifier' @db_instance[name] = value when 'MultiAZ', 'AutoMinorVersionUpgrade', 'PubliclyAccessible', 'StorageEncrypted', 'EnableIAMDatabaseAuthentication' @db_instance[name] = (value == 'true') when 'DBParameterGroups' @in_db_parameter_groups = false @db_instance['DBParameterGroups'] = @db_parameter_groups when 'DBParameterGroup' @db_parameter_groups << @db_parameter_group @db_parameter_group = {} when 'ParameterApplyStatus', 'DBParameterGroupName' if @in_db_parameter_groups @db_parameter_group[name] = value end when 'BackupRetentionPeriod', 'Iops', 'AllocatedStorage' if @in_pending_modified_values @pending_modified_values[name] = value.to_i else @db_instance[name] = value.to_i end when 'DBInstanceClass', 'EngineVersion', 'MasterUserPassword', 'MultiAZ' if @in_pending_modified_values @pending_modified_values[name] = value else @db_instance[name] = value end when 'DBSecurityGroups' @in_db_security_groups = false @db_instance['DBSecurityGroups'] = @db_security_groups when 'DBSecurityGroupName' @db_security_group[name]=value when 'DBSecurityGroup' @db_security_groups << @db_security_group @db_security_group = {} when 'VpcSecurityGroups' @in_vpc_security_groups = false @db_instance['VpcSecurityGroups'] = @vpc_security_groups when 'VpcSecurityGroupMembership' @vpc_security_groups << @vpc_security_group @vpc_security_group = {} when 'VpcSecurityGroupId' @vpc_security_group[name] = value when 'Status' # Unfortunately, status is used in VpcSecurityGroupMemebership and # DBSecurityGroups if @in_db_security_groups @db_security_group[name]=value end if @in_vpc_security_groups @vpc_security_group[name] = value end when 'Address' @endpoint[name] = value when 'Port' if 
@in_pending_modified_values @pending_modified_values[name] = value.to_i elsif @in_endpoint @endpoint[name] = value.to_i end when 'PendingModifiedValues' @in_pending_modified_values = false @db_instance['PendingModifiedValues'] = @pending_modified_values when 'Endpoint' @in_endpoint = false @db_instance['Endpoint'] = @endpoint when 'ReadReplicaDBInstanceIdentifier' @db_instance['ReadReplicaDBInstanceIdentifiers'] << value when 'ReadReplicaSourceDBInstanceIdentifier' @db_instance['ReadReplicaSourceDBInstanceIdentifier'] = value when 'DBInstance' @db_instance = fresh_instance when 'DBSubnetGroup' @in_db_subnet_group = false @db_instance['DBSubnetGroup'] = @db_subnet_group when 'VpcId' if @in_db_subnet_group @db_subnet_group[name] = value end when 'Subnets' @in_subnets = false if @in_db_subnet_group @db_subnet_group['Subnets'] = @subnets end when 'Subnet' if @in_subnets @subnets << @subnet end when 'SubnetIdentifier', 'SubnetStatus' if @in_subnets @subnet[name] = value end when 'SubnetAvailabilityZone' @in_subnet_availability_zone = false @subnet['SubnetAvailabilityZone'] = @subnet_availability_zone when 'Name' if @in_subnet_availability_zone @subnet_availability_zone[name] = value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_cluster.rb000066400000000000000000000014401437344660100235340ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_parser' class DeleteDBCluster < Fog::Parsers::AWS::RDS::DbClusterParser def reset @response = { 'DeleteDBClusterResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBCluster' @response['DeleteDBClusterResult']['DBCluster'] = @db_cluster @db_cluster = fresh_cluster when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_cluster_snapshot.rb000066400000000000000000000015531437344660100254600ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_snapshot_parser' class DeleteDBClusterSnapshot < Fog::Parsers::AWS::RDS::DBClusterSnapshotParser def reset @response = {'DeleteDBClusterSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value when 'DBClusterSnapshot' @response['DeleteDBClusterSnapshotResult']['DBClusterSnapshot'] = @db_cluster_snapshot @db_cluster_snapshot = fresh_snapshot else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_instance.rb000066400000000000000000000014321437344660100236600ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class DeleteDBInstance < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'DeleteDBInstanceResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['DeleteDBInstanceResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_parameter_group.rb000066400000000000000000000010251437344660100252460ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class 
DeleteDbParameterGroup < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } @db_parameter_group = {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_security_group.rb000066400000000000000000000011611437344660100251360ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class DeleteDBSecurityGroup < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_snapshot.rb000066400000000000000000000014451437344660100237170ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class DeleteDBSnapshot < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'DeleteDBSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value when 'DBSnapshot' @response['DeleteDBSnapshotResult']['DBSnapshot'] = @db_snapshot @db_snapshot = fresh_snapshot else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_db_subnet_group.rb000066400000000000000000000012331437344660100245670ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/subnet_group_parser' class DeleteDBSubnetGroup < Fog::Parsers::AWS::RDS::SubnetGroupParser def reset @response = { 'DeleteDBSubnetGroupResponse' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/delete_event_subscription.rb000066400000000000000000000011611437344660100253330ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class DeleteEventSubscription < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'ResponseMetadata' => {} } super end def start_element(name, attrs=[]) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_cluster_snapshots.rb000066400000000000000000000017671437344660100261700ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_snapshot_parser' class DescribeDBClusterSnapshots < Fog::Parsers::AWS::RDS::DBClusterSnapshotParser def reset @response = {'DescribeDBClusterSnapshotsResult' => {'DBClusterSnapshots' => []}, 'ResponseMetadata' => {}} super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBClusterSnapshot' @response['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'] << @db_cluster_snapshot @db_cluster_snapshot = fresh_snapshot when 'Marker' @response['DescribeDBClusterSnapshotsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end 
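These RDS parsers are plain Nokogiri SAX document handlers, so the quickest way to see the hash a given one builds is to feed it a canned XML payload by hand. The snippet below is an illustrative sketch only — the sample XML, identifiers and return values are invented, not taken from the gem — and it presumes fog-xml's Fog::Parsers::Base is available once fog/aws has been loaded.

# Illustrative sketch (not part of the gem): drive the DescribeDBClusterSnapshots
# parser shown above with a hand-written XML fragment and inspect #response.
require 'fog/aws'                                             # loads fog-core / fog-xml
require 'fog/aws/parsers/rds/describe_db_cluster_snapshots'   # the parser defined above
require 'nokogiri'

xml = <<-XML
<DescribeDBClusterSnapshotsResponse>
  <DescribeDBClusterSnapshotsResult>
    <DBClusterSnapshots>
      <DBClusterSnapshot>
        <DBClusterSnapshotIdentifier>example-snapshot</DBClusterSnapshotIdentifier>
        <Port>5439</Port>
        <SnapshotCreateTime>2023-01-01T00:00:00Z</SnapshotCreateTime>
      </DBClusterSnapshot>
    </DBClusterSnapshots>
    <Marker>example-marker</Marker>
  </DescribeDBClusterSnapshotsResult>
  <ResponseMetadata><RequestId>example-request-id</RequestId></ResponseMetadata>
</DescribeDBClusterSnapshotsResponse>
XML

parser = Fog::Parsers::AWS::RDS::DescribeDBClusterSnapshots.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)
# parser.response should now hold the snapshot list under
# 'DescribeDBClusterSnapshotsResult' => 'DBClusterSnapshots', with Port coerced to
# an Integer and SnapshotCreateTime parsed into a Time by the snapshot parser above.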
fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_clusters.rb000066400000000000000000000016341437344660100242420ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_cluster_parser' class DescribeDBClusters < Fog::Parsers::AWS::RDS::DbClusterParser def reset @response = { 'DescribeDBClustersResult' => { 'DBClusters' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs=[]) super end def end_element(name) case name when 'DBCluster' @response['DescribeDBClustersResult']['DBClusters'] << @db_cluster @db_cluster = fresh_cluster when 'Marker' @response['DescribeDBClustersResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_engine_versions.rb000066400000000000000000000017601437344660100255730ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_engine_version_parser' class DescribeDBEngineVersions < Fog::Parsers::AWS::RDS::DBEngineVersionParser def reset @response = { 'DescribeDBEngineVersionsResult' => {'DBEngineVersions' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBEngineVersion' then @response['DescribeDBEngineVersionsResult']['DBEngineVersions'] << @db_engine_version @db_engine_version = fresh_engine_version when 'Marker' @response['DescribeDBEngineVersionsResult']['Marker'] = @value when 'RequestId' @response['ResponseMetadata'][name] = @value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_instances.rb000066400000000000000000000016301437344660100243610ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class DescribeDBInstances < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'DescribeDBInstancesResult' => {'DBInstances' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['DescribeDBInstancesResult']['DBInstances'] << @db_instance @db_instance = fresh_instance when 'Marker' @response['DescribeDBInstancesResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_log_files.rb000066400000000000000000000023341437344660100243370ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DescribeDBLogFiles < Fog::Parsers::Base attr_reader :rds_id def initialize(rds_id) @rds_id = rds_id super() end def reset @response = { 'DescribeDBLogFilesResult' => {'DBLogFiles' => []}, 'ResponseMetadata' => {} } fresh_log_file end def fresh_log_file @db_log_file = {'DBInstanceIdentifier' => @rds_id} end def start_element(name, attrs = []) super end def end_element(name) case name when 'LastWritten' then @db_log_file[name] = Time.at(value.to_i / 1000) when 'LogFileName' then @db_log_file[name] = value when 'Size' then @db_log_file[name] = value.to_i when 'DescribeDBLogFilesDetails' @response['DescribeDBLogFilesResult']['DBLogFiles'] << @db_log_file fresh_log_file when 'Marker' then @response['DescribeDBLogFilesResult'][name] = value when 'RequestId' then @response['ResponseMetadata'][name] = value end end end end end end end 
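Two details of DescribeDBLogFiles above are easy to miss: the parser is constructed with the RDS instance identifier so it can stamp DBInstanceIdentifier onto every log-file entry, and LastWritten arrives from the API as epoch milliseconds, hence the Time.at(value.to_i / 1000) conversion. A minimal sketch, with an invented identifier:

# Illustrative sketch only — the identifier and timestamp are made up.
parser = Fog::Parsers::AWS::RDS::DescribeDBLogFiles.new('example-db-instance')
parser.rds_id                      # => "example-db-instance"

# LastWritten is reported in milliseconds since the epoch, so the parser divides
# by 1000 before handing the value to Time.at:
Time.at(1_672_531_200_000 / 1000)  # => 2023-01-01 00:00:00 UTC (local-zone dependent)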
fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_parameter_groups.rb000066400000000000000000000022151437344660100257510ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DescribeDBParameterGroups < Fog::Parsers::Base def reset @response = { 'DescribeDBParameterGroupsResult' => {'DBParameterGroups' => []}, 'ResponseMetadata' => {} } @db_parameter_group = {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBParameterGroupFamily' then @db_parameter_group['DBParameterGroupFamily'] = value when 'Description' then @db_parameter_group['Description'] = value when 'DBParameterGroupName' then @db_parameter_group['DBParameterGroupName'] = value when 'DBParameterGroup' then @response['DescribeDBParameterGroupsResult']['DBParameterGroups'] << @db_parameter_group @db_parameter_group = {} when 'Marker' @response['DescribeDBParameterGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_parameters.rb000066400000000000000000000027231437344660100245410ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DescribeDBParameters < Fog::Parsers::Base def reset @response = { 'DescribeDBParametersResult' =>{}, 'ResponseMetadata' => {} } @parameter = {} @parameters = [] end def start_element(name, attrs = []) super end def end_element(name) case name when 'ParameterValue' then @parameter['ParameterValue'] = value when 'DataType' then @parameter['DataType'] = value when 'AllowedValues' then @parameter['AllowedValues'] = value when 'Source' then @parameter['Source'] = value when 'IsModifiable' then @parameter['IsModifiable'] = value == 'true' ? true : false when 'Description' then @parameter['Description'] = value when 'ApplyType' then @parameter['ApplyType'] = value when 'ParameterName' then @parameter['ParameterName'] = value when 'Parameter' @parameters << @parameter @parameter = {} when 'Marker' @response['DescribeDBParametersResult']['Marker'] = value when 'Parameters' @response['DescribeDBParametersResult']['Parameters'] = @parameters when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_reserved_instances.rb000066400000000000000000000023251437344660100262620ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DescribeDBReservedInstances < Fog::Parsers::Base def reset @reserved_instance = {} @response = { 'ReservedDBInstances' => [] } end def end_element(name) case name when 'ReservedDBInstanceId', 'ReservedDBInstancesOfferingId', 'DBInstanceClass', 'ProductDescription', 'State' @reserved_instance[name] = @value when 'Duration', 'DBInstanceCount' @reserved_instance[name] = @value.to_i when 'FixedPrice', 'UsagePrice' @reserved_instance[name] = @value.to_f when 'ReservedDBInstance' @response['ReservedDBInstances'] << @reserved_instance @reserved_instance = {} when 'Marker' @response[name] = @value when 'MultiAZ' if @value == 'false' @reserved_instance[name] = false else @reserved_instance[name] = true end when 'StartTime' @reserved_instance[name] = Time.parse(@value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_security_groups.rb000066400000000000000000000017431437344660100256450ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/security_group_parser' class DescribeDBSecurityGroups < 
Fog::Parsers::AWS::RDS::SecurityGroupParser def reset @response = { 'DescribeDBSecurityGroupsResult' => {'DBSecurityGroups' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSecurityGroup' then @response['DescribeDBSecurityGroupsResult']['DBSecurityGroups'] << @security_group @security_group = fresh_security_group when 'Marker' @response['DescribeDBSecurityGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_snapshots.rb000066400000000000000000000016511437344660100244170ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/snapshot_parser' class DescribeDBSnapshots < Fog::Parsers::AWS::RDS::SnapshotParser def reset @response = { 'DescribeDBSnapshotsResult' => {'DBSnapshots' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSnapshot' then @response['DescribeDBSnapshotsResult']['DBSnapshots'] << @db_snapshot @db_snapshot = fresh_snapshot when 'Marker' @response['DescribeDBSnapshotsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_db_subnet_groups.rb000066400000000000000000000017141437344660100252740ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/subnet_group_parser' class DescribeDBSubnetGroups < Fog::Parsers::AWS::RDS::SubnetGroupParser def reset @response = { 'DescribeDBSubnetGroupsResult' => {'DBSubnetGroups' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSubnetGroup' @response['DescribeDBSubnetGroupsResult']['DBSubnetGroups'] << @db_subnet_group @db_subnet_group = fresh_subnet_group when 'Marker' @response['DescribeDBSubnetGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_engine_default_parameters.rb000066400000000000000000000017341437344660100271260ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parameter_parser' class DescribeEngineDefaultParameters < Fog::Parsers::AWS::RDS::DBParameterParser def reset @response = {'DescribeEngineDefaultParametersResult' => {'Parameters' => []}, 'ResponseMetadata' => {}} super end def start_element(name, attrs = []) super end def end_element(name) case name when 'Parameter' @response['DescribeEngineDefaultParametersResult']['Parameters'] << @db_parameter @db_parameter = new_db_parameter when 'Marker' @response['DescribeEngineDefaultParametersResult']['Marker'] = @value when 'RequestId' @response['ResponseMetadata'][name] = @value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_event_subscriptions.rb000066400000000000000000000020111437344660100260270ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/event_subscription_parser' class DescribeEventSubscriptions < Fog::Parsers::AWS::RDS::EventSubscriptionParser def reset @response = { 'DescribeEventSubscriptionsResult' => { 'EventSubscriptionsList' => []}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = 
[]) super end def end_element(name) case name when 'EventSubscription' @response['DescribeEventSubscriptionsResult']['EventSubscriptionsList'] << @event_subscription @event_subscription = fresh_event_subscription when 'Marker' @response['DescribeEventSubscriptionsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/describe_orderable_db_instance_options.rb000066400000000000000000000037451437344660100300010ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DescribeOrderableDBInstanceOptions < Fog::Parsers::Base def reset @response = { 'DescribeOrderableDBInstanceOptionsResult' => {'OrderableDBInstanceOptions' => []}, 'ResponseMetadata' => {} } @db_instance_option = {} @db_instance_options = [] end def start_element(name, attrs = []) case name when 'AvailabilityZones' then @availability_zones = [] when 'AvailabilityZone' then @availability_zone = {} end super end def end_element(name) case name when 'MultiAZCapable', 'ReadReplicaCapable', 'Vpc', 'SupportsIops', 'SupportsEnhancedMonitoring', 'SupportsIAMDatabaseAuthentication', 'SupportsPerformanceInsights', 'SupportsStorageEncryption' then @db_instance_option[name] = to_boolean(value) when 'Engine', 'LicenseModel', 'EngineVersion', 'DBInstanceClass', 'StorageType' then @db_instance_option[name] = value when 'AvailabilityZones' then @db_instance_option[name] = @availability_zones when 'AvailabilityZone' then @availability_zones << @availability_zone unless @availability_zone.empty? when 'Name' then @availability_zone[name] = value when 'OrderableDBInstanceOption' @db_instance_options << @db_instance_option @db_instance_option = {} when 'OrderableDBInstanceOptions' @response['DescribeOrderableDBInstanceOptionsResult']['OrderableDBInstanceOptions'] = @db_instance_options when 'Marker' then @response['DescribeOrderableDBInstanceOptionsResult'][name] = value when 'RequestId' then @response['ResponseMetadata'][name] = value end end def to_boolean(v) (v =~ /\A\s*(true|yes|1|y)\s*$/i) == 0 end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/download_db_logfile_portion.rb000066400000000000000000000010461437344660100256150ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class DownloadDBLogFilePortion < Fog::Parsers::Base def reset @response = { 'DownloadDBLogFilePortionResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) key = (name == 'RequestId') ? 'ResponseMetadata' : 'DownloadDBLogFilePortionResult' @response[key][name] = value end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/event_list.rb000066400000000000000000000016451437344660100222470ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/base' class EventListParser < Base def reset super @response['Events'] = [] end def start_element(name, attrs = []) super case name when 'Event'; then @event = {} end end def end_element(name) case name when 'Date' @event[name] = DateTime.parse(value.strip) when 'Message', 'SourceIdentifier', 'SourceType' @event[name] = value ? value.strip : name when 'Event' @response['Events'] << @event unless @event.empty? 
when 'IsTruncated', 'Marker', 'NextMarker' @response[name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/event_subscription_parser.rb000066400000000000000000000020071437344660100253650ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class EventSubscriptionParser < Fog::Parsers::Base def reset @event_subscription = fresh_event_subscription end def fresh_event_subscription {'EventCategories'=> []} end def start_element(name, attrs = []) super case name when 'EventCategoriesList' @in_event_categories_list = true end end def end_element(name) case name when 'EventCategory' @event_subscription['EventCategories'] << value @in_event_categories_list = false when 'SubscriptionCreationTime' @event_subscription[name] = Time.parse(value) when 'Enabled', 'CustomerAwsId', 'SourceType', 'Status', 'CustSubscriptionId', 'SnsTopicArn' @event_subscription[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/modify_db_instance.rb000066400000000000000000000014311437344660100237040ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class ModifyDBInstance < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'ModifyDBInstanceResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['ModifyDBInstanceResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/modify_db_parameter_group.rb000066400000000000000000000012261437344660100252760ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class ModifyDbParameterGroup < Fog::Parsers::Base def reset @response = { 'ModifyDBParameterGroupResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBParameterGroupName' @response['ModifyDBParameterGroupResult']['DBParameterGroupName'] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/modify_db_snapshot_attribute.rb000066400000000000000000000010341437344660100260210ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class ModifyDbSnapshotAttribute < Fog::Parsers::Base def reset @response = { 'ModifyDbSnapshotAttributeResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/modify_db_subnet_group.rb000066400000000000000000000010311437344660100246100ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class ModifyDBSubnetGroup < Fog::Parsers::Base def reset @response = { 'ModifyDBSubnetGrouptAttributeResult' => {}, 'ResponseMetadata' => {} } end def start_element(name, attrs = []) super end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end endfog-aws-3.18.0/lib/fog/aws/parsers/rds/promote_read_replica.rb000066400000000000000000000014371437344660100242510ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class PromoteReadReplica < Fog::Parsers::AWS::RDS::DbParser def reset 
@response = { 'PromoteReadReplicaResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['PromoteReadReplicaResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/reboot_db_instance.rb000066400000000000000000000014321437344660100237100ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class RebootDBInstance < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'RebootDBInstanceResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['RebootDBInstanceResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/restore_db_instance_from_db_snapshot.rb000066400000000000000000000015061437344660100275120ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class RestoreDBInstanceFromDBSnapshot < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'RestoreDBInstanceFromDBSnapshotResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/restore_db_instance_to_point_in_time.rb000066400000000000000000000014671437344660100275300ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/db_parser' class RestoreDBInstanceToPointInTime < Fog::Parsers::AWS::RDS::DbParser def reset @response = { 'RestoreDBInstanceToPointInTime' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBInstance' @response['RestoreDBInstanceToPointInTime']['DBInstance'] = @db_instance @db_instance = fresh_instance when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/revoke_db_security_group_ingress.rb000066400000000000000000000015571437344660100267320ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS require 'fog/aws/parsers/rds/security_group_parser' class RevokeDBSecurityGroupIngress < Fog::Parsers::AWS::RDS::SecurityGroupParser def reset @response = { 'RevokeDBSecurityGroupIngressResult' => {}, 'ResponseMetadata' => {} } super end def start_element(name, attrs = []) super end def end_element(name) case name when 'DBSecurityGroup' then @response['RevokeDBSecurityGroupIngressResult']['DBSecurityGroup'] = @security_group @security_group = fresh_security_group when 'RequestId' @response['ResponseMetadata'][name] = value else super end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/security_group_parser.rb000066400000000000000000000021501437344660100245220ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class SecurityGroupParser < Fog::Parsers::Base def reset @security_group = 
fresh_security_group end def fresh_security_group {'EC2SecurityGroups' => [], 'IPRanges' => []} end def start_element(name, attrs = []) super case name when 'EC2SecurityGroup', 'IPRange'; then @ingress = {} end end def end_element(name) case name when 'DBSecurityGroupDescription' then @security_group['DBSecurityGroupDescription'] = value when 'DBSecurityGroupName' then @security_group['DBSecurityGroupName'] = value when 'OwnerId' then @security_group['OwnerId'] = value when 'EC2SecurityGroup', 'IPRange' @security_group["#{name}s"] << @ingress unless @ingress.empty? when 'EC2SecurityGroupName', 'EC2SecurityGroupOwnerId', 'CIDRIP', 'Status' @ingress[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/snapshot_parser.rb000066400000000000000000000013061437344660100233000ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class SnapshotParser < Fog::Parsers::Base def reset @db_snapshot = fresh_snapshot end def fresh_snapshot {} end def start_element(name, attrs = []) super end def end_element(name) case name when 'AllocatedStorage', 'Port' @db_snapshot[name] = value.to_i when 'InstanceCreateTime', 'SnapshotCreateTime' @db_snapshot[name] = Time.parse(value) else @db_snapshot[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/subnet_group_parser.rb000066400000000000000000000020761437344660100241620ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS class SubnetGroupParser < Fog::Parsers::Base def reset @db_subnet_group = fresh_subnet_group end def start_element(name, attrs = []) super end def end_element(name) case name when 'VpcId' then @db_subnet_group['VpcId'] = value when 'SubnetGroupStatus' then @db_subnet_group['SubnetGroupStatus'] = value when 'DBSubnetGroupDescription' then @db_subnet_group['DBSubnetGroupDescription'] = value when 'DBSubnetGroupName' then @db_subnet_group['DBSubnetGroupName'] = value when 'SubnetIdentifier' then @db_subnet_group['Subnets'] << value when 'Marker' @response['DescribeDBSubnetGroupsResult']['Marker'] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end def fresh_subnet_group {'Subnets' => []} end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/rds/tag_list_parser.rb000066400000000000000000000016271437344660100232550ustar00rootroot00000000000000module Fog module Parsers module AWS module RDS # parses an XML-formatted list of resource tags from AWS class TagListParser < Fog::Parsers::Base # each tag is modeled as a String pair (2-element Array) def reset @this_key = nil @this_value = nil @tags = Hash.new @response = {'ListTagsForResourceResult' => {'TagList' => {}}} end def end_element(name) super case name when 'Tag' @tags[@this_key] = @this_value @this_key, @this_value = nil, nil when 'Key' @this_key = value when 'Value' @this_value = value when 'TagList' @response['ListTagsForResourceResult']['TagList'] = @tags end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/000077500000000000000000000000001437344660100205605ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster.rb000066400000000000000000000010271437344660100225660ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_parser' class Cluster < ClusterParser def reset super @response = {} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'Cluster' @response = {name => @cluster} end end end end end end end 
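The tag list parser above folds each <Tag>'s Key/Value pair into a flat Hash under ListTagsForResourceResult/TagList. A rough, hand-fed example follows — the sample XML and tag names are invented, and it assumes the fog-xml base parser is already loaded as in the earlier sketch:

# Illustrative sketch only: two tags should come back as a flat Hash keyed by tag name.
require 'fog/aws/parsers/rds/tag_list_parser'
require 'nokogiri'

xml = '<ListTagsForResourceResponse><TagList>' \
      '<Tag><Key>env</Key><Value>staging</Value></Tag>' \
      '<Tag><Key>team</Key><Value>data</Value></Tag>' \
      '</TagList></ListTagsForResourceResponse>'

parser = Fog::Parsers::AWS::RDS::TagListParser.new
Nokogiri::XML::SAX::Parser.new(parser).parse(xml)
parser.response['ListTagsForResourceResult']['TagList']
# => {"env"=>"staging", "team"=>"data"}  (approximately; see reset/end_element above)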
fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster_parser.rb000066400000000000000000000133471437344660100241520ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class ClusterParser < Fog::Parsers::Base # :cluster_identifier - (String) # :node_type - (String) # :cluster_status - (String) # :modify_status - (String) # :master_username - (String) # :db_name - (String) # :endpoint - (Hash) # :address - (String) # :port - (Integer) # :cluster_create_time - (Time) # :automated_snapshot_retention_period - (Integer) # :cluster_security_groups - (Array) # :cluster_security_group_name - (String) # :status - (String) # :vpc_security_groups - (Array) # :vpc_security_group_id - (String) # :status - (String) # :cluster_parameter_groups - (Array) # :parameter_group_name - (String) # :parameter_apply_status - (String) # :cluster_subnet_group_name - (String) # :vpc_id - (String) # :availability_zone - (String) # :preferred_maintenance_window - (String) # :pending_modified_values - (Hash) # :master_user_password - (String) # :node_type - (String) # :number_of_nodes - (Integer) # :cluster_type - (String) # :cluster_version - (String) # :automated_snapshot_retention_period - (Integer) # :cluster_version - (String) # :allow_version_upgrade - (Boolean) # :number_of_nodes - (Integer) # :publicly_accessible - (Boolean) # :encrypted - (Boolean) # :restore_status - (Hash) # :status - (String) # :current_restore_rate_in_mega_bytes_per_second - (Numeric) # :snapshot_size_in_mega_bytes - (Integer) # :progress_in_mega_bytes - (Integer) # :elapsed_time_in_seconds - (Integer) # :estimated_time_to_completion_in_seconds - (Integer) def reset @cluster = fresh_cluster end def fresh_cluster { 'ClusterParameterGroups' => [], 'ClusterSecurityGroups' => [], 'VpcSecurityGroups' => [], 'EndPoint' => {}, 'PendingModifiedValues'=> {}, 'RestoreStatus' => {}} end def start_element(name, attrs = []) super case name when 'ClusterSecurityGroups' @in_cluster_security_groups = true @cluster_security_group = {} when 'ClusterParameterGroups' @cluster_parameter_group = {} when 'VpcSecurityGroups' @in_vpc_security_groups = true @vpc_security_group = {} when 'PendingModifiedValues' @in_pending_modified_values = true end end def end_element(name) case name when 'AvailabilityZone', 'ClusterIdentifier', 'ClusterStatus', 'ClusterSubnetGroupName', 'DBName', 'MasterUsername', 'ModifyStatus', 'PreferredMaintenanceWindow', 'VpcId' @cluster[name] = value when 'ClusterCreateTime' @cluster[name] = Time.parse(value) when 'AllowVersionUpgrade', 'Encrypted', 'PubliclyAccessible' @cluster[name] = (value == "true") when 'Address' @cluster['EndPoint'][name] = value when 'Port' @cluster['EndPoint'][name] = value.to_i when 'NodeType', 'ClusterVersion' if @in_pending_modified_values @cluster['PendingModifiedValues'][name] = value else @cluster[name] = value end when 'NumberOfNodes', 'AutomatedSnapshotRetentionPeriod' if @in_pending_modified_values @cluster['PendingModifiedValues'][name] = value.to_i else @cluster[name] = value.to_i end when 'MasterUserPassword', 'ClusterType' @cluster['PendingModifiedValues'][name] = value when 'Status' if @in_vpc_security_groups @vpc_security_group[name] = value elsif @in_cluster_security_groups @cluster_security_group[name] = value else @cluster['RestoreStatus'][name] = value end when 'ParameterGroupName', 'ParameterApplyStatus' @cluster_parameter_group[name] = value when 'ClusterSecurityGroupName' @cluster_security_group[name] = value when 'VpcSecurityGroupId' @vpc_security_group[name] = value 
when 'SnapshotSizeInMegaBytes', 'ProgressInMegaBytes', 'ElapsedTimeInSeconds', 'EstimatedTimeToCompletionInSeconds' @cluster['RestoreStatus'][name] = value.to_i when 'CurrentRestoreRateInMegaBytesPerSecond' @cluster['RestoreStatus'][name] = value.to_f when 'ClusterSecurityGroups' @in_cluster_security_groups = false when 'VpcSecurityGroups' @in_vpc_security_groups = false when 'PendingModifiedValues' @in_pending_modified_values = false when 'ClusterParameterGroup' @cluster['ClusterParameterGroups'] << {name => @cluster_parameter_group} @cluster_parameter_group = {} when 'ClusterSecurityGroup' @cluster['ClusterSecurityGroups'] << {name => @cluster_security_group} @cluster_security_group = {} when 'VpcSecurityGroup' @cluster['VpcSecurityGroups'] << {name => @vpc_security_group} @vpc_security_group = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster_security_group_parser.rb000066400000000000000000000026761437344660100273200ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class ClusterSecurityGroupParser < Fog::Parsers::Base # :cluster_security_group_name - (String) # :description - (String) # :ec_2_security_groups - (Array) # :status - (String) # :ec2_security_group_name - (String) # :ec2_security_group_owner_id - (String) # :ip_ranges - (Array) # :status - (String) # :cidrip - (String) def reset @cluster_security_group = fresh_cluster_security_group end def fresh_cluster_security_group {'EC2SecurityGroups' => [], 'IPRanges' => []} end def start_element(name, attrs = []) super case name when 'EC2SecurityGroups', 'IPRanges' @list = {} @list_name = name end end def end_element(name) super case name when 'ClusterSecurityGroupName', 'Description' @cluster_security_group[name] = value when 'EC2SecurityGroupName', 'EC2SecurityGroupOwnerId', 'CIDRIP', 'Status' @list[name] = value when 'EC2SecurityGroup', 'IPRange' @cluster_security_group[@list_name] << {name => @list} @list = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster_snapshot.rb000066400000000000000000000012051437344660100245030ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_snapshot_parser' class ClusterSnapshot < ClusterSnapshotParser # :parameter_group_name - (String) # :parameter_group_status - (String) def reset super @response = {} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'Snapshot' @response = @snapshot end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster_snapshot_parser.rb000066400000000000000000000047201437344660100260640ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class ClusterSnapshotParser < Fog::Parsers::Base # :snapshot_identifier - (String) # :cluster_identifier - (String) # :snapshot_create_time - (Time) # :status - (String) # :port - (Integer) # :availability_zone - (String) # :cluster_create_time - (Time) # :master_username - (String) # :cluster_version - (String) # :snapshot_type - (String) # :node_type - (String) # :number_of_nodes - (Integer) # :db_name - (String) # :vpc_id - (String) # :encrypted - (Boolean) # :accounts_with_restore_access - (Array) # :account_id - (String) # :owner_account - (String) # :total_backup_size_in_mega_bytes - (Numeric) # :actual_incremental_backup_size_in_mega_bytes - (Numeric) # :backup_progress_in_mega_bytes - (Numeric) # :current_backup_rate_in_mega_bytes_per_second - (Numeric) # 
:estimated_seconds_to_completion - (Integer) # :elapsed_time_in_seconds - (Integer) def reset @snapshot = fresh_snapshot end def fresh_snapshot {'Snapshot' => { 'AccountsWithRestoreAccess' => [] }} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'SnapshotIdentifier', 'ClusterIdentifier', 'Status', 'AvailabilityZone', 'MasterUsername', 'ClusterVersion', 'SnapshotType', 'NodeType', 'DBName', 'VpcId', 'OwnerAccount' @snapshot['Snapshot'][name] = value when 'Port', 'NumberOfNodes', 'ElapsedTimeInSeconds', 'EstimatedSecondsToCompletion' @snapshot['Snapshot'][name] = value.to_i when 'SnapshotCreateTime', 'ClusterCreateTime' @snapshot['Snapshot'][name] = Time.parse(value) when 'Encrypted' @snapshot['Snapshot'][name] = (value == "true") when 'TotalBackupSizeInMegaBytes', 'ActualIncrementalBackupSizeInMegaBytes', 'BackupProgressInMegaBytes', 'CurrentBackupRateInMegaBytesPerSecond' @snapshot['Snapshot'][name] = value.to_f when 'AccountId' @snapshot['Snapshot']['AccountsWithRestoreAccess'] << value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/cluster_subnet_group_parser.rb000066400000000000000000000025541437344660100267440ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class ClusterSubnetGroupParser < Fog::Parsers::Base # :cluster_subnet_group_name - (String) # :description - (String) # :vpc_id - (String) # :subnet_group_status - (String) # :subnets - (Array) # :subnet_identifier - (String) # :subnet_availability_zone - (Hash) # :name - (String) # :subnet_status - (String) def reset @response = { 'Subnets' => [] } end def fresh_subnet {'SubnetAvailabilityZone'=>{}} end def start_element(name, attrs = []) super case name when 'Subnets' @subnet = fresh_subnet end end def end_element(name) super case name when 'ClusterSubnetGroupName', 'Desciption', 'VpcId', 'SubnetGroupStatus' @response[name] = value when 'SubnetIdentifier', 'SubnetStatus' @subnet[name] = value when 'Name' @subnet['SubnetAvailabilityZone'][name] = value when 'Subnet' @response['Subnets'] << {name => @subnet} @subnet = fresh_subnet end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/create_cluster_parameter_group.rb000066400000000000000000000013031437344660100273620ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class CreateClusterParameterGroup < Fog::Parsers::Base # :parameter_group_name - (String) # :parameter_group_family - (String) # :description - (String) def reset @response = {'ClusterParameterGroup'=>{}} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'ParameterGroupName', 'ParameterGroupFamily', 'Description' @response['ClusterParameterGroup'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/create_cluster_security_group.rb000066400000000000000000000012251437344660100272540ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_security_group_parser' class CreateClusterSecurityGroup < ClusterSecurityGroupParser # :cluster_security_group def reset super @response = {} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'ClusterSecurityGroup' @response['ClusterSecurityGroup'] = @cluster_security_group end end end end end end end 
fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_parameter_groups.rb000066400000000000000000000021021437344660100300600ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeClusterParameterGroups < Fog::Parsers::Base # :marker - (String) # :parameter_groups - (Array) # :parameter_group_name - (String) # :parameter_group_family - (String) # :description - (String) def reset @response = { 'ParameterGroups' => [] } end def start_element(name, attrs = []) super case name when 'ParameterGroups' @parameter_group = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ParameterGroupName', 'ParameterGroupFamily', 'Description' @parameter_group[name] = value when 'ClusterParameterGroup' @response['ParameterGroups'] << {name => @parameter_group} @parameter_group = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_parameters.rb000066400000000000000000000025061437344660100266540ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeClusterParameters < Fog::Parsers::Base # :marker - (String) # :parameters - (Array) # :parameter_name - (String) # :parameter_value - (String) # :description - (String) # :source - (String) # :data_type - (String) # :allowed_values - (String) # :is_modifiable - (Boolean) # :minimum_engine_version - (String) def reset @response = { 'Parameters' => [] } end def start_element(name, attrs = []) super case name when 'Parameters' @parameter = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'MinimumEngineVersion' @parameter[name] = value when 'IsModifiable' @parameter[name] = (value == "true") when 'Parameter' @response['Parameters'] << {name => @parameter} @parameter = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_security_groups.rb000066400000000000000000000017661437344660100277660ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_security_group_parser' class DescribeClusterSecurityGroups < ClusterSecurityGroupParser # :marker - (String) # :cluster_security_groups - (Array) def reset @response = { 'ClusterSecurityGroups' => [] } end def start_element(name, attrs = []) super case name when 'ClusterSecurityGroups' @cluster_security_group = fresh_cluster_security_group end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ClusterSecurityGroup' @response['ClusterSecurityGroups'] << { name => @cluster_security_group } @cluster_security_group = fresh_cluster_security_group end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_snapshots.rb000066400000000000000000000015261437344660100265340ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_snapshot_parser' class DescribeClusterSnapshots < ClusterSnapshotParser # :marker - (String) # :snapshots - (Array) def reset @response = { 'Snapshots' => [] } end def start_element(name, attrs = []) super case name when 'Snapshots' @snapshot = fresh_snapshot end end def end_element(name) super case name when 'Marker' @response[name] = value when 'Snapshot' @response['Snapshots'] << @snapshot @snapshot = fresh_snapshot end end end end end end end 
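Note that ClusterSnapshotParser (further up) already nests its fields under a 'Snapshot' key, and DescribeClusterSnapshots pushes that hash onto the Snapshots array as-is, so each array element keeps that extra level of nesting. The shape, sketched from the code above with invented values, is roughly:

# Approximate shape of a parsed DescribeClusterSnapshots response (values invented):
# {
#   "Snapshots" => [
#     { "Snapshot" => { "SnapshotIdentifier" => "example-snap",   "Port" => 5439, "Encrypted" => false } },
#     { "Snapshot" => { "SnapshotIdentifier" => "example-snap-2", "Port" => 5439, "Encrypted" => true  } }
#   ],
#   "Marker" => "example-marker"
# }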
fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_subnet_groups.rb000066400000000000000000000034321437344660100274070ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeClusterSubnetGroups < Fog::Parsers::Base # :marker - (String) # :cluster_subnet_groups - (Array) # :cluster_subnet_group_name - (String) # :description - (String) # :vpc_id - (String) # :subnet_group_status - (String) # :subnets - (Array) # :subnet_identifier - (String) # :subnet_availability_zone - (Hash) # :name - (String) # :subnet_status - (String) def reset @response = { 'ClusterSubnetGroups' => [] } end def start_element(name, attrs = []) super case name when 'ClusterSubnetGroups' @cluster_subnet_group = {'Subnets' => []} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ClusterSubnetGroup' @response['ClusterSubnetGroups'] << {name => @cluster_subnet_group} @cluster_subnet_group = {'Subnets' => []} when 'ClusterSubnetGroupName', 'Description', 'VpcId', 'SubnetGroupStatus' @cluster_subnet_group[name] = value when 'Subnet' @cluster_subnet_group['Subnets'] << {name => @subnet} if @subnet @subnet = {} when 'SubnetAvailabilityZone' @subnet['SubnetAvailabilityZone'] = {} when 'Name' @subnet['SubnetAvailabilityZone']['Name'] = value when 'SubnetIdentifier', 'SubnetStatus' @subnet[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_cluster_versions.rb000066400000000000000000000027531437344660100263650ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeClusterVersions < Fog::Parsers::Base # :marker - (String) # :cluster_versions - (Array) # :cluster_version - (String) # :cluster_parameter_group_family - (String) # :description - (String) def reset @response = { 'ClusterVersions' => [] } @cluster_version_depth = 0 end def start_element(name, attrs = []) super case name when 'ClusterVersions' @cluster_version = {} when 'ClusterVersion' # Sadly, there are two nodes of different type named cluster_version # that are nested, so we keep track of which one we're in @cluster_version_depth += 1 end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ClusterVersion' @cluster_version_depth -= 1 if @cluster_version_depth == 0 @response['ClusterVersions'] << {name => @cluster_version} @cluster_version = {} else @cluster_version[name] = value end when 'ClusterParameterGroupFamily', 'Description' @cluster_version[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_clusters.rb000066400000000000000000000011501437344660100246060ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_parser' class DescribeClusters < ClusterParser def reset super @response = {"ClusterSet" => []} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'Cluster' @response["ClusterSet"] << {name => @cluster} @cluster = fresh_cluster end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_default_cluster_parameters.rb000066400000000000000000000026241437344660100303610ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeDefaultClusterParameters < Fog::Parsers::Base # :marker - (String) # :parameter_group_family - (String) # :parameters - (Array) # :parameter_name - (String) # :parameter_value - (String) # :description - (String) # :source - 
(String) # :data_type - (String) # :allowed_values - (String) # :is_modifiable - (Boolean) # :minimum_engine_version - (String) def reset @response = { 'Parameters' => [] } end def start_element(name, attrs = []) super case name when 'Parameters' @parameter = {} end end def end_element(name) super case name when 'Marker', 'ParameterGroupFamily' @response[name] = value when 'ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'MinimumEngineVersion' @parameter[name] = value when 'IsModifiable' @parameter[name] = (value == "true") when 'Parameter' @response['Parameters'] << {name => @parameter} @parameter = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_events.rb000066400000000000000000000020271437344660100242520ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeEvents < Fog::Parsers::Base # :marker - (String) # :events - (Array) # :source_identifier - (String) # :source_type - (String) # :message - (String) # :date - (Time) def reset @response = { 'Events' => [] } end def start_element(name, attrs = []) super case name when 'Events' @event = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'SourceIdentifier', 'SourceType', 'Message' @event[name] = value when 'Date' @event[name] = Time.parse(value) when 'Event' @response['Events'] << {name => @event} @event = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_orderable_cluster_options.rb000066400000000000000000000032111437344660100302150ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeOrderableClusterOptions < Fog::Parsers::Base # :marker - (String) # :orderable_cluster_options - (Array) # :cluster_version - (String) # :cluster_type - (String) # :node_type - (String) # :availability_zones - (Array) # :name - (String) def reset @response = { 'OrderableClusterOptions' => [] } end def fresh_orderable_cluster_option {'AvailabilityZones' => []} end def start_element(name, attrs = []) super case name when 'OrderableClusterOptions' @orderable_cluster_option = fresh_orderable_cluster_option when 'AvailabilityZones' @availability_zone = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'ClusterVersion', 'ClusterType', 'NodeType' @orderable_cluster_option[name] = value when 'Name' @availability_zone[name] = value when 'AvailabilityZone' @orderable_cluster_option['AvailabilityZones'] << {name => @availability_zone} @availability_zone = {} when 'OrderableClusterOption' @response['OrderableClusterOptions'] << {name => @orderable_cluster_option} @orderable_cluster_option = fresh_orderable_cluster_option end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_reserved_node_offerings.rb000066400000000000000000000042261437344660100276370ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeReservedNodeOfferings < Fog::Parsers::Base # :marker - (String) # :reserved_node_offerings - (Array) # :reserved_node_offering_id - (String) # :node_type - (String) # :duration - (Integer) # :fixed_price - (Numeric) # :usage_price - (Numeric) # :currency_code - (String) # :offering_type - (String) # :recurring_charges - (Array) # :recurring_charge_amount - (Numeric) # :recurring_charge_frequency - (String) def reset @response = { 'ReservedNodeOfferings' => [] } end def fresh_reserved_node_offering {'RecurringCharges' => []} end 
def start_element(name, attrs = []) super case name when 'ReservedNodeOfferings' @reserved_node_offering = fresh_reserved_node_offering when 'RecurringCharges' @recurring_charge = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'Duration' @reserved_node_offering[name] = value.to_i when 'FixedPrice', 'UsagePrice' @reserved_node_offering[name] = value.to_f when 'CurrencyCode', 'OfferingType', 'NodeType', 'ReservedNodeOfferingId' @reserved_node_offering[name] = value when 'RecurringChargeAmount' @recurring_charge[name] = value.to_f when 'RecurringChargeFrequency' @recurring_charge[name] = value when 'RecurringCharge' @reserved_node_offering['RecurringCharges'] << {name => @recurring_charge} @recurring_charge = {} when 'ReservedNodeOffering' @response['ReservedNodeOfferings'] << {name => @reserved_node_offering} @reserved_node_offering = fresh_reserved_node_offering end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_reserved_nodes.rb000066400000000000000000000044371437344660100257640ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeReservedNodes < Fog::Parsers::Base # :marker - (String) # :reserved_nodes - (Array) # :reserved_node_id - (String) # :reserved_node_offering_id - (String) # :node_type - (String) # :start_time - (Time) # :duration - (Integer) # :fixed_price - (Numeric) # :usage_price - (Numeric) # :currency_code - (String) # :node_count - (Integer) # :state - (String) # :offering_type - (String) # :recurring_charges - (Array) # :recurring_charge_amount - (Numeric) # :recurring_charge_frequency - (String) def reset @response = { 'ReservedNodes' => [] } end def fresh_reserved_nodes {'RecurringCharges' => []} end def start_element(name, attrs = []) super case name when 'ReservedNodes' @reserved_node = fresh_reserved_nodes when 'RecurringCharges' @recurring_charge = {} end end def end_element(name) super case name when 'Marker' @response[name] = value when 'Duration', 'NodeCount' @reserved_node[name] = value.to_i when 'StartTime' @reserved_node[name] = Time.parse(value) when 'FixedPrice', 'UsagePrice' @reserved_node[name] = value.to_f when 'CurrencyCode', 'OfferingType', 'NodeType', 'ReservedNodeOfferingId', 'ReservedNodeId', 'State' @reserved_node[name] = value when 'RecurringChargeAmount' @recurring_charge[name] = value.to_f when 'RecurringChargeFrequency' @recurring_charge[name] = value when 'RecurringCharge' @reserved_node['RecurringCharges'] << {name => @recurring_charge} @recurring_charge = {} when 'ReservedNode' @response['ReservedNodes'] << {name => @reserved_node} @reserved_node = fresh_reserved_nodes end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/describe_resize.rb000066400000000000000000000036741437344660100242600ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class DescribeResize < Fog::Parsers::Base # :target_node_type - (String) # :target_number_of_nodes - (Integer) # :target_cluster_type - (String) # :status - (String) # :import_tables_completed - (Array) # :import_tables_in_progress - (Array) # :import_tables_not_started - (Array) def reset @response = { 'ImportTablesCompleted' => [], 'ImportTablesInProgress' => [], 'ImportTablesNotStarted' => []} end def start_element(name, attrs = []) super case name when 'ImportTablesCompleted' @in_import_tables_completed = true when 'ImportTablesInProgress' @in_import_tables_in_progress = true when 'ImportTablesNotStarted' @in_import_tables_not_started = 
true end end def end_element(name) super case name when 'TargetNodeType', 'TargetClusterType', 'Status' @response[name] = value when 'TargetNumberOfNodes' @response[name] = value.to_i when 'ImportTablesCompleted' @in_import_tables_completed = false when 'ImportTablesInProgress' @in_import_tables_in_progress = false when 'ImportTablesNotStarted' @in_import_tables_not_started = false when 'member' if @in_import_tables_completed @response['ImportTablesCompleted'] << value end if @in_import_tables_not_started @response['ImportTablesNotStarted'] << value end if @in_import_tables_in_progress @response['ImportTablesInProgress'] << value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/purchase_reserved_node_offering.rb000066400000000000000000000034501437344660100275040ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class PurchaseReservedNodeOffering < Fog::Parsers::Base # :reserved_node_id - (String) # :reserved_node_offering_id - (String) # :node_type - (String) # :start_time - (Time) # :duration - (Integer) # :fixed_price - (Numeric) # :usage_price - (Numeric) # :currency_code - (String) # :node_count - (Integer) # :state - (String) # :offering_type - (String) # :recurring_charges - (Array) # :recurring_charge_amount - (Numeric) # :recurring_charge_frequency - (String) def reset @response = { 'RecurringCharges' => [] } end def start_element(name, attrs = []) super case name when 'RecurringCharges' @recurring_charge = {} end end def end_element(name) super case name when 'ReservedNodeId', 'ReservedNodeOfferingId', 'NodeType', 'CurrencyCode', 'State', 'OfferingType' @response[name] = value when 'Duration', 'NodeCount' @response[name] = value.to_i when 'FixedPrice', 'UsagePrice' @response[name] = value.to_f when 'StartTime' @response[name] = Time.parse(value) when 'RecurringChargeAmount' @recurring_charge[name] = value.to_f when 'RecurringChargeFrequency' @recurring_charge[name] = value when 'RecurringCharge' @response['RecurringCharges'] << {name => @recurring_charge} @recurring_charge = {} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/revoke_cluster_security_group_ingress.rb000066400000000000000000000012341437344660100310360ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS require 'fog/aws/parsers/redshift/cluster_security_group_parser' class RevokeClusterSecurityGroupIngress < ClusterSecurityGroupParser # :cluster_security_group def reset super @response = {} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'ClusterSecurityGroup' @response['ClusterSecurityGroup'] = @cluster_security_group end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/redshift/update_cluster_parameter_group_parser.rb000066400000000000000000000011421437344660100307560ustar00rootroot00000000000000module Fog module Parsers module Redshift module AWS class UpdateClusterParameterGroupParser < Fog::Parsers::Base # :parameter_group_name - (String) # :parameter_group_status - (String) def reset @response = {} end def start_element(name, attrs = []) super end def end_element(name) super case name when 'ParameterGroupName', 'ParameterGroupStatus' @response[name] = value end end end end end end end 
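One pattern worth calling out from DescribeResize (a few files up): it tracks which ImportTables* wrapper it is currently inside with boolean flags and routes each <member> element into the matching array. A parsed response therefore ends up looking roughly like the sketch below — the values are invented for illustration:

# Approximate shape of a parsed DescribeResize response (illustrative values only):
# {
#   "TargetNodeType"         => "dw.hs1.8xlarge",
#   "TargetNumberOfNodes"    => 2,
#   "TargetClusterType"      => "multi-node",
#   "Status"                 => "IN_PROGRESS",
#   "ImportTablesCompleted"  => ["users", "events"],
#   "ImportTablesInProgress" => ["sessions"],
#   "ImportTablesNotStarted" => []
# }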
fog-aws-3.18.0/lib/fog/aws/parsers/ses/000077500000000000000000000000001437344660100175425ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/ses/delete_verified_email_address.rb000066400000000000000000000006451437344660100260670ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class DeleteVerifiedEmailAddress < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/get_send_quota.rb000066400000000000000000000010001437344660100230570ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class GetSendQuota < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when "Max24HourSend", "MaxSendRate", "SentLast24Hours" @response[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/get_send_statistics.rb000066400000000000000000000013241437344660100241310ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class GetSendStatistics < Fog::Parsers::Base def reset @response = { 'SendDataPoints' => [], 'ResponseMetadata' => {} } @send_data_point = {} end def end_element(name) case name when "Bounces", "Complaints", "DeliveryAttempts", "Rejects", "Timestamp" @send_data_point[name] = value when 'member' @response['SendDataPoints'] << @send_data_point @send_data_point = {} when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/list_verified_email_addresses.rb000066400000000000000000000010321437344660100261170ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class ListVerifiedEmailAddresses < Fog::Parsers::Base def reset @response = { 'VerifiedEmailAddresses' => [], 'ResponseMetadata' => {} } end def end_element(name) case name when 'member' @response['VerifiedEmailAddresses'] << value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/send_email.rb000066400000000000000000000007271437344660100221750ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class SendEmail < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'MessageId' @response[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/send_raw_email.rb000066400000000000000000000007321437344660100230420ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class SendRawEmail < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'MessageId' @response[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/ses/verify_domain_identity.rb000066400000000000000000000007521437344660100246370ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class VerifyDomainIdentity < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'VerificationToken' @response[name] = value when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end 
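# For reference, each SES parser above returns a Hash whose AWS 'RequestId' is
# nested under 'ResponseMetadata'. GetSendStatistics, for example, accumulates
# one hash per <member> element and yields a structure shaped like the
# following (field values are illustrative; the parser keeps them as raw
# strings rather than converting to Integer or Time):
#
#   {
#     'SendDataPoints' => [
#       { 'Bounces'          => '0',
#         'Complaints'       => '0',
#         'DeliveryAttempts' => '12',
#         'Rejects'          => '0',
#         'Timestamp'        => '2023-01-01T00:00:00Z' }
#     ],
#     'ResponseMetadata' => { 'RequestId' => 'c617c6b2-0000-0000-0000-000000000000' }
#   }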
fog-aws-3.18.0/lib/fog/aws/parsers/ses/verify_email_address.rb000066400000000000000000000006351437344660100242530ustar00rootroot00000000000000module Fog module Parsers module AWS module SES class VerifyEmailAddress < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/000077500000000000000000000000001437344660100205475ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/basic.rb000066400000000000000000000010731437344660100221560ustar00rootroot00000000000000module Fog module Parsers module AWS module SimpleDB class Basic < Fog::Parsers::Base def initialize(nil_string) @nil_string = nil_string reset end def end_element(name) case(name) when 'BoxUsage' response[name] = value.to_f when 'RequestId' response[name] = value end end def sdb_decode(value) value.eql?(@nil_string) ? nil : value end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/domain_metadata.rb000066400000000000000000000014001437344660100241760ustar00rootroot00000000000000require 'fog/aws/parsers/simpledb/basic' module Fog module Parsers module AWS module SimpleDB class DomainMetadata < Fog::Parsers::AWS::SimpleDB::Basic def reset @response = {} end def end_element(name) case name when 'AttributeNameCount', 'AttributeNamesSizeBytes', 'AttributeValueCount', 'AttributeValuesSizeBytes', 'ItemCount', 'ItemNamesSizeBytes' response[name] = value.to_i when 'BoxUsage' response[name] = value.to_f when 'RequestId' response[name] = value when 'Timestamp' response[name] = Time.at(value.to_i) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/get_attributes.rb000066400000000000000000000014531437344660100241240ustar00rootroot00000000000000require 'fog/aws/parsers/simpledb/basic' module Fog module Parsers module AWS module SimpleDB class GetAttributes < Fog::Parsers::AWS::SimpleDB::Basic def reset @attribute = nil @response = { 'Attributes' => {} } end def end_element(name) case name when 'Attribute' @attribute = nil when 'BoxUsage' response[name] = value.to_f when 'Name' @attribute = value response['Attributes'][@attribute] ||= [] when 'RequestId' response[name] = value when 'Value' response['Attributes'][@attribute] << sdb_decode(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/list_domains.rb000066400000000000000000000011241437344660100235570ustar00rootroot00000000000000require 'fog/aws/parsers/simpledb/basic' module Fog module Parsers module AWS module SimpleDB class ListDomains < Fog::Parsers::AWS::SimpleDB::Basic def reset @response = { 'Domains' => [] } end def end_element(name) case(name) when 'BoxUsage' response[name] = value.to_f when 'DomainName' response['Domains'] << value when 'NextToken', 'RequestId' response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/simpledb/select.rb000066400000000000000000000020101437344660100223440ustar00rootroot00000000000000require 'fog/aws/parsers/simpledb/basic' module Fog module Parsers module AWS module SimpleDB class Select < Fog::Parsers::AWS::SimpleDB::Basic def reset @item_name = @attribute_name = nil @response = { 'Items' => {} } end def end_element(name) case name when 'BoxUsage' response[name] = value.to_f when 'Item' @item_name = @attribute_name = nil when 'Name' if @item_name.nil? 
@item_name = value response['Items'][@item_name] = {} else @attribute_name = value response['Items'][@item_name][@attribute_name] ||= [] end when 'NextToken', 'RequestId' response[name] = value when 'Value' response['Items'][@item_name][@attribute_name] << sdb_decode(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/000077500000000000000000000000001437344660100175535ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/sns/add_permission.rb000066400000000000000000000005531437344660100231030ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class AddPermission < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/confirm_subscription.rb000066400000000000000000000006121437344660100243400ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class ConfirmSubscription < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'SubscriptionArn', 'RequestId' @response[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/create_topic.rb000066400000000000000000000005731437344660100225460ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class CreateTopic < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'TopicArn', 'RequestId' @response[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/delete_topic.rb000066400000000000000000000005511437344660100225410ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class DeleteTopic < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/get_topic_attributes.rb000066400000000000000000000014131437344660100243220ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class GetTopicAttributes < Fog::Parsers::Base def reset @response = { 'Attributes' => {} } end def end_element(name) case name when 'key' @key = @value.rstrip when 'value' case @key when 'SubscriptionsConfirmed', 'SubscriptionsDeleted', 'SubscriptionsPending' @response['Attributes'][@key] = @value.rstrip.to_i else @response['Attributes'][@key] = (@value && @value.rstrip) || nil end when 'RequestId' @response[name] = @value.rstrip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/list_subscriptions.rb000066400000000000000000000012561437344660100240460ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class ListSubscriptions < Fog::Parsers::Base def reset @response = { 'Subscriptions' => [] } @subscription = {} end def end_element(name) case name when "TopicArn", "Protocol", "SubscriptionArn", "Owner", "Endpoint" @subscription[name] = @value.strip when "member" @response['Subscriptions'] << @subscription @subscription = {} when 'RequestId', 'NextToken' @response[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/list_topics.rb000066400000000000000000000007221437344660100224350ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class ListTopics < Fog::Parsers::Base def reset @response = { 'Topics' => [] } end def end_element(name) case name when 'TopicArn' @response['Topics'] << @value.strip when 'NextToken', 'RequestId' response[name] = @value 
end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/publish.rb000066400000000000000000000005711437344660100215510ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class Publish < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'MessageId', 'RequestId' @response[name] = @value.rstrip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/remove_permission.rb000066400000000000000000000005561437344660100236530ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class RemovePermission < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/set_topic_attributes.rb000066400000000000000000000005601437344660100243400ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class SetTopicAttributes < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/subscribe.rb000066400000000000000000000006001437344660100220550ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class Subscribe < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'SubscriptionArn', 'RequestId' @response[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sns/unsubscribe.rb000066400000000000000000000005511437344660100224250ustar00rootroot00000000000000module Fog module Parsers module AWS module SNS class Unsubscribe < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/000077500000000000000000000000001437344660100175565ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/sqs/basic.rb000066400000000000000000000006211437344660100211630ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class Basic < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/create_queue.rb000066400000000000000000000007401437344660100225530ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class CreateQueue < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value when 'QueueUrl' @response['QueueUrl'] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/get_queue_attributes.rb000066400000000000000000000020671437344660100243410ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class GetQueueAttributes < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {}, 'Attributes' => {}} end def end_element(name) case name when 'RequestId' @response['ResponseMetadata']['RequestId'] = @value when 'Name' @current_attribute_name = @value when 'Value' case @current_attribute_name when 'ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesNotVisible', 'MaximumMessageSize', 'MessageRetentionPeriod', 'VisibilityTimeout' @response['Attributes'][@current_attribute_name] = @value.to_i when 'CreatedTimestamp', 'LastModifiedTimestamp' 
@response['Attributes'][@current_attribute_name] = Time.at(@value.to_i) else @response['Attributes'][@current_attribute_name] = @value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/list_queues.rb000066400000000000000000000007641437344660100224540ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class ListQueues < Fog::Parsers::Base def reset @response = { 'QueueUrls' => [], 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value when 'QueueUrl' @response['QueueUrls'] << @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/receive_message.rb000066400000000000000000000023361437344660100232350ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class ReceiveMessage < Fog::Parsers::Base def reset @message = { 'Attributes' => {} } @response = { 'ResponseMetadata' => {}, 'Message' => []} end def end_element(name) case name when 'RequestId' @response['ResponseMetadata']['RequestId'] = @value when 'Message' @response['Message'] << @message @message = { 'Attributes' => {} } when 'Body', 'MD5OfBody', 'MessageId', 'ReceiptHandle' @message[name] = @value when 'Name' @current_attribute_name = @value when 'Value' case @current_attribute_name when 'ApproximateFirstReceiveTimestamp', 'SentTimestamp' @message['Attributes'][@current_attribute_name] = Time.at(@value.to_i / 1000.0) when 'ApproximateReceiveCount' @message['Attributes'][@current_attribute_name] = @value.to_i else @message['Attributes'][@current_attribute_name] = @value end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sqs/send_message.rb000066400000000000000000000010731437344660100225410ustar00rootroot00000000000000module Fog module Parsers module AWS module SQS class SendMessage < Fog::Parsers::Base def reset @response = { 'ResponseMetadata' => {} } end def end_element(name) case name when 'RequestId' @response['ResponseMetadata'][name] = @value when 'MessageId' @response['MessageId'] = @value when 'MD5OfMessageBody' @response['MD5OfMessageBody'] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/000077500000000000000000000000001437344660100204145ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/storage/access_control_list.rb000066400000000000000000000022151437344660100247750ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class AccessControlList < Fog::Parsers::Base def reset @in_access_control_list = false @grant = { 'Grantee' => {} } @response = { 'Owner' => {}, 'AccessControlList' => [] } end def start_element(name, attrs = []) super if name == 'AccessControlList' @in_access_control_list = true end end def end_element(name) case name when 'AccessControlList' @in_access_control_list = false when 'Grant' @response['AccessControlList'] << @grant @grant = { 'Grantee' => {} } when 'DisplayName', 'ID' if @in_access_control_list @grant['Grantee'][name] = value else @response['Owner'][name] = value end when 'Permission' @grant[name] = value when 'URI' @grant['Grantee'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/complete_multipart_upload.rb000066400000000000000000000006431437344660100262210ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class CompleteMultipartUpload < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'Bucket', 'ETag', 'Key', 'Location', 'Code', 'Message' @response[name] 
= value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/copy_object.rb000066400000000000000000000006101437344660100232360ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class CopyObject < Fog::Parsers::Base def end_element(name) case name when 'ETag' @response[name] = value.gsub('"', '') when 'LastModified' @response[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/cors_configuration.rb000066400000000000000000000020531437344660100246360ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class CorsConfiguration < Fog::Parsers::Base def reset @in_cors_configuration_list = false @cors_rule = {} @response = { 'CORSConfiguration' => [] } end def start_element(name, attrs = []) super if name == 'CORSConfiguration' @in_cors_configuration_list = true end end def end_element(name) case name when 'CORSConfiguration' @in_cors_configuration_list = false when 'CORSRule' @response['CORSConfiguration'] << @cors_rule @cors_rule = {} when 'MaxAgeSeconds' @cors_rule[name] = value.to_i when 'ID' @cors_rule[name] = value when 'AllowedOrigin', 'AllowedMethod', 'AllowedHeader', 'ExposeHeader' (@cors_rule[name] ||= []) << value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/delete_multiple_objects.rb000066400000000000000000000023341437344660100256310ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class DeleteMultipleObjects < Fog::Parsers::Base def reset @deleted = { 'Deleted' => {} } @error = { 'Error' => {} } @response = { 'DeleteResult' => [] } end def start_element(name, attrs = []) super case name when 'Deleted' @in_deleted = true end end def end_element(name) case name when 'Deleted' @response['DeleteResult'] << @deleted @deleted = { 'Deleted' => {} } @in_deleted = false when 'Error' @response['DeleteResult'] << @error @error = { 'Error' => {} } when 'Key', 'VersionId' if @in_deleted @deleted['Deleted'][name] = value else @error['Error'][name] = value end when 'DeleteMarker', 'DeletemarkerVersionId' @deleted['Deleted'][name] = value when 'Code', 'Message' @error['Error'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket.rb000066400000000000000000000033161437344660100230600ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucket < Fog::Parsers::Base def reset @object = { 'Owner' => {} } @response = { 'Contents' => [], 'CommonPrefixes' => [] } end def start_element(name, attrs = []) super case name when 'CommonPrefixes' @in_common_prefixes = true end end def end_element(name) case name when 'CommonPrefixes' @in_common_prefixes = false when 'Contents' @response['Contents'] << @object @object = { 'Owner' => {} } when 'DisplayName', 'ID' @object['Owner'][name] = value when 'ETag' @object[name] = value.gsub('"', '') if value != nil when 'IsTruncated' if value == 'true' @response['IsTruncated'] = true else @response['IsTruncated'] = false end when 'LastModified' @object['LastModified'] = Time.parse(value) when 'Marker', 'Name', 'NextContinuationToken' @response[name] = value when 'MaxKeys' @response['MaxKeys'] = value.to_i when 'Prefix' if @in_common_prefixes @response['CommonPrefixes'] << value else @response[name] = value end when 'Size' @object['Size'] = value.to_i when 'Delimiter', 'Key', 'StorageClass' @object[name] = value end end end end end end end 
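# The GetBucket parser above produces an S3 bucket listing shaped roughly as
# follows (keys come from the parser's end_element handlers; all values shown
# are illustrative). Note that 'MaxKeys' and 'Size' are coerced to Integer,
# 'IsTruncated' to a boolean, 'LastModified' to a Time, and ETag quotes are
# stripped:
#
#   {
#     'Name'           => 'my-bucket',
#     'Prefix'         => '',
#     'MaxKeys'        => 1000,
#     'IsTruncated'    => false,
#     'CommonPrefixes' => ['photos/'],
#     'Contents'       => [
#       { 'Key'          => 'photos/cat.jpg',
#         'ETag'         => 'd41d8cd98f00b204e9800998ecf8427e',
#         'LastModified' => Time.parse('2023-01-01T00:00:00Z'),
#         'Size'         => 1024,
#         'StorageClass' => 'STANDARD',
#         'Owner'        => { 'DisplayName' => 'owner-name', 'ID' => 'abc123' } }
#     ]
#   }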
fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_lifecycle.rb000066400000000000000000000057201437344660100251000ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketLifecycle < Fog::Parsers::Base def reset @expiration = {} @version_expiration = {} @transition = {} @version_transition = {} @rule = {} @response = { 'Rules' => [] } end def start_element(name, attrs=[]) super case name when 'Expiration' @in_expiration = true when 'Transition' @in_transition = true when 'NoncurrentVersionExpiration' @in_version_expiration = true when 'NoncurrentVersionTransition' @in_version_transition = true end end def end_element(name) if @in_expiration case name when 'Days' @expiration[name] = value.to_i when 'Date' @expiration[name] = value when 'Expiration' @rule['Expiration'] = @expiration @in_expiration = false @expiration = {} end elsif @in_version_expiration case name when 'NoncurrentDays' @version_expiration[name] = value.to_i when 'Date' @version_expiration[name] = value when 'NoncurrentVersionExpiration' @rule['NoncurrentVersionExpiration'] = @version_expiration @in_version_expiration = false @version_expiration = {} end elsif @in_transition case name when 'StorageClass', @transition['StorageClass'] = value when 'Date' @transition[name] = value when 'Days' @transition[name] = value.to_i when 'Transition' @rule['Transition'] = @transition @in_transition = false @transition = {} end elsif @in_version_transition case name when 'StorageClass', @version_transition['StorageClass'] = value when 'Date' @version_transition[name] = value when 'NoncurrentDays' @version_transition[name] = value.to_i when 'NoncurrentVersionTransition' @rule['NoncurrentVersionTransition'] = @transition @in_version_transition = false @version_transition = {} end else case name when 'ID', 'Prefix' @rule[name] = value when 'Status' @rule['Enabled'] = value == 'Enabled' when 'Rule' @response['Rules'] << @rule @rule = {} end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_location.rb000066400000000000000000000004751437344660100247530ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketLocation < Fog::Parsers::Base def end_element(name) case name when 'LocationConstraint' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_logging.rb000066400000000000000000000021301437344660100245570ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketLogging < Fog::Parsers::Base def reset @grant = { 'Grantee' => {} } @response = { 'BucketLoggingStatus' => {} } end def end_element(name) case name when 'DisplayName', 'ID' if @in_access_control_list @grant['Grantee'][name] = value else @response['Owner'][name] = value end when 'Grant' @response['BucketLoggingStatus']['LoggingEnabled']['TargetGrants'] << @grant @grant = { 'Grantee' => {} } when 'LoggingEnabled' @response['BucketLoggingStatus']['LoggingEnabled'] = { 'TargetGrants' => [] } when 'Permission' @grant[name] = value when 'TargetBucket', 'TargetPrefix' @response['BucketLoggingStatus'][name] = value when 'URI' @grant['Grantee'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_notification.rb000066400000000000000000000032071437344660100256250ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketNotification < Fog::Parsers::Base def reset @func = {} @queue = {} @topic = {} @response = { 
'Topics' => [], 'Queues' => [], 'CloudFunctions' => [] } end def start_element(name, attrs = []) super case name when 'TopicConfiguration' @configuration = 'topic' when 'QueueConfiguration' @configuration = 'queue' when 'CloudFunctionConfiguration' @configuration = 'func' end end def end_element(name) case @configuration when 'topic' case name when 'Id', 'Event', 'Topic' @topic[name] = value when 'TopicConfiguration' @response['Topics'] << @topic @topic = {} end when 'queue' case name when 'Id', 'Queue', 'Event' @queue[name] = value when 'QueueConfiguration' @response['Queues'] << @queue @queue = {} end when 'func' case name when 'Id', 'CloudFunction', 'InvocationRule', 'Event' @func[name] = value when 'CloudFunctionConfiguration' @response['CloudFunctions'] << @func @func = {} end end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_object_versions.rb000066400000000000000000000047641437344660100263460ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketObjectVersions < Fog::Parsers::Base def reset @delete_marker = { 'Owner' => {} } @version = { 'Owner' => {} } @in_delete_marke = false @in_version = false @response = { 'Versions' => [] } end def start_element(name, attrs = []) super case name when 'DeleteMarker' @in_delete_marker = true when 'Version' @in_version = true end end def end_element(name) case name when 'DeleteMarker' @response['Versions'] << {'DeleteMarker' => @delete_marker } @delete_marker = { 'Owner' => {} } @in_delete_marker = false when 'Version' @response['Versions'] << {'Version' => @version } @version = { 'Owner' => {} } @in_version = false when 'DisplayName', 'ID' if @in_delete_marker @delete_marker elsif @in_version @version end['Owner'][name] = value when 'ETag' @version[name] = value.gsub('"', '') when 'IsLatest' if @in_delete_marker @delete_marker elsif @in_version @version end['IsLatest'] = if value == 'true' true else false end when 'IsTruncated' if value == 'true' @response['IsTruncated'] = true else @response['IsTruncated'] = false end when 'LastModified' if @in_delete_marker @delete_marker elsif @in_version @version end['LastModified'] = Time.parse(value) when 'MaxKeys' @response['MaxKeys'] = value.to_i when 'Size' @version['Size'] = value.to_i when 'Key', 'KeyMarker', 'Name', 'NextKeyMarker', 'NextVersionIdMarker', 'Prefix', 'StorageClass', 'VersionId', 'VersionIdMarker' if @in_delete_marker @delete_marker elsif @in_version @version else @response end[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_tagging.rb000066400000000000000000000013421437344660100245550ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketTagging < Fog::Parsers::Base def reset @in_tag = {} @response = {'BucketTagging' => {}} end def start_element(name, *args) super if name == 'Tag' @in_tag = {} end end def end_element(name) case name when 'Tag' @response['BucketTagging'].merge!(@in_tag) @in_tag = {} when 'Key' @in_tag[value] = nil when 'Value' @in_tag = {@in_tag.keys.first => value} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_versioning.rb000066400000000000000000000006721437344660100253250ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetBucketVersioning < Fog::Parsers::Base def reset @response = { 'VersioningConfiguration' => {} } end def end_element(name) case name when 'Status', 'MfaDelete' @response['VersioningConfiguration'][name] = value end end 
end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_bucket_website.rb000066400000000000000000000013111437344660100245730ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETwebsite.html class GetBucketWebsite < Fog::Parsers::Base def reset @response = { 'ErrorDocument' => {}, 'IndexDocument' => {}, 'RedirectAllRequestsTo' => {} } end def end_element(name) case name when 'Key' @response['ErrorDocument'][name] = value when 'Suffix' @response['IndexDocument'][name] = value when 'HostName' @response['RedirectAllRequestsTo'][name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_object_tagging.rb000066400000000000000000000013421437344660100245460ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetObjectTagging < Fog::Parsers::Base def reset @in_tag = {} @response = {'ObjectTagging' => {}} end def start_element(name, *args) super if name == 'Tag' @in_tag = {} end end def end_element(name) case name when 'Tag' @response['ObjectTagging'].merge!(@in_tag) @in_tag = {} when 'Key' @in_tag[value] = nil when 'Value' @in_tag = {@in_tag.keys.first => value} end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_request_payment.rb000066400000000000000000000004601437344660100250250ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetRequestPayment < Fog::Parsers::Base def end_element(name) case name when 'Payer' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/get_service.rb000066400000000000000000000012561437344660100232440ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class GetService < Fog::Parsers::Base def reset @bucket = {} @response = { 'Owner' => {}, 'Buckets' => [] } end def end_element(name) case name when 'Bucket' @response['Buckets'] << @bucket @bucket = {} when 'CreationDate' @bucket['CreationDate'] = Time.parse(value) when 'DisplayName', 'ID' @response['Owner'][name] = value when 'Name' @bucket[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/initiate_multipart_upload.rb000066400000000000000000000006101437344660100262110ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class InitiateMultipartUpload < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'Bucket', 'Key', 'UploadId' @response[name] = value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/list_multipart_uploads.rb000066400000000000000000000030001437344660100255350ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class ListMultipartUploads < Fog::Parsers::Base def reset @upload = { 'Initiator' => {}, 'Owner' => {} } @response = { 'Upload' => [] } end def start_element(name, attrs = []) super case name when 'Initiator' @in_initiator = true when 'Owner' @in_owner = true end end def end_element(name) case name when 'Bucket', 'KeyMarker', 'NextKeyMarker', 'NextUploadIdMarker', 'UploadIdMarker' @response[name] = value when 'DisplayName', 'ID' if @in_initiator @upload['Initiator'][name] = value elsif @in_owner @upload['Owner'][name] = value end when 'Initiated' @upload[name] = Time.parse(value) when 'Initiator' @in_initiator = false when 'IsTruncated' @response[name] = value == 'true' when 'Key', 'StorageClass', 'UploadId' @upload[name] = value when 'MaxUploads' @response[name] = 
value.to_i when 'Owner' @in_owner = false when 'Upload' @response['Upload'] << @upload @upload = { 'Initiator' => {}, 'Owner' => {} } end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/list_parts.rb000066400000000000000000000020131437344660100231210ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class ListParts < Fog::Parsers::Base def reset @part = {} @response = { 'Initiator' => {}, 'Part' => [] } end def end_element(name) case name when 'Bucket', 'Key', 'NextPartNumberMarker', 'PartNumberMarker', 'StorageClass', 'UploadId' @response[name] = value when 'DisplayName', 'ID' @response['Initiator'][name] = value when 'ETag' @part[name] = value when 'IsTruncated' @response[name] = value == 'true' when 'LastModified' @part[name] = Time.parse(value) when 'MaxParts' @response[name] = value.to_i when 'Part' @response['Part'] << @part @part = {} when 'PartNumber', 'Size' @part[name] = value.to_i end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/storage/upload_part_copy_object.rb000066400000000000000000000006221437344660100256330ustar00rootroot00000000000000module Fog module Parsers module AWS module Storage class UploadPartCopyObject < Fog::Parsers::Base def end_element(name) case name when 'ETag' @response[name] = value.gsub('"', '') when 'LastModified' @response[name] = Time.parse(value) end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sts/000077500000000000000000000000001437344660100175615ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/parsers/sts/assume_role.rb000066400000000000000000000012051437344660100224220ustar00rootroot00000000000000module Fog module Parsers module AWS module STS class AssumeRole < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'SessionToken', 'SecretAccessKey', 'Expiration', 'AccessKeyId' @response[name] = @value.strip when 'Arn', 'AssumedRoleId' @response[name] = @value.strip when 'PackedPolicySize' @response[name] = @value when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sts/assume_role_with_saml.rb000066400000000000000000000012151437344660100244720ustar00rootroot00000000000000module Fog module Parsers module AWS module STS class AssumeRoleWithSAML < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'SessionToken', 'SecretAccessKey', 'Expiration', 'AccessKeyId' @response[name] = @value.strip when 'Arn', 'AssumedRoleId' @response[name] = @value.strip when 'PackedPolicySize' @response[name] = @value when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sts/assume_role_with_web_identity.rb000066400000000000000000000007351437344660100262320ustar00rootroot00000000000000module Fog module Parsers module AWS module STS class AssumeRoleWithWebIdentity < Fog::Parsers::Base def reset @response = {} end def end_element(name) case name when 'AssumedRoleUser', 'Audience', 'Credentials', 'PackedPolicySize', 'Provider', 'SubjectFromWebIdentityToken' @response[name] = @value.strip end end end end end end end fog-aws-3.18.0/lib/fog/aws/parsers/sts/get_session_token.rb000066400000000000000000000012661437344660100236350ustar00rootroot00000000000000module Fog module Parsers module AWS module STS class GetSessionToken < Fog::Parsers::Base # http://docs.amazonwebservices.com/IAM/latest/UserGuide/index.html?CreatingFedTokens.html def reset @response = {} end def end_element(name) case name when 'SessionToken', 'SecretAccessKey', 
'Expiration', 'AccessKeyId' @response[name] = @value.strip when 'Arn', 'FederatedUserId', 'PackedPolicySize' @response[name] = @value.strip when 'RequestId' @response[name] = @value end end end end end end end fog-aws-3.18.0/lib/fog/aws/rds.rb000066400000000000000000000254521437344660100164160ustar00rootroot00000000000000module Fog module AWS class RDS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class IdentifierTaken < Fog::Errors::Error; end class InvalidParameterCombination < Fog::Errors::Error; end class AuthorizationAlreadyExists < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :version, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/rds' request :describe_events request :create_db_instance request :modify_db_instance request :describe_db_instances request :delete_db_instance request :reboot_db_instance request :create_db_instance_read_replica request :describe_db_engine_versions request :describe_db_reserved_instances request :add_tags_to_resource request :list_tags_for_resource request :remove_tags_from_resource request :describe_db_snapshots request :create_db_snapshot request :delete_db_snapshot request :modify_db_snapshot_attribute request :copy_db_snapshot request :create_db_parameter_group request :delete_db_parameter_group request :modify_db_parameter_group request :describe_db_parameter_groups request :describe_db_security_groups request :create_db_security_group request :delete_db_security_group request :authorize_db_security_group_ingress request :revoke_db_security_group_ingress request :describe_db_parameters request :restore_db_instance_from_db_snapshot request :restore_db_instance_to_point_in_time request :create_db_subnet_group request :describe_db_subnet_groups request :delete_db_subnet_group request :modify_db_subnet_group request :describe_orderable_db_instance_options request :describe_db_log_files request :download_db_logfile_portion request :promote_read_replica request :describe_event_subscriptions request :create_event_subscription request :delete_event_subscription request :describe_engine_default_parameters request :describe_db_clusters request :describe_db_cluster_snapshots request :create_db_cluster request :create_db_cluster_snapshot request :delete_db_cluster request :delete_db_cluster_snapshot model_path 'fog/aws/models/rds' model :server collection :servers model :cluster collection :clusters collection :cluster_snapshots model :snapshot collection :snapshots model :parameter_group collection :parameter_groups model :parameter collection :parameters model :security_group collection :security_groups model :subnet_group collection :subnet_groups model :instance_option collection :instance_options model :log_file collection :log_files model :event_subscription collection :event_subscriptions class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :clusters => {}, :cluster_snapshots => {}, :servers => {}, :security_groups => {}, :subnet_groups => {}, :snapshots => {}, :event_subscriptions => {}, :default_parameters => [ { "DataType" => "integer", "Source" => "engine-default", "Description" => "Intended for use with master-to-master replication, and can be used to control the operation of AUTO_INCREMENT columns", "ApplyType" => "dynamic", "AllowedValues" => "1-65535", "ParameterName" => 
"auto_increment_increment" } ], :db_engine_versions => [ { 'Engine' => "mysql", 'DBParameterGroupFamily' => "mysql5.1", 'DBEngineDescription' => "MySQL Community Edition", 'EngineVersion' => "5.1.57", 'DBEngineVersionDescription' => "MySQL 5.1.57" }, { 'Engine' => "postgres", 'DBParameterGroupFamily' => "postgres9.3", 'DBEngineDescription' => "PostgreSQL", 'EngineVersion' => "9.3.5", 'DBEngineVersionDescription' => "PostgreSQL 9.3.5" }, ], :parameter_groups => { "default.mysql5.1" => { "DBParameterGroupFamily" => "mysql5.1", "Description" => "Default parameter group for mysql5.1", "DBParameterGroupName" => "default.mysql5.1" }, "default.mysql5.5" => { "DBParameterGroupFamily" => "mysql5.5", "Description" => "Default parameter group for mysql5.5", "DBParameterGroupName" => "default.mysql5.5" } } } end end end def self.reset @data = nil end attr_accessor :region, :aws_access_key_id def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) setup_credentials(options) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real attr_reader :region include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to ELB # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # elb = ELB.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1' and etc. # # ==== Returns # * ELB object with connection to AWS. 
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.rds' @connection_options = options[:connection_options] || {} @region = options[:region] || 'us-east-1' @host = options[:host] || "rds.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version = options[:version] || '2014-10-31' setup_credentials(options) end def owner_id @owner_id ||= security_groups.get('default').owner_id end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'rds') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, {'Content-Type' => 'application/x-www-form-urlencoded' }, { :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port, :version => @version, :method => 'POST' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) if match.empty? 
case error.message when 'Not Found' raise Fog::AWS::RDS::NotFound.slurp(error, 'RDS Instance not found') else raise end else raise case match[:code] when 'DBInstanceNotFound', 'DBParameterGroupNotFound', 'DBSnapshotNotFound', 'DBSecurityGroupNotFound', 'SubscriptionNotFound', 'DBClusterNotFoundFault' Fog::AWS::RDS::NotFound.slurp(error, match[:message]) when 'DBParameterGroupAlreadyExists' Fog::AWS::RDS::IdentifierTaken.slurp(error, match[:message]) when 'AuthorizationAlreadyExists' Fog::AWS::RDS::AuthorizationAlreadyExists.slurp(error, match[:message]) else Fog::AWS::RDS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end end fog-aws-3.18.0/lib/fog/aws/redshift.rb000066400000000000000000000116231437344660100174310ustar00rootroot00000000000000module Fog module AWS class Redshift < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/redshift' request :describe_clusters request :describe_cluster_parameter_groups request :describe_cluster_parameters request :describe_cluster_security_groups request :describe_cluster_snapshots request :describe_cluster_subnet_groups request :describe_cluster_versions request :describe_default_cluster_parameters request :describe_events request :describe_orderable_cluster_options request :describe_reserved_node_offerings request :describe_reserved_nodes request :describe_resize request :create_cluster request :create_cluster_parameter_group request :create_cluster_security_group request :create_cluster_snapshot request :create_cluster_subnet_group request :modify_cluster request :modify_cluster_parameter_group request :modify_cluster_subnet_group request :delete_cluster request :delete_cluster_parameter_group request :delete_cluster_security_group request :delete_cluster_snapshot request :delete_cluster_subnet_group request :authorize_cluster_security_group_ingress request :authorize_snapshot_access request :copy_cluster_snapshot request :purchase_reserved_node_offering request :reboot_cluster request :reset_cluster_parameter_group request :restore_from_cluster_snapshot request :revoke_cluster_security_group_ingress request :revoke_snapshot_access class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to Redshift # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # ses = SES.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'us-east-1' and etc. # # ==== Returns # * Redshift object with connection to AWS. 
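      #
      # An illustrative Redshift constructor call, based on the
      # requires/recognizes declarations above (the region option is optional
      # and defaults to 'us-east-1'):
      #
      #   redshift = Fog::AWS::Redshift.new(
      #     :aws_access_key_id => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key,
      #     :region => 'us-east-1'
      #   )
      #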
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.redshift' @connection_options = options[:connection_options] || {} @host = options[:host] || "redshift.#{@region}.amazonaws.com" @version = '2012-12-01' @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key,@region,'redshift') end def request(params, &block) refresh_credentials_if_expired parser = params.delete(:parser) date = Fog::Time.now params[:headers]['Date'] = date.to_date_header params[:headers]['x-amz-date'] = date.to_iso8601_basic params[:headers]['Host'] = @host params[:headers]['x-amz-redshift-version'] = @version params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token params[:headers]['Authorization'] = @signer.sign params, date params[:parser] = parser if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(params, &block) end else _request(params, &block) end end def _request(params, &block) @connection.request(params, &block) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/000077500000000000000000000000001437344660100171445ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/000077500000000000000000000000001437344660100216145ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/attach_instances.rb000066400000000000000000000041551437344660100254610ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Removes one or more instances from the specified Auto Scaling group. # # cli equiv: # `aws autoscaling attach-instances --instance-ids i-93633f9b --auto-scaling-group-name my-auto-scaling-group` # # ==== Parameters # # * AutoScalingGroupName<~String> - The name of the Auto Scaling group`` # * 'InstanceIds'<~Array> - The list of Auto Scaling instances to detach. # # ==== See Also # # http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_AttachInstances.html ExpectedOptions[:asg_name] = %w[AutoScalingGroupName] ExpectedOptions[:instance_ids] = %w[InstanceIds] def attach_instances(auto_scaling_group_name, options = {}) if instance_ids = options.delete('InstanceIds') options.merge!(AWS.indexed_param('InstanceIds.member.%d', [*instance_ids])) end request({ 'Action' => 'AttachInstances', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def attach_instances(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:asg_name] - ExpectedOptions[:instance_ids] unless unexpected_options.empty? 
raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/attach_load_balancer_target_groups.rb000066400000000000000000000040601437344660100312000ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Attaches one or more load balancer target groups to the specified Auto Scaling # group. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # 'TagetGroupARNs'<~Array> - A list of target group arns to use. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_AttachLoadBalancerTargetGroups.html # ExpectedOptions[:attach_load_balancer_target_groups] = %w[TargetGroupARNs] def attach_load_balancer_target_groups(auto_scaling_group_name, options = {}) if target_group_arns = options.delete('TargetGroupARNs') options.merge!(AWS.indexed_param('TargetGroupARNs.member.%d', *target_group_arns)) end request({ 'Action' => 'AttachLoadBalancerTargetGroups', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def attach_load_balancer_target_groups(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:attach_load_balancer_target_groups] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/attach_load_balancers.rb000066400000000000000000000037441437344660100264260ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Attaches one or more load balancers to the specified Auto Scaling # group. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # 'LoadBalancerNames'<~Array> - A list of LoadBalancers to use. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_AttachLoadBalancers.html # ExpectedOptions[:attach_load_balancers] = %w[LoadBalancerNames] def attach_load_balancers(auto_scaling_group_name, options = {}) if load_balancer_names = options.delete('LoadBalancerNames') options.merge!(AWS.indexed_param('LoadBalancerNames.member.%d', [*load_balancer_names])) end request({ 'Action' => 'AttachLoadBalancers', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def attach_load_balancers(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:attach_load_balancers] unless unexpected_options.empty? 
raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/create_auto_scaling_group.rb000066400000000000000000000161201437344660100273500ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Creates a new Auto Scaling group with the specified name. Once the # creation request is completed, the AutoScalingGroup is ready to be # used in other calls. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * availability_zones<~Array> - A list of availability zones for the # Auto Scaling group. # * launch_configuration_name<~String> - The name of the launch # configuration to use with the Auto Scaling group. # * max_size<~Integer> - The maximum size of the Auto Scaling group. # * min_size<~Integer> - The minimum size of the Auto Scaling group. # * options<~Hash>: # * 'DefaultCooldown'<~Integer> - The amount of time, in seconds, # after a scaling activity completes before any further trigger- # related scaling activities can start. # * 'DesiredCapacity'<~Integer> - The number of Amazon EC2 instances # that should be running in the group. # * 'HealthCheckGracePeriod'<~Integer> - Length of time in seconds # after a new Amazon EC2 instance comes into service that Auto # Scaling starts checking its health. # * 'HealthCheckType'<~String> - The service you want the health # status from, Amazon EC2 or Elastic Load Balancer. Valid values # are "EC2" or "ELB". # * 'LoadBalancerNames'<~Array> - A list of LoadBalancers to use. # * 'PlacementGroup'<~String> - Physical location of your cluster # placement group created in Amazon EC2. # * 'Tags'<~Array>: # * tag<~Hash>: # * 'Key'<~String> - The key of the tag. # * 'PropagateAtLaunch'<~Boolean> - Specifies whether the new tag # will be applied to instances launched after the tag is # created. The same behavior applies to updates: If you change # a tag, the changed tag will be applied to all instances # launched after you made the change. # * 'ResourceId'<~String>: The name of the AutoScaling group. # * 'ResourceType'<~String>: The kind of resource to which the # tag is applied. Currently, Auto Scaling supports the # auto-scaling-group resource type. # * 'Value'<~String>: The value of the tag. # * 'TerminationPolicies'<~Array> - A standalone termination policy # or a list of termination policies used to select the instance to # terminate. The policies are executed in the order that they are # listed. # * 'VPCZoneIdentifier'<~String> - A comma-separated list of subnet # identifiers of Amazon Virtual Private Clouds (Amazon VPCs). 
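        #
        # ==== Examples
        # An illustrative call (a sketch only; the group name, zones, launch
        # configuration and tag values are hypothetical, and the positional
        # arguments follow the method signature below):
        #
        #   create_auto_scaling_group(
        #     'my-asg',
        #     ['us-east-1a', 'us-east-1b'],
        #     'my-launch-configuration',
        #     4,  # max_size
        #     1,  # min_size
        #     'DesiredCapacity' => 2,
        #     'Tags' => [
        #       { 'Key' => 'role', 'Value' => 'web', 'PropagateAtLaunch' => true }
        #     ]
        #   )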
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_CreateAutoScalingGroup.html # ExpectedOptions[:create_auto_scaling_group] = %w[DefaultCooldown DesiredCapacity HealthCheckGracePeriod HealthCheckType LoadBalancerNames PlacementGroup Tags TerminationPolicies VPCZoneIdentifier] def create_auto_scaling_group(auto_scaling_group_name, availability_zones, launch_configuration_name, max_size, min_size, options = {}) options.merge!(AWS.indexed_param('AvailabilityZones.member.%d', [*availability_zones])) options.delete('AvailabilityZones') if load_balancer_names = options.delete('LoadBalancerNames') options.merge!(AWS.indexed_param('LoadBalancerNames.member.%d', [*load_balancer_names])) end if tags = options.delete('Tags') options.merge!(AWS.indexed_param("Tags.member.%d", [*tags])) end if termination_policies = options.delete('TerminationPolicies') options.merge!(AWS.indexed_param('TerminationPolicies.member.%d', [*termination_policies])) end request({ 'Action' => 'CreateAutoScalingGroup', 'AutoScalingGroupName' => auto_scaling_group_name, 'LaunchConfigurationName' => launch_configuration_name, 'MaxSize' => max_size, 'MinSize' => min_size, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def create_auto_scaling_group(auto_scaling_group_name, availability_zones, launch_configuration_name, max_size, min_size, options = {}) unexpected_options = options.keys - ExpectedOptions[:create_auto_scaling_group] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end if self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::IdentifierTaken.new("AutoScalingGroup by this name already exists - A group with the name #{auto_scaling_group_name} already exists") end unless self.data[:launch_configurations].key?(launch_configuration_name) raise Fog::AWS::AutoScaling::ValidationError.new('Launch configuration name not found - null') end self.data[:auto_scaling_groups][auto_scaling_group_name] = { 'AutoScalingGroupARN' => Fog::AWS::Mock.arn('autoscaling', self.data[:owner_id], "autoScalingGroup:00000000-0000-0000-0000-000000000000:autoScalingGroupName/#{auto_scaling_group_name}", @region), 'AutoScalingGroupName' => auto_scaling_group_name, 'AvailabilityZones' => [*availability_zones], 'CreatedTime' => Time.now.utc, 'DefaultCooldown' => 300, 'DesiredCapacity' => 0, 'EnabledMetrics' => [], 'HealthCheckGracePeriod' => 0, 'HealthCheckType' => 'EC2', 'Instances' => [], 'LaunchConfigurationName' => launch_configuration_name, 'LoadBalancerNames' => [], 'MaxSize' => max_size, 'MinSize' => min_size, 'PlacementGroup' => nil, 'SuspendedProcesses' => [], 'Tags' => [], 'TargetGroupARNs' => [], 'TerminationPolicies' => ['Default'], 'VPCZoneIdentifier' => nil }.merge!(options) response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/create_launch_configuration.rb000066400000000000000000000133171437344660100276720ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Creates a new launch configuration. 
When created, the new launch # configuration is available for immediate use. # # ==== Parameters # * image_id<~String> - Unique ID of the Amazon Machine Image (AMI) # which was assigned during registration. # * instance_type<~String> - The instance type of the EC2 instance. # * launch_configuration_name<~String> - The name of the launch # configuration to create. # * options<~Hash>: # * 'BlockDeviceMappings'<~Array>: # * 'DeviceName'<~String> - The name of the device within Amazon # EC2. # * 'Ebs.SnapshotId'<~String> - The snapshot ID. # * 'Ebs.VolumeSize'<~Integer> - The volume size, in GigaBytes. # * 'VirtualName'<~String> - The virtual name associated with the # device. # * 'IamInstanceProfile'<~String> The name or the Amazon Resource # Name (ARN) of the instance profile associated with the IAM role # for the instance. # * 'InstanceMonitoring.Enabled'<~Boolean> - Enables detailed # monitoring, which is enabled by default. # * 'KernelId'<~String> - The ID of the kernel associated with the # Amazon EC2 AMI. # * 'KeyName'<~String> - The name of the Amazon EC2 key pair. # * 'RamdiskId'<~String> - The ID of the RAM disk associated with the # Amazon EC2 AMI. # * 'SecurityGroups'<~Array> - The names of the security groups with # which to associate Amazon EC2 or Amazon VPC instances. # * 'SpotPrice'<~String> - The maximum hourly price to be paid for # any Spot Instance launched to fulfill the request. Spot Instances # are launched when the price you specify exceeds the current Spot # market price. # * 'UserData'<~String> - The user data available to the launched # Amazon EC2 instances. # * 'EbsOptimized'<~Boolean> - Whether the instance is optimized for # EBS I/O. Not required, default false. # * 'PlacementTenancy'<~String> - The tenancy of the instance. Valid # values: default | dedicated. 
Default: default # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_CreateLaunchConfiguration.html # def create_launch_configuration(image_id, instance_type, launch_configuration_name, options = {}) if block_device_mappings = options.delete('BlockDeviceMappings') block_device_mappings.each_with_index do |mapping, i| for key, value in mapping options.merge!({ format("BlockDeviceMappings.member.%d.#{key}", i+1) => value }) end end end if security_groups = options.delete('SecurityGroups') options.merge!(AWS.indexed_param('SecurityGroups.member.%d', [*security_groups])) end if classic_link_groups = options.delete('ClassicLinkVPCSecurityGroups') options.merge!(AWS.indexed_param('ClassicLinkVPCSecurityGroups.member.%d', [*classic_link_groups])) end if options['UserData'] options['UserData'] = Base64.encode64(options['UserData']) end request({ 'Action' => 'CreateLaunchConfiguration', 'ImageId' => image_id, 'InstanceType' => instance_type, 'LaunchConfigurationName' => launch_configuration_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def create_launch_configuration(image_id, instance_type, launch_configuration_name, options = {}) if self.data[:launch_configurations].key?(launch_configuration_name) raise Fog::AWS::AutoScaling::IdentifierTaken.new("Launch Configuration by this name already exists - A launch configuration already exists with the name #{launch_configuration_name}") end self.data[:launch_configurations][launch_configuration_name] = { 'AssociatePublicIpAddress' => nil, 'BlockDeviceMappings' => [], 'CreatedTime' => Time.now.utc, 'EbsOptimized' => false, 'IamInstanceProfile' => nil, 'ImageId' => image_id, 'InstanceMonitoring' => {'Enabled' => true}, 'InstanceType' => instance_type, 'KernelId' => nil, 'KeyName' => nil, 'LaunchConfigurationARN' => Fog::AWS::Mock.arn('autoscaling', self.data[:owner_id], "launchConfiguration:00000000-0000-0000-0000-000000000000:launchConfigurationName/#{launch_configuration_name}", @region), 'LaunchConfigurationName' => launch_configuration_name, 'PlacementTenancy' => nil, 'RamdiskId' => nil, 'SecurityGroups' => [], 'UserData' => nil }.merge!(options) response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/create_or_update_tags.rb000066400000000000000000000041321437344660100264640ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Creates new tags or updates existing tags for an Auto Scaling group. # # ==== Parameters # * tags<~Array>: # * tag<~Hash>: # * Key<~String> - The key of the tag. # * PropagateAtLaunch<~Boolean> - Specifies whether the new tag # will be applied to instances launched after the tag is created. # The same behavior applies to updates: If you change a tag, the # changed tag will be applied to all instances launched after you # made the change. # * ResourceId<~String> - The name of the Auto Scaling group. # * ResourceType<~String> - The kind of resource to which the tag # is applied. Currently, Auto Scaling supports the # auto-scaling-group resource type. # * Value<~String> - The value of the tag. 
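        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the tag key, value and group name
        # are hypothetical placeholders.
        #
        #   client.create_or_update_tags(
        #     [
        #       {
        #         'Key'               => 'environment',
        #         'Value'             => 'staging',
        #         'PropagateAtLaunch' => true,
        #         'ResourceId'        => 'my-asg',
        #         'ResourceType'      => 'auto-scaling-group'
        #       }
        #     ]
        #   )
        #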
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_CreateOrUpdateTags.html # def create_or_update_tags(tags) params = {} tags.each_with_index do |tag, i| tag.each do |key, value| params["Tags.member.#{i+1}.#{key}"] = value unless value.nil? end end request({ 'Action' => 'CreateOrUpdateTags', :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(params)) end end class Mock def create_or_update_tags(tags) if tags.to_a.empty? raise Fog::AWS::AutoScaling::ValidationError.new("1 validation error detected: Value null at 'tags' failed to satisfy constraint: Member must not be null") end raise Fog::Mock::NotImplementedError end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_auto_scaling_group.rb000066400000000000000000000037721437344660100273600ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Deletes the specified auto scaling group if the group has no # instances and no scaling activities in progress. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # * 'ForceDelete'<~Boolean> - Starting with API version 2011-01-01, # specifies that the Auto Scaling group will be deleted along with # all instances associated with the group, without waiting for all # instances to be terminated. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeleteAutoScalingGroup.html # def delete_auto_scaling_group(auto_scaling_group_name, options = {}) request({ 'Action' => 'DeleteAutoScalingGroup', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def delete_auto_scaling_group(auto_scaling_group_name, options = {}) unless self.data[:auto_scaling_groups].delete(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError, "The auto scaling group '#{auto_scaling_group_name}' does not exist." end self.data[:notification_configurations].delete(auto_scaling_group_name) response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_launch_configuration.rb000066400000000000000000000033501437344660100276650ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Deletes the specified launch configuration. # # The specified launch configuration must not be attached to an Auto # Scaling group. Once this call completes, the launch configuration is # no longer available for use. # # ==== Parameters # * launch_configuration_name<~String> - The name of the launch # configuration. 
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeleteLaunchConfiguration.html # def delete_launch_configuration(launch_configuration_name) request({ 'Action' => 'DeleteLaunchConfiguration', 'LaunchConfigurationName' => launch_configuration_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }) end end class Mock def delete_launch_configuration(launch_configuration_name) unless self.data[:launch_configurations].delete(launch_configuration_name) raise Fog::AWS::AutoScaling::NotFound, "The launch configuration '#{launch_configuration_name}' does not exist." end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_notification_configuration.rb000066400000000000000000000044261437344660100311060ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Deletes notifications created by put_notification_configuration. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * topic_arn<~String> - The Amazon Resource Name (ARN) of the Amazon # Simple Notification Service (SNS) topic. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeleteNotificationConfiguration.html # def delete_notification_configuration(auto_scaling_group_name, topic_arn) request({ 'Action' => 'DeleteNotificationConfiguration', 'AutoScalingGroupName' => auto_scaling_group_name, 'TopicARN' => topic_arn, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }) end end class Mock def delete_notification_configuration(auto_scaling_group_name, topic_arn) unless self.data[:notification_configurations].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - %s' % auto_scaling_group_name) end unless self.data[:notification_configurations][auto_scaling_group_name].key?(topic_arn) raise Fog::AWS::AutoScaling::ValidationError.new("Notification Topic '#{topic_arn}' doesn't exist for '#{self.data[:owner_id]}'") end self.data[:notification_configurations][auto_scaling_group_name].delete(topic_arn) if self.data[:notification_configurations][auto_scaling_group_name].empty? self.data[:notification_configurations].delete(auto_scaling_group_name) end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_policy.rb000066400000000000000000000031421437344660100247620ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Deletes a policy created by put_scaling_policy # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * policy_name<~String> - The name or PolicyARN of the policy you want # to delete. 
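        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the group and policy names are
        # hypothetical placeholders.
        #
        #   client.delete_policy('my-asg', 'my-scale-out-policy')
        #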
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeletePolicy.html # def delete_policy(auto_scaling_group_name, policy_name) request({ 'Action' => 'DeletePolicy', 'AutoScalingGroupName' => auto_scaling_group_name, 'PolicyName' => policy_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }) end end class Mock def delete_policy(auto_scaling_group_name, policy_name) unless self.data[:scaling_policies].delete(policy_name) raise Fog::AWS::AutoScaling::NotFound, "The scaling policy '#{policy_name}' does not exist." end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_scheduled_action.rb000066400000000000000000000025601437344660100267630ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Deletes a scheduled action previously created using the # put_scheduled_update_group_action. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * scheduled_action_name<~String> - The name of the action you want to # delete. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeleteScheduledAction.html # def delete_scheduled_action(auto_scaling_group_name, scheduled_action_name) request({ 'Action' => 'DeleteScheduledAction', 'AutoScalingGroupName' => auto_scaling_group_name, 'ScheduledActionName' => scheduled_action_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }) end end class Mock def delete_scheduled_action(auto_scaling_group_name, scheduled_action_name) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/delete_tags.rb000066400000000000000000000040661437344660100244270ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Removes the specified tags or a set of tags from a set of resources. # # ==== Parameters # * tags<~Array>: # * tag<~Hash>: # * Key<~String> - The key of the tag. # * PropagateAtLaunch<~Boolean> - Specifies whether the new tag # will be applied to instances launched after the tag is created. # The same behavior applies to updates: If you change a tag, the # changed tag will be applied to all instances launched after you # made the change. # * ResourceId<~String> - The name of the Auto Scaling group. # * ResourceType<~String> - The kind of resource to which the tag # is applied. Currently, Auto Scaling supports the # auto-scaling-group resource type. # * Value<~String> - The value of the tag. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DeleteTags.html # def delete_tags(tags) params = {} tags.each_with_index do |tag, i| tag.each do |key, value| params["Tags.member.#{i+1}.#{key}"] = value unless value.nil? 
end end request({ 'Action' => 'DeleteTags', :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(params)) end end class Mock def delete_tags(tags) if tags.to_a.empty? raise Fog::AWS::AutoScaling::ValidationError.new("1 validation error detected: Value null at 'tags' failed to satisfy constraint: Member must not be null") end raise Fog::Mock::NotImplementedError end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_adjustment_types.rb000066400000000000000000000031021437344660100273770ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_adjustment_types' # Returns policy adjustment types for use in the put_scaling_policy # action. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeAdjustmentTypesResponse'<~Hash>: # * 'AdjustmentTypes'<~Array>: # * 'AdjustmentType'<~String> - A policy adjustment type. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeAdjustmentTypes.html # def describe_adjustment_types() request({ 'Action' => 'DescribeAdjustmentTypes', :idempotent => true, :parser => Fog::Parsers::AWS::AutoScaling::DescribeAdjustmentTypes.new }) end end class Mock def describe_adjustment_types() results = { 'AdjustmentTypes' => [] } self.data[:adjustment_types].each do |adjustment_type| results['AdjustmentTypes'] << { 'AdjustmentType' => adjustment_type } end response = Excon::Response.new response.status = 200 response.body = { 'DescribeAdjustmentTypesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_auto_scaling_groups.rb000066400000000000000000000150271437344660100300550ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_auto_scaling_groups' # Returns a full description of each Auto Scaling group in the given # list. This includes all Amazon EC2 instances that are members of the # group. If a list of names is not provided, the service returns the # full details of all Auto Scaling groups. # # This action supports pagination by returning a token if there are # more pages to retrieve. To get the next page, call this action again # with the returned token as the NextToken parameter. # # ==== Parameters # * options<~Hash>: # * 'AutoScalingGroupNames'<~Array> - A list of Auto Scaling group # names. # * 'MaxRecords'<~Integer> - The maximum number of records to return. # * 'NextToken'<~String> - A string that marks the start of the next # batch of returned results. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeAutoScalingGroupsResponse'<~Hash>: # * 'AutoScalingGroups'<~Array>: # * 'AutoScalingGroup'<~Hash>: # * 'AutoScalingGroupARN'<~String> - The Amazon Resource Name # (ARN) of the Auto Scaling group. # * 'AutoScalingGroupName'<~String> - Specifies the name of # the group. # * 'AvailabilityZones'<~Array> - Contains a list of # availability zones for the group. # * 'CreatedTime'<~Time> - Specifies the date and time the # Auto Scaling group was created. # * 'DefaultCooldown'<~Integer> - The number of seconds after # a scaling activity completes before any further scaling # activities can start. 
# * 'DesiredCapacity'<~Integer> - Specifies the desired # capacity of the Auto Scaling group. # * 'EnabledMetrics'<~Array>: # * enabledmetric<~Hash>: # * 'Granularity'<~String> - The granularity of the # enabled metric. # * 'Metrics'<~String> - The name of the enabled metric. # * 'HealthCheckGracePeriod'<~Integer>: The length of time # that Auto Scaling waits before checking an instance's # health status. The grace period begins when an instance # comes into service. # * 'HealthCheckType'<~String>: The service of interest for # the health status check, either "EC2" for Amazon EC2 or # "ELB" for Elastic Load Balancing. # * 'Instances'<~Array>: # * instance<~Hash>: # * 'AvailabilityZone'<~String>: Availability zone # associated with this instance. # * 'HealthStatus'<~String>: The instance's health # status. # * 'InstanceId'<~String>: Specifies the EC2 instance ID. # * 'LaunchConfigurationName'<~String>: The launch # configuration associated with this instance. # * 'LifecycleState'<~String>: Contains a description of # the current lifecycle state. # * 'LaunchConfigurationName'<~String> - Specifies the name # of the associated launch configuration. # * 'LoadBalancerNames'<~Array> - A list of load balancers # associated with this Auto Scaling group. # * 'MaxSize'<~Integer> - The maximum size of the Auto # Scaling group. # * 'MinSize'<~Integer> - The minimum size of the Auto # Scaling group. # * 'PlacementGroup'<~String> - The name of the cluster # placement group, if applicable. # * 'SuspendedProcesses'<~Array>: # * suspendedprocess'<~Hash>: # * 'ProcessName'<~String> - The name of the suspended # process. # * 'SuspensionReason'<~String> - The reason that the # process was suspended. # * 'TerminationPolicies'<~Array> - A standalone termination # policy or a list of termination policies for this Auto # Scaling group. # * 'VPCZoneIdentifier'<~String> - The subnet identifier for # the Amazon VPC connection, if applicable. You can specify # several subnets in a comma-separated list. # * 'NextToken'<~String> - A string that marks the start of the # next batch of returned results. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeAutoScalingGroups.html # def describe_auto_scaling_groups(options = {}) if auto_scaling_group_names = options.delete('AutoScalingGroupNames') options.merge!(AWS.indexed_param('AutoScalingGroupNames.member.%d', [*auto_scaling_group_names])) end request({ 'Action' => 'DescribeAutoScalingGroups', :parser => Fog::Parsers::AWS::AutoScaling::DescribeAutoScalingGroups.new }.merge!(options)) end end class Mock def describe_auto_scaling_groups(options = {}) results = { 'AutoScalingGroups' => [] } asg_set = self.data[:auto_scaling_groups] if !options["AutoScalingGroupNames"].nil? 
asg_set = asg_set.reject do |asg_name, asg_data| ![*options["AutoScalingGroupNames"]].include?(asg_name) end end asg_set.each do |asg_name, asg_data| results['AutoScalingGroups'] << { 'AutoScalingGroupName' => asg_name }.merge!(asg_data) end response = Excon::Response.new response.status = 200 response.body = { 'DescribeAutoScalingGroupsResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_auto_scaling_instances.rb000066400000000000000000000100711437344660100305170ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_auto_scaling_instances' # Returns a description of each Auto Scaling instance in the # instance_ids list. If a list is not provided, the service returns the # full details of all instances. # # This action supports pagination by returning a token if there are # more pages to retrieve. To get the next page, call this action again # with the returned token as the NextToken parameter. # # ==== Parameters # * options<~Hash>: # * 'InstanceIds'<~Array> - The list of Auto Scaling instances to # describe. If this list is omitted, all auto scaling instances are # described. The list of requested instances cannot contain more # than 50 items. If unknown instances are requested, they are # ignored with no error. # * 'MaxRecords'<~Integer> - The aximum number of Auto Scaling # instances to be described with each call. # * 'NextToken'<~String> - The token returned by a previous call to # indicate that there is more data available. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeAutoScalingInstancesResponse'<~Hash>: # * 'AutoScalingInstances'<~Array>: # * autoscalinginstancedetails<~Hash>: # * 'AutoScalingGroupName'<~String> - The name of the Auto # Scaling Group associated with this instance. # * 'AvailabilityZone'<~String> - The availability zone in # which this instance resides. # * 'HealthStatus'<~String> - The health status of this # instance. "Healthy" means that the instance is healthy # and should remain in service. "Unhealthy" means that the # instance is unhealthy. Auto Scaling should terminate and # replace it. # * 'InstanceId'<~String> - The instance's EC2 instance ID. # * 'LaunchConfigurationName'<~String> - The launch # configuration associated with this instance. # * 'LifecycleState'<~String> - The life cycle state of this # instance. # * 'NextToken'<~String> - Acts as a paging mechanism for large # result sets. Set to a non-empty string if there are # additional results waiting to be returned. Pass this in to # subsequent calls to return additional results. 
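        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the instance id is a hypothetical
        # placeholder.
        #
        #   client.describe_auto_scaling_instances('InstanceIds' => ['i-0123456789abcdef0'])
        #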
# # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeAutoScalingInstances.html # def describe_auto_scaling_instances(options = {}) if instance_ids = options.delete('InstanceIds') options.merge!(AWS.indexed_param('InstanceIds.member.%d', [*instance_ids])) end request({ 'Action' => 'DescribeAutoScalingInstances', :parser => Fog::Parsers::AWS::AutoScaling::DescribeAutoScalingInstances.new }.merge!(options)) end end class Mock def describe_auto_scaling_instances(options = {}) results = { 'AutoScalingInstances' => [] } self.data[:auto_scaling_groups].each do |asg_name, asg_data| asg_data['Instances'].each do |instance| results['AutoScalingInstances'] << { 'AutoScalingGroupName' => asg_name }.merge!(instance) end end response = Excon::Response.new response.status = 200 response.body = { 'DescribeAutoScalingInstancesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_auto_scaling_notification_types.rb000066400000000000000000000033321437344660100324440ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_auto_scaling_notification_types' # Returns a list of all notification types that are supported by Auto # Scaling. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeAutoScalingNotificationTypesResult'<~Hash>: # * 'AutoScalingNotificationTypes'<~Array>: # * 'notificationType'<~String> - A notification type. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeAutoScalingNotificationTypes.html # def describe_auto_scaling_notification_types() request({ 'Action' => 'DescribeAutoScalingNotificationTypes', :idempotent => true, :parser => Fog::Parsers::AWS::AutoScaling::DescribeAutoScalingNotificationTypes.new }) end end class Mock def describe_auto_scaling_notification_types() results = { 'AutoScalingNotificationTypes' => [], } self.data[:notification_types].each do |notification_type| results['AutoScalingNotificationTypes'] << notification_type end response = Excon::Response.new response.status = 200 response.body = { 'DescribeAutoScalingNotificationTypesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_launch_configurations.rb000066400000000000000000000121771437344660100303750ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_launch_configurations' # Returns a full description of the launch configurations given the # specified names. # # If no names are specified, then the full details of all launch # configurations are returned. # # ==== Parameters # * options<~Hash>: # * 'LaunchConfigurationNames'<~Array> - A list of launch # configuration names. # * 'MaxRecords'<~Integer> - The maximum number of launch # configurations. # * 'NextToken'<~String> - The token returned by a previous call to # indicate that there is more data available. 
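        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the launch configuration name is
        # a hypothetical placeholder.
        #
        #   client.describe_launch_configurations(
        #     'LaunchConfigurationNames' => ['my-launch-configuration'],
        #     'MaxRecords'               => 10
        #   )
        #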
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLaunchConfigurationsResponse'<~Hash>: # * 'LaunchConfigurations'<~Array>: # * launchconfiguration'<~Hash>: # * 'BlockDeviceMappings'<~Array>: # * blockdevicemapping<~Hash>: # * 'DeviceName'<~String> - The name of the device within # EC2. # * 'Ebs'<~Hash>: # * 'SnapshotId'<~String> - The snapshot ID # * 'VolumeSize'<~Integer> - The volume size, in # GigaBytes. # * 'VirtualName'<~String> - The virtual name associated # with the device. # * 'CreatedTime'<~Time> - Provides the creation date and # time for this launch configuration. # * 'ImageId'<~String> - Provides the unique ID of the Amazon # Machine Image (AMI) that was assigned during # registration. # * 'InstanceMonitoring'<~Hash>: # * 'Enabled'<~Boolean> - If true, instance monitoring is # enabled. # * 'InstanceType'<~String> - Specifies the instance type of # the EC2 instance. # * 'KernelId'<~String> - Provides the ID of the kernel # associated with the EC2 AMI. # * 'KeyName'<~String> - Provides the name of the EC2 key # pair. # * 'LaunchConfigurationARN'<~String> - The launch # configuration's Amazon Resource Name (ARN). # * 'LaunchConfigurationName'<~String> - Specifies the name # of the launch configuration. # * 'RamdiskId'<~String> - Provides ID of the RAM disk # associated with the EC2 AMI. # * 'PlacementTenancy'<~String> - The tenancy of the instance. # * 'SecurityGroups'<~Array> - A description of the security # groups to associate with the EC2 instances. # * 'UserData'<~String> - The user data available to the # launched EC2 instances. # * 'NextToken'<~String> - Acts as a paging mechanism for large # result sets. Set to a non-empty string if there are # additional results waiting to be returned. Pass this in to # subsequent calls to return additional results. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeLaunchConfigurations.html # def describe_launch_configurations(options = {}) if launch_configuration_names = options.delete('LaunchConfigurationNames') options.merge!(AWS.indexed_param('LaunchConfigurationNames.member.%d', [*launch_configuration_names])) end request({ 'Action' => 'DescribeLaunchConfigurations', :parser => Fog::Parsers::AWS::AutoScaling::DescribeLaunchConfigurations.new }.merge!(options)) end end class Mock def describe_launch_configurations(options = {}) launch_configuration_names = options.delete('LaunchConfigurationNames') # even a nil object will turn into an empty array lc = [*launch_configuration_names] launch_configurations = if lc.any? 
lc.map do |lc_name| l_conf = self.data[:launch_configurations].find { |name, data| name == lc_name } #raise Fog::AWS::AutoScaling::NotFound unless l_conf l_conf[1].dup if l_conf end.compact else self.data[:launch_configurations].map { |lc, values| values.dup } end response = Excon::Response.new response.status = 200 response.body = { 'DescribeLaunchConfigurationsResult' => { 'LaunchConfigurations' => launch_configurations }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_metric_collection_types.rb000066400000000000000000000036521437344660100307310ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_metric_collection_types' # Returns a list of metrics and a corresponding list of granularities # for each metric. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeMetricCollectionTypesResult'<~Hash>: # * 'Granularities'<~Array>: # * 'Granularity'<~String> - The granularity of a Metric. # * 'Metrics'<~Array>: # * 'Metric'<~String> - The name of a Metric. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeMetricCollectionTypes.html # def describe_metric_collection_types() request({ 'Action' => 'DescribeMetricCollectionTypes', :idempotent => true, :parser => Fog::Parsers::AWS::AutoScaling::DescribeMetricCollectionTypes.new }) end end class Mock def describe_metric_collection_types() results = { 'Granularities' => [], 'Metrics' => [] } self.data[:metric_collection_types][:granularities].each do |granularity| results['Granularities'] << { 'Granularity' => granularity } end self.data[:metric_collection_types][:metrics].each do |metric| results['Metrics'] << { 'Metric' => metric } end response = Excon::Response.new response.status = 200 response.body = { 'DescribeMetricCollectionTypesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_notification_configurations.rb000066400000000000000000000060411437344660100316020ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_notification_configurations' # Returns a list of notification actions associated with Auto Scaling # groups for specified events. # # ==== Parameters # * options<~Hash>: # * 'AutoScalingGroupNames'<~String> - The name of the Auto Scaling # group. # * 'MaxRecords'<~Integer> - The maximum number of records to return. # * 'NextToken'<~String> - A string that is used to mark the start of # the next batch of returned results for pagination. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeNotificationConfigurationsResult'<~Hash>: # * 'NotificationConfigurations'<~Array>: # * notificationConfiguration<~Hash>: # * 'AutoScalingGroupName'<~String> - Specifies the Auto # Scaling group name. # * 'NotificationType'<~String> - The types of events for an # action to start. # * 'TopicARN'<~String> - The Amazon Resource Name (ARN) of the # Amazon Simple Notification Service (SNS) topic. 
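        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the group name is a hypothetical
        # placeholder.
        #
        #   client.describe_notification_configurations(
        #     'AutoScalingGroupNames' => 'my-asg',
        #     'MaxRecords'            => 20
        #   )
        #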
# # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeNotificationConfigurations.html # def describe_notification_configurations(options = {}) if auto_scaling_group_names = options.delete('AutoScalingGroupNames') options.merge!(AWS.indexed_param('AutoScalingGroupNames.member.%d', [*auto_scaling_group_names])) end request({ 'Action' => 'DescribeNotificationConfigurations', :parser => Fog::Parsers::AWS::AutoScaling::DescribeNotificationConfigurations.new }.merge!(options)) end end class Mock def describe_notification_configurations(options = {}) results = { 'NotificationConfigurations' => [] } (options['AutoScalingGroupNames']||self.data[:notification_configurations].keys).each do |asg_name| (self.data[:notification_configurations][asg_name]||{}).each do |topic_arn, notification_types| notification_types.each do |notification_type| results['NotificationConfigurations'] << { 'AutoScalingGroupName' => asg_name, 'NotificationType' => notification_type, 'TopicARN' => topic_arn, } end end end response = Excon::Response.new response.status = 200 response.body = { 'DescribeNotificationConfigurationsResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_policies.rb000066400000000000000000000113341437344660100256120ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_policies' # Returns descriptions of what each policy does. This action supports # pagination. If the response includes a token, there are more records # available. To get the additional records, repeat the request with the # response token as the NextToken parameter. # # ==== Parameters # * options<~Hash>: # * 'AutoScalingGroupName'<~String> - The name of the Auto Scaling # group. # * 'MaxRecords'<~Integer> - The maximum number of policies that will # be described with each call. # * 'NextToken'<~String> - The token returned by a previous call to # indicate that there is more data available. # * PolicyNames<~Array> - A list of policy names or policy ARNs to be # described. If this list is omitted, all policy names are # described. If an auto scaling group name is provided, the results # are limited to that group.The list of requested policy names # cannot contain more than 50 items. If unknown policy names are # requested, they are ignored with no error. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribePoliciesResult'<~Hash>: # * 'ScalingPolicies'<~Array>: # * 'AdjustmentType'<~String> - Specifies whether the # adjustment is an absolute number or a percentage of the # current capacity. # * 'Alarms'<~Array>: # * 'AlarmARN'<~String> - The Amazon Resource Name (ARN) of # the alarm. # * 'AlarmName'<~String> - The name of the alarm. # * 'AutoScalingGroupName'<~String> - The name of the Auto # Scaling group associated with this scaling policy. # * 'Cooldown'<~Integer> - The amount of time, in seconds, # after a scaling activity completes before any further # trigger-related scaling activities can start. # * 'PolicyARN'<~String> - The Amazon Resource Name (ARN) of # the policy. # * 'PolicyName'<~String> - The name of the scaling policy. # * 'ScalingAdjustment'<~Integer> - The number associated with # the specified AdjustmentType. 
A positive value adds to the # current capacity and a negative value removes from the # current capacity. # * 'NextToken'<~String> - Acts as a paging mechanism for large # result sets. Set to a non-empty string if there are # additional results waiting to be returned. Pass this in to # subsequent calls to return additional results. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribePolicies.html # def describe_policies(options = {}) if policy_names = options.delete('PolicyNames') options.merge!(AWS.indexed_param('PolicyNames.member.%d', [*policy_names])) end request({ 'Action' => 'DescribePolicies', :parser => Fog::Parsers::AWS::AutoScaling::DescribePolicies.new }.merge!(options)) end end class Mock def describe_policies(options = {}) results = { 'ScalingPolicies' => [] } policy_set = self.data[:scaling_policies] for opt_key, opt_value in options if opt_key == "PolicyNames" && opt_value != nil && opt_value != "" policy_set = policy_set.reject do |asp_name, asp_data| ![*options["PolicyNames"]].include?(asp_name) end elsif opt_key == "AutoScalingGroupName" && opt_value != nil && opt_value != "" policy_set = policy_set.reject do |asp_name, asp_data| options["AutoScalingGroupName"] != asp_data["AutoScalingGroupName"] end end end policy_set.each do |asp_name, asp_data| results['ScalingPolicies'] << { 'PolicyName' => asp_name }.merge!(asp_data) end response = Excon::Response.new response.status = 200 response.body = { 'DescribePoliciesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_scaling_activities.rb000066400000000000000000000074431437344660100276550ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_scaling_activities' # Returns the scaling activities for the specified Auto Scaling group. # # If the specified activity_ids list is empty, all the activities from # the past six weeks are returned. Activities are sorted by completion # time. Activities still in progress appear first on the list. # # This action supports pagination. If the response includes a token, # there are more records available. To get the additional records, # repeat the request with the response token as the NextToken # parameter. # # ==== Parameters # * options<~Hash>: # * 'ActivityIds'<~Array> - A list containing the activity IDs of the # desired scaling activities. If this list is omitted, all # activities are described. If an AutoScalingGroupName is provided, # the results are limited to that group. The list of requested # activities cannot contain more than 50 items. If unknown # activities are requested, they are ignored with no error. # * 'AutoScalingGroupName'<~String> - The name of the Auto Scaling # group. # * 'MaxRecords'<~Integer> - The maximum number of scaling activities # to return. # * 'NextToken'<~String> - The token returned by a previous call to # indicate that there is more data available. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeScalingActivitiesResponse'<~Hash>: # * 'Activities'<~Array>: # * 'ActivityId'<~String> - Specifies the ID of the activity. # * 'AutoScalingGroupName'<~String> - The name of the Auto # Scaling group. # * 'Cause'<~String> - Contins the reason the activity was # begun. 
# * 'Description'<~String> - Contains a friendly, more verbose # description of the scaling activity. # * 'EndTime'<~Time> - Provides the end time of this activity. # * 'Progress'<~Integer> - Specifies a value between 0 and 100 # that indicates the progress of the activity. # * 'StartTime'<~Time> - Provides the start time of this # activity. # * 'StatusCode'<~String> - Contains the current status of the # activity. # * 'StatusMessage'<~String> - Contains a friendly, more # verbose description of the activity status. # * 'NextToken'<~String> - Acts as a paging mechanism for large # result sets. Set to a non-empty string if there are # additional results waiting to be returned. Pass this in to # subsequent calls to return additional results. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeScalingActivities.html # def describe_scaling_activities(options = {}) if activity_ids = options.delete('ActivityIds') options.merge!(AWS.indexed_param('ActivityIds.member.%d', [*activity_ids])) end request({ 'Action' => 'DescribeScalingActivities', :parser => Fog::Parsers::AWS::AutoScaling::DescribeScalingActivities.new }.merge!(options)) end end class Mock def describe_scaling_activities(options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_scaling_process_types.rb000066400000000000000000000031741437344660100304100ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_scaling_process_types' # Returns scaling process types for use in the resume_processes and # suspend_processes actions. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeScalingProcessTypesResult'<~Hash>: # * 'Processes'<~Array>: # * processtype<~Hash>: # * 'ProcessName'<~String> - The name of a process. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeScalingProcessTypes.html # def describe_scaling_process_types() request({ 'Action' => 'DescribeScalingProcessTypes', :idempotent => true, :parser => Fog::Parsers::AWS::AutoScaling::DescribeScalingProcessTypes.new }) end end class Mock def describe_scaling_process_types() results = { 'Processes' => [] } self.data[:process_types].each do |process_type| results['Processes'] << { 'ProcessName' => process_type } end response = Excon::Response.new response.status = 200 response.body = { 'DescribeScalingProcessTypesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_scheduled_actions.rb000066400000000000000000000100021437344660100274520ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_scheduled_actions' # List all the actions scheduled for your Auto Scaling group that # haven't been executed. To see a list of action already executed, see # the activity record returned in describe_scaling_activities. # # ==== Parameters # * options<~Hash>: # * 'AutoScalingGroupName'<~String> - The name of the Auto Scaling # group. # * 'EndTime'<~Time> - The latest scheduled start time to return. If # scheduled action names are provided, this field will be ignored. # * 'MaxRecords'<~Integer> - The maximum number of scheduled actions # to return. 
# * 'NextToken'<~String> - The token returned by a previous call to # indicate that there is more data available. # * 'ScheduledActionNames'<~Array> - A list of scheduled actions to # be described. If this list is omitted, all scheduled actions are # described. The list of requested scheduled actions cannot contain # more than 50 items. If an auto scaling group name is provided, # the results are limited to that group. If unknown scheduled # actions are requested, they are ignored with no error. # * 'StartTime'<~Time> - The earliest scheduled start time to return. # If scheduled action names are provided, this field will be # ignored. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeScheduledActionsResponse'<~Hash>: # * 'ScheduledUpdateGroupActions'<~Array>: # * scheduledupdatesroupAction<~Hash>: # * 'AutoScalingGroupName'<~String> - The name of the Auto # Scaling group to be updated. # * 'DesiredCapacity'<~Integer> -The number of instances you # prefer to maintain in your Auto Scaling group. # * 'EndTime'<~Time> - The time for this action to end. # * 'MaxSize'<~Integer> - The maximum size of the Auto Scaling # group. # * 'MinSize'<~Integer> - The minimum size of the Auto Scaling # group. # * 'Recurrence'<~String> - The time when recurring future # actions will start. Start time is specified by the user # following the Unix cron syntax format. # * 'ScheduledActionARN'<~String> - The Amazon Resource Name # (ARN) of this scheduled action. # * 'StartTime'<~Time> - The time for this action to start. # * 'Time'<~Time> - The time that the action is scheduled to # occur. This value can be up to one month in the future. # * 'NextToken'<~String> - Acts as a paging mechanism for large # result sets. Set to a non-empty string if there are # additional results waiting to be returned. Pass this in to # subsequent calls to return additional results. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeScheduledActions.html # def describe_scheduled_actions(options = {}) if scheduled_action_names = options.delete('ScheduledActionNames') options.merge!(AWS.indexed_param('ScheduledActionNames.member.%d', [*scheduled_action_names])) end request({ 'Action' => 'DescribeScheduledActions', :parser => Fog::Parsers::AWS::AutoScaling::DescribeScheduledActions.new }.merge!(options)) end end class Mock def describe_scheduled_actions(options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_tags.rb000066400000000000000000000054751437344660100247520ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_tags' # Lists the Auto Scaling group tags. # # ==== Parameters # * options<~Hash>: # * tag<~Hash>: # * Key<~String> - The key of the tag. # * PropagateAtLaunch<~Boolean> - Specifies whether the new tag # will be applied to instances launched after the tag is created. # The same behavior applies to updates: If you change a tag, # changed tag will be applied to all instances launched after you # made the change. # * ResourceId<~String> - The name of the Auto Scaling group. # * ResourceType<~String> - The kind of resource to which the tag # is applied. Currently, Auto Scaling supports the # auto-scaling-group resource type. # * Value<~String> - The value of the tag. 
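        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection. The 'Filters' form shown here and
        # the filter key 'auto-scaling-group' are assumptions based on this
        # request accepting a 'Filters' option; the group name is a
        # hypothetical placeholder.
        #
        #   client.describe_tags('Filters' => {'auto-scaling-group' => 'my-asg'})
        #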
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeTagsResult'<~Hash>: # * 'NextToken'<~String> - A string used to mark the start of the # next batch of returned results. # * 'Tags'<~Hash>: # * tagDescription<~Hash>: # * 'Key'<~String> - The key of the tag. # * 'PropagateAtLaunch'<~Boolean> - Specifies whether the new # tag will be applied to instances launched after the tag # is created. The same behavior applies to updates: If you # change a tag, the changed tag will be applied to all # instances launched after you made the change. # * 'ResourceId'<~String> - The name of the Auto Scaling # group. # * 'ResourceType'<~String> - The kind of resource to which # the tag is applied. Currently, Auto Scaling supports the # auto-scaling-group resource type. # * 'Value'<~String> - The value of the tag. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeTags.html # def describe_tags(options={}) if filters = options.delete('Filters') options.merge!(Fog::AWS.indexed_filters(filters)) end request({ 'Action' => 'DescribeTags', :parser => Fog::Parsers::AWS::AutoScaling::DescribeTags.new }.merge!(options)) end end class Mock def describe_tags(options={}) raise Fog::Mock::NotImplementedError end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/describe_termination_policy_types.rb000066400000000000000000000031461437344660100311410ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/describe_termination_policy_types' # Returns a list of all termination policies supported by Auto Scaling. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeTerminationPolicyTypesResult'<~Hash>: # * 'TerminationPolicyTypes'<~Array>: # * terminationtype<~String>: # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DescribeTerminationPolicyTypes.html # def describe_termination_policy_types() request({ 'Action' => 'DescribeTerminationPolicyTypes', :idempotent => true, :parser => Fog::Parsers::AWS::AutoScaling::DescribeTerminationPolicyTypes.new }) end end class Mock def describe_termination_policy_types() results = { 'TerminationPolicyTypes' => [] } self.data[:termination_policy_types].each do |termination_policy_type| results['TerminationPolicyTypes'] << termination_policy_type end response = Excon::Response.new response.status = 200 response.body = { 'DescribeTerminationPolicyTypesResult' => results, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/detach_instances.rb000066400000000000000000000052731437344660100254470ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Removes one or more instances from the specified Auto Scaling group. # # cli equiv: # `aws autoscaling detach-instances --instance-ids i-2a2d8978 --auto-scaling-group-name my-asg --should-decrement-desired-capacity` # # ==== Parameters # # * AutoScalingGroupName<~String> - The name of the Auto Scaling group`` # * 'InstanceIds'<~Array> - The list of Auto Scaling instances to detach. 
# * ShouldDecrementDesiredCapacity<~Boolean> - decrement the asg capacity or not (it will boot another if an instance id detached) # # ==== See Also # # http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_DetachInstances.html ExpectedOptions[:asg_name] = %w[AutoScalingGroupName] ExpectedOptions[:instance_ids] = %w[InstanceIds] ExpectedOptions[:should_decrement_desired_capacity] = %w[ShouldDecrementDesiredCapacity] def detach_instances(auto_scaling_group_name, options = {}) if should_decrement_desired_capacity = options.delete('ShouldDecrementDesiredCapacity') options.merge!('ShouldDecrementDesiredCapacity' => true.to_s) else options.merge!('ShouldDecrementDesiredCapacity' => false.to_s) end if instance_ids = options.delete('InstanceIds') options.merge!(AWS.indexed_param('InstanceIds.member.%d', [*instance_ids])) end request({ 'Action' => 'DetachInstances', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def detach_instances(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:asg_name] - ExpectedOptions[:instance_ids] - ExpectedOptions[:should_decrement_desired_capacity] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/detach_load_balancer_target_groups.rb000066400000000000000000000040601437344660100311640ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Removes one or more load balancer target groups from the specified # Auto Scaling group. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # 'TargetGroupARNs'<~Array> - A list of target groups to detach. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DetachLoadBalancerTargetGroups.html # ExpectedOptions[:detach_load_balancer_target_groups] = %w[TargetGroupARNs] def detach_load_balancer_target_groups(auto_scaling_group_name, options = {}) if target_group_arns = options.delete('TargetGroupARNs') options.merge!(AWS.indexed_param('TargetGroupARNs.member.%d', *target_group_arns)) end request({ 'Action' => 'DetachLoadBalancerTargetGroups', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def detach_load_balancer_target_groups(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:detach_load_balancer_target_groups] unless unexpected_options.empty? 
raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/detach_load_balancers.rb000066400000000000000000000044401437344660100264040ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Removes one or more load balancers from the specified Auto Scaling # group. # # When you detach a load balancer, it enters the Removing state while # deregistering the instances in the group. When all instances are # deregistered, then you can no longer describe the load balancer using # DescribeLoadBalancers. Note that the instances remain running. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # 'LoadBalancerNames'<~Array> - A list of LoadBalancers to use. # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DetachLoadBalancers.html # ExpectedOptions[:detach_load_balancers] = %w[LoadBalancerNames] def detach_load_balancers(auto_scaling_group_name, options = {}) if load_balancer_names = options.delete('LoadBalancerNames') options.merge!(AWS.indexed_param('LoadBalancerNames.member.%d', [*load_balancer_names])) end request({ 'Action' => 'DetachLoadBalancers', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def detach_load_balancers(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:detach_load_balancers] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/disable_metrics_collection.rb000066400000000000000000000034541437344660100275130ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Disables monitoring of group metrics for the Auto Scaling group # specified in AutoScalingGroupName. You can specify the list of # affected metrics with the Metrics parameter. # # ==== Parameters # * 'AutoScalingGroupName'<~String> - The name or ARN of the Auto # Scaling group. # * options<~Hash>: # * Metrics<~Array> - The list of metrics to disable. If no metrics # are specified, all metrics are disabled. 
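        # ==== Example
        # An illustrative sketch, assuming +client+ is an existing
        # Fog::AWS::AutoScaling connection; the group name and metric names
        # are hypothetical placeholders (group metric names are not
        # enumerated in this file).
        #
        #   client.disable_metrics_collection(
        #     'my-asg',
        #     'Metrics' => %w[GroupMinSize GroupMaxSize]
        #   )
        #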
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_DisableMetricsCollection.html # def disable_metrics_collection(auto_scaling_group_name, options = {}) if metrics = options.delete('Metrics') options.merge!(AWS.indexed_param('Metrics.member.%d', [*metrics])) end request({ 'Action' => 'DisableMetricsCollection', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def disable_metrics_collection(auto_scaling_group_name, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) Fog::AWS::AutoScaling::ValidationError.new("Group #{auto_scaling_group_name} not found") end Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/enable_metrics_collection.rb000066400000000000000000000045701437344660100273360ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Enables monitoring of group metrics for the Auto Scaling group # specified in auto_scaling_group_name. You can specify the list of # enabled metrics with the metrics parameter. # # Auto scaling metrics collection can be turned on only if the # instance_monitoring.enabled flag, in the Auto Scaling group's launch # configuration, is set to true. # # ==== Parameters # * 'AutoScalingGroupName'<~String>: The name or ARN of the Auto # Scaling group # * options<~Hash>: # * Granularity<~String>: The granularity to associate with the # metrics to collect. # * Metrics<~Array>: The list of metrics to collect. If no metrics # are specified, all metrics are enabled. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_EnableMetricsCollection.html # def enable_metrics_collection(auto_scaling_group_name, granularity, options = {}) if metrics = options.delete('Metrics') options.merge!(AWS.indexed_param('Metrics.member.%d', [*metrics])) end request({ 'Action' => 'EnableMetricsCollection', 'AutoScalingGroupName' => auto_scaling_group_name, 'Granularity' => granularity, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def enable_metrics_collection(auto_scaling_group_name, granularity, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) Fog::AWS::AutoScaling::ValidationError.new("Group #{auto_scaling_group_name} not found") end unless self.data[:metric_collection_types][:granularities].include?(granularity) Fog::AWS::AutoScaling::ValidationError.new('Valid metrics granularity type is: [1Minute].') end Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/execute_policy.rb000066400000000000000000000026051437344660100251650ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Runs the policy you create for your Auto Scaling group in # put_scaling_policy. # # ==== Parameters # * 'PolicyName'<~String> - The name or PolicyARN of the policy you # want to run. # * options<~Hash>: # * 'AutoScalingGroupName'<~String> - The name or ARN of the Auto # Scaling group. 
# * 'HonorCooldown'<~Boolean> - Set to true if you want Auto Scaling # to reject this request if the Auto Scaling group is in cooldown. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_ExecutePolicy.html # def execute_policy(policy_name, options = {}) request({ 'Action' => 'ExecutePolicy', 'PolicyName' => policy_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def execute_policy(policy_name, options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/put_notification_configuration.rb000066400000000000000000000055521437344660100304550ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/put_notification_configuration' # Creates a notification configuration for an Auto Scaling group. To # update an existing policy, overwrite the existing notification # configuration name and set the parameter(s) you want to change. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * notification_types<~Array> - The type of events that will trigger # the notification. # * topic_arn<~String> - The Amazon Resource Name (ARN) of the Amazon # Simple Notification Service (SNS) topic. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_PutNotificationConfiguration.html # def put_notification_configuration(auto_scaling_group_name, notification_types, topic_arn) params = AWS.indexed_param('NotificationTypes.member.%d', [*notification_types]) request({ 'Action' => 'PutNotificationConfiguration', 'AutoScalingGroupName' => auto_scaling_group_name, 'TopicARN' => topic_arn, :parser => Fog::Parsers::AWS::AutoScaling::PutNotificationConfiguration.new }.merge!(params)) end end class Mock def put_notification_configuration(auto_scaling_group_name, notification_types, topic_arn) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new("AutoScalingGroup name not found - #{auto_scaling_group_name}") end if notification_types.to_a.empty? raise Fog::AWS::AutoScaling::ValidationError.new("1 validation error detected: Value null at 'notificationTypes' failed to satisfy constraint: Member must not be null") end invalid_types = notification_types.to_a - self.data[:notification_types] unless invalid_types.empty? raise Fog::AWS::AutoScaling::ValidationError.new(""#{invalid_types.first}" is not a valid Notification Type.") end self.data[:notification_configurations][auto_scaling_group_name] ||= {} self.data[:notification_configurations][auto_scaling_group_name][topic_arn] = notification_types.to_a.uniq response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/put_scaling_policy.rb000066400000000000000000000072511437344660100260350ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/put_scaling_policy' # Creates or updates a policy for an Auto Scaling group. 
To update an # existing policy, use the existing policy name and set the # parameter(s) you want to change. Any existing parameter not changed # in an update to an existing policy is not changed in this update # request. # # ==== Parameters # * adjustment_type<~String> - Specifies whether the scaling_adjustment # is an absolute number or a percentage of the current capacity. # * auto_scaling_group_name<~String> - The name or ARN of the Auto # Scaling group. # * policy_name<~String> - The name of the policy you want to create or # update. # * scaling_adjustment<~Integer> - The number of instances by which to # scale. AdjustmentType determines the interpretation of this number # (e.g., as an absolute number or as a percentage of the existing # Auto Scaling group size). A positive increment adds to the current # capacity and a negative value removes from the current capacity. # * options<~Hash>: # * 'Cooldown'<~Integer> - The amount of time, in seconds, after a # scaling activity completes before any further trigger-related # scaling activities can start # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'PutScalingPolicyResult'<~Hash>: # * 'PolicyARN'<~String> - A policy's Amazon Resource Name (ARN). # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_PutScalingPolicy.html # def put_scaling_policy(adjustment_type, auto_scaling_group_name, policy_name, scaling_adjustment, options = {}) request({ 'Action' => 'PutScalingPolicy', 'AdjustmentType' => adjustment_type, 'AutoScalingGroupName' => auto_scaling_group_name, 'PolicyName' => policy_name, 'ScalingAdjustment' => scaling_adjustment, :parser => Fog::Parsers::AWS::AutoScaling::PutScalingPolicy.new }.merge!(options)) end end class Mock def put_scaling_policy(adjustment_type, auto_scaling_group_name, policy_name, scaling_adjustment, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('Auto Scaling Group name not found - null') end self.data[:scaling_policies][policy_name] = { 'AdjustmentType' => adjustment_type, 'Alarms' => [], 'AutoScalingGroupName' => auto_scaling_group_name, 'Cooldown' => 0, 'MinAdjustmentStep' => 0, 'PolicyARN' => Fog::AWS::Mock.arn('autoscaling', self.data[:owner_id], "scalingPolicy:00000000-0000-0000-0000-000000000000:autoScalingGroupName/#{auto_scaling_group_name}:policyName/#{policy_name}", self.region), 'PolicyName' => policy_name, 'ScalingAdjustment' => scaling_adjustment }.merge!(options) response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/put_scheduled_update_group_action.rb000066400000000000000000000054771437344660100311210ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Creates a scheduled scaling action for a Auto Scaling group. If you # leave a parameter unspecified, the corresponding value remains # unchanged in the affected Auto Scaling group. # # ==== Parameters # * auto_scaling_group_name<~String> - The name or ARN of the Auto # Scaling Group. # * scheduled_action_name<~String> - Name of this scaling action. # * time<~Datetime> - The time for this action to start (deprecated: # use StartTime, EndTime and Recurrence). 
# * options<~Hash>: # * 'DesiredCapacity'<~Integer> - The number of EC2 instances that # should be running in this group. # * 'EndTime'<~DateTime> - The time for this action to end. # * 'MaxSize'<~Integer> - The maximum size for the Auto Scaling # group. # * 'MinSize'<~Integer> - The minimum size for the Auto Scaling # group. # * 'Recurrence'<~String> - The time when recurring future actions # will start. Start time is specified by the user following the # Unix cron syntax format. When StartTime and EndTime are specified # with Recurrence, they form the boundaries of when the recurring # action will start and stop. # * 'StartTime'<~DateTime> - The time for this action to start # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_PutScheduledUpdateGroupAction.html # def put_scheduled_update_group_action(auto_scaling_group_name, scheduled_action_name, time=nil, options = {}) # The 'Time' paramenter is now an alias for StartTime and needs to be identical if specified. time = options['StartTime'].nil? ? time : options['StartTime'] if !time.nil? time = time.class == Time ? time.utc.iso8601 : Time.parse(time).utc.iso8601 end request({ 'Action' => 'PutScheduledUpdateGroupAction', 'AutoScalingGroupName' => auto_scaling_group_name, 'ScheduledActionName' => scheduled_action_name, 'Time' => time, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def put_scheduled_update_group_action(auto_scaling_group_name, scheduled_policy_name, time, options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/resume_processes.rb000066400000000000000000000036421437344660100255340ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Resumes Auto Scaling processes for an Auto Scaling group. # # ==== Parameters # * auto_scaling_group_name'<~String> - The name or Amazon Resource # Name (ARN) of the Auto Scaling group. # * options<~Hash>: # * 'ScalingProcesses'<~Array> - The processes that you want to # resume. To resume all process types, omit this parameter. 
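        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::AutoScaling
        #   # connection named `auto_scaling` and a group named 'my-asg'):
        #   auto_scaling.resume_processes('my-asg', 'ScalingProcesses' => ['Launch', 'Terminate'])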
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_ResumeProcesses.html # def resume_processes(auto_scaling_group_name, options = {}) if scaling_processes = options.delete('ScalingProcesses') options.merge!(AWS.indexed_param('ScalingProcesses.member.%d', [*scaling_processes])) end request({ 'Action' => 'ResumeProcesses', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def resume_processes(auto_scaling_group_name, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new("AutoScalingGroup name not found - no such group: #{auto_scaling_group_name}") end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/set_desired_capacity.rb000066400000000000000000000071541437344660100263170ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Adjusts the desired size of the AutoScalingGroup by initiating # scaling activities. When reducing the size of the group, it is not # possible to define which EC2 instances will be terminated. This # applies to any auto-scaling decisions that might result in # terminating instances. # # There are two common use cases for set_desired_capacity: one for # users of the Auto Scaling triggering system, and another for # developers who write their own triggering systems. Both use cases # relate to the concept of cooldown. # # In the first case, if you use the Auto Scaling triggering system, # set_desired_capacity changes the size of your Auto Scaling group # without regard to the cooldown period. This could be useful, for # example, if Auto Scaling did something unexpected for some reason. If # your cooldown period is 10 minutes, Auto Scaling would normally # reject requests to change the size of the group for that entire 10 # minute period. The set_desired_capacity command allows you to # circumvent this restriction and change the size of the group before # the end of the cooldown period. # # In the second case, if you write your own triggering system, you can # use set_desired_capacity to control the size of your Auto Scaling # group. If you want the same cooldown functionality that Auto Scaling # offers, you can configure set_desired_capacity to honor cooldown by # setting the HonorCooldown parameter to true. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * desired_capacity<~Integer> - The new capacity setting for the Auto # Scaling group. # * options<~Hash>: # * 'HonorCooldown'<~Boolean> - By default, set_desired_capacity # overrides any cooldown period. Set to true if you want Auto # Scaling to reject this request if the Auto Scaling group is in # cooldown. 
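        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::AutoScaling
        #   # connection named `auto_scaling` and a group named 'my-asg'):
        #   auto_scaling.set_desired_capacity('my-asg', 4, 'HonorCooldown' => 'true')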
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_SetDesiredCapacity.html # def set_desired_capacity(auto_scaling_group_name, desired_capacity, options = {}) request({ 'Action' => 'SetDesiredCapacity', 'AutoScalingGroupName' => auto_scaling_group_name, 'DesiredCapacity' => desired_capacity, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def set_desired_capacity(auto_scaling_group_name, desired_capacity, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end self.data[:auto_scaling_groups][auto_scaling_group_name]['DesiredCapacity'] = desired_capacity response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/set_instance_health.rb000066400000000000000000000034271437344660100261530ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Sets the health status of an instance. # # ==== Parameters # * health_status<~String> - The health status of the instance. # "Healthy" means that the instance is healthy and should remain in # service. "Unhealthy" means that the instance is unhealthy. Auto # Scaling should terminate and replace it. # * instance_id<~String> - The identifier of the EC2 instance. # * options<~Hash>: # * 'ShouldRespectGracePeriod'<~Boolean> - If true, this call should # respect the grace period associated with the group. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_SetInstanceHealth.html # def set_instance_health(health_status, instance_id, options = {}) request({ 'Action' => 'SetInstanceHealth', 'HealthStatus' => health_status, 'InstanceId' => instance_id, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def set_instance_health(health_status, instance_id, options = {}) unless self.data[:health_states].include?(health_status) raise Fog::AWS::AutoScaling::ValidationError.new('Valid instance health states are: [#{self.data[:health_states].join(", ")}].') end Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/set_instance_protection.rb000066400000000000000000000051211437344660100270650ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Sets or removes scale in instance protection from one or more instances from the specified # Auto Scaling group. # # cli equiv: # `aws autoscaling set-instance-protection --instance-ids i-5f2e8a0d --auto-scaling-group-name my-asg --protected-from-scale-in` # # ==== Parameters # # * AutoScalingGroupName<~String> - The name of the Auto Scaling group`` # * 'InstanceIds'<~Array> - The list of Auto Scaling instances to set or remove protection on. 
# * 'ProtectedFromScaleIn'<~Boolean> - Protection state # # ==== See Also # # https://docs.aws.amazon.com/autoscaling/latest/APIReference/API_SetInstanceProtection.html ExpectedOptions[:asg_name] = %w[AutoScalingGroupName] ExpectedOptions[:instance_ids] = %w[InstanceIds] ExpectedOptions[:protected_from_scale_in] = %w[ProtectedFromScaleIn] def set_instance_protection(auto_scaling_group_name, options = {}) if instance_ids = options.delete('InstanceIds') options.merge!(AWS.indexed_param('InstanceIds.member.%d', [*instance_ids])) end protected_from_scale_in = options.delete('ProtectedFromScaleIn') request({ 'Action' => 'SetInstanceProtection', 'AutoScalingGroupName' => auto_scaling_group_name, 'ProtectedFromScaleIn' => protected_from_scale_in, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def set_instance_protection(auto_scaling_group_name, options = {}) unexpected_options = options.keys - \ ExpectedOptions[:asg_name] - \ ExpectedOptions[:instance_ids] - \ ExpectedOptions[:protected_from_scale_in] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/suspend_processes.rb000066400000000000000000000041561437344660100257160ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Suspends Auto Scaling processes for an Auto Scaling group. To suspend # specific process types, specify them by name with the # ScalingProcesses parameter. To suspend all process types, omit the # ScalingProcesses.member.N parameter. # # ==== Parameters # * 'AutoScalingGroupName'<~String> - The name or Amazon Resource Name # (ARN) of the Auto Scaling group. # * options<~Hash>: # * 'ScalingProcesses'<~Array> - The processes that you want to # suspend. To suspend all process types, omit this parameter. 
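        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::AutoScaling
        #   # connection named `auto_scaling` and a group named 'my-asg'):
        #   auto_scaling.suspend_processes('my-asg', 'ScalingProcesses' => ['ReplaceUnhealthy', 'AZRebalance'])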
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_SuspendProcesses.html # def suspend_processes(auto_scaling_group_name, options = {}) if scaling_processes = options.delete('ScalingProcesses') options.merge!(AWS.indexed_param('ScalingProcesses.member.%d', [*scaling_processes])) end request({ 'Action' => 'SuspendProcesses', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def suspend_processes(auto_scaling_group_name, options = {}) unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new("AutoScalingGroup name not found - no such group: #{auto_scaling_group_name}") end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/terminate_instance_in_auto_scaling_group.rb000066400000000000000000000051741437344660100324560ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/terminate_instance_in_auto_scaling_group' # Terminates the specified instance. Optionally, the desired group size # can be adjusted. # # ==== Parameters # * instance_id<~String> - The ID of the EC2 instance to be terminated. # * should_decrement_desired_capacity<~Boolean> - Specifies whether # (true) or not (false) terminating this instance should also # decrement the size of the AutoScalingGroup. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'TerminateGroupInAutoScalingInstanceResult'<~Hash>: # * 'ActivityId'<~String> - Specifies the ID of the activity. # * 'AutoScalingGroupName'<~String> - The name of the Auto # Scaling group. # * 'Cause'<~String> - Contains the reason the activity was # begun. # * 'Description'<~String> - Contains a friendly, more verbose # description of the scaling activity. # * 'EndTime'<~Time> - Provides the end time of this activity. # * 'Progress'<~Integer> - Specifies a value between 0 and 100 # that indicates the progress of the activity. # * 'StartTime'<~Time> - Provides the start time of this # activity. # * 'StatusCode'<~String> - Contains the current status of the # activity. # * 'StatusMessage'<~String> - Contains a friendly, more verbose # description of the activity status. 
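        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::AutoScaling
        #   # connection named `auto_scaling`; the instance id is a placeholder):
        #   auto_scaling.terminate_instance_in_auto_scaling_group('i-0123456789abcdef0', true)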
# # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_TerminateInstanceInAutoScalingGroup.html # def terminate_instance_in_auto_scaling_group(instance_id, should_decrement_desired_capacity) request({ 'Action' => 'TerminateInstanceInAutoScalingGroup', 'InstanceId' => instance_id, 'ShouldDecrementDesiredCapacity' => should_decrement_desired_capacity.to_s, :parser => Fog::Parsers::AWS::AutoScaling::TerminateInstanceInAutoScalingGroup.new }) end end class Mock def terminate_instance_in_auto_scaling_group(instance_id, should_decrement_desired_capacity) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/auto_scaling/update_auto_scaling_group.rb000066400000000000000000000106021437344660100273660ustar00rootroot00000000000000module Fog module AWS class AutoScaling class Real require 'fog/aws/parsers/auto_scaling/basic' # Updates the configuration for the specified AutoScalingGroup. # # The new settings are registered upon the completion of this call. Any # launch configuration settings take effect on any triggers after this # call returns. Triggers that are currently in progress aren't # affected. # # ==== Parameters # * auto_scaling_group_name<~String> - The name of the Auto Scaling # group. # * options<~Hash>: # * 'AvailabilityZones'<~Array> - Availability zones for the group. # * 'DefaultCooldown'<~Integer> - The amount of time, in seconds, # after a scaling activity completes before any further trigger- # related scaling activities can start # * 'DesiredCapacity'<~Integer> - The desired capacity for the Auto # Scaling group. # * 'HealthCheckGracePeriod'<~Integer> - The length of time that Auto # Scaling waits before checking an instance's health status.The # grace period begins when an instance comes into service. # * 'HealthCheckType'<~String> - The service of interest for the # health status check, either "EC2" for Amazon EC2 or "ELB" for # Elastic Load Balancing. # * 'LaunchConfigurationName'<~String> - The name of the launch # configuration. # * 'MaxSize'<~Integer> - The maximum size of the Auto Scaling group. # * 'MinSize'<~Integer> - The minimum size of the Auto Scaling group. # * 'PlacementGroup'<~String> - The name of the cluster placement # group, if applicable. # * 'TerminationPolicies'<~Array> - A standalone termination policy # or a list of termination policies used to select the instance to # terminate. The policies are executed in the order that they are # listed. # * 'VPCZoneIdentifier'<~String> - The subnet identifier for the # Amazon VPC connection, if applicable. You can specify several # subnets in a comma-separated list. 
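        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::AutoScaling
        #   # connection named `auto_scaling` and a group named 'my-asg'):
        #   auto_scaling.update_auto_scaling_group('my-asg',
        #     'MinSize'         => 1,
        #     'MaxSize'         => 4,
        #     'DesiredCapacity' => 2
        #   )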
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # # ==== See Also # http://docs.amazonwebservices.com/AutoScaling/latest/APIReference/API_UpdateAutoScalingGroup.html # ExpectedOptions[:update_auto_scaling_group] = %w[AvailabilityZones DefaultCooldown DesiredCapacity HealthCheckGracePeriod HealthCheckType LaunchConfigurationName MaxSize MinSize PlacementGroup TerminationPolicies VPCZoneIdentifier] def update_auto_scaling_group(auto_scaling_group_name, options = {}) if availability_zones = options.delete('AvailabilityZones') options.merge!(AWS.indexed_param('AvailabilityZones.member.%d', [*availability_zones])) end if termination_policies = options.delete('TerminationPolicies') options.merge!(AWS.indexed_param('TerminationPolicies.member.%d', [*termination_policies])) end request({ 'Action' => 'UpdateAutoScalingGroup', 'AutoScalingGroupName' => auto_scaling_group_name, :parser => Fog::Parsers::AWS::AutoScaling::Basic.new }.merge!(options)) end end class Mock def update_auto_scaling_group(auto_scaling_group_name, options = {}) unexpected_options = options.keys - ExpectedOptions[:update_auto_scaling_group] unless unexpected_options.empty? raise Fog::AWS::AutoScaling::ValidationError.new("Options #{unexpected_options.join(',')} should not be included in request") end unless self.data[:auto_scaling_groups].key?(auto_scaling_group_name) raise Fog::AWS::AutoScaling::ValidationError.new('AutoScalingGroup name not found - null') end self.data[:auto_scaling_groups][auto_scaling_group_name].merge!(options) response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/000077500000000000000000000000001437344660100211105ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/check_dns_availability.rb000066400000000000000000000014551437344660100261150ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/check_dns_availability' # Checks if the specified CNAME is available. # # ==== Options # * CNAMEPrefix<~String>: The prefix used when this CNAME is reserved # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CheckDNSAvailability.html # def check_dns_availability(options) request({ 'Operation' => 'CheckDNSAvailability', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CheckDNSAvailability.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/create_application.rb000066400000000000000000000016251437344660100252670ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/create_application' # Creates an application that has one configuration template named default and no application versions. # # ==== Options # * ApplicationName<~String>: The name of the application. # * Description<~String>: Describes the application. 
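        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; the application name is a placeholder):
        #   beanstalk.create_application('ApplicationName' => 'my-app', 'Description' => 'Demo application')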
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateApplication.html # def create_application(options={}) request({ 'Operation' => 'CreateApplication', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CreateApplication.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/create_application_version.rb000066400000000000000000000033011437344660100270250ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/create_application_version' # Creates an application version for the specified application. # # ==== Options # * ApplicationName<~String>: The name of the application. If no application is found with this name, # and AutoCreateApplication is false, returns an InvalidParameterValue error. # * AutoCreateApplication<~Boolean>: If true, create the application if it doesn't exist. # * Description<~String>: Describes this version. # * SourceBundle<~Hash>: The Amazon S3 bucket and key that identify the location of the source bundle # for this version. Use keys 'S3Bucket' and 'S3Key' to describe location. # * VersionLabel<~String>: A label identifying this version. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateApplicationVersion.html # def create_application_version(options={}) if source_bundle = options.delete('SourceBundle') # flatten hash options.merge!({ 'SourceBundle.S3Bucket' => source_bundle['S3Bucket'], 'SourceBundle.S3Key' => source_bundle['S3Key'] }) end request({ 'Operation' => 'CreateApplicationVersion', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CreateApplicationVersion.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/create_configuration_template.rb000066400000000000000000000047621437344660100275330ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/create_configuration_template' # Creates a configuration template. Templates are associated with a specific application and are used to # deploy different versions of the application with the same configuration settings. # # ==== Options # * ApplicationName<~String>: The name of the application to associate with this configuration template. # If no application is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error. # * Description<~String>: Describes this configuration. # * EnvironmentId<~String>: The ID of the environment used with this configuration template. # * OptionSettings<~Hash>: If specified, AWS Elastic Beanstalk sets the specified configuration option # to the requested value. The new value overrides the value obtained from the solution stack or the # source configuration template. # * SolutionStackName<~String>: The name of the solution stack used by this configuration. The solution # stack specifies the operating system, architecture, and application server for a configuration template. # It determines the set of configuration options as well as the possible and default values. # * SourceConfiguration<~String>: If specified, AWS Elastic Beanstalk uses the configuration values from the # specified configuration template to create a new configuration. # * TemplateName<~String>: The name of the configuration template. 
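        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; the application, template and solution
        #   # stack names are placeholders -- use a stack name returned by
        #   # list_available_solution_stacks):
        #   beanstalk.create_configuration_template(
        #     'ApplicationName'   => 'my-app',
        #     'TemplateName'      => 'my-template',
        #     'SolutionStackName' => '64bit Amazon Linux running Ruby 1.9.3'
        #   )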
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateConfigurationTemplate.html # def create_configuration_template(options={}) if option_settings = options.delete('OptionSettings') options.merge!(AWS.indexed_param('OptionSettings.member.%d', [*option_settings])) end if option_settings = options.delete('SourceConfiguration') options.merge!(AWS.serialize_keys('SourceConfiguration', option_settings)) end request({ 'Operation' => 'CreateConfigurationTemplate', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CreateConfigurationTemplate.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/create_environment.rb000066400000000000000000000052441437344660100253310ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/create_environment' # Launches an environment for the specified application using the specified configuration. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. # * CNAMEPrefix<~String>: If specified, the environment attempts to use this value as the prefix for the CNAME. # If not specified, the environment uses the environment name. # * Description<~String>: Describes this environment. # * EnvironmentName<~String>: A unique name for the deployment environment. Used in the application URL. # * OptionSettings<~Array>: If specified, AWS Elastic Beanstalk sets the specified configuration options to # the requested value in the configuration set for the new environment. These override the values obtained # from the solution stack or the configuration template. # * OptionsToRemove<~Array>: A list of custom user-defined configuration options to remove from the # configuration set for this new environment. # * SolutionStackName<~String>: This is an alternative to specifying a configuration name. If specified, # AWS Elastic Beanstalk sets the configuration values to the default values associated with the # specified solution stack. # * TemplateName<~String>: The name of the configuration template to use in deployment. If no configuration # template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error. # * VersionLabel<~String>: The name of the application version to deploy. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateEnvironment.html # def create_environment(options={}) if option_settings = options.delete('OptionSettings') options.merge!(AWS.indexed_param('OptionSettings.member.%d', [*option_settings])) end if options_to_remove = options.delete('OptionsToRemove') options.merge!(AWS.indexed_param('OptionsToRemove.member.%d', [*options_to_remove])) end request({ 'Operation' => 'CreateEnvironment', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CreateEnvironment.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/create_storage_location.rb000066400000000000000000000013441437344660100263160ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/create_storage_location' # Creates the Amazon S3 storage location for the account. 
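        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`); the call takes no arguments:
        #   beanstalk.create_storage_location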
# # ==== Options # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateStorageLocation.html # def create_storage_location() request({ 'Operation' => 'CreateStorageLocation', :parser => Fog::Parsers::AWS::ElasticBeanstalk::CreateStorageLocation.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/delete_application.rb000066400000000000000000000016011437344660100252600ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Deletes the specified application along with all associated versions and configurations. # # ==== Options # * application_name<~String>: The name of the application to delete. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DeleteApplication.html # def delete_application(application_name) options = { 'ApplicationName' => application_name } request({ 'Operation' => 'DeleteApplication', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/delete_application_version.rb000066400000000000000000000024231437344660100270300ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Deletes the specified version from the specified application. # # ==== Options # * application_name<~String>: The name of the application to delete releases from. # * version_label<~String>: The label of the version to delete. # * delete_source_bundle<~Boolean>: Indicates whether to delete the associated source bundle from Amazon S3. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DeleteApplication.html # def delete_application_version(application_name, version_label, delete_source_bundle = nil) options = { 'ApplicationName' => application_name, 'VersionLabel' => version_label } options['DeleteSourceBundle'] = delete_source_bundle unless delete_source_bundle.nil? request({ 'Operation' => 'DeleteApplicationVersion', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/delete_configuration_template.rb000066400000000000000000000021021437344660100275140ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Deletes the specified configuration template. # # ==== Options # * application_name<~String>: The name of the application to delete the configuration template from. # * template_name<~String>: The name of the configuration template to delete. 
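        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; names are placeholders):
        #   beanstalk.delete_configuration_template('my-app', 'my-template')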
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DeleteConfigurationTemplate.html # def delete_configuration_template(application_name, template_name) options = { 'ApplicationName' => application_name, 'TemplateName' => template_name } request({ 'Operation' => 'DeleteConfigurationTemplate', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/delete_environment_configuration.rb000066400000000000000000000021721437344660100302540ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Deletes the draft configuration associated with the running environment. # # ==== Options # * application_name<~String>: The name of the application the environment is associated with. # * environment_name<~String>: The name of the environment to delete the draft configuration from. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DeleteConfigurationTemplate.html # def delete_environment_configuration(application_name, environment_name) options = { 'ApplicationName' => application_name, 'EnvironmentName' => environment_name } request({ 'Operation' => 'DeleteEnvironmentConfiguration', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_application_versions.rb000066400000000000000000000024501437344660100275310ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_application_versions' # Returns descriptions for existing application versions. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions to # only include ones that are associated with the specified application. # * VersionLabels<~Array>: If specified, restricts the returned descriptions to only include ones that have # the specified version labels. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeApplicationVersions.html # def describe_application_versions(options={}) if version_labels = options.delete('VersionLabels') options.merge!(AWS.indexed_param('VersionLabels.member.%d', [*version_labels])) end request({ 'Operation' => 'DescribeApplicationVersions', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeApplicationVersions.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_applications.rb000066400000000000000000000020571437344660100257670ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_applications' # Returns the descriptions of existing applications. # # ==== Options # * application_names<~Array>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to only include those with the specified names. 
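        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; the application name is a placeholder):
        #   beanstalk.describe_applications               # all applications
        #   beanstalk.describe_applications(['my-app'])   # a single application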
# ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeApplications.html # def describe_applications(application_names=[]) options = {} options.merge!(AWS.indexed_param('ApplicationNames.member.%d', [*application_names])) request({ 'Operation' => 'DescribeApplications', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeApplications.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_configuration_options.rb000066400000000000000000000037021437344660100277210ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_configuration_options' # Describes the configuration options that are used in a particular configuration template or environment, # or that a specified solution stack defines. The description includes the values the options, # their default values, and an indication of the required action on a running environment # if an option value is changed. # # ==== Options # * ApplicationName<~String>: The name of the application associated with the configuration template or # environment. Only needed if you want to describe the configuration options associated with either the # configuration template or environment. # * EnvironmentName<~String>: The name of the environment whose configuration options you want to describe. # * Options<~Array>: If specified, restricts the descriptions to only the specified options. # * SolutionStackName<~String>: The name of the solution stack whose configuration options you want to describe. # * TemplateName<~String>: The name of the configuration template whose configuration options you want to describe. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeConfigurationOptions.html # def describe_configuration_options(options={}) if option_filters = options.delete('Options') options.merge!(AWS.indexed_param('Options.member.%d', [*option_filters])) end request({ 'Operation' => 'DescribeConfigurationOptions', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeConfigurationOptions.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_configuration_settings.rb000066400000000000000000000022661437344660100300720ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_configuration_settings' # Returns a description of the settings for the specified configuration set, that is, either a configuration # template or the configuration set associated with a running environment. # # ==== Options # * ApplicationName<~String>: The application for the environment or configuration template. # * EnvironmentName<~String>: The name of the environment to describe. # * TemplateName<~String>: The name of the configuration template to describe. 
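        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; names are placeholders):
        #   beanstalk.describe_configuration_settings(
        #     'ApplicationName' => 'my-app',
        #     'EnvironmentName' => 'my-env'
        #   )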
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeConfigurationSettings.html # def describe_configuration_settings(options={}) request({ 'Operation' => 'DescribeConfigurationSettings', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeConfigurationSettings.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_environment_resources.rb000066400000000000000000000015121437344660100277320ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_environment_resources' # Returns AWS resources for this environment. # # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeEnvironmentResources.html # def describe_environment_resources(options={}) request({ 'Operation' => 'DescribeEnvironmentResources', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeEnvironmentResources.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_environments.rb000066400000000000000000000026411437344660100260270ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_environments' # Returns descriptions for existing environments. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. # * EnvironmentIds # * EnvironmentNames # * IncludeDeleted # * IncludedDeletedBackTo # * VersionLabel<~String>: # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html # def describe_environments(options={}) if environment_ids = options.delete('EnvironmentIds') options.merge!(AWS.indexed_param('EnvironmentIds.member.%d', [*environment_ids])) end if environment_names = options.delete('EnvironmentNames') options.merge!(AWS.indexed_param('EnvironmentNames.member.%d', [*environment_names])) end request({ 'Operation' => 'DescribeEnvironments', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeEnvironments.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/describe_events.rb000066400000000000000000000016561437344660100246110ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/describe_events' # Returns list of event descriptions matching criteria up to the last 6 weeks. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. 
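        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; the application name is a placeholder):
        #   beanstalk.describe_events('ApplicationName' => 'my-app')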
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html # def describe_events(options={}) request({ 'Operation' => 'DescribeEvents', :parser => Fog::Parsers::AWS::ElasticBeanstalk::DescribeEvents.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/list_available_solution_stacks.rb000066400000000000000000000013611437344660100277150ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/list_available_solution_stacks' # Checks if the specified CNAME is available. # # ==== Options # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CheckDNSAvailability.html # def list_available_solution_stacks() request({ 'Operation' => 'ListAvailableSolutionStacks', :parser => Fog::Parsers::AWS::ElasticBeanstalk::ListAvailableSolutionStacks.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/rebuild_environment.rb000066400000000000000000000015651437344660100255160ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Deletes and recreates all of the AWS resources (for example: the Auto Scaling group, load balancer, etc.) # for a specified environment and forces a restart. # # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_RebuildEnvironment.html # def rebuild_environment(options={}) request({ 'Operation' => 'RebuildEnvironment', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/request_environment_info.rb000066400000000000000000000014101437344660100265600ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Returns AWS resources for this environment. # # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_RequestEnvironmentInfo.html # def request_environment_info(options={}) request({ 'Operation' => 'RequestEnvironmentInfo', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/restart_app_server.rb000066400000000000000000000013661437344660100253550ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Returns AWS resources for this environment. # # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_RestartAppServer.html # def restart_app_server(options={}) request({ 'Operation' => 'RestartAppServer', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/retrieve_environment_info.rb000066400000000000000000000014611437344660100267230ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/retrieve_environment_info' # Returns AWS resources for this environment. 
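        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; the environment name is a placeholder,
        #   # and the service may require additional parameters such as the type
        #   # of information requested):
        #   beanstalk.retrieve_environment_info('EnvironmentName' => 'my-env')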
# # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_RetrieveEnvironmentInfo.html # def retrieve_environment_info(options={}) request({ 'Operation' => 'RetrieveEnvironmentInfo', :parser => Fog::Parsers::AWS::ElasticBeanstalk::RetrieveEnvironmentInfo.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/swap_environment_cnames.rb000066400000000000000000000013771437344660100263710ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/empty' # Swaps the CNAMEs of two environments. # # ==== Options # * EnvironmentId # * EnvironmentName # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_SwapEnvironmentCNAMEs.html # def swap_environment_cnames(options={}) request({ 'Operation' => 'SwapEnvironmentCNAMEs', :parser => Fog::Parsers::AWS::ElasticBeanstalk::Empty.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/terminate_environment.rb000066400000000000000000000020331437344660100260470ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/terminate_environment' # Terminates the specified environment. # # ==== Options # * EnvironmentId<~String>: The ID of the environment to terminate. # * EnvironmentName<~String>: The name of the environment to terminate. # * TerminateResources<~Boolean>: Indicates whether the associated AWS resources should shut down when the # environment is terminated # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_TerminateEnvironment.html # def terminate_environment(options={}) request({ 'Operation' => 'TerminateEnvironment', :parser => Fog::Parsers::AWS::ElasticBeanstalk::TerminateEnvironment.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/update_application.rb000066400000000000000000000017551437344660100253120ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/update_application' # Updates the specified application to have the specified properties. # # ==== Options # * ApplicationName<~String>: The name of the application to update. If no such application is found, # UpdateApplication returns an InvalidParameterValue error. # * Description<~String>: A new description for the application. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_UpdateApplication.html # def update_application(options) request({ 'Operation' => 'UpdateApplication', :parser => Fog::Parsers::AWS::ElasticBeanstalk::UpdateApplication.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/update_application_version.rb000066400000000000000000000020111437344660100270410ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/update_application_version' # Updates the specified application version to have the specified properties. # # ==== Options # * ApplicationName<~String>: The name of the application associated with this version. # * VersionLabel<~String>: The name of the version to update. 
# * Description<~String>: A new description for this release. # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_UpdateApplicationVersion.html # def update_application_version(options) request({ 'Operation' => 'UpdateApplicationVersion', :parser => Fog::Parsers::AWS::ElasticBeanstalk::UpdateApplicationVersion.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/update_configuration_template.rb000066400000000000000000000026151437344660100275450ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/update_configuration_template' # Updates the specified configuration template to have the specified properties or configuration option values. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. # * VersionLabel<~String>: # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateConfigurationTemplate.html # def update_configuration_template(options={}) if option_settings = options.delete('OptionSettings') options.merge!(AWS.indexed_param('OptionSettings.member.%d', [*option_settings])) end if options_to_remove = options.delete('OptionsToRemove') options.merge!(AWS.indexed_param('OptionsToRemove.member.%d', [*options_to_remove])) end request({ 'Operation' => 'UpdateConfigurationTemplate', :parser => Fog::Parsers::AWS::ElasticBeanstalk::UpdateConfigurationTemplate.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/update_environment.rb000066400000000000000000000031241437344660100253430ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/update_environment' # Updates the environment description, deploys a new application version, updates the configuration settings # to an entirely new configuration template, or updates select configuration option values in # the running environment. # # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. # * EnvironmentIds # * EnvironmentNames # * IncludeDeleted # * IncludedDeletedBackTo # * VersionLabel<~String>: # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateEnvironment.html # def update_environment(options={}) if option_settings = options.delete('OptionSettings') options.merge!(AWS.indexed_param('OptionSettings.member.%d', [*option_settings])) end if options_to_remove = options.delete('OptionsToRemove') options.merge!(AWS.indexed_param('OptionsToRemove.member.%d', [*options_to_remove])) end request({ 'Operation' => 'UpdateEnvironment', :parser => Fog::Parsers::AWS::ElasticBeanstalk::UpdateEnvironment.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/beanstalk/validate_configuration_settings.rb000066400000000000000000000023431437344660100300770ustar00rootroot00000000000000module Fog module AWS class ElasticBeanstalk class Real require 'fog/aws/parsers/beanstalk/validate_configuration_settings' # Updates the specified configuration template to have the specified properties or configuration option values. 
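        #
        # ==== Examples
        #
        #   # Illustrative sketch (assumes an existing Fog::AWS::ElasticBeanstalk
        #   # connection named `beanstalk`; names are placeholders, and the option
        #   # settings to validate would normally be supplied via 'OptionSettings'):
        #   beanstalk.validate_configuration_settings(
        #     'ApplicationName' => 'my-app',
        #     'TemplateName'    => 'my-template'
        #   )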
# # ==== Options # * ApplicationName<~String>: If specified, AWS Elastic Beanstalk restricts the returned descriptions # to include only those that are associated with this application. # * VersionLabel<~String>: # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/elasticbeanstalk/latest/api/API_CreateConfigurationTemplate.html # def validate_configuration_settings(options={}) if option_settings = options.delete('OptionSettings') options.merge!(AWS.indexed_param('OptionSettings.member.%d', [*option_settings])) end request({ 'Operation' => 'ValidateConfigurationSettings', :parser => Fog::Parsers::AWS::ElasticBeanstalk::ValidateConfigurationSettings.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/000077500000000000000000000000001437344660100177105ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/cdn/delete_distribution.rb000066400000000000000000000033021437344660100242740ustar00rootroot00000000000000module Fog module AWS class CDN class Real # Delete a distribution from CloudFront. # # @param distribution_id [String] Id of distribution to delete. # @param etag [String] etag of that distribution from earlier get or put # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/DeleteDistribution.html def delete_distribution(distribution_id, etag) request({ :expects => 204, :headers => { 'If-Match' => etag }, :idempotent => true, :method => 'DELETE', :path => "/distribution/#{distribution_id}" }) end end class Mock def delete_distribution(distribution_id, etag) distribution = self.data[:distributions][distribution_id] if distribution if distribution['ETag'] != etag Fog::AWS::CDN::Mock.error(:invalid_if_match_version) end unless distribution['DistributionConfig']['CallerReference'] Fog::AWS::CDN::Mock.error(:illegal_update) end if distribution['DistributionConfig']['Enabled'] Fog::AWS::CDN::Mock.error(:distribution_not_disabled) end self.data[:distributions].delete(distribution_id) self.data[:invalidations].delete(distribution_id) response = Excon::Response.new response.status = 204 response.body = "x-amz-request-id: #{Fog::AWS::Mock.request_id}" response else Fog::AWS::CDN::Mock.error(:no_such_distribution) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/delete_streaming_distribution.rb000066400000000000000000000033451437344660100263540ustar00rootroot00000000000000module Fog module AWS class CDN class Real # Delete a streaming distribution from CloudFront. # # @param [String] distribution_id Id of distribution to delete. 
# @param [String] etag Etag of that distribution from earlier get or put # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/DeleteStreamingDistribution.html def delete_streaming_distribution(distribution_id, etag) request({ :expects => 204, :headers => { 'If-Match' => etag }, :idempotent => true, :method => 'DELETE', :path => "/streaming-distribution/#{distribution_id}" }) end end class Mock def delete_streaming_distribution(distribution_id, etag) distribution = self.data[:streaming_distributions][distribution_id] if distribution if distribution['ETag'] != etag Fog::AWS::CDN::Mock.error(:invalid_if_match_version) end unless distribution['StreamingDistributionConfig']['CallerReference'] Fog::AWS::CDN::Mock.error(:illegal_update) end if distribution['StreamingDistributionConfig']['Enabled'] Fog::AWS::CDN::Mock.error(:distribution_not_disabled) end self.data[:streaming_distributions].delete(distribution_id) response = Excon::Response.new response.status = 204 response.body = "x-amz-request-id: #{Fog::AWS::Mock.request_id}" response else Fog::AWS::CDN::Mock.error(:no_such_streaming_distribution) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_distribution.rb000066400000000000000000000062551437344660100236230ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/distribution' # Get information about a distribution from CloudFront. # # @param distribution_id [String] Id of distribution. # # @return [Excon::Response] # * body [Hash]: # * S3Origin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] - Optional: Used when serving private content. # or # * CustomOrigin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'www.example.com'. # * HTTPPort [Integer] - HTTP port of origin, in [80, 443] or (1024...65535). # * HTTPSPort [Integer] - HTTPS port of origin, in [80, 443] or (1024...65535). # * OriginProtocolPolicy [String] - Policy on using http vs https, in ['http-only', 'match-viewer']. # # * Id [String] Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Status [String] - Status of distribution. # * DistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * InProgressInvalidationBatches [Integer] - Number of invalidation batches in progress. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. # * Origin [String] - S3 origin bucket. # * TrustedSigners [Array] - Trusted signers. 
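        # @example Hypothetical usage sketch (connection settings and the distribution id are placeholders, not values from this library):
        #   cdn  = Fog::AWS::CDN.new(aws_access_key_id: 'KEY', aws_secret_access_key: 'SECRET')
        #   resp = cdn.get_distribution('EDFDVBD6EXAMPLE')
        #   resp.body['Status']          # e.g. "Deployed" once the distribution has propagated
        #   etag = resp.headers['ETag']  # keep this for later put_distribution_config or delete_distribution calls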
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetDistribution.html def get_distribution(distribution_id) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::Distribution.new, :path => "/distribution/#{distribution_id}" }) end end class Mock def get_distribution(distribution_id) response = Excon::Response.new distribution = self.data[:distributions][distribution_id] unless distribution Fog::AWS::CDN::Mock.error(:no_such_distribution) end if distribution['Status'] == 'InProgress' && (Time.now - Time.parse(distribution['LastModifiedTime']) >= Fog::Mock.delay * 2) distribution['Status'] = 'Deployed' end etag = Fog::AWS::CDN::Mock.generic_id response.status = 200 response.body = { 'InProgressInvalidationBatches' => 0, }.merge(distribution.reject { |k,v| k == 'ETag' }) response.headers['ETag'] = etag distribution['ETag'] = etag response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_distribution_list.rb000066400000000000000000000064701437344660100246550ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/get_distribution_list' # List information about distributions in CloudFront. # # @param options [Hash] Config arguments for list. # @option options Marker [String] Limits object keys to only those that appear lexicographically after its value. # @option options MaxItems [Integer] Limits number of object keys returned. # # @return [Excon::Response] # * body [Hash]: # * IsTruncated [Boolean] - Whether or not the listing is truncated. # * Marker [String] Marker specified for query. # * MaxItems [Integer] - Maximum number of keys specified for query. # * NextMarker [String] - Marker to specify for next page (id of last result of current page). # * DistributionSummary [Array]: # * S3Origin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] - Optional: Used when serving private content. # or # * CustomOrigin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'www.example.com'. # * HTTPPort [Integer] - HTTP port of origin, in [80, 443] or (1024...65535). # * HTTPSPort [Integer] - HTTPS port of origin, in [80, 443] or (1024...65535). # * OriginProtocolPolicy [String] - Policy on using http vs https, in ['http-only', 'match-viewer']. # * Comment [String] - Comment associated with distribution. # * CNAME [Array] - Array of associated cnames. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Origin [String] - S3 origin bucket. # * Status [String] - Status of distribution. # * TrustedSigners [Array] - Trusted signers. 
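        # @example Hypothetical paging sketch (assumes a Fog::AWS::CDN connection in `cdn`; values are placeholders):
        #   resp = cdn.get_distribution_list('MaxItems' => '50')
        #   resp.body['DistributionSummary'].map { |d| d['Id'] }
        #   resp = cdn.get_distribution_list('Marker' => resp.body['NextMarker']) if resp.body['IsTruncated']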
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListDistributions.html # def get_distribution_list(options = {}) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::GetDistributionList.new, :path => "/distribution", :query => options }) end end class Mock def get_distribution_list(options = {}) response = Excon::Response.new response.status = 200 distributions = self.data[:distributions].values response.body = { 'Marker' => Fog::Mock.random_hex(16), 'IsTruncated' => false, 'MaxItems' => 100, 'DistributionSummary' => distributions.map { |d| to_distribution_summary(d) } } response end private def to_distribution_summary(d) { 'DomainName' => d['DomainName'], 'Id' => d['Id'], 'LastModifiedTime' => d['LastModifiedTime'] }.merge(d['DistributionConfig']) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_invalidation.rb000066400000000000000000000035611437344660100235620ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/get_invalidation' # Get invalidation. # # @param distribution_id [String] Distribution id. # @param invalidation_id [String] Invalidation id. # # @return [Excon::Response] # * body [Hash]: # * Id [String] - Invalidation id. # * Status [String] # * CreateTime [String] # * InvalidationBatch [Array]: # * Path [String] # # @see http://docs.amazonwebservices.com/AmazonCloudFront/2010-11-01/APIReference/GetInvalidation.html def get_invalidation(distribution_id, invalidation_id) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::GetInvalidation.new, :path => "/distribution/#{distribution_id}/invalidation/#{invalidation_id}" }) end end class Mock def get_invalidation(distribution_id, invalidation_id) distribution = self.data[:distributions][distribution_id] unless distribution Fog::AWS::CDN::Mock.error(:no_such_distribution) end invalidation = self.data[:invalidations][distribution_id][invalidation_id] unless invalidation Fog::AWS::CDN::Mock.error(:no_such_invalidation) end if invalidation['Status'] == 'InProgress' && (Time.now - Time.parse(invalidation['CreateTime']) >= Fog::Mock.delay * 2) invalidation['Status'] = 'Completed' distribution['InProgressInvalidationBatches'] -= 1 end response = Excon::Response.new response.status = 200 response.body = invalidation response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_invalidation_list.rb000066400000000000000000000051501437344660100246110ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/get_invalidation_list' # Get invalidation list. # # @param options [Hash] Config arguments for list. # @option options Marker [String] Limits object keys to only those that appear lexicographically after its value. # @option options MaxItems [Integer] Limits number of object keys returned. # # @return [Excon::Response] # * body [Hash]: # * IsTruncated [Boolean] - Whether or not the listing is truncated. # * Marker [String] - Marker specified for query. # * MaxItems [Integer] - Maximum number of keys specified for query. # * NextMarker [String] - Marker to specify for next page (id of last result of current page). 
# * InvalidationSummary [Array]: # * Id [String] # * Status [String] # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListInvalidation.html def get_invalidation_list(distribution_id, options = {}) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::GetInvalidationList.new, :path => "/distribution/#{distribution_id}/invalidation", :query => options }) end end class Mock def get_invalidation_list(distribution_id, options = {}) distribution = self.data[:distributions][distribution_id] unless distribution Fog::AWS::CDN::Mock.error(:no_such_distribution) end invalidations = (self.data[:invalidations][distribution_id] || {}).values invalidations.each do |invalidation| if invalidation['Status'] == 'InProgress' && (Time.now - Time.parse(invalidation['CreateTime']) >= Fog::Mock.delay * 2) invalidation['Status'] = 'Completed' distribution['InProgressInvalidationBatches'] -= 1 end end response = Excon::Response.new response.status = 200 response.body = { 'Marker' => Fog::Mock.random_hex(16), 'IsTruncated' => false, 'MaxItems' => 100, 'InvalidationSummary' => invalidations.map { |i| to_invalidation_summary(i) } } response end private def to_invalidation_summary(d) { 'Id' => d['Id'], 'Status' => d['Status'] } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_streaming_distribution.rb000066400000000000000000000053721437344660100256730ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/streaming_distribution' # Get information about a streaming distribution from CloudFront. # # @param distribution_id [String] Id of distribution. # # @return [Excon::Response] # * body [Hash]: # * S3Origin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] - Optional: Used when serving private content. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Status [String] - Status of distribution. # * StreamingDistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * InProgressInvalidationBatches [Integer] - Number of invalidation batches in progress. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. # * Origin [String] - S3 origin bucket. # * TrustedSigners [Array] - Trusted signers. 
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/GetStreamingDistribution.html def get_streaming_distribution(distribution_id) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::StreamingDistribution.new, :path => "/streaming-distribution/#{distribution_id}" }) end end class Mock def get_streaming_distribution(distribution_id) response = Excon::Response.new distribution = self.data[:streaming_distributions][distribution_id] unless distribution Fog::AWS::CDN::Mock.error(:no_such_streaming_distribution) end if distribution['Status'] == 'InProgress' && (Time.now - Time.parse(distribution['LastModifiedTime']) >= Fog::Mock.delay * 2) distribution['Status'] = 'Deployed' end etag = Fog::AWS::CDN::Mock.generic_id response.status = 200 response.body = distribution.reject { |k,v| k == 'ETag' } response.headers['ETag'] = etag distribution['ETag'] = etag response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/get_streaming_distribution_list.rb000066400000000000000000000066431437344660100267300ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/get_streaming_distribution_list' # List information about distributions in CloudFront. # # @param options [Hash] Config arguments for list. # @option options Marker [String] Limits object keys to only those that appear lexicographically after its value. # @option options MaxItems [Integer] Limits number of object keys returned. # # @return [Excon::Response] # * body [Hash]: # * IsTruncated [Boolean] - Whether or not the listing is truncated. # * Marker [String] - Marker specified for query. # * MaxItems [Integer] - Maximum number of keys specified for query. # * NextMarker [String] - Marker to specify for next page (id of last result of current page). # * StreamingDistributionSummary [Array]: # * S3Origin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] - Optional: Used when serving private content. # or # * CustomOrigin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'www.example.com'. # * HTTPPort [Integer] - HTTP port of origin, in [80, 443] or (1024...65535). # * HTTPSPort [Integer] - HTTPS port of origin, in [80, 443] or (1024...65535). # * OriginProtocolPolicy [String] - Policy on using http vs https, in ['http-only', 'match-viewer']. # * Comment [String] - Comment associated with distribution. # * CNAME [Array] - Array of associated cnames. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Origin [String] - S3 origin bucket. # * Status [String] - Status of distribution. # * TrustedSigners [Array] - Trusted signers. 
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/ListStreamingDistributions.html def get_streaming_distribution_list(options = {}) request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::CDN::GetStreamingDistributionList.new, :path => "/streaming-distribution", :query => options }) end end class Mock def get_streaming_distribution_list(options = {}) response = Excon::Response.new response.status = 200 distributions = self.data[:streaming_distributions].values response.body = { 'Marker' => Fog::Mock.random_hex(16), 'IsTruncated' => false, 'MaxItems' => 100, 'StreamingDistributionSummary' => distributions.map { |d| to_streaming_distribution_summary(d) } } response end private def to_streaming_distribution_summary(d) { 'DomainName' => d['DomainName'], 'Id' => d['Id'], 'LastModifiedTime' => d['LastModifiedTime'] }.merge(d['StreamingDistributionConfig']) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/post_distribution.rb000066400000000000000000000132661437344660100240310ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/distribution' # Create a new distribution in CloudFront. # # @param options [Hash] Config for distribution. # # REQUIRED: # * S3Origin [Hash]: # * DNSName [String] Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] Optional: used when serving private content. # or # * CustomOrigin [Hash]: # * DNSName [String] Origin to associate with distribution, ie 'www.example.com'. # * HTTPPort [Integer] Optional HTTP port of origin, in [80, 443] or (1024...65535), defaults to 80. # * HTTPSPort [Integer] Optional HTTPS port of origin, in [80, 443] or (1024...65535), defaults to 443. # * OriginProtocolPolicy [String] Policy on using http vs https, in ['http-only', 'match-viewer']. # OPTIONAL: # * CallerReference [String] Used to prevent replay, defaults to Time.now.to_i.to_s. # * Comment [String] Optional comment about distribution. # * CNAME [Array] Optional array of strings to set as CNAMEs. # * DefaultRootObject [String] Optional default object to return for '/'. # * Enabled [Boolean] Whether or not distribution should accept requests, defaults to true. # * Logging [Hash]: Optional logging config. # * Bucket [String] Bucket to store logs in, ie 'mylogs.s3.amazonaws.com'. # * Prefix [String] Optional prefix for log filenames, ie 'myprefix/'. # * OriginAccessIdentity [String] Used for serving private content, in format 'origin-access-identity/cloudfront/ID'. # * RequiredProtocols [String] Optional, set to 'https' to force https connections. # * TrustedSigners [Array] Optional grant of rights to up to 5 aws accounts to generate signed URLs for private content, elements are either 'Self' for your own account or an AWS Account Number. # # @return [Excon::Response] # * body [Hash]: # * DomainName [String] - Domain name of distribution. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Status [String] - Status of distribution. # * DistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. 
# * Origin [String] - S3 origin bucket. # * TrustedSigners [Array] - Trusted signers. # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateDistribution.html def post_distribution(options = {}) options['CallerReference'] = Time.now.to_i.to_s data = '' data << "" for key, value in options case value when Array for item in value data << "<#{key}>#{item}" end when Hash data << "<#{key}>" for inner_key, inner_value in value data << "<#{inner_key}>#{inner_value}" end data << "" else data << "<#{key}>#{value}" end end data << "" request({ :body => data, :expects => 201, :headers => { 'Content-Type' => 'text/xml' }, :idempotent => true, :method => 'POST', :parser => Fog::Parsers::AWS::CDN::Distribution.new, :path => "/distribution" }) end end class Mock require 'time' def post_distribution(options = {}) if self.data[:distributions].values.any? { |d| (d['CNAME'] & (options['CNAME']||[])).empty? } Fog::AWS::CDN::Mock.error(:invalid_argument, 'CNAME is already in use') end response = Excon::Response.new response.status = 201 options['CallerReference'] = Time.now.to_i.to_s dist_id = Fog::AWS::CDN::Mock.distribution_id distribution = { 'DomainName' => Fog::AWS::CDN::Mock.domain_name, 'Id' => dist_id, 'Status' => 'InProgress', 'LastModifiedTime' => Time.now.utc.iso8601, 'InProgressInvalidationBatches' => 0, 'DistributionConfig' => { 'CallerReference' => options['CallerReference'], 'CNAME' => options['CNAME'] || [], 'Comment' => options['Comment'], 'Enabled' => options['Enabled'], 'Logging' => { 'Bucket' => options['Bucket'], 'Prefix' => options['Prefix'] }, 'S3Origin' => options['S3Origin'], 'CustomOrigin' => options['CustomOrigin'], 'TrustedSigners' => options['TrustedSigners'] || [] } } self.data[:distributions][dist_id] = distribution response.body = distribution response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/post_invalidation.rb000066400000000000000000000055351437344660100237730ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/post_invalidation' # List information about distributions in CloudFront. # # @param distribution_id [String] Id of distribution for invalidations. # @param paths [Array] Array of string paths to objects to invalidate. # @param caller_reference [String] Used to prevent replay, defaults to Time.now.to_i.to_s. # # @return [Excon::Response] # * body [Hash]: # * Id [String] - Id of invalidation. # * Status [String] - Status of invalidation. # * CreateTime [Integer] - Time of invalidation creation. # * InvalidationBatch [Array]: # * Path [Array] - Array of strings of objects to invalidate. # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. 
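        # @example Hypothetical sketch of invalidating two objects (assumes a Fog::AWS::CDN connection in `cdn`; the distribution id and paths are placeholders):
        #   resp = cdn.post_invalidation('EDFDVBD6EXAMPLE', ['/index.html', '/images/logo.png'])
        #   resp.body['Id']      # invalidation id, can be polled via get_invalidation
        #   resp.body['Status']  # "InProgress" until CloudFront finishes the batch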
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateInvalidation.html def post_invalidation(distribution_id, paths, caller_reference = Time.now.to_i.to_s) body = '' body << "" for path in [*paths] body << "" << path << "" end body << "" << caller_reference << "" body << "" request({ :body => body, :expects => 201, :headers => {'Content-Type' => 'text/xml'}, :idempotent => true, :method => 'POST', :parser => Fog::Parsers::AWS::CDN::PostInvalidation.new, :path => "/distribution/#{distribution_id}/invalidation" }) end end class Mock def post_invalidation(distribution_id, paths, caller_reference = Time.now.to_i.to_s) distribution = self.data[:distributions][distribution_id] if distribution invalidation_id = Fog::AWS::CDN::Mock.distribution_id invalidation = { 'Id' => invalidation_id, 'Status' => 'InProgress', 'CreateTime' => Time.now.utc.iso8601, 'InvalidationBatch' => { 'CallerReference' => caller_reference, 'Path' => paths } } distribution['InProgressInvalidationBatches'] += 1 self.data[:invalidations][distribution_id] ||= {} self.data[:invalidations][distribution_id][invalidation_id] = invalidation response = Excon::Response.new response.status = 201 response.body = invalidation response else Fog::AWS::CDN::Mock.error(:no_such_distribution) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/post_streaming_distribution.rb000066400000000000000000000110261437344660100260720ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/streaming_distribution' # Create a new streaming distribution in CloudFront. # # @param options [Hash] Config for distribution. # # REQUIRED: # * S3Origin [Hash]: # * DNSName [String] Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # OPTIONAL: # * CallerReference [String] Used to prevent replay, defaults to Time.now.to_i.to_s. # * Comment [String] Optional comment about distribution. # * CNAME [Array] Optional array of strings to set as CNAMEs. # * Enabled [Boolean] Whether or not distribution should accept requests, defaults to true. # * Logging [Hash]: Optional logging config. # * Bucket [String] Bucket to store logs in, ie 'mylogs.s3.amazonaws.com'. # * Prefix [String] Optional prefix for log filenames, ie 'myprefix/'. # # @return [Excon::Response] # * body[Hash]: # * Id [String] - Id of distribution. # * Status'[String] - Status of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * DomainName [String] - Domain name of distribution. # * StreamingDistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. 
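        # @example Hypothetical sketch of creating a streaming distribution (assumes a Fog::AWS::CDN connection in `cdn`; bucket and CNAME are placeholders):
        #   cdn.post_streaming_distribution(
        #     'S3Origin' => { 'DNSName' => 'mybucket.s3.amazonaws.com' },
        #     'CNAME'    => ['vod.example.com'],
        #     'Comment'  => 'streaming assets',
        #     'Enabled'  => true
        #   )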
# # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateStreamingDistribution.html def post_streaming_distribution(options = {}) options['CallerReference'] = Time.now.to_i.to_s data = '' data << "" for key, value in options case value when Array for item in value data << "<#{key}>#{item}" end when Hash data << "<#{key}>" for inner_key, inner_value in value data << "<#{inner_key}>#{inner_value}" end data << "" else data << "<#{key}>#{value}" end end data << "" request({ :body => data, :expects => 201, :headers => { 'Content-Type' => 'text/xml' }, :idempotent => true, :method => 'POST', :parser => Fog::Parsers::AWS::CDN::StreamingDistribution.new, :path => "/streaming-distribution" }) end end class Mock require 'time' def post_streaming_distribution(options = {}) if self.data[:streaming_distributions].values.any? { |d| (d['CNAME'] & (options['CNAME']||[])).empty? } Fog::AWS::CDN::Mock.error(:invalid_argument, 'CNAME is already in use') end response = Excon::Response.new response.status = 201 options['CallerReference'] = Time.now.to_i.to_s dist_id = Fog::AWS::CDN::Mock.distribution_id distribution = { 'DomainName' => Fog::AWS::CDN::Mock.domain_name, 'Id' => dist_id, 'Status' => 'InProgress', 'LastModifiedTime' => Time.now.utc.iso8601, 'StreamingDistributionConfig' => { 'CallerReference' => options['CallerReference'], 'CNAME' => options['CNAME'] || [], 'Comment' => options['Comment'], 'Enabled' => options['Enabled'], 'Logging' => { 'Bucket' => options['Bucket'], 'Prefix' => options['Prefix'] }, 'S3Origin' => options['S3Origin'], 'TrustedSigners' => options['TrustedSigners'] || [] } } self.data[:streaming_distributions][dist_id] = distribution response.body = distribution response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/put_distribution_config.rb000066400000000000000000000124651437344660100252010ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/distribution' # Update a distribution in CloudFront. # # @param distribution_id [String] Id of distribution to update config for. # @param options [Hash] Config for distribution. # # REQUIRED: # * S3Origin [Hash]: # * DNSName [String] - origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. # * OriginAccessIdentity [String] - Optional: Used when serving private content. # or # * CustomOrigin [Hash]: # * DNSName [String] - Origin to associate with distribution, ie 'www.example.com'. # * HTTPPort [Integer] - HTTP port of origin, in [80, 443] or (1024...65535). # * HTTPSPort [Integer] - HTTPS port of origin, in [80, 443] or (1024...65535). # * OriginProtocolPolicy [String] - Policy on using http vs https, in ['http-only', 'match-viewer']. # OPTIONAL: # * CallerReference [String] Used to prevent replay, defaults to Time.now.to_i.to_s. # * Comment [String] Optional comment about distribution. # * CNAME [Array] Optional array of strings to set as CNAMEs. # * DefaultRootObject [String] Optional default object to return for '/'. # * Enabled [Boolean] Whether or not distribution should accept requests, defaults to true. # * Logging [Hash]: Optional logging config. # * Bucket [String] Bucket to store logs in, ie 'mylogs.s3.amazonaws.com'. # * Prefix [String] Optional prefix for log filenames, ie 'myprefix/'. # * OriginAccessIdentity [String] Used for serving private content, in format 'origin-access-identity/cloudfront/ID'. # * RequiredProtocols [String] Optional, set to 'https' to force https connections. 
# * TrustedSigners [Array] Optional grant of rights to up to 5 aws accounts to generate signed URLs for private content, elements are either 'Self' for your own account or an AWS Account Number. # # @return [Excon::Response] # * body [Hash]: # * DomainName [String]: Domain name of distribution. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Status [String] - Status of distribution. # * DistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. # * Origin [String] - S3 origin bucket. # * TrustedSigners [Array] - Trusted signers. # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/CreateDistribution.html def put_distribution_config(distribution_id, etag, options = {}) data = '' data << "" for key, value in options case value when Array for item in value data << "<#{key}>#{item}" end when Hash data << "<#{key}>" for inner_key, inner_value in value data << "<#{inner_key}>#{inner_value}" end data << "" else data << "<#{key}>#{value}" end end data << "" request({ :body => data, :expects => 200, :headers => { 'Content-Type' => 'text/xml', 'If-Match' => etag }, :idempotent => true, :method => 'PUT', :parser => Fog::Parsers::AWS::CDN::Distribution.new, :path => "/distribution/#{distribution_id}/config" }) end end class Mock def put_distribution_config(distribution_id, etag, options = {}) distribution = self.data[:distributions][distribution_id] if distribution if distribution['ETag'] != etag Fog::AWS::CDN::Mock.error(:invalid_if_match_version) end unless distribution['DistributionConfig']['CallerReference'] Fog::AWS::CDN::Mock.error(:illegal_update) end distribution['DistributionConfig'].merge!(options) distribution['Status'] = 'InProgress' response = Excon::Response.new response.status = 200 response.headers['ETag'] = Fog::AWS::CDN::Mock.generic_id response.body = distribution.merge({ 'LastModifiedTime' => Time.now.utc.iso8601 }).reject{ |k,v| k == 'ETag' } response else Fog::AWS::CDN::Mock.error(:no_such_distribution) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cdn/put_streaming_distribution_config.rb000066400000000000000000000107251437344660100272470ustar00rootroot00000000000000module Fog module AWS class CDN class Real require 'fog/aws/parsers/cdn/streaming_distribution' # Update a streaming distribution in CloudFront. # # @param distribution_id [String] - Id of distribution to update config for. # @param options [Hash] - Config for distribution. # # REQUIRED: # * S3Origin [Hash]: # * DNSName [String] Origin to associate with distribution, ie 'mybucket.s3.amazonaws.com'. 
# OPTIONAL: # @option options CallerReference [String] Used to prevent replay, defaults to Time.now.to_i.to_s # @option options Comment [String] Optional comment about distribution # @option options CNAME [Array] Optional array of strings to set as CNAMEs # @option options Enabled [Boolean] Whether or not distribution should accept requests, defaults to true # @option options Logging [Hash]: Optional logging config # * Bucket [String] Bucket to store logs in, ie 'mylogs.s3.amazonaws.com' # * Prefix String] Optional prefix for log filenames, ie 'myprefix/' # # @return [Excon::Response] # * body [Hash]: # * DomainName [String] - Domain name of distribution. # * Id [String] - Id of distribution. # * LastModifiedTime [String] - Timestamp of last modification of distribution. # * Status [String] - Status of distribution. # * StreamingDistributionConfig [Array]: # * CallerReference [String] - Used to prevent replay, defaults to Time.now.to_i.to_s. # * CNAME [Array] - Array of associated cnames. # * Comment [String] - Comment associated with distribution. # * Enabled [Boolean] - Whether or not distribution is enabled. # * Logging [Hash]: # * Bucket [String] - Bucket logs are stored in. # * Prefix [String] - Prefix logs are stored with. # * Origin [String] - S3 origin bucket. # * TrustedSigners [Array] - Trusted signers. # # @see http://docs.amazonwebservices.com/AmazonCloudFront/latest/APIReference/PutStreamingDistribution.html def put_streaming_distribution_config(distribution_id, etag, options = {}) data = '' data << "" for key, value in options case value when Array for item in value data << "<#{key}>#{item}" end when Hash data << "<#{key}>" for inner_key, inner_value in value data << "<#{inner_key}>#{inner_value}" end data << "" else data << "<#{key}>#{value}" end end data << "" request({ :body => data, :expects => 200, :headers => { 'Content-Type' => 'text/xml', 'If-Match' => etag }, :idempotent => true, :method => 'PUT', :parser => Fog::Parsers::AWS::CDN::StreamingDistribution.new, :path => "/streaming-distribution/#{distribution_id}/config" }) end end class Mock def put_streaming_distribution_config(distribution_id, etag, options = {}) distribution = self.data[:streaming_distributions][distribution_id] if distribution if distribution['ETag'] != etag Fog::AWS::CDN::Mock.error(:invalid_if_match_version) end unless distribution['StreamingDistributionConfig']['CallerReference'] Fog::AWS::CDN::Mock.error(:illegal_update) end distribution['StreamingDistributionConfig'].merge!(options) distribution['Status'] = 'InProgress' response = Excon::Response.new response.status = 200 response.headers['ETag'] = Fog::AWS::CDN::Mock.generic_id response.body = distribution.merge({ 'LastModifiedTime' => Time.now.utc.iso8601 }).reject{ |k,v| k == 'ETag' } response else Fog::AWS::CDN::Mock.error(:no_such_streaming_distribution) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/000077500000000000000000000000001437344660100223305ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/cancel_update_stack.rb000066400000000000000000000012761437344660100266370ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Cancels an update on the specified stack. # # @param stack_name String] Name of the stack to cancel update. 
# # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_CancelUpdateStack.html def cancel_update_stack(stack_name) request( 'Action' => 'CancelUpdateStack', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/continue_update_rollback.rb000066400000000000000000000015351437344660100277200ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # For a specified stack that is in the UPDATE_ROLLBACK_FAILED state, # continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. # # @param stack_name [String] The name or the unique ID of the stack that you want to continue rolling back. # # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_ContinueUpdateRollback.html def continue_update_rollback(stack_name) request( 'Action' => 'ContinueUpdateRollback', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/create_change_set.rb000066400000000000000000000056121437344660100263040ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/create_change_set' # Create a Change Set. # # * stack_name [String] Name of the stack to create. # * options [Hash]: # * ChangeSetName [String] The name of the change set. # * Description [String] A description to help you identify this change set. # * TemplateBody [String] Structure containing the template body. # or (one of the two Template parameters is required) # * TemplateURL [String] URL of file containing the template body. # * UsePreviousTemplate [Boolean] Reuse the template that is associated with the stack to create the change set. # * NotificationARNs [Array] List of SNS topics to publish events to. # * Parameters [Hash] Hash of providers to supply to template. # * Capabilities [Array] List of capabilties the stack is granted. Currently CAPABILITY_IAM for allowing the creation of IAM resources. 
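        # @example Hypothetical sketch (connection settings, names and parameter values are placeholders):
        #   cloudformation = Fog::AWS::CloudFormation.new(aws_access_key_id: 'KEY', aws_secret_access_key: 'SECRET')
        #   cloudformation.create_change_set('my-stack',
        #     'ChangeSetName'       => 'widen-asg',
        #     'UsePreviousTemplate' => true,
        #     'Parameters'          => { 'MaxSize' => '8' }
        #   )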
# # @return [Excon::Response]: # * body [Hash: # * Id [String] - The Amazon Resource Name (ARN) of the change set # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_CreateChangeSet.html def create_change_set(stack_name, options = {}) params = { 'StackName' => stack_name, } if options['ChangeSetName'] params['ChangeSetName'] = options['ChangeSetName'] end if options['Description'] params['Description'] = options['Description'] end if options['UsePreviousTemplate'] params['UsePreviousTemplate'] = options['UsePreviousTemplate'] end if options['NotificationARNs'] params.merge!(Fog::AWS.indexed_param("NotificationARNs.member", [*options['NotificationARNs']])) end if options['Parameters'] options['Parameters'].keys.each_with_index do |key, index| index += 1 # params are 1-indexed params.merge!({ "Parameters.member.#{index}.ParameterKey" => key, "Parameters.member.#{index}.ParameterValue" => options['Parameters'][key] }) end end if options['TemplateBody'] params['TemplateBody'] = options['TemplateBody'] elsif options['TemplateURL'] params['TemplateURL'] = options['TemplateURL'] end if options['Capabilities'] params.merge!(Fog::AWS.indexed_param("Capabilities.member", [*options['Capabilities']])) end request({ 'Action' => 'CreateChangeSet', :parser => Fog::Parsers::AWS::CloudFormation::CreateChangeSet.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/create_stack.rb000066400000000000000000000076001437344660100253100ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/create_stack' # Create a stack. # # * stack_name [String] Name of the stack to create. # * options [Hash]: # * TemplateBody [String] Structure containing the template body. # or (one of the two Template parameters is required) # * TemplateURL [String] URL of file containing the template body. # * DisableRollback [Boolean] Controls rollback on stack creation failure, defaults to false. # * OnFailure [String] Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. # * NotificationARNs [Array] List of SNS topics to publish events to. # * Parameters [Hash] Hash of providers to supply to template # * TimeoutInMinutes [Integer] Minutes to wait before status is set to CREATE_FAILED # * Capabilities [Array] List of capabilties the stack is granted. Currently CAPABILITY_IAM for allowing the creation of IAM resources # * StackPolicyBody [String] Structure containing the stack policy body. # * StackPolicyURL [String] URL of file containing the stack policy. # * Tags [Array] Key-value pairs to associate with this stack. 
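        # @example Hypothetical sketch (assumes a Fog::AWS::CloudFormation connection in `cloudformation`; template, parameter values and tags are placeholders; Parameters and Tags are passed as Hashes of key/value pairs, which is what the implementation above iterates over):
        #   cloudformation.create_stack('my-stack',
        #     'TemplateBody' => File.read('template.json'),
        #     'Parameters'   => { 'KeyName' => 'my-keypair' },
        #     'Capabilities' => ['CAPABILITY_IAM'],
        #     'Tags'         => { 'team' => 'platform' }
        #   )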
# # @return [Excon::Response]: # * body [Hash: # * StackId [String] - Id of the new stack # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html def create_stack(stack_name, options = {}) params = { 'StackName' => stack_name, } if options['DisableRollback'] params['DisableRollback'] = options['DisableRollback'] end if options['OnFailure'] params['OnFailure'] = options['OnFailure'] end if options['NotificationARNs'] params.merge!(Fog::AWS.indexed_param("NotificationARNs.member", [*options['NotificationARNs']])) end if options['Parameters'] options['Parameters'].keys.each_with_index do |key, index| index += 1 # params are 1-indexed params.merge!({ "Parameters.member.#{index}.ParameterKey" => key, "Parameters.member.#{index}.ParameterValue" => options['Parameters'][key] }) end end num_tags = 0 if options['Tags'] options['Tags'].keys.each_with_index do |key, index| index += 1 # tags are 1-indexed num_tags += 1 # 10 tag max params.merge!({ "Tags.member.#{index}.Key" => key, "Tags.member.#{index}.Value" => options['Tags'][key] }) end end if num_tags > 10 raise ArgumentError.new("a maximum of 10 tags can be specified <#{num_tags}>") end if options['TemplateBody'] params['TemplateBody'] = options['TemplateBody'] elsif options['TemplateURL'] params['TemplateURL'] = options['TemplateURL'] end if options['StackPolicyBody'] params['StackPolicyBody'] = options['StackPolicyBody'] elsif options['StackPolicyURL'] params['StackPolicyURL'] = options['StackPolicyURL'] end if options['TimeoutInMinutes'] params['TimeoutInMinutes'] = options['TimeoutInMinutes'] end if options['Capabilities'] params.merge!(Fog::AWS.indexed_param("Capabilities.member", [*options['Capabilities']])) end request({ 'Action' => 'CreateStack', :parser => Fog::Parsers::AWS::CloudFormation::CreateStack.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/delete_change_set.rb000066400000000000000000000015101437344660100262740ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Delete a change set. # # @param ChangeSetName [String] The name of the change set to delete. # @option options StackName [String] The Stack name or ID (ARN) that is associated with change set. # # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DeleteChangeSet.html def delete_change_set(change_set_name, options = {}) options['ChangeSetName'] = change_set_name request({ 'Action' => 'DeleteChangeSet', :parser => Fog::Parsers::AWS::CloudFormation::Basic.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/delete_stack.rb000066400000000000000000000012131437344660100253010ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Delete a stack. # # @param stack_name [String] Name of the stack to create. 
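        # @example Hypothetical sketch (assumes a Fog::AWS::CloudFormation connection in `cloudformation`; the stack name is a placeholder):
        #   cloudformation.delete_stack('my-stack')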
# # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DeleteStack.html def delete_stack(stack_name) request( 'Action' => 'DeleteStack', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_account_limits.rb000066400000000000000000000013431437344660100275330ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_account_limits' # Describe account_limits. # # # @return [Excon::Response] # * body [Hash]: # * AccountLimits [Array] # * member [Hash]: # * StackLimit [Integer] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeAccountLimits.html def describe_account_limits() request( 'Action' => 'DescribeAccountLimits', :parser => Fog::Parsers::AWS::CloudFormation::DescribeAccountLimits.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_change_set.rb000066400000000000000000000030271437344660100266170ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_change_set' # Describe change_set. # # * ChangeSetName [String] The name of the change set to describe. # @param options [Hash] # @option options StackName [String] Name of the stack for the change set. # # @return [Excon::Response] # * body [Hash]: # * ChangeSetId [String] - # * ChangeSetName [String] - # * Description [String] - # * CreationTime [Time] - # * ExecutionStatus [String] - # * StackId [String] - # * StackName [String] - # * Status [String] - # * StackReason [String] - # * NotificationARNs [Array] - # * NotificationARN [String] - # * Parameters [Array] - # * parameter [Hash]: # * ParameterKey [String] - # * ParameterValue [String] - # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeChangeSet.html def describe_change_set(change_set_name, options = {}) options['ChangeSetName'] = change_set_name request({ 'Action' => 'DescribeChangeSet', :parser => Fog::Parsers::AWS::CloudFormation::DescribeChangeSet.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_stack_events.rb000066400000000000000000000025771437344660100272210ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_stack_events' # Describe stack events. # # @param stack_name [String] stack name to return events for. # @param options [Hash] # @option options NextToken [String] Identifies the start of the next list of events, if there is one. 
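        # @example Hypothetical sketch (assumes a Fog::AWS::CloudFormation connection in `cloudformation`; the stack name is a placeholder):
        #   resp = cloudformation.describe_stack_events('my-stack')
        #   resp.body['StackEvents'].each { |e| puts "#{e['Timestamp']} #{e['LogicalResourceId']} #{e['ResourceStatus']}" }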
# # @return [Excon::Response] # * body [Hash]: # * StackEvents [Array] - Matching resources # * event [Hash]: # * EventId [String] - # * StackId [String] - # * StackName [String] - # * LogicalResourceId [String] - # * PhysicalResourceId [String] - # * ResourceType [String] - # * Timestamp [Time] - # * ResourceStatus [String] - # * ResourceStatusReason [String] - # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeStackEvents.html def describe_stack_events(stack_name, options = {}) request({ 'Action' => 'DescribeStackEvents', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::DescribeStackEvents.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_stack_resource.rb000066400000000000000000000027121437344660100275330ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_stack_resource' # Describe stack resource. # # @param options Hash]: # * LogicalResourceId [String] Logical name of the resource as specified in the template # * StackName [String] The name or the unique stack ID # # @return [Excon::Response] # * body [Hash]: # * StackResourceDetail [Hash] - Matching resources # *Description [String] - # * LastUpdatedTimestamp [Timestamp] - # * LogicalResourceId [String] - # * Metadata [String] - # * PhysicalResourceId [String] - # * ResourceStatus [String] - # * ResourceStatusReason [String] - # * ResourceType [String] - # * StackId [String] - # * StackName [String] - # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeStackResource.html def describe_stack_resource(logical_resource_id, stack_name ) request( 'Action' => 'DescribeStackResource', 'LogicalResourceId' => logical_resource_id, 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::DescribeStackResource.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_stack_resources.rb000066400000000000000000000026721437344660100277230ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_stack_resources' # Describe stack resources. # # @param options Hash]: # * PhysicalResourceId [String] name or unique identifier that corresponds to a physical instance ID # or (one of PhysicalResourceId and StackName is required) # * StackName [String] Only return events related to this stack name # * LogicalResourceId [String] Logical name of the resource as specified in the template # # @return [Excon::Response] # * body [Hash]: # * StackResources [Array] - Matching resources # * resource [Hash]: # * StackId [String] - # * StackName [String] - # * LogicalResourceId [String] - # * PhysicalResourceId [String] - # * ResourceType [String] - # * Timestamp [Time] - # * ResourceStatus [String] - # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeStackResources.html def describe_stack_resources(options = {}) request({ 'Action' => 'DescribeStackResources', :parser => Fog::Parsers::AWS::CloudFormation::DescribeStackResources.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/describe_stacks.rb000066400000000000000000000022501437344660100260040ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/describe_stacks' # Describe stacks. 
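        # @example Hypothetical sketch (assumes a Fog::AWS::CloudFormation connection in `cloudformation`; the stack name is a placeholder):
        #   resp = cloudformation.describe_stacks('StackName' => 'my-stack')
        #   resp.body['Stacks'].first['StackStatus']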
# # @param options [Hash] # @option options StackName [String] Name of the stack to describe. # # @return [Excon::Response] # * body [Hash]: # * Stacks [Array] - Matching stacks # * stack [Hash]: # * StackName [String] - # * StackId [String] - # * CreationTime [String] - # * StackStatus [String] - # * DisableRollback [String] - # * Outputs [Array] - # * output [Hash]: # * OutputKey [String] - # * OutputValue [String] - # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_DescribeStacks.html def describe_stacks(options = {}) request({ 'Action' => 'DescribeStacks', :parser => Fog::Parsers::AWS::CloudFormation::DescribeStacks.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/estimate_template_cost.rb000066400000000000000000000033561437344660100274220ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/estimate_template_cost' # Returns the estimated monthly cost of a template. # # * options [Hash]: # * TemplateBody [String] Structure containing the template body. # or (one of the two Template parameters is required) # * TemplateURL [String] URL of file containing the template body. # * Parameters [Hash] Hash of providers to supply to template # # @return [Excon::Response]: # * body [Hash: # * Url [String] - An AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template. # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_EstimateTemplateCost.html def estimate_template_cost(options = {}) params = {} if options['Parameters'] options['Parameters'].keys.each_with_index do |key, index| index += 1 # params are 1-indexed params.merge!({ "Parameters.member.#{index}.ParameterKey" => key, "Parameters.member.#{index}.ParameterValue" => options['Parameters'][key] }) end end if options['TemplateBody'] params['TemplateBody'] = options['TemplateBody'] elsif options['TemplateURL'] params['TemplateURL'] = options['TemplateURL'] end request({ 'Action' => 'EstimateTemplateCost', :parser => Fog::Parsers::AWS::CloudFormation::EstimateTemplateCost.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/execute_change_set.rb000066400000000000000000000015141437344660100265000ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Execute a change set. # # @param ChangeSetName [String] The name of the change set to delete. # @option options StackName [String] The Stack name or ID (ARN) that is associated with change set. # # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_ExecuteChangeSet.html def execute_change_set(change_set_name, options = {}) options['ChangeSetName'] = change_set_name request({ 'Action' => 'ExecuteChangeSet', :parser => Fog::Parsers::AWS::CloudFormation::Basic.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/get_stack_policy.rb000066400000000000000000000015331437344660100262020ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/get_stack_policy' # Describe stacks. # # @param stack_name [String] The name or unique stack ID that is associated with the stack whose policy you want to get. 
# # @return [Excon::Response] # * body [Hash]: # * StackPolicyBody [String] - Structure containing the stack policy body. # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_GetStackPolicy.html def get_stack_policy(stack_name) request( 'Action' => 'GetStackPolicy', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::GetStackPolicy.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/get_template.rb000066400000000000000000000014151437344660100253300ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/get_template' # Describe stacks. # # @param stack_name [String] stack name to get template from # # @return [Excon::Response] # * body [Hash]: # * TemplateBody [String] - structure containing the template body (json) # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_GetTemplate.html def get_template(stack_name) request( 'Action' => 'GetTemplate', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::GetTemplate.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/get_template_summary.rb000066400000000000000000000034421437344660100271070ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/get_template_summary' # Returns information about a new or existing template. # # * options [Hash]: # * stack_name [String] Name of the stack or the stack ID. # or # * TemplateBody [String] Structure containing the template body. # or # * TemplateURL [String] URL of file containing the template body. # # @return [Excon::Response]: # * body [Hash: # * Capabilities [Array] List of capabilties in the template. # * CapabilitiesReason [String] The list of resources that generated the values in the Capabilities response element. # * Description [String] Template Description. # * Metadata [String] Template Metadata. # * Parameters [Array] A list of parameter declarations that describe various properties for each parameter. # * ResourceTypes [Array] all the template resource types that are defined in the template # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_GetTemplateSummary.html def get_template_summary(options = {}) params = {} if options['StackName'] params['StackName'] = options['StackName'] elsif options['TemplateBody'] params['TemplateBody'] = options['TemplateBody'] elsif options['TemplateURL'] params['TemplateURL'] = options['TemplateURL'] end request({ 'Action' => 'GetTemplateSummary', :parser => Fog::Parsers::AWS::CloudFormation::GetTemplateSummary.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/list_change_sets.rb000066400000000000000000000024741437344660100262020ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/list_change_sets' # List change sets. # # @param stack_name String] Name or the ARN of the stack for which you want to list change sets. # # @option options StackName [String] Name of the stack to describe. 
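        # @example Hypothetical sketch (assumes a Fog::AWS::CloudFormation connection in `cloudformation`; the stack name is a placeholder):
        #   resp = cloudformation.list_change_sets('my-stack')
        #   resp.body['Summaries'].map { |cs| [cs['ChangeSetName'], cs['ExecutionStatus']] }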
# # @return [Excon::Response] # * body [Hash]: # * Summaries [Array] - Matching change sets # * stack [Hash]: # * ChangeSetId [String] - # * ChangeSetName [String] - # * Description [String] - # * CreationTime [Time] - # * ExecutionStatus [String] - # * StackId [String] - # * StackName [String] - # * Status [String] - # * StackReason [String] - # # # @see http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListChangeSets.html def list_change_sets(stack_name, options = {}) request({ 'Action' => 'ListChangeSets', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::ListChangeSets.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/list_stack_resources.rb000066400000000000000000000021041437344660100271040ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/list_stack_resources' # List stack resources. # # @param options [Hash] # @option options StackName [String] Name of the stack to describe. # # @return [Excon::Response] # * body [Hash]: # * StackResourceSummaries [Array] - Matching stacks # * resources [Hash]: # * ResourceStatus [String] - # * LogicalResourceId [String] - # * PhysicalResourceId [String] - # * ResourceType [String] - # * LastUpdatedTimestamp [Time] - # # # @see http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStacks.html def list_stack_resources(options = {}) request({ 'Action' => 'ListStackResources', :parser => Fog::Parsers::AWS::CloudFormation::ListStackResources.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/list_stacks.rb000066400000000000000000000020001437344660100251700ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/list_stacks' # List stacks. # # @param options [Hash] # # @return [Excon::Response] # * body [Hash]: # * StackSummaries [Array] - Matching stacks # * stack [Hash]: # * StackId [String] - # * StackName [String] - # * TemplateDescription [String] - # * CreationTime [Time] - # * DeletionTime [Time] - # * StackStatus [String] - # * DeletionTime [String] - # # # @see http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_ListStacks.html def list_stacks(options = {}) request({ 'Action' => 'ListStacks', :parser => Fog::Parsers::AWS::CloudFormation::ListStacks.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/set_stack_policy.rb000066400000000000000000000024751437344660100262240ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Sets a stack policy for a specified stack. # # @param stack_name [String] Name or unique stack ID that you want to associate a policy with. # * options [Hash]: # * StackPolicyBody [String] Structure containing the stack policy body. # or (one of the two StackPolicy parameters is required) # * StackPolicyURL [String] URL of file containing the stack policy. 
# * Parameters [Hash] Hash of providers to supply to StackPolicy # # @return [Excon::Response]: # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_SetStackPolicy.html def set_stack_policy(stack_name, options = {}) params = {} if options['StackPolicyBody'] params['StackPolicyBody'] = options['StackPolicyBody'] elsif options['StackPolicyURL'] params['StackPolicyURL'] = options['StackPolicyURL'] end request({ 'Action' => 'SetStackPolicy', 'StackName' => stack_name, :parser => Fog::Parsers::AWS::CloudFormation::Basic.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/signal_resource.rb000066400000000000000000000022411437344660100260400ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/basic' # Sends a signal to the specified resource. # # @param options Hash]: # * LogicalResourceId [String] The logical ID of the resource that you want to signal. # * StackName [String] The stack name or unique stack ID that includes the resource that you want to signal. # * Status [String] The status of the signal, which is either success or failure. # * UniqueId [String] A unique ID of the signal. # # @return [Excon::Response] # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_SignalResource.html def signal_resource(logical_resource_id, stack_name, status, unique_id ) request( 'Action' => 'SignalResource', 'LogicalResourceId' => logical_resource_id, 'StackName' => stack_name, 'Status' => status, 'UniqueId' => unique_id, :parser => Fog::Parsers::AWS::CloudFormation::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/update_stack.rb000066400000000000000000000103721437344660100253270ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/update_stack' # Update a stack. # # @param [String] stack_name Name of the stack to update. # @param [Hash] options # * TemplateBody [String] Structure containing the template body. # or (one of the two Template parameters is required) # * TemplateURL [String] URL of file containing the template body. # * Parameters [Hash] Hash of providers to supply to template. # * Capabilities [Array] List of capabilties the stack is granted. Currently CAPABILITY_IAM for allowing the creation of IAM resources. # * NotificationARNs [Array] List of SNS topics to publish events to. # * ResourceTypes [Array] The template resource types that you have permissions to work. # * StackPolicyBody [String] Structure containing the stack policy body. # * StackPolicyURL [String] URL of file containing the stack policy. # * StackPolicyDuringUpdateBody [String] Structure containing the stack policy body to use during update. # * StackPolicyDuringUpdateURL [String] URL of file containing the stack policy to use during update. # * Tags [Array] Key-value pairs to associate with this stack. # * UsePreviousTemplate [Boolean] Reuse the existing template that is associated with the stack that you are updating. 
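        #
        # @example Updating a stack's template and parameters (illustrative sketch; `cloudformation`
        #   is assumed to be a Fog::AWS::CloudFormation connection created elsewhere, and the
        #   parameter name below is a placeholder):
        #   cloudformation.update_stack('my-stack',
        #     'TemplateBody' => File.read('template.json'),
        #     'Parameters'   => { 'InstanceType' => 't3.micro' }
        #   )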
# # @return [Excon::Response] # * body [Hash]: # * StackId [String] - Id of the stack being updated # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_UpdateStack.html # def update_stack(stack_name, options = {}) params = { 'StackName' => stack_name, } if options['Parameters'] options['Parameters'].keys.each_with_index do |key, index| index += 1 # params are 1-indexed params.merge!({ "Parameters.member.#{index}.ParameterKey" => key, "Parameters.member.#{index}.ParameterValue" => options['Parameters'][key] }) end end if options['TemplateBody'] params['TemplateBody'] = options['TemplateBody'] elsif options['TemplateURL'] params['TemplateURL'] = options['TemplateURL'] end if options['StackPolicyBody'] params['StackPolicyBody'] = options['StackPolicyBody'] elsif options['StackPolicyURL'] params['StackPolicyURL'] = options['StackPolicyURL'] end if options['StackPolicyDuringUpdateBody'] params['StackPolicyDuringUpdateBody'] = options['StackPolicyDuringUpdateBody'] elsif options['StackPolicyDuringUpdateURL'] params['StackPolicyDuringUpdateURL'] = options['StackPolicyDuringUpdateURL'] end num_tags = 0 if options['Tags'] options['Tags'].keys.each_with_index do |key, index| index += 1 # tags are 1-indexed num_tags += 1 # 10 tag max params.merge!({ "Tags.member.#{index}.Key" => key, "Tags.member.#{index}.Value" => options['Tags'][key] }) end end if num_tags > 10 raise ArgumentError.new("a maximum of 10 tags can be specified <#{num_tags}>") end if options['Capabilities'] params.merge!(Fog::AWS.indexed_param("Capabilities.member", [*options['Capabilities']])) end if options['NotificationARNs'] params.merge!(Fog::AWS.indexed_param("NotificationARNs.member", [*options['NotificationARNs']])) end if options['ResourceTypes'] params.merge!(Fog::AWS.indexed_param("ResourceTypes.member", [*options['ResourceTypes']])) end if options['UsePreviousTemplate'] params['UsePreviousTemplate'] = options['UsePreviousTemplate'] end request({ 'Action' => 'UpdateStack', :parser => Fog::Parsers::AWS::CloudFormation::UpdateStack.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_formation/validate_template.rb000066400000000000000000000016711437344660100263460ustar00rootroot00000000000000module Fog module AWS class CloudFormation class Real require 'fog/aws/parsers/cloud_formation/validate_template' # Describe stacks. 
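        # Validates the supplied CloudFormation template.
        #
        # @example Validating an inline template body (illustrative sketch; `cloudformation`
        #   is assumed to be a Fog::AWS::CloudFormation connection created elsewhere):
        #   cloudformation.validate_template('TemplateBody' => File.read('template.json'))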
# # @param [Hash] options # @option options [String] TemplateBody template structure # @option options [String] TemplateURL template url # # @return [Excon::Response] # * body [Hash]: # * Description [String] - description found within the template # * Parameters [String] - list of template parameter structures # # @see http://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_ValidateTemplate.html # def validate_template(options = {}) request({ 'Action' => 'ValidateTemplate', :parser => Fog::Parsers::AWS::CloudFormation::ValidateTemplate.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/000077500000000000000000000000001437344660100214405ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/delete_alarms.rb000066400000000000000000000026411437344660100245710ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/delete_alarms' # Delete a list of alarms # ==== Options # * AlarmNames<~Array>: A list of alarms to be deleted # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/index.html?API_DeleteAlarms.html # def delete_alarms(alarm_names) options = {} options.merge!(AWS.indexed_param('AlarmNames.member.%d', [*alarm_names])) request({ 'Action' => 'DeleteAlarms', :parser => Fog::Parsers::AWS::CloudWatch::DeleteAlarms.new }.merge(options)) end end class Mock def delete_alarms(alarm_names) [*alarm_names].each do |alarm_name| unless data[:metric_alarms].key?(alarm_name) raise Fog::AWS::AutoScaling::NotFound, "The alarm '#{alarm_name}' does not exist." end end [*alarm_names].each { |alarm_name| data[:metric_alarms].delete(alarm_name) } response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/describe_alarm_history.rb000066400000000000000000000022731437344660100265060ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/describe_alarm_history' # Retrieves history for the specified alarm # ==== Options # * AlarmName<~String>: The name of the alarm # * EndDate<~DateTime>: The ending date to retrieve alarm history # * HistoryItemType<~String>: The type of alarm histories to retrieve # * MaxRecords<~Integer>: The maximum number of alarm history records to retrieve # * NextToken<~String> The token returned by a previous call to indicate that there is more data available # * StartData<~DateTime>: The starting date to retrieve alarm history # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/index.html?API_DescribeAlarmHistory.html # def describe_alarm_history(options={}) request({ 'Action' => 'DescribeAlarmHistory', :parser => Fog::Parsers::AWS::CloudWatch::DescribeAlarmHistory.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/describe_alarms.rb000066400000000000000000000044571437344660100251160ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/describe_alarms' # Retrieves alarms with the specified names # ==== Options # * ActionPrefix<~String>: The action name prefix # * AlarmNamePrefix<~String>: The alarm name prefix. 
# AlarmNames cannot be specified if this parameter is specified # * AlarmNames<~Array>: A list of alarm names to retrieve information for. # * MaxRecords<~Integer>: The maximum number of alarm descriptions to retrieve # * NextToken<~String>: The token returned by a previous call to indicate that there is more data available # * NextToken<~String> The token returned by a previous call to indicate that there is more data available # * StateValue<~String>: The state value to be used in matching alarms # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html # def describe_alarms(options={}) if alarm_names = options.delete('AlarmNames') options.merge!(AWS.indexed_param('AlarmNames.member.%d', [*alarm_names])) end request({ 'Action' => 'DescribeAlarms', :parser => Fog::Parsers::AWS::CloudWatch::DescribeAlarms.new }.merge(options)) end end class Mock def describe_alarms(options={}) records = if alarm_names = options.delete('AlarmNames') [*alarm_names].inject({}) do |r, name| (record = data[:metric_alarms][name]) ? r.merge(name => record) : r end else self.data[:metric_alarms] end results = records.inject([]) do |r, (name, data)| r << {'AlarmName' => name}.merge(data) end response = Excon::Response.new response.status = 200 response.body = { 'DescribeAlarmsResult' => { 'MetricAlarms' => results }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/describe_alarms_for_metric.rb000066400000000000000000000027541437344660100273250ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/describe_alarms_for_metric' # Retrieves all alarms for a single metric # ==== Options # * Dimensions<~Array>: a list of dimensions to filter against # Name : The name of the dimension # Value : The value to filter against # * MetricName<~String>: The name of the metric # * Namespace<~String>: The namespace of the metric # * Period<~Integer>: The period in seconds over which the statistic is applied # * Statistics<~String>: The statistic for the metric # * Unit<~String> The unit for the metric # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html # def describe_alarms_for_metric(options) if dimensions = options.delete('Dimensions') options.merge!(AWS.indexed_param('Dimensions.member.%d.Name', dimensions.map {|dimension| dimension['Name']})) options.merge!(AWS.indexed_param('Dimensions.member.%d.Value', dimensions.map {|dimension| dimension['Value']})) end request({ 'Action' => 'DescribeAlarmsForMetric', :parser => Fog::Parsers::AWS::CloudWatch::DescribeAlarmsForMetric.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/disable_alarm_actions.rb000066400000000000000000000015751437344660100262740ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/disable_alarm_actions' # Disables actions for the specified alarms # ==== Options # * AlarmNames<~Array>: The names of the alarms to disable actions for # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_DisableAlarmActions.html # def disable_alarm_actions(alarm_names) options = {} 
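          # Expand the list of alarm names into indexed AlarmNames.member.N request parameters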
options.merge!(AWS.indexed_param('AlarmNames.member.%d', [*alarm_names])) request({ 'Action' => 'DisableAlarmActions', :parser => Fog::Parsers::AWS::CloudWatch::DisableAlarmActions.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/enable_alarm_actions.rb000066400000000000000000000015661437344660100261170ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/enable_alarm_actions' # Enables actions for the specified alarms # ==== Options # * AlarmNames<~Array>: The names of the alarms to enable actions for # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_EnableAlarmActions.html # def enable_alarm_actions(alarm_names) options = {} options.merge!(AWS.indexed_param('AlarmNames.member.%d', [*alarm_names])) request({ 'Action' => 'EnableAlarmActions', :parser => Fog::Parsers::AWS::CloudWatch::EnableAlarmActions.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/get_metric_statistics.rb000066400000000000000000000042771437344660100263730ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/get_metric_statistics' # Fetch datapoints for a metric. At most 1440 datapoints will be returned, the most datapoints that can be queried is 50850 # StartTime is capped to 2 weeks ago # ==== Options # * Namespace<~String>: the namespace of the metric # * MetricName<~String>: the name of the metric # * StartTime<~Datetime>: when to start fetching datapoints from (inclusive) # * EndTime<~Datetime>: used to determine the last datapoint to fetch (exclusive) # * Period<~Integer>: Granularity, in seconds of the returned datapoints. 
Must be a multiple of 60, and at least 60 # * Statistics<~Array>: An array of up to 5 strings, which name the statistics to return # * Unit<~String>: The unit for the metric # * Dimensions<~Array>: a list of dimensions to filter against (optional) # Name : The name of the dimension # Value : The value to filter against # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html # def get_metric_statistics(options={}) %w{Statistics StartTime EndTime Period MetricName Namespace}.each do |required_parameter| raise ArgumentError, "Must provide #{required_parameter}" unless options.key?(required_parameter) end statistics = options.delete 'Statistics' options.merge!(AWS.indexed_param('Statistics.member.%d', [*statistics])) if dimensions = options.delete('Dimensions') options.merge!(AWS.indexed_param('Dimensions.member.%d.Name', dimensions.map {|dimension| dimension['Name']})) options.merge!(AWS.indexed_param('Dimensions.member.%d.Value', dimensions.map {|dimension| dimension['Value']})) end request({ 'Action' => 'GetMetricStatistics', :parser => Fog::Parsers::AWS::CloudWatch::GetMetricStatistics.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/list_metrics.rb000066400000000000000000000042071437344660100244710ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/list_metrics' # List availabe metrics # # ==== Options # * Dimensions<~Array>: a list of dimensions to filter against, # Name : The name of the dimension # Value : The value to filter against # * MetricName<~String>: The name of the metric to filter against # * Namespace<~String>: The namespace to filter against # * NextToken<~String> The token returned by a previous call to indicate that there is more data available # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html # def list_metrics(options={}) if dimensions = options.delete('Dimensions') options.merge!(AWS.indexed_param('Dimensions.member.%d.Name', dimensions.map {|dimension| dimension['Name']})) options.merge!(AWS.indexed_param('Dimensions.member.%d.Value', dimensions.map {|dimension| dimension['Value']})) end request({ 'Action' => 'ListMetrics', :parser => Fog::Parsers::AWS::CloudWatch::ListMetrics.new }.merge(options)) end end class Mock def list_metrics(options={}) body = case options["NextToken"] when nil { "ListMetricsResult" => { "Metrics" => (0...500).map{ {} }, "NextToken" => '1' }} when "1" { "ListMetricsResult" => { "Metrics" => (0...500).map{ {} }, "NextToken" => '2' }} when "2" { "ListMetricsResult" => { "Metrics" => (0...1).map{ {} } }} end Excon::Response.new.tap do |response| response.body = body response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/put_metric_alarm.rb000066400000000000000000000105621437344660100253200ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/put_metric_alarm' # Creates or updates an alarm and associates it with the specified Amazon CloudWatch metric # ==== Options # * ActionsEnabled<~Boolean>: Indicates whether or not actions should be executed during any changes to the alarm's state # * AlarmActions<~Array>: A list of actions to execute # * AlarmDescription<~String>: The description for the alarm # * AlarmName<~String> The 
unique name for the alarm # * ComparisonOperator<~String>: The arithmetic operation to use for comparison # * Dimensions<~Array>: a list of dimensions to filter against, # Name : The name of the dimension # Value : The value to filter against # * EvaluationPeriods<~Integer>: The number of periods over which data is compared to the specified threshold # * InsufficientDataActions<~Array>: A list of actions to execute # * MetricName<~String>: The name for the alarm's associated metric # * Namespace<~String>: The namespace for the alarm's associated metric # * OKActions<~Array>: A list of actions to execute # * Period<~Integer>: The period in seconds over which the specified statistic is applied # * Statistic<~String>: The statistic to apply to the alarm's associated metric # * Threshold<~Double>: The value against which the specified statistic is compared # * Unit<~String>: The unit for the alarm's associated metric # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html # def put_metric_alarm(options) if dimensions = options.delete('Dimensions') options.merge!(AWS.indexed_param('Dimensions.member.%d.Name', dimensions.map {|dimension| dimension['Name']})) options.merge!(AWS.indexed_param('Dimensions.member.%d.Value', dimensions.map {|dimension| dimension['Value']})) end if alarm_actions = options.delete('AlarmActions') options.merge!(AWS.indexed_param('AlarmActions.member.%d', [*alarm_actions])) end if insufficient_data_actions = options.delete('InsufficientDataActions') options.merge!(AWS.indexed_param('InsufficientDataActions.member.%d', [*insufficient_data_actions])) end if ok_actions = options.delete('OKActions') options.merge!(AWS.indexed_param('OKActions.member.%d', [*ok_actions])) end request({ 'Action' => 'PutMetricAlarm', :parser => Fog::Parsers::AWS::CloudWatch::PutMetricAlarm.new }.merge(options)) end end class Mock require 'fog/aws/parsers/cloud_watch/put_metric_alarm' # See: Fog::AWS::CloudWatch::Real.put_metric_alarm() # def put_metric_alarm(options) supported_actions = [ "InsufficientDataActions", "OKActions", "AlarmActions" ] found_actions = options.keys.select {|key| supported_actions.include? key } if found_actions.empty? raise Fog::AWS::Compute::Error.new("The request must contain at least one of #{supported_actions.join(", ")}'") end requirements = [ "AlarmName", "ComparisonOperator", "EvaluationPeriods", "Namespace", "Period", "Statistic", "Threshold" ] requirements.each do |req| unless options.key?(req) raise Fog::AWS::Compute::Error.new("The request must contain a the parameter '%s'" % req) end end data[:metric_alarms][options['AlarmName']] = { 'AlarmARN' => "arn:aws:cloudwatch:eu-west-1:000000000000:metricAlarm:00000000-0000-0000-0000-000000000000:alarmName/#{options['AlarmName']}", 'ActionsEnabled' => false, 'AlarmActions' => [], 'AlarmConfigurationUpdatedTimestamp' => Time.now.utc.strftime("%Y-%m-%dT%H:%M:%SZ"), 'Dimensions' => [], 'OKActions' => [], }.merge!(options) response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/put_metric_data.rb000066400000000000000000000061001437344660100251260ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/put_metric_data' # Publishes one or more data points to CloudWatch. 
A new metric is created if necessary # ==== Options # * Namespace<~String>: the namespace of the metric data # * MetricData<~Array>: the datapoints to publish of the metric # * MetricName<~String>: the name of the metric # * Timestamp<~String>: the timestamp for the data point. If omitted defaults to the time at which the data is received by CloudWatch # * Unit<~String>: the unit # * Value<~Double> the value for the metric # * StatisticValues<~Hash>: # * Maximum<~Double>: the maximum value of the sample set # * Sum<~Double>: the sum of the values of the sample set # * SampleCount<~Double>: the number of samples used for the statistic set # * Minimum<~Double>: the minimum value of the sample set # * Dimensions<~Array>: the dimensions for the metric. From 0 to 10 may be included # * Name<~String> # * Value<~String> # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html # def put_metric_data(namespace, metric_data) options = {'Namespace' => namespace} #first index the dimensions for any of the datums that have dimensions metric_data.map! do |metric_datum| if dimensions = metric_datum.delete('Dimensions') metric_datum.merge!(AWS.indexed_param('Dimensions.member.%d.Name', dimensions.map {|dimension| dimension['Name']})) metric_datum.merge!(AWS.indexed_param('Dimensions.member.%d.Value', dimensions.map {|dimension| dimension['Value']})) end metric_datum end #then flatten out an hashes in the metric_data array metric_data.map! { |metric_datum| flatten_hash(metric_datum) } #then index the metric_data array options.merge!(AWS.indexed_param('MetricData.member.%d', [*metric_data])) #then finally flatten out an hashes in the overall options array options = flatten_hash(options) request({ 'Action' => 'PutMetricData', :parser => Fog::Parsers::AWS::CloudWatch::PutMetricData.new }.merge(options)) end private def flatten_hash(starting) finishing = {} starting.each do |top_level_key, top_level_value| if top_level_value.is_a?(Hash) nested_hash = top_level_value nested_hash.each do |nested_key, nested_value| finishing["#{top_level_key}.#{nested_key}"] = nested_value end else finishing[top_level_key] = top_level_value end end return finishing end end end end end fog-aws-3.18.0/lib/fog/aws/requests/cloud_watch/set_alarm_state.rb000066400000000000000000000020111437344660100251260ustar00rootroot00000000000000module Fog module AWS class CloudWatch class Real require 'fog/aws/parsers/cloud_watch/set_alarm_state' # Temporarily sets the state of an alarm # ==== Options # * AlarmName<~String>: The names of the alarm # * StateReason<~String>: The reason that this alarm is set to this specific state (in human-readable text format) # * StateReasonData<~String>: The reason that this alarm is set to this specific state (in machine-readable JSON format) # * StateValue<~String>: The value of the state # # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_SetAlarmState.html # def set_alarm_state(options) request({ 'Action' => 'SetAlarmState', :parser => Fog::Parsers::AWS::CloudWatch::SetAlarmState.new }.merge(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/000077500000000000000000000000001437344660100206205ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/compute/allocate_address.rb000066400000000000000000000035461437344660100244460ustar00rootroot00000000000000module Fog module AWS class 
Compute class Real require 'fog/aws/parsers/compute/allocate_address' # Acquire an elastic IP address. # # ==== Parameters # * domain<~String> - Type of EIP, either standard or vpc # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'publicIp'<~String> - The acquired address # * 'requestId'<~String> - Id of the request # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AllocateAddress.html] def allocate_address(domain='standard') domain = domain == 'vpc' ? 'vpc' : 'standard' request( 'Action' => 'AllocateAddress', 'Domain' => domain, :parser => Fog::Parsers::AWS::Compute::AllocateAddress.new ) end end class Mock def allocate_address(domain = 'standard') unless describe_addresses.body['addressesSet'].size < self.data[:limits][:addresses] raise Fog::AWS::Compute::Error, "AddressLimitExceeded => Too many addresses allocated" end response = Excon::Response.new response.status = 200 domain = domain == 'vpc' ? 'vpc' : 'standard' public_ip = Fog::AWS::Mock.ip_address data = { 'instanceId' => nil, 'publicIp' => public_ip, 'domain' => domain, :origin => domain } if domain == 'vpc' data['allocationId'] = "eipalloc-#{Fog::Mock.random_hex(8)}" end self.data[:addresses][public_ip] = data response.body = data.reject {|k, v| k == 'instanceId' }.merge('requestId' => Fog::AWS::Mock.request_id) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/assign_private_ip_addresses.rb000066400000000000000000000045251437344660100267160ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/assign_private_ip_addresses' # Assigns one or more secondary private IP addresses to the specified network interface. # # ==== Parameters # * NetworkInterfaceId<~String> - The ID of the network interface # * PrivateIpAddresses<~Array> - One or more IP addresses to be assigned as a secondary private IP address (conditional) # * SecondaryPrivateIpAddressCount<~String> - The number of secondary IP addresses to assign (conditional) # * AllowReassignment<~Boolean> - Whether to reassign an IP address # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of the request. # * 'return'<~Boolean> - success? 
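        #
        # ==== Examples
        # (illustrative sketch; `compute` is assumed to be a Fog::AWS::Compute connection created
        # elsewhere, and the interface ID is a placeholder)
        #   compute.assign_private_ip_addresses('eni-12345678',
        #     'SecondaryPrivateIpAddressCount' => 2)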
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AssignPrivateIpAddresses.html] def assign_private_ip_addresses(network_interface_id, options={}) if options['PrivateIpAddresses'] && options['SecondaryPrivateIpAddressCount'] raise Fog::AWS::Compute::Error.new("You may specify secondaryPrivateIpAddressCount or specific secondary private IP addresses, but not both.") end if private_ip_addresses = options.delete('PrivateIpAddresses') options.merge!(Fog::AWS.indexed_param('PrivateIpAddress.%d', [*private_ip_addresses])) end request({ 'Action' => 'AssignPrivateIpAddresses', 'NetworkInterfaceId' => network_interface_id, :parser => Fog::Parsers::AWS::Compute::AssignPrivateIpAddresses.new }.merge(options)) end end class Mock def assign_private_ip_addresses(network_interface_id, options={}) if options['PrivateIpAddresses'] && options['SecondaryPrivateIpAddressCount'] raise Fog::AWS::Compute::Error.new("You may specify secondaryPrivateIpAddressCount or specific secondary private IP addresses, but not both.") end response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/associate_address.rb000066400000000000000000000146011437344660100246270ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/associate_address' # Associate an elastic IP address with an instance # # ==== Parameters # * instance_id<~String> - Id of instance to associate address with (conditional) # * public_ip<~String> - Public ip to assign to instance (conditional) # * network_interface_id<~String> - Id of a nic to associate address with (required in a vpc instance with more than one nic) (conditional) # * allocation_id<~String> - Allocation Id to associate address with (vpc only) (conditional) # * private_ip_address<~String> - Private Ip Address to associate address with (vpc only) # * allow_reassociation<~Boolean> - Allows an elastic ip address to be reassigned (vpc only) (conditional) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # * 'associationId'<~String> - association Id for eip to node (vpc only) # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateAddress.html] def associate_address(*args) if args.first.kind_of? Hash params = args.first else params = { :instance_id => args[0], :public_ip => args[1], :network_interface_id => args[2], :allocation_id => args[3], :private_ip_address => args[4], :allow_reassociation => args[5], } end # Cannot specify an allocation ip and a public IP at the same time. If you have an allocation Id presumably you are in a VPC # so we will null out the public IP params[:public_ip] = params[:allocation_id].nil? ? params[:public_ip] : nil request( 'Action' => 'AssociateAddress', 'AllocationId' => params[:allocation_id], 'InstanceId' => params[:instance_id], 'NetworkInterfaceId' => params[:network_interface_id], 'PublicIp' => params[:public_ip], 'PrivateIpAddress' => params[:private_ip_address], 'AllowReassociation' => params[:allow_reassociation], :idempotent => true, :parser => Fog::Parsers::AWS::Compute::AssociateAddress.new ) end end class Mock def associate_address(*args) if args.first.kind_of? 
Hash params = args.first else params = { :instance_id => args[0], :public_ip => args[1], :network_interface_id => args[2], :allocation_id => args[3], :private_ip_address => args[4], :allow_reassociation => args[5], } end params[:public_ip] = params[:allocation_id].nil? ? params[:public_ip] : nil response = Excon::Response.new response.status = 200 instance = self.data[:instances][params[:instance_id]] # address = self.data[:addresses][params[:public_ip]] address = params[:public_ip].nil? ? nil : self.data[:addresses][params[:public_ip]] # This is a classic server, a VPC with a single network interface id or a VPC with multiple network interfaces one of which is specified if ((instance && address) || (instance && !params[:allocation_id].nil?) || (!params[:allocation_id].nil? && !network_interface_id.nil?)) if !params[:allocation_id].nil? allocation_ip = describe_addresses( 'allocation-id' => "#{params[:allocation_id]}").body['addressesSet'].first if !allocation_ip.nil? public_ip = allocation_ip['publicIp'] address = public_ip.nil? ? nil : self.data[:addresses][public_ip] if instance['vpcId'] && vpc = self.data[:vpcs].detect { |v| v['vpcId'] == instance['vpcId'] } if vpc['enableDnsHostnames'] instance['dnsName'] = Fog::AWS::Mock.dns_name_for(public_ip) end end end end if !address.nil? if current_instance = self.data[:instances][address['instanceId']] current_instance['ipAddress'] = current_instance['originalIpAddress'] end address['instanceId'] = params[:instance_id] end # detach other address (if any) if self.data[:addresses][instance['ipAddress']] self.data[:addresses][instance['ipAddress']]['instanceId'] = nil end if !params[:public_ip].nil? instance['ipAddress'] = params[:public_ip] instance['dnsName'] = Fog::AWS::Mock.dns_name_for(params[:public_ip]) end response.status = 200 if !params[:instance_id].nil? && !params[:public_ip].nil? response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } elsif !params[:allocation_id].nil? association_id = "eipassoc-#{Fog::Mock.random_hex(8)}" address['associationId'] = association_id response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true, 'associationId' => association_id, } end response elsif !instance raise Fog::AWS::Compute::NotFound.new("You must specify either an InstanceId or a NetworkInterfaceID") elsif !address raise Fog::AWS::Compute::Error.new("AuthFailure => The address '#{public_ip}' does not belong to you.") elsif params[:network_interface_id].nil? && params[:allocation_id].nil? raise Fog::AWS::Compute::NotFound.new("You must specify an AllocationId when specifying a NetworkInterfaceID") else (!instance.nil? && params[:network_interface_id].nil?) || (params[:instance_id].nil? && !params[:network_interface_id].nil?) raise Fog::AWS::Compute::Error.new("You must specify either an InstanceId or a NetworkInterfaceID") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/associate_dhcp_options.rb000066400000000000000000000034421437344660100256740ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # # # ==== Parameters # * dhcp_options_id<~String> - The ID of the DHCP options you want to associate with the VPC, or "default" if you want the VPC # to use no DHCP options. # * vpc_id<~String> - The ID of the VPC # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
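        #
        # ==== Examples
        # (illustrative sketch; `compute` is assumed to be a Fog::AWS::Compute connection created
        # elsewhere, and the IDs are placeholders)
        #   compute.associate_dhcp_options('dopt-12345678', 'vpc-12345678')
        #   # pass 'default' as the options ID to leave the VPC with no DHCP options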
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateDhcpOptions.html] def associate_dhcp_options(dhcp_options_id, vpc_id) request( 'Action' => 'AssociateDhcpOptions', 'DhcpOptionsId' => dhcp_options_id, 'VpcId' => vpc_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def associate_dhcp_options(dhcp_options_id, vpc_id) response = Excon::Response.new if dhcp_options_id && vpc_id response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else if !dhcp_options_id message << 'The request must contain the parameter dhcp_options_id' elsif !vpc_id message << 'The request must contain the parameter vpc_id' end raise Fog::AWS::Compute::Error.new(message) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/associate_route_table.rb000066400000000000000000000050261437344660100255100ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/associate_route_table' # Associates a subnet with a route table. # # ==== Parameters # * RouteTableId<~String> - The ID of the route table # * SubnetId<~String> - The ID of the subnet # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of the request # * 'associationId'<~String> - The route table association ID (needed to disassociate the route table) # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-AssociateRouteTable.html] def associate_route_table(routeTableId, subnetId) request( 'Action' => 'AssociateRouteTable', 'RouteTableId' => routeTableId, 'SubnetId' => subnetId, :parser => Fog::Parsers::AWS::Compute::AssociateRouteTable.new ) end end class Mock def associate_route_table(routeTableId, subnetId) routetable = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? routeTableId } subnet = self.data[:subnets].find { |subnet| subnet["subnetId"].eql? subnetId } if !routetable.nil? && !subnet.nil? response = Excon::Response.new response.status = 200 association = add_route_association(routeTableId, subnetId) routetable["associationSet"].push(association) response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'associationId' => association['routeTableAssociationId'] } response elsif routetable.nil? raise Fog::AWS::Compute::NotFound.new("The routeTable ID '#{routeTableId}' does not exist") else raise Fog::AWS::Compute::NotFound.new("The subnet ID '#{subnetId}' does not exist") end end private def add_route_association(routeTableId, subnetId, main=nil) response = { "routeTableAssociationId" => "rtbassoc-#{Fog::Mock.random_hex(8)}", "routeTableId" => routeTableId, "subnetId" => nil, "main" => false } if main response['main'] = true else response['subnetId'] = subnetId end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/attach_classic_link_vpc.rb000066400000000000000000000054171437344660100260060ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more of the VPC's security groups # # ==== Parameters # * vpc_id<~String> - The ID of a ClassicLink-enabled VPC. # * instance_id<~String> - The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. # * security_group_ids<~String> - The ID of one or more of the VPC's security groups. 
You cannot specify security groups from a different VPC. # * dry_run<~Boolean> - defaults to false # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Whether the request succeeded # # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AttachClassicLinkVpc.html] def attach_classic_link_vpc(instance_id, vpc_id, security_group_ids, dry_run=false) request({ 'Action' => 'AttachClassicLinkVpc', 'VpcId' => vpc_id, 'InstanceId'=> instance_id, 'DryRun' => dry_run, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge(Fog::AWS.indexed_param('SecurityGroupId', security_group_ids))) end end class Mock def attach_classic_link_vpc(instance_id, vpc_id, security_group_ids, dry_run=false) response = Excon::Response.new vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id } instance = self.data[:instances][instance_id] if vpc && instance if instance['instanceState']['name'] != 'running' || instance['vpcId'] raise Fog::AWS::Compute::Error.new("Client.InvalidInstanceID.NotLinkable => Instance #{instance_id} is unlinkable") end if instance['classicLinkVpcId'] raise Fog::AWS::Compute::Error.new("Client.InvalidInstanceID.InstanceAlreadyLinked => Instance #{instance_id} is already linked") end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } unless dry_run instance['classicLinkSecurityGroups'] = security_group_ids instance['classicLinkVpcId'] = vpc_id end response elsif !instance raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist.") elsif !vpc raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/attach_internet_gateway.rb000066400000000000000000000035051437344660100260450ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Attaches an Internet gateway to a VPC, enabling connectivity between the Internet and the VPC # # ==== Parameters # * internet_gateway_id<~String> - The ID of the Internet gateway to attach # * vpc_id<~String> - The ID of the VPC # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AttachInternetGateway.html] def attach_internet_gateway(internet_gateway_id, vpc_id) request( 'Action' => 'AttachInternetGateway', 'InternetGatewayId' => internet_gateway_id, 'VpcId' => vpc_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def attach_internet_gateway(internet_gateway_id, vpc_id) response = Excon::Response.new if internet_gateway_id && vpc_id response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else if !internet_gateway_id message << 'The request must contain the parameter internet_gateway_id' elsif !vpc_id message << 'The request must contain the parameter vpc_id' end raise Fog::AWS::Compute::Error.new(message) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/attach_network_interface.rb000066400000000000000000000051441437344660100262060ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/attach_network_interface' # Attach a network interface # # ==== Parameters # * networkInterfaceId<~String> - ID of the network interface to attach # * instanceId<~String> - ID of the instance that will be attached to the network interface # * deviceIndex<~Integer> - index of the device for the network interface attachment on the instance # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'attachmentId'<~String> - ID of the attachment # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/index.html?ApiReference-query-AttachNetworkInterface.html] def attach_network_interface(nic_id, instance_id, device_index) request( 'Action' => 'AttachNetworkInterface', 'NetworkInterfaceId' => nic_id, 'InstanceId' => instance_id, 'DeviceIndex' => device_index, :parser => Fog::Parsers::AWS::Compute::AttachNetworkInterface.new ) end end class Mock def attach_network_interface(nic_id, instance_id, device_index) response = Excon::Response.new if ! 
self.data[:instances].find{ |i,i_conf| i_conf['instanceId'] == instance_id } raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist") elsif self.data[:network_interfaces].find{ |ni,ni_conf| ni_conf['attachment']['instanceId'] == instance_id && ni_conf['attachment']['deviceIndex'] == device_index } raise Fog::AWS::Compute::Error.new("InvalidParameterValue => Instance '#{instance_id}' already has an interface attached at device index '#{device_index}'.") elsif self.data[:network_interfaces][nic_id] attachment = self.data[:network_interfaces][nic_id]['attachment'] attachment['attachmentId'] = Fog::AWS::Mock.request_id attachment['instanceId'] = instance_id attachment['deviceIndex'] = device_index response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'attachmentId' => attachment['attachmentId'] } else raise Fog::AWS::Compute::NotFound.new("The network interface '#{nic_id}' does not exist") end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/attach_volume.rb000066400000000000000000000063701437344660100240060ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/attach_volume' # Attach an Amazon EBS volume with a running instance, exposing as specified device # # ==== Parameters # * instance_id<~String> - Id of instance to associate volume with # * volume_id<~String> - Id of amazon EBS volume to associate with instance # * device<~String> - Specifies how the device is exposed to the instance (e.g. "/dev/sdh") # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'attachTime'<~Time> - Time of attachment was initiated at # * 'device'<~String> - Device as it is exposed to the instance # * 'instanceId'<~String> - Id of instance for volume # * 'requestId'<~String> - Id of request # * 'status'<~String> - Status of volume # * 'volumeId'<~String> - Reference to volume # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AttachVolume.html] def attach_volume(instance_id, volume_id, device) request( 'Action' => 'AttachVolume', 'VolumeId' => volume_id, 'InstanceId' => instance_id, 'Device' => device, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::AttachVolume.new ) end end class Mock def attach_volume(instance_id, volume_id, device) response = Excon::Response.new if instance_id && volume_id && device response.status = 200 instance = self.data[:instances][instance_id] volume = self.data[:volumes][volume_id] if instance && volume unless volume['status'] == 'available' raise Fog::AWS::Compute::Error.new("Client.VolumeInUse => Volume #{volume_id} is unavailable") end data = { 'attachTime' => Time.now, 'device' => device, 'instanceId' => instance_id, 'status' => 'attaching', 'volumeId' => volume_id } volume['attachmentSet'] = [data] volume['status'] = 'attaching' response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response elsif !instance raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist.") elsif !volume raise Fog::AWS::Compute::NotFound.new("The volume '#{volume_id}' does not exist.") end else message = 'MissingParameter => ' if !instance_id message << 'The request must contain the parameter instance_id' elsif !volume_id message << 'The request must contain the parameter volume_id' else message << 'The request must contain the parameter device' end raise Fog::AWS::Compute::Error.new(message) end end end end end 
end fog-aws-3.18.0/lib/fog/aws/requests/compute/authorize_security_group_egress.rb000066400000000000000000000115551437344660100277010ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Add permissions to a security group # # ==== Parameters # * group_name<~String> - Name of group, optional (can also be specifed as GroupName in options) # * options<~Hash>: # * 'GroupName'<~String> - Name of security group to modify # * 'GroupId'<~String> - Id of security group to modify # * 'SourceSecurityGroupName'<~String> - Name of security group to authorize # * 'SourceSecurityGroupOwnerId'<~String> - Name of owner to authorize # or # * 'CidrIp'<~String> - CIDR range # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # or # * 'IpPermissions'<~Array>: # * permission<~Hash>: # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'Groups'<~Array>: # * group<~Hash>: # * 'GroupName'<~String> - Name of security group to authorize # * 'UserId'<~String> - Name of owner to authorize # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'IpRanges'<~Array>: # * ip_range<~Hash>: # * 'CidrIp'<~String> - CIDR range # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AuthorizeSecurityGroupEgress.html] def authorize_security_group_egress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) if ip_permissions = options.delete('IpPermissions') options.merge!(indexed_ip_permissions_params(ip_permissions)) end request({ 'Action' => 'AuthorizeSecurityGroupEgress', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def authorize_security_group_egress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) group = if options.key?('GroupName') self.data[:security_groups].values.find { |v| v['groupName'] == options['GroupName'] } else self.data[:security_groups][options.fetch('GroupId')] end response = Excon::Response.new group || raise(Fog::AWS::Compute::NotFound.new("The security group '#{group_name}' does not exist")) verify_permission_options(options, group['vpcId'] != nil) normalized_permissions = normalize_permissions(options) normalized_permissions.each do |permission| if matching_group_permission = find_matching_permission_egress(group, permission) if permission['groups'].any? {|pg| matching_group_permission['groups'].include?(pg) } raise Fog::AWS::Compute::Error, "InvalidPermission.Duplicate => The permission '123' has already been authorized in the specified group" end if permission['ipRanges'].any? 
{|pr| matching_group_permission['ipRanges'].include?(pr) } raise Fog::AWS::Compute::Error, "InvalidPermission.Duplicate => The permission '123' has already been authorized in the specified group" end end end normalized_permissions.each do |permission| if matching_group_permission = find_matching_permission_egress(group, permission) matching_group_permission['groups'] += permission['groups'] matching_group_permission['ipRanges'] += permission['ipRanges'] else group['ipPermissionsEgress'] << permission end end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end def find_matching_permission_egress(group, permission) group['ipPermissionsEgress'].find do |group_permission| permission['ipProtocol'] == group_permission['ipProtocol'] && permission['fromPort'] == group_permission['fromPort'] && permission['toPort'] == group_permission['toPort'] end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/authorize_security_group_ingress.rb000066400000000000000000000266471437344660100300730ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Add permissions to a security group # # ==== Parameters # * group_name<~String> - Name of group, optional (can also be specifed as GroupName in options) # * options<~Hash>: # * 'GroupName'<~String> - Name of security group to modify # * 'GroupId'<~String> - Id of security group to modify # * 'SourceSecurityGroupName'<~String> - Name of security group to authorize # * 'SourceSecurityGroupOwnerId'<~String> - Name of owner to authorize # or # * 'CidrIp'<~String> - CIDR range # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # or # * 'IpPermissions'<~Array>: # * permission<~Hash>: # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'Groups'<~Array>: # * group<~Hash>: # * 'GroupName'<~String> - Name of security group to authorize # * 'UserId'<~String> - Name of owner to authorize # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'IpRanges'<~Array>: # * ip_range<~Hash>: # * 'CidrIp'<~String> - CIDR range # * 'Ipv6Ranges'<~Array>: # * ip_range<~Hash>: # * 'CidrIpv6'<~String> - CIDR range # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
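        #
        # ==== Examples
        # (illustrative sketch; `compute` is assumed to be a Fog::AWS::Compute connection created
        # elsewhere, and the group name is a placeholder)
        #   compute.authorize_security_group_ingress('default',
        #     'IpPermissions' => [{
        #       'IpProtocol' => 'tcp', 'FromPort' => 443, 'ToPort' => 443,
        #       'IpRanges'   => [{'CidrIp' => '0.0.0.0/0'}]
        #     }]
        #   )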
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-AuthorizeSecurityGroupIngress.html] def authorize_security_group_ingress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) if ip_permissions = options.delete('IpPermissions') options.merge!(indexed_ip_permissions_params(ip_permissions)) end request({ 'Action' => 'AuthorizeSecurityGroupIngress', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end private def indexed_ip_permissions_params(ip_permissions) params = {} ip_permissions.each_with_index do |permission, key_index| key_index += 1 params[format('IpPermissions.%d.IpProtocol', key_index)] = permission['IpProtocol'] params[format('IpPermissions.%d.FromPort', key_index)] = permission['FromPort'] params[format('IpPermissions.%d.ToPort', key_index)] = permission['ToPort'] (permission['Groups'] || []).each_with_index do |group, group_index| group_index += 1 params[format('IpPermissions.%d.Groups.%d.UserId', key_index, group_index)] = group['UserId'] params[format('IpPermissions.%d.Groups.%d.GroupName', key_index, group_index)] = group['GroupName'] params[format('IpPermissions.%d.Groups.%d.GroupId', key_index, group_index)] = group['GroupId'] end (permission['IpRanges'] || []).each_with_index do |ip_range, range_index| range_index += 1 params[format('IpPermissions.%d.IpRanges.%d.CidrIp', key_index, range_index)] = ip_range['CidrIp'] end (permission['Ipv6Ranges'] || []).each_with_index do |ip_range, range_index| range_index += 1 params[format('IpPermissions.%d.Ipv6Ranges.%d.CidrIpv6', key_index, range_index)] = ip_range['CidrIpv6'] end end params.reject {|k, v| v.nil? } end end class Mock def authorize_security_group_ingress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) group = if options.key?('GroupName') self.data[:security_groups].values.find { |v| v['groupName'] == options['GroupName'] } else self.data[:security_groups][options.fetch('GroupId')] end response = Excon::Response.new group || raise(Fog::AWS::Compute::NotFound.new("The security group '#{group_name}' does not exist")) verify_permission_options(options, group['vpcId'] != nil) normalized_permissions = normalize_permissions(options) normalized_permissions.each do |permission| if matching_group_permission = find_matching_permission(group, permission) if permission['groups'].any? {|pg| matching_group_permission['groups'].include?(pg) } raise Fog::AWS::Compute::Error, "InvalidPermission.Duplicate => The permission '123' has already been authorized in the specified group" end if permission['ipRanges'].any? 
{|pr| matching_group_permission['ipRanges'].include?(pr) } raise Fog::AWS::Compute::Error, "InvalidPermission.Duplicate => The permission '123' has already been authorized in the specified group" end end end normalized_permissions.each do |permission| if matching_group_permission = find_matching_permission(group, permission) matching_group_permission['groups'] += permission['groups'] matching_group_permission['ipRanges'] += permission['ipRanges'] else group['ipPermissions'] << permission end end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end private def verify_permission_options(options, is_vpc) if options.size <= 1 raise Fog::AWS::Compute::Error.new("InvalidRequest => The request received was invalid.") end if !is_vpc && options['IpProtocol'] && !['tcp', 'udp', 'icmp'].include?(options['IpProtocol']) raise Fog::AWS::Compute::Error.new("InvalidPermission.Malformed => Unsupported IP protocol \"#{options['IpProtocol']}\" - supported: [tcp, udp, icmp]") end if !is_vpc && (options['IpProtocol'] && (!options['FromPort'] || !options['ToPort'])) raise Fog::AWS::Compute::Error.new("InvalidPermission.Malformed => TCP/UDP port (-1) out of range") end if options.key?('IpPermissions') if !options['IpPermissions'].is_a?(Array) || options['IpPermissions'].empty? raise Fog::AWS::Compute::Error.new("InvalidRequest => The request received was invalid.") end options['IpPermissions'].each {|p| verify_permission_options(p, is_vpc) } end end def normalize_permissions(options) normalized_permissions = [] if options['SourceSecurityGroupName'] group_name = if options['SourceSecurityGroupName'] =~ /default_elb/ "default" else options['SourceSecurityGroupName'] end source_group_id, _ = self.data[:security_groups].find { |_,v| v['groupName'] == group_name } ['tcp', 'udp'].each do |protocol| normalized_permissions << { 'ipProtocol' => protocol, 'fromPort' => 1, 'toPort' => 65535, 'groups' => [{ 'groupName' => group_name, 'userId' => options['SourceSecurityGroupOwnerId'] || self.data[:owner_id], 'groupId' => source_group_id }], 'ipRanges' => [] } end normalized_permissions << { 'ipProtocol' => 'icmp', 'fromPort' => -1, 'toPort' => -1, 'groups' => [{ 'groupName' => group_name, 'userId' => options['SourceSecurityGroupOwnerId'] || self.data[:owner_id], 'groupId' => source_group_id }], 'ipRanges' => [] } elsif options['CidrIp'] normalized_permissions << { 'ipProtocol' => options['IpProtocol'], 'fromPort' => Integer(options['FromPort']), 'toPort' => Integer(options['ToPort']), 'groups' => [], 'ipRanges' => [{'cidrIp' => options['CidrIp']}] } elsif options['CidrIpv6'] normalized_permissions << { 'ipProtocol' => options['IpProtocol'], 'fromPort' => Integer(options['FromPort']), 'toPort' => Integer(options['ToPort']), 'groups' => [], 'ipv6Ranges' => [{'cidrIpv6' => options['CidrIpv6']}] } elsif options['IpPermissions'] options['IpPermissions'].each do |permission| groups = (permission['Groups'] || []).map do |authorized_group| security_group = if group_name = authorized_group['GroupName'] self.data[:security_groups].values.find { |sg| sg['groupName'] == group_name } elsif group_id = authorized_group['GroupId'] self.data[:security_groups][group_id] end security_group || raise(Fog::AWS::Compute::NotFound.new("The security group '#{group_name || group_id}' does not exist")) { 'groupName' => authorized_group['GroupName'] || security_group['groupName'], 'userId' => authorized_group['UserId'] || self.data[:owner_id], 'groupId' => authorized_group["GroupId"] || 
security_group['groupId'] } end if ['tcp', 'udp', 'icmp'].include?(permission['IpProtocol']) normalized_permissions << { 'ipProtocol' => permission['IpProtocol'], 'fromPort' => Integer(permission['FromPort']), 'toPort' => Integer(permission['ToPort']), 'groups' => groups, 'ipRanges' => (permission['IpRanges'] || []).map {|r| { 'cidrIp' => r['CidrIp'] } } } else normalized_permissions << { 'ipProtocol' => permission['IpProtocol'], 'groups' => groups, 'ipRanges' => (permission['IpRanges'] || []).map {|r| { 'cidrIp' => r['CidrIp'] } } } end end end normalized_permissions end def find_matching_permission(group, permission) group['ipPermissions'].find {|group_permission| permission['ipProtocol'] == group_permission['ipProtocol'] && permission['fromPort'] == group_permission['fromPort'] && permission['toPort'] == group_permission['toPort'] } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/cancel_spot_instance_requests.rb000066400000000000000000000036231437344660100272620ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/cancel_spot_instance_requests' # Cancel the specified spot instance requests # # ==== Parameters # * spot_instance_request_id<~Array> - Ids of the spot instance requests to cancel # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - id of request # * 'spotInstanceRequestSet'<~Array>: # * 'spotInstanceRequestId'<~String> - id of cancelled spot instance # * 'state'<~String> - state of cancelled spot instance # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CancelSpotInstanceRequests.html] def cancel_spot_instance_requests(spot_instance_request_id) params = Fog::AWS.indexed_param('SpotInstanceRequestId', spot_instance_request_id) request({ 'Action' => 'CancelSpotInstanceRequests', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::CancelSpotInstanceRequests.new }.merge!(params)) end end class Mock def cancel_spot_instance_requests(spot_instance_request_id) response = Excon::Response.new spot_request = self.data[:spot_requests][spot_instance_request_id] unless spot_request raise Fog::AWS::Compute::NotFound.new("The spot instance request ID '#{spot_instance_request_id}' does not exist") end spot_request['fault']['code'] = 'request-cancelled' spot_request['state'] = 'cancelled' response.body = {'spotInstanceRequestSet' => [{'spotInstanceRequestId' => spot_instance_request_id, 'state' => 'cancelled'}], 'requestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/copy_image.rb000066400000000000000000000042001437344660100232570ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/copy_image' # Copy an image to a different region # # ==== Parameters # * source_image_id<~String> - The ID of the AMI to copy # * source_region<~String> - The name of the AWS region that contains the AMI to be copied # * name<~String> - The name of the new AMI in the destination region # * description<~String> - The description to set on the new AMI in the destination region # * client_token<~String> - Unique, case-sensitive identifier you provide to ensure idempotency of the request # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - id of request # * 'imageId'<~String> - id of image # # {Amazon API
Reference}[http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-CopyImage.html] def copy_image(source_image_id, source_region, name = nil, description = nil, client_token = nil) request( 'Action' => 'CopyImage', 'SourceImageId' => source_image_id, 'SourceRegion' => source_region, 'Name' => name, 'Description' => description, 'ClientToken' => client_token, :parser => Fog::Parsers::AWS::Compute::CopyImage.new ) end end class Mock # # Usage # # Fog::AWS[:compute].copy_image("ami-1aad5273", 'us-east-1') # def copy_image(source_image_id, source_region, name = nil, description = nil, client_token = nil) response = Excon::Response.new response.status = 200 image_id = Fog::AWS::Mock.image_id data = { 'imageId' => image_id, } self.data[:images][image_id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/copy_snapshot.rb000066400000000000000000000052331437344660100240410ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/copy_snapshot' # Copy a snapshot to a different region # # ==== Parameters # * source_snapshot_id<~String> - Id of snapshot # * source_region<~String> - Region to move it from # * options<~Hash>: # * 'Description'<~String> - A description for the EBS snapshot # * 'Encrypted'<~Boolean> - Specifies whether the destination snapshot should be encrypted # * 'KmsKeyId'<~String> - The full ARN of the AWS Key Management Service (AWS KMS) CMK # to use when creating the snapshot copy. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - id of request # * 'snapshotId'<~String> - id of snapshot # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CopySnapshot.html] def copy_snapshot(source_snapshot_id, source_region, options = {}) # For backward compatibility. 
In previous versions third param was a description if options.is_a?(String) Fog::Logger.warning("copy_snapshot with description as a string in third param is deprecated, use hash instead: copy_snapshot('source-id', 'source-region', { 'Description' => 'some description' })") options = { 'Description' => options } end params = { 'Action' => 'CopySnapshot', 'SourceSnapshotId' => source_snapshot_id, 'SourceRegion' => source_region, 'Description' => options['Description'], :parser => Fog::Parsers::AWS::Compute::CopySnapshot.new } params['Encrypted'] = true if options['Encrypted'] params['KmsKeyId'] = options['KmsKeyId'] if options['Encrypted'] && options['KmsKeyId'] request(params) end end class Mock # # Usage # # Fog::AWS[:compute].copy_snapshot("snap-1db0a957", 'us-east-1') # def copy_snapshot(source_snapshot_id, source_region, options = {}) response = Excon::Response.new response.status = 200 snapshot_id = Fog::AWS::Mock.snapshot_id data = { 'snapshotId' => snapshot_id, } self.data[:snapshots][snapshot_id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_dhcp_options.rb000066400000000000000000000052771437344660100251740ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_dhcp_options' # Creates a set of DHCP options for your VPC # # ==== Parameters # * DhcpConfigurationOptions<~Hash> - hash of key value dhcp options to assign # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html] def create_dhcp_options(dhcp_configurations = {}) params = {} params.merge!(indexed_multidimensional_params(dhcp_configurations)) request({ 'Action' => 'CreateDhcpOptions', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::CreateDhcpOptions.new }.merge!(params)) end private def indexed_multidimensional_params(multi_params) params = {} multi_params.keys.each_with_index do |key, key_index| key_index += 1 params[format('DhcpConfiguration.%d.Key', key_index)] = key [*multi_params[key]].each_with_index do |value, value_index| value_index += 1 params[format('DhcpConfiguration.%d.Value.%d', key_index, value_index)] = value end end params end end class Mock def create_dhcp_options(dhcp_configurations = {}) params = {} params.merge!(indexed_multidimensional_params(dhcp_configurations)) Excon::Response.new.tap do |response| response.status = 200 self.data[:dhcp_options].push({ 'dhcpOptionsId' => Fog::AWS::Mock.dhcp_options_id, 'dhcpConfigurationSet' => {}, 'tagSet' => {} }) response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'dhcpOptionsSet' => self.data[:dhcp_options] } end end private def indexed_multidimensional_params(multi_params) params = {} multi_params.keys.each_with_index do |key, key_index| key_index += 1 params[format('DhcpConfiguration.%d.Key', key_index)] = key [*multi_params[key]].each_with_index do |value, value_index| value_index += 1 params[format('DhcpConfiguration.%d.Value.%d', key_index, value_index)] = value end end params end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_image.rb000066400000000000000000000116321437344660100235550ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_image' # Create a bootable EBS volume AMI # # ==== Parameters # * 
instance_id<~String> - Instance used to create image. # * name<~Name> - Name to give image. # * description<~Name> - Description of image. # * no_reboot<~Boolean> - Optional, whether or not to reboot the image when making the snapshot # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'imageId'<~String> - The ID of the created AMI. # * 'requestId'<~String> - Id of request. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateImage.html] def create_image(instance_id, name, description, no_reboot = false, options={}) params = {} block_device_mappings = options[:block_device_mappings] || [] params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.DeviceName', block_device_mappings.map{|mapping| mapping['DeviceName']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.NoDevice', block_device_mappings.map{|mapping| mapping['NoDevice']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.VirtualName', block_device_mappings.map{|mapping| mapping['VirtualName']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.SnapshotId', block_device_mappings.map{|mapping| mapping['Ebs.SnapshotId']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.DeleteOnTermination', block_device_mappings.map{|mapping| mapping['Ebs.DeleteOnTermination']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.VolumeType', block_device_mappings.map{|mapping| mapping['Ebs.VolumeType']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.Encrypted', block_device_mappings.map{|mapping| mapping['Ebs.Encrypted']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.Iops', block_device_mappings.map{|mapping| mapping['Ebs.Iops']}) params.reject!{|k,v| v.nil?} request({ 'Action' => 'CreateImage', 'InstanceId' => instance_id, 'Name' => name, 'Description' => description, 'NoReboot' => no_reboot.to_s, :parser => Fog::Parsers::AWS::Compute::CreateImage.new }.merge!(params)) end end class Mock # Usage # # Fog::AWS[:compute].create_image("i-ac65ee8c", "test", "something") # def create_image(instance_id, name, description, no_reboot = false, options = {}) params = {} block_device_mappings = options[:block_device_mappings] || [] params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.DeviceName', block_device_mappings.map{|mapping| mapping['DeviceName']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.NoDevice', block_device_mappings.map{|mapping| mapping['NoDevice']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.VirtualName', block_device_mappings.map{|mapping| mapping['VirtualName']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.SnapshotId', block_device_mappings.map{|mapping| mapping['Ebs.SnapshotId']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.DeleteOnTermination', block_device_mappings.map{|mapping| mapping['Ebs.DeleteOnTermination']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.VolumeType', block_device_mappings.map{|mapping| mapping['Ebs.VolumeType']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.Encrypted', block_device_mappings.map{|mapping| mapping['Ebs.Encrypted']}) params.merge!Fog::AWS.indexed_param('BlockDeviceMapping.%d.Ebs.Iops', block_device_mappings.map{|mapping| mapping['Ebs.Iops']}) params.reject!{|k,v| v.nil?} reserved_ebs_root_device = '/dev/sda1' block_devices = options.delete(:block_device_mappings) || [] register_image_response = register_image(name, 
description, reserved_ebs_root_device, block_devices, options) response = Excon::Response.new if instance_id && !name.empty? response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'imageId' => register_image_response.body['imageId'] } else response.status = 400 response.body = { 'Code' => 'InvalidParameterValue' } if name.empty? response.body['Message'] = "Invalid value '' for name. Must be specified." end end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_internet_gateway.rb000066400000000000000000000033611437344660100260440ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_internet_gateway' # Creates an InternetGateway # # ==== Parameters # (none) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'internetGateway'<~Array>: # * 'attachmentSet'<~Array>: A list of VPCs attached to the Internet gateway # * 'vpcId'<~String> - The ID of the VPC the Internet gateway is attached to. # * 'state'<~String> - The current state of the attachment. # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-ItemType-InternetGatewayAttachmentType.html] def create_internet_gateway() request({ 'Action' => 'CreateInternetGateway', :parser => Fog::Parsers::AWS::Compute::CreateInternetGateway.new }) end end class Mock def create_internet_gateway() gateway_id = Fog::AWS::Mock.internet_gateway_id self.data[:internet_gateways][gateway_id] = { 'internetGatewayId' => gateway_id, 'attachmentSet' => {}, 'tagSet' => {} } Excon::Response.new( :status => 200, :body => { 'requestId' => Fog::AWS::Mock.request_id, 'internetGatewaySet' => [self.data[:internet_gateways][gateway_id]] } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_key_pair.rb000066400000000000000000000033301437344660100242720ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_key_pair' # Create a new key pair # # ==== Parameters # * key_name<~String> - Unique name for key pair. 
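# ==== Example (illustrative sketch; the key pair name below is a placeholder value)
#
#   Fog::AWS[:compute].create_key_pair('my-keypair')
#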
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'keyFingerprint'<~String> - SHA-1 digest of DER encoded private key # * 'keyMaterial'<~String> - Unencrypted encoded PEM private key # * 'keyName'<~String> - Name of key # * 'requestId'<~String> - Id of request # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateKeyPair.html] def create_key_pair(key_name) request( 'Action' => 'CreateKeyPair', 'KeyName' => key_name, :parser => Fog::Parsers::AWS::Compute::CreateKeyPair.new ) end end class Mock def create_key_pair(key_name) response = Excon::Response.new unless self.data[:key_pairs][key_name] response.status = 200 data = { 'keyFingerprint' => Fog::AWS::Mock.key_fingerprint, 'keyMaterial' => Fog::AWS::Mock.key_material, 'keyName' => key_name } self.data[:key_pairs][key_name] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response else raise Fog::AWS::Compute::Error.new("InvalidKeyPair.Duplicate => The keypair '#{key_name}' already exists.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_network_acl.rb000066400000000000000000000113211437344660100247760ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_network_acl' # Creates a network ACL # # ==== Parameters # * vpcId<~String> - The ID of the VPC to create this network ACL under # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'networkAcl'<~Array>: - The network ACL # * 'networkAclId'<~String> - The ID of the network ACL # * 'vpcId'<~String> - The ID of the VPC for the network ACL # * 'default'<~Boolean> - Indicates whether this is the default network ACL for the VPC # * 'entrySet'<~Array>: - A list of entries (rules) in the network ACL # * 'ruleNumber'<~Integer> - The rule number for the entry. ACL entries are processed in ascending order by rule number # * 'protocol'<~Integer> - The protocol. A value of -1 means all protocols # * 'ruleAction'<~String> - Indicates whether to allow or deny the traffic that matches the rule # * 'egress'<~Boolean> - Indicates whether the rule is an egress rule (applied to traffic leaving the subnet) # * 'cidrBlock'<~String> - The network range to allow or deny, in CIDR notation # * 'icmpTypeCode'<~Hash> - ICMP protocol: The ICMP type and code # * 'code'<~Integer> - The ICMP code. A value of -1 means all codes for the specified ICMP type # * 'type'<~Integer> - The ICMP type. A value of -1 means all types # * 'portRange'<~Hash> - TCP or UDP protocols: The range of ports the rule applies to # * 'from'<~Integer> - The first port in the range # * 'to'<~Integer> - The last port in the range # * 'associationSet'<~Array>: - A list of associations between the network ACL and subnets # * 'networkAclAssociationId'<~String> - The ID of the association # * 'networkAclId'<~String> - The ID of the network ACL # * 'subnetId'<~String> - The ID of the subnet # * 'tagSet'<~Array>: - Tags assigned to the resource. 
# * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateNetworkAcl.html] def create_network_acl(vpcId, options = {}) request({ 'Action' => 'CreateNetworkAcl', 'VpcId' => vpcId, :parser => Fog::Parsers::AWS::Compute::CreateNetworkAcl.new }.merge!(options)) end end class Mock def create_network_acl(vpcId, options = {}) response = Excon::Response.new if vpcId id = Fog::AWS::Mock.network_acl_id unless self.data[:vpcs].find { |s| s['vpcId'] == vpcId } raise Fog::AWS::Compute::Error.new("Unknown VPC '#{vpcId}' specified") end data = { 'networkAclId' => id, 'vpcId' => vpcId, 'default' => false, 'entrySet' => [ { 'icmpTypeCode' => {}, 'portRange' => {}, 'ruleNumber' => 32767, 'protocol' => -1, 'ruleAction' => "deny", 'egress' => true, 'cidrBlock' => "0.0.0.0/0", }, { 'icmpTypeCode' => {}, 'portRange' => {}, 'ruleNumber' => 32767, 'protocol' => -1, 'ruleAction' => "deny", 'egress' => false, 'cidrBlock' => "0.0.0.0/0", }, ], 'associationSet' => [], 'tagSet' => {} } self.data[:network_acls][id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'networkAcl' => data } else response.status = 400 response.body = { 'Code' => 'InvalidParameterValue', 'Message' => "Invalid value '' for subnetId" } end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_network_acl_entry.rb000066400000000000000000000072431437344660100262270ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Creates a Network ACL entry # # ==== Parameters # * network_acl_id<~String> - The ID of the ACL to add this entry to # * rule_number<~Integer> - The rule number for the entry, between 100 and 32766 # * protocol<~Integer> - The IP protocol to which the rule applies. You can use -1 to mean all protocols. # * rule_action<~String> - Allows or denies traffic that matches the rule. (either allow or deny) # * cidr_block<~String> - The CIDR range to allow or deny # * egress<~Boolean> - Indicates whether this rule applies to egress traffic from the subnet (true) or ingress traffic to the subnet (false). # * options<~Hash>: # * 'Icmp.Code' - ICMP code, required if protocol is 1 # * 'Icmp.Type' - ICMP type, required if protocol is 1 # * 'PortRange.From' - The first port in the range, required if protocol is 6 (TCP) or 17 (UDP) # * 'PortRange.To' - The last port in the range, required if protocol is 6 (TCP) or 17 (UDP) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
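# ==== Example (illustrative sketch; the ACL id, rule number, ports and CIDR below are placeholder values)
# Allow inbound HTTP from anywhere as rule number 100 on a hypothetical ACL:
#
#   Fog::AWS[:compute].create_network_acl_entry('acl-abcd1234', 100, 6, 'allow', '0.0.0.0/0', false,
#     'PortRange.From' => 80, 'PortRange.To' => 80)
#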
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateNetworkAclEntry.html] def create_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options = {}) request({ 'Action' => 'CreateNetworkAclEntry', 'NetworkAclId' => network_acl_id, 'RuleNumber' => rule_number, 'Protocol' => protocol, 'RuleAction' => rule_action, 'Egress' => egress, 'CidrBlock' => cidr_block, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def create_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options = {}) response = Excon::Response.new if self.data[:network_acls][network_acl_id] if self.data[:network_acls][network_acl_id]['entrySet'].find { |r| r['ruleNumber'] == rule_number && r['egress'] == egress } raise Fog::AWS::Compute::Error.new("Already a rule with that number") end data = { 'ruleNumber' => rule_number, 'protocol' => protocol, 'ruleAction' => rule_action, 'egress' => egress, 'cidrBlock' => cidr_block, 'icmpTypeCode' => {}, 'portRange' => {} } data['icmpTypeCode']['code'] = options['Icmp.Code'] if options['Icmp.Code'] data['icmpTypeCode']['type'] = options['Icmp.Type'] if options['Icmp.Type'] data['portRange']['from'] = options['PortRange.From'] if options['PortRange.From'] data['portRange']['to'] = options['PortRange.To'] if options['PortRange.To'] self.data[:network_acls][network_acl_id]['entrySet'] << data response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network ACL '#{network_acl_id}' does not exist") end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_network_interface.rb000066400000000000000000000147331437344660100262070ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'ipaddr' require 'fog/aws/parsers/compute/create_network_interface' # Creates a network interface # # ==== Parameters # * subnetId<~String> - The ID of the subnet to associate with the network interface # * options<~Hash>: # * PrivateIpAddress<~String> - The private IP address of the network interface # * Description<~String> - The description of the network interface # * GroupSet<~Array> - The security group IDs for use by the network interface # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'networkInterface'<~Hash> - The created network interface # * 'networkInterfaceId'<~String> - The ID of the network interface # * 'subnetId'<~String> - The ID of the subnet # * 'vpcId'<~String> - The ID of the VPC # * 'availabilityZone'<~String> - The availability zone # * 'description'<~String> - The description # * 'ownerId'<~String> - The ID of the person who created the interface # * 'requesterId'<~String> - The ID of the entity requesting this interface # * 'requesterManaged'<~String> - # * 'status'<~String> - "available" or "in-use" # * 'macAddress'<~String> - # * 'privateIpAddress'<~String> - IP address of the interface within the subnet # * 'privateDnsName'<~String> - The private DNS name # * 'sourceDestCheck'<~Boolean> - Flag indicating whether traffic to or from the instance is validated # * 'groupSet'<~Hash> - Associated security groups # * 'key'<~String> - ID of associated group # * 'value'<~String> - Name of associated group # * 'attachment'<~Hash>: - Describes the way this nic is attached # * 'attachmentID'<~String> # * 'instanceID'<~String> # * 
'association'<~Hash>: - Describes an eventual instance association # * 'attachmentID'<~String> - ID of the network interface attachment # * 'instanceID'<~String> - ID of the instance attached to the network interface # * 'publicIp'<~String> - Address of the Elastic IP address bound to the network interface # * 'ipOwnerId'<~String> - ID of the Elastic IP address owner # * 'tagSet'<~Array>: - Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-CreateNetworkInterface.html] def create_network_interface(subnetId, options = {}) if security_groups = options.delete('GroupSet') options.merge!(Fog::AWS.indexed_param('SecurityGroupId', [*security_groups])) end request({ 'Action' => 'CreateNetworkInterface', 'SubnetId' => subnetId, :parser => Fog::Parsers::AWS::Compute::CreateNetworkInterface.new }.merge!(options)) end end class Mock def create_network_interface(subnetId, options = {}) response = Excon::Response.new if subnetId subnet = self.data[:subnets].find{ |s| s['subnetId'] == subnetId } if subnet.nil? raise Fog::AWS::Compute::Error.new("Unknown subnet '#{subnetId}' specified") else id = Fog::AWS::Mock.network_interface_id cidr_block = IPAddr.new(subnet['cidrBlock']) groups = {} if options['GroupSet'] options['GroupSet'].each do |group_id| group_obj = self.data[:security_groups][group_id] if group_obj.nil? raise Fog::AWS::Compute::Error.new("Unknown security group '#{group_id}' specified") end groups[group_id] = group_obj['groupName'] end end if options['PrivateIpAddress'].nil? range = cidr_block.to_range # Here we try to act like a DHCP server and pick the first # available IP (not including the first in the cidr block, # which is typically reserved for the gateway). range = range.drop(2)[0..-2] if cidr_block.ipv4? range.each do |p_ip| unless self.data[:network_interfaces].map{ |ni, ni_conf| ni_conf['privateIpAddress'] }.include?p_ip.to_s options['PrivateIpAddress'] = p_ip.to_s break end end elsif self.data[:network_interfaces].map{ |ni,ni_conf| ni_conf['privateIpAddress'] }.include?options['PrivateIpAddress'] raise Fog::AWS::Compute::Error.new('InUse => The specified address is already in use.') end data = { 'networkInterfaceId' => id, 'subnetId' => subnetId, 'vpcId' => 'mock-vpc-id', 'availabilityZone' => 'mock-zone', 'description' => options['Description'], 'ownerId' => '', 'requesterManaged' => 'false', 'status' => 'available', 'macAddress' => '00:11:22:33:44:55', 'privateIpAddress' => options['PrivateIpAddress'], 'sourceDestCheck' => true, 'groupSet' => groups, 'attachment' => {}, 'association' => {}, 'tagSet' => {} } self.data[:network_interfaces][id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'networkInterface' => data } response end else response.status = 400 response.body = { 'Code' => 'InvalidParameterValue', 'Message' => "Invalid value '' for subnetId" } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_placement_group.rb000066400000000000000000000017761437344660100256670ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Create a new placement group # # ==== Parameters # * group_name<~String> - Name of the placement group. # * strategy<~String> - Placement group strategy. 
Valid options in ['cluster'] # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreatePlacementGroup.html] def create_placement_group(name, strategy) request( 'Action' => 'CreatePlacementGroup', 'GroupName' => name, 'Strategy' => strategy, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_route.rb000066400000000000000000000111371437344660100236310ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Creates a route in a route table within a VPC. # # ==== Parameters # * RouteTableId<~String> - The ID of the route table for the route. # * DestinationCidrBlock<~String> - The CIDR address block used for the destination match. Routing decisions are based on the most specific match. # * GatewayId<~String> - The ID of an Internet gateway attached to your VPC. # * InstanceId<~String> - The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached. # * NetworkInterfaceId<~String> - The ID of a network interface. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRoute.html] def create_route(route_table_id, destination_cidr_block, internet_gateway_id=nil, instance_id=nil, network_interface_id=nil) request_vars = { 'Action' => 'CreateRoute', 'RouteTableId' => route_table_id, 'DestinationCidrBlock' => destination_cidr_block, :parser => Fog::Parsers::AWS::Compute::Basic.new } if internet_gateway_id request_vars['GatewayId'] = internet_gateway_id elsif instance_id request_vars['InstanceId'] = instance_id elsif network_interface_id request_vars['NetworkInterfaceId'] = network_interface_id end request(request_vars) end end class Mock def create_route(route_table_id, destination_cidr_block, internet_gateway_id=nil, instance_id=nil, network_interface_id=nil) instance_owner_id = nil route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? route_table_id } if !route_table.nil? && destination_cidr_block if !internet_gateway_id.nil? || !instance_id.nil? || !network_interface_id.nil? if !internet_gateway_id.nil? && self.internet_gateways.all('internet-gateway-id'=>internet_gateway_id).first.nil? raise Fog::AWS::Compute::NotFound.new("The gateway ID '#{internet_gateway_id}' does not exist") elsif !instance_id.nil? && self.servers.all('instance-id'=>instance_id).first.nil? raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist") elsif !network_interface_id.nil? && self.network_interfaces.all('networkInterfaceId'=>network_interface_id).first.nil? raise Fog::AWS::Compute::NotFound.new("The networkInterface ID '#{network_interface_id}' does not exist") elsif !route_table['routeSet'].find { |route| route['destinationCidrBlock'].eql? destination_cidr_block }.nil? raise Fog::AWS::Compute::Error, "RouteAlreadyExists => The route identified by #{destination_cidr_block} already exists." 
else response = Excon::Response.new route_table['routeSet'].push({ "destinationCidrBlock" => destination_cidr_block, "gatewayId" => internet_gateway_id, "instanceId"=>instance_id, "instanceOwnerId"=>instance_owner_id, "networkInterfaceId"=>network_interface_id, "state" => "pending", "origin" => "CreateRoute" }) response.status = 200 response.body = { 'requestId'=> Fog::AWS::Mock.request_id, 'return' => true } response end else message = 'MissingParameter => ' message << 'The request must contain either a gateway id, a network interface id, or an instance id' raise Fog::AWS::Compute::Error.new(message) end elsif route_table.nil? raise Fog::AWS::Compute::NotFound.new("The routeTable ID '#{route_table_id}' does not exist") elsif destination_cidr_block.empty? raise Fog::AWS::Compute::InvalidParameterValue.new("Value () for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_route_table.rb000066400000000000000000000046431437344660100250040ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_route_table' # Creates a route table for the specified VPC. # # ==== Parameters # * VpcId<~String> - The ID of the VPC. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'routeTable'<~Array> - Information about the newly created route table # * 'routeTableId'<~String> # * 'vpcId'<~String> # * 'routeSet'<~Array> # * 'item'<~Array> # * 'destinationCidrBlock'<~String> - The CIDR address block used for the destination match. # * 'gatewayId'<~String> - The ID of an Internet gateway attached to your VPC. # * 'state'<~String> - The state of the route. ['blackhole', 'available'] # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateRouteTable.html] def create_route_table(vpc_id) request({ 'Action' => 'CreateRouteTable', 'VpcId' => vpc_id, :parser => Fog::Parsers::AWS::Compute::CreateRouteTable.new }) end end class Mock def create_route_table(vpc_id) response = Excon::Response.new vpc = self.data[:vpcs].find { |vpc| vpc["vpcId"].eql? vpc_id } unless vpc.nil? response.status = 200 route_table = { 'routeTableId' => Fog::AWS::Mock.route_table_id, 'vpcId' => vpc["vpcId"], 'routeSet' => [{ "destinationCidrBlock" => vpc["cidrBlock"], "gatewayId" => "local", "instanceId"=>nil, "instanceOwnerId"=>nil, "networkInterfaceId"=>nil, "state" => "pending", "origin" => "CreateRouteTable" }], 'associationSet' => [], 'tagSet' => {} } self.data[:route_tables].push(route_table) response.body = { 'requestId'=> Fog::AWS::Mock.request_id, 'routeTable' => [route_table] } response else raise Fog::AWS::Compute::NotFound.new("The vpc ID '#{vpc_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_security_group.rb000066400000000000000000000043031437344660100255530ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_security_group' # Create a new security group # # ==== Parameters # * group_name<~String> - Name of the security group. # * group_description<~String> - Description of group. # * vpc_id<~String> - ID of the VPC # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
# * 'groupId'<~String> - Id of created group # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateSecurityGroup.html] def create_security_group(name, description, vpc_id=nil) request( 'Action' => 'CreateSecurityGroup', 'GroupName' => name, 'GroupDescription' => description, 'VpcId' => vpc_id, :parser => Fog::Parsers::AWS::Compute::CreateSecurityGroup.new ) end end class Mock def create_security_group(name, description, vpc_id=nil) response = Excon::Response.new vpc_id ||= Fog::AWS::Mock.default_vpc_for(region) group_id = Fog::AWS::Mock.security_group_id if self.data[:security_groups].find { |_,v| v['groupName'] == name } raise Fog::AWS::Compute::Error, "InvalidGroup.Duplicate => The security group '#{name}' already exists" end self.data[:security_groups][group_id] = { 'groupDescription' => description, 'groupName' => name, 'groupId' => group_id, 'ipPermissionsEgress' => [], 'ipPermissions' => [], 'ownerId' => self.data[:owner_id], 'vpcId' => vpc_id } response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'groupId' => group_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_snapshot.rb000066400000000000000000000046451437344660100243400ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_snapshot' # Create a snapshot of an EBS volume and store it in S3 # # ==== Parameters # * volume_id<~String> - Id of EBS volume to snapshot # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'encrypted'<~Boolean>: The encryption status of the snapshot. # * 'progress'<~String> - The percentage progress of the snapshot # * 'requestId'<~String> - id of request # * 'snapshotId'<~String> - id of snapshot # * 'startTime'<~Time> - timestamp when snapshot was initiated # * 'status'<~String> - state of snapshot # * 'volumeId'<~String> - id of volume snapshot targets # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateSnapshot.html] def create_snapshot(volume_id, description = nil) request( 'Action' => 'CreateSnapshot', 'Description' => description, 'VolumeId' => volume_id, :parser => Fog::Parsers::AWS::Compute::CreateSnapshot.new ) end end class Mock # # Usage # # Fog::AWS[:compute].create_snapshot("vol-f7c23423", "latest snapshot") # def create_snapshot(volume_id, description = nil) response = Excon::Response.new if volume = self.data[:volumes][volume_id] response.status = 200 snapshot_id = Fog::AWS::Mock.snapshot_id data = { 'description' => description, 'encrypted' => false, 'ownerId' => self.data[:owner_id], 'progress' => nil, 'snapshotId' => snapshot_id, 'startTime' => Time.now, 'status' => 'pending', 'volumeId' => volume_id, 'volumeSize' => volume['size'] } self.data[:snapshots][snapshot_id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_spot_datafeed_subscription.rb000066400000000000000000000027031437344660100301000ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/spot_datafeed_subscription' # Create a spot datafeed subscription # # ==== Parameters # * bucket<~String> - bucket name to store datafeed in # * prefix<~String> - prefix to store data with # # ==== Returns # * 
response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'spotDatafeedSubscription'<~Hash>: # * 'bucket'<~String> - S3 bucket where data is stored # * 'fault'<~Hash>: # * 'code'<~String> - fault code # * 'reason'<~String> - fault reason # * 'ownerId'<~String> - AWS id of account owner # * 'prefix'<~String> - prefix for datafeed items # * 'state'<~String> - state of datafeed subscription # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateSpotDatafeedSubscription.html] def create_spot_datafeed_subscription(bucket, prefix) request( 'Action' => 'CreateSpotDatafeedSubscription', 'Bucket' => bucket, 'Prefix' => prefix, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::SpotDatafeedSubscription.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_subnet.rb000066400000000000000000000114221437344660100237700ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'ipaddr' require 'fog/aws/parsers/compute/create_subnet' # Creates a Subnet with the CIDR block you specify. # # ==== Parameters # * vpcId<~String> - The ID of the VPC where you want to create the subnet. # * cidrBlock<~String> - The CIDR block you want the Subnet to cover (e.g., 10.0.0.0/16). # * options<~Hash>: # * AvailabilityZone<~String> - The Availability Zone you want the subnet in. Default: AWS selects a zone for you (recommended) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'subnet'<~Array>: # * 'subnetId'<~String> - The Subnet's ID # * 'state'<~String> - The current state of the Subnet. ['pending', 'available'] # * 'cidrBlock'<~String> - The CIDR block the Subnet covers. # * 'availableIpAddressCount'<~Integer> - The number of unused IP addresses in the subnet (the IP addresses for any stopped # instances are considered unavailable) # * 'availabilityZone'<~String> - The Availability Zone the subnet is in # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # * 'mapPublicIpOnLaunch'<~Boolean> - Indicates whether instances launched in this subnet receive a public IPv4 address. # * 'defaultForAz'<~Boolean> - Indicates whether this is the default subnet for the Availability Zone. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/ApiReference-query-CreateSubnet.html] def create_subnet(vpcId, cidrBlock, options = {}) request({ 'Action' => 'CreateSubnet', 'VpcId' => vpcId, 'CidrBlock' => cidrBlock, :parser => Fog::Parsers::AWS::Compute::CreateSubnet.new }.merge!(options)) end end class Mock def create_subnet(vpcId, cidrBlock, options = {}) av_zone = options['AvailabilityZone'].nil? ? 'us-east-1c' : options['AvailabilityZone'] Excon::Response.new.tap do |response| if cidrBlock && vpcId vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpcId } if vpc.nil? raise Fog::AWS::Compute::NotFound.new("The vpc ID '#{vpcId}' does not exist") end if ! 
::IPAddr.new(vpc['cidrBlock']).include?(::IPAddr.new(cidrBlock)) raise Fog::AWS::Compute::Error.new("Range => The CIDR '#{cidrBlock}' is invalid.") end self.data[:subnets].select{ |s| s['vpcId'] == vpcId }.each do |subnet| if ::IPAddr.new(subnet['cidrBlock']).include?(::IPAddr.new(cidrBlock)) raise Fog::AWS::Compute::Error.new("Conflict => The CIDR '#{cidrBlock}' conflicts with another subnet") end end response.status = 200 data = { 'subnetId' => Fog::AWS::Mock.subnet_id, 'state' => 'pending', 'vpcId' => vpcId, 'cidrBlock' => cidrBlock, 'availableIpAddressCount' => "255", 'availabilityZone' => av_zone, 'tagSet' => {}, 'mapPublicIpOnLaunch' => true, 'defaultForAz' => true } # Add this subnet to the default network ACL accid = Fog::AWS::Mock.network_acl_association_id default_nacl = self.data[:network_acls].values.find { |nacl| nacl['vpcId'] == vpcId && nacl['default'] } default_nacl['associationSet'] << { 'networkAclAssociationId' => accid, 'networkAclId' => default_nacl['networkAclId'], 'subnetId' => data['subnetId'], } self.data[:subnets].push(data) response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'subnet' => data, } else response.status = 400 response.body = { 'Code' => 'InvalidParameterValue' } if cidrBlock.empty? response.body['Message'] = "Invalid value '' for cidrBlock. Must be specified." end if vpcId.empty? response.body['Message'] = "Invalid value '' for vpcId. Must be specified." end end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_tags.rb000066400000000000000000000037321437344660100234330ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Adds tags to resources # # ==== Parameters # * resources<~String> - One or more resources to tag # * tags<~String> - hash of key value tag pairs to assign # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateTags.html] def create_tags(resources, tags) resources = [*resources] for key, value in tags if value.nil? tags[key] = '' end end params = {} params.merge!(Fog::AWS.indexed_param('ResourceId', resources)) params.merge!(Fog::AWS.indexed_param('Tag.%d.Key', tags.keys)) params.merge!(Fog::AWS.indexed_param('Tag.%d.Value', tags.values)) request({ 'Action' => 'CreateTags', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end class Mock def create_tags(resources, tags) resources = [*resources] tagged = tagged_resources(resources) tags.each do |key, value| self.data[:tags][key] ||= {} self.data[:tags][key][value] ||= [] self.data[:tags][key][value] |= tagged tagged.each do |resource| self.data[:tag_sets][resource['resourceId']][key] = value end end response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_volume.rb000066400000000000000000000140031437344660100237750ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_volume' # Create an EBS volume # # ==== Parameters # * availability_zone<~String> - availability zone to create volume in # * size<~Integer> - Size in GiBs for volume. Must be between 1 and 1024. 
# * options<~Hash> # * 'SnapshotId'<~String> - Optional, snapshot to create volume from # * 'VolumeType'<~String> - Optional, volume type. standard or io1, default is standard. # * 'Iops'<~Integer> - Number of IOPS the volume supports. Required if VolumeType is io1, must be between 1 and 4000. # * 'Encrypted'<~Boolean> - Optional, specifies whether the volume should be encrypted, default is false. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'availabilityZone'<~String> - Availability zone for volume # * 'createTime'<~Time> - Timestamp for creation # * 'size'<~Integer> - Size in GiBs for volume # * 'snapshotId'<~String> - Snapshot volume was created from, if any # * 'status'<~String> - State of volume # * 'volumeId'<~String> - Reference to volume # * 'volumeType'<~String> - Type of volume # * 'iops'<~Integer> - Number of IOPS the volume supports # * 'encrypted'<~Boolean> - Indicates whether the volume will be encrypted # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVolume.html] def create_volume(availability_zone, size, options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("create_volume with a bare snapshot_id is deprecated, use create_volume(availability_zone, size, 'SnapshotId' => snapshot_id) instead [light_black](#{caller.first})[/]") options = { 'SnapshotId' => options } end request({ 'Action' => 'CreateVolume', 'AvailabilityZone' => availability_zone, 'Size' => size, :parser => Fog::Parsers::AWS::Compute::CreateVolume.new }.merge(options)) end end class Mock def create_volume(availability_zone, size, options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("create_volume with a bare snapshot_id is deprecated, use create_volume(availability_zone, size, 'SnapshotId' => snapshot_id) instead [light_black](#{caller.first})[/]") options = { 'SnapshotId' => options } end response = Excon::Response.new if availability_zone && (size || options['SnapshotId']) snapshot = self.data[:snapshots][options['SnapshotId']] if options['SnapshotId'] && !snapshot raise Fog::AWS::Compute::NotFound.new("The snapshot '#{options['SnapshotId']}' does not exist.") end if snapshot && size && size < snapshot['volumeSize'] raise Fog::AWS::Compute::NotFound.new("The snapshot '#{options['SnapshotId']}' has size #{snapshot['volumeSize']} which is greater than #{size}.") elsif snapshot && !size size = snapshot['volumeSize'] end if options['VolumeType'] == 'io1' iops = options['Iops'] if !iops raise Fog::AWS::Compute::Error.new("InvalidParameterCombination => The parameter iops must be specified for io1 volumes.") end if size < 10 raise Fog::AWS::Compute::Error.new("InvalidParameterValue => Volume of #{size}GiB is too small; minimum is 10GiB.") end if (iops_to_size_ratio = iops.to_f / size.to_f) > 30.0 raise Fog::AWS::Compute::Error.new("InvalidParameterValue => Iops to volume size ratio of #{"%.1f" % iops_to_size_ratio} is too high; maximum is 30.0") end if iops < 100 raise Fog::AWS::Compute::Error.new("VolumeIOPSLimit => Volume iops of #{iops} is too low; minimum is 100.") end if iops > 4000 raise Fog::AWS::Compute::Error.new("VolumeIOPSLimit => Volume iops of #{iops} is too high; maximum is 4000.") end end if options['KmsKeyId'] && !options['Encrypted'] raise Fog::AWS::Compute::Error.new("InvalidParameterDependency => The parameter KmsKeyId requires the parameter Encrypted to be set.") end response.status = 200 volume_id = Fog::AWS::Mock.volume_id data = { 'availabilityZone' => availability_zone, 
'attachmentSet' => [], 'createTime' => Time.now, 'iops' => options['Iops'], 'encrypted' => options['Encrypted'] || false, 'size' => size, 'snapshotId' => options['SnapshotId'], 'kmsKeyId' => options['KmsKeyId'] || nil, # @todo validate 'status' => 'creating', 'volumeId' => volume_id, 'volumeType' => options['VolumeType'] || 'standard' } self.data[:volumes][volume_id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data.reject {|key,value| !['availabilityZone','createTime','encrypted','size','snapshotId','status','volumeId','volumeType'].include?(key) }) else response.status = 400 response.body = { 'Code' => 'MissingParameter' } unless availability_zone response.body['Message'] = 'The request must contain the parameter availability_zone' else response.body['Message'] = 'The request must contain the parameter size' end end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/create_vpc.rb000066400000000000000000000130031437344660100232550ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/create_vpc' # Creates a VPC with the CIDR block you specify. # # ==== Parameters # * cidrBlock<~String> - The CIDR block you want the VPC to cover (e.g., 10.0.0.0/16). # * options<~Hash>: # * InstanceTenancy<~String> - The allowed tenancy of instances launched into the VPC. A value of default # means instances can be launched with any tenancy; a value of dedicated means instances must be launched with tenancy as dedicated. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'vpc'<~Array>: # * 'vpcId'<~String> - The VPC's ID # * 'state'<~String> - The current state of the VPC. ['pending', 'available'] # * 'cidrBlock'<~String> - The CIDR block the VPC covers. # * 'dhcpOptionsId'<~String> - The ID of the set of DHCP options. # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/index.html?ApiReference-query-CreateVpc.html] def create_vpc(cidrBlock, options = {}) request({ 'Action' => 'CreateVpc', 'CidrBlock' => cidrBlock, :parser => Fog::Parsers::AWS::Compute::CreateVpc.new }.merge!(options)) end end class Mock def create_vpc(cidrBlock, options = {}) Excon::Response.new.tap do |response| if cidrBlock response.status = 200 vpc_id = Fog::AWS::Mock.vpc_id vpc = { 'vpcId' => vpc_id, 'state' => 'pending', 'cidrBlock' => cidrBlock, 'dhcpOptionsId' => Fog::AWS::Mock.request_id, 'tagSet' => {}, 'enableDnsSupport' => true, 'enableDnsHostnames' => false, 'mapPublicIpOnLaunch' => false, 'classicLinkEnabled' => false, 'classicLinkDnsSupport' => false, 'cidrBlockAssociationSet' => [{ 'cidrBlock' => cidrBlock, 'state' => 'associated', 'associationId' => "vpc-cidr-assoc-#{vpc_id}" }], 'ipv6CidrBlockAssociationSet' => [], 'instanceTenancy' => options['InstanceTenancy'] || 'default' } self.data[:vpcs].push(vpc) #Creates a default route for the subnet default_route = self.route_tables.new(:vpc_id => vpc_id) default_route.save # You are not able to push a main route in the normal AWS, so we are re-implementing some of the # associate_route_table here in order to accomplish this. route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? 
default_route.id } # This pushes a main route to the associationSet # add_route_association(routeTableId, subnetId, main=false) is declared in assocate_route_table.rb assoc = add_route_association(default_route.id, nil, true) route_table["associationSet"].push(assoc) # Create a default network ACL default_nacl = self.network_acls.new(:vpc_id => vpc_id) default_nacl.save # Manually override since Amazon doesn't let you create a default one self.data[:network_acls][default_nacl.network_acl_id]['default'] = true # create default security groups default_elb_group_name = "default_elb_#{Fog::Mock.random_hex(6)}" default_elb_group_id = Fog::AWS::Mock.security_group_id Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:security_groups][default_elb_group_id] = { 'groupDescription' => 'default_elb security group', 'groupName' => default_elb_group_name, 'groupId' => default_elb_group_id, 'ipPermissions' => [], 'ownerId' => self.data[:owner_id], 'vpcId' => vpc_id } default_group_name = 'default' default_group_id = Fog::AWS::Mock.security_group_id Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:security_groups][default_group_id] = { 'groupDescription' => default_group_name, 'groupName' => default_group_name, 'groupId' => default_group_id, 'ipPermissions' => [], 'ownerId' => self.data[:owner_id], 'vpcId' => vpc_id } response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'vpcSet' => [vpc] } else response.status = 400 response.body = { 'Code' => 'InvalidParameterValue' } if cidrBlock.empty? response.body['Message'] = "Invalid value '' for cidrBlock. Must be specified." end end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_dhcp_options.rb000066400000000000000000000034531437344660100251650ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' #Deletes a set of DHCP options that you specify. Amazon VPC returns an error if the set of options you specify is currently #associated with a VPC. You can disassociate the set of options by associating either a new set of options or the default #options with the VPC. # # ==== Parameters # * dhcp_options_id<~String> - The ID of the DHCP options set you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteDhcpOptions.html] def delete_dhcp_options(dhcp_options_id) request( 'Action' => 'DeleteDhcpOptions', 'DhcpOptionsId' => dhcp_options_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_dhcp_options(dhcp_options_id) Excon::Response.new.tap do |response| if dhcp_options_id response.status = 200 self.data[:dhcp_options].reject! { |v| v['dhcpOptionsId'] == dhcp_options_id } response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } else message = 'MissingParameter => ' message << 'The request must contain the parameter dhcp_options_id' raise Fog::AWS::Compute::Error.new(message) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_internet_gateway.rb000066400000000000000000000032021437344660100260350ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' #Deletes an Internet gateway from your AWS account. 
The gateway must not be attached to a VPC # # ==== Parameters # * internet_gateway_id<~String> - The ID of the InternetGateway you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteInternetGateway.html] def delete_internet_gateway(internet_gateway_id) request( 'Action' => 'DeleteInternetGateway', 'InternetGatewayId' => internet_gateway_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_internet_gateway(internet_gateway_id) Excon::Response.new.tap do |response| if internet_gateway_id response.status = 200 self.data[:internet_gateways].delete(internet_gateway_id) response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } else message = 'MissingParameter => ' message << 'The request must contain the parameter internet_gateway_id' raise Fog::AWS::Compute::Error.new(message) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_key_pair.rb000066400000000000000000000022741437344660100242770ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete a key pair that you own # # ==== Parameters # * key_name<~String> - Name of the key pair. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteKeyPair.html] def delete_key_pair(key_name) request( 'Action' => 'DeleteKeyPair', 'KeyName' => key_name, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_key_pair(key_name) response = Excon::Response.new self.data[:key_pairs].delete(key_name) response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_network_acl.rb000066400000000000000000000031331437344660100247770ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes a network ACL. # # ==== Parameters # * network_acl_id<~String> - The ID of the network ACL you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteNetworkAcl.html] def delete_network_acl(network_acl_id) request( 'Action' => 'DeleteNetworkAcl', 'NetworkAclId' => network_acl_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_network_acl(network_acl_id) response = Excon::Response.new if self.data[:network_acls][network_acl_id] if self.data[:network_acls][network_acl_id]['associationSet'].any? 
raise Fog::AWS::Compute::Error.new("ACL is in use") end self.data[:network_acls].delete(network_acl_id) response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network ACL '#{network_acl_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_network_acl_entry.rb000066400000000000000000000041231437344660100262200ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes a network ACL entry # # ==== Parameters # * network_acl_id<~String> - The ID of the network ACL # * rule_number<~Integer> - The rule number of the entry to delete. # * egress<~Boolean> - Indicates whether the rule is an egress rule (true) or ingress rule (false) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteNetworkAclEntry.html] def delete_network_acl_entry(network_acl_id, rule_number, egress) request( 'Action' => 'DeleteNetworkAclEntry', 'NetworkAclId' => network_acl_id, 'RuleNumber' => rule_number, 'Egress' => egress, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_network_acl_entry(network_acl_id, rule_number, egress) response = Excon::Response.new if self.data[:network_acls][network_acl_id] if self.data[:network_acls][network_acl_id]['entrySet'].find { |r| r['ruleNumber'] == rule_number && r['egress'] == egress } self.data[:network_acls][network_acl_id]['entrySet'].delete_if { |r| r['ruleNumber'] == rule_number && r['egress'] == egress } else raise Fog::AWS::Compute::Error.new("No rule with that number and egress value") end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network ACL '#{network_acl_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_network_interface.rb000066400000000000000000000033431437344660100262030ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes a network interface. # # ==== Parameters # * network_interface_id<~String> - The ID of the network interface you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-DeleteNetworkInterface.html] def delete_network_interface(network_interface_id) request( 'Action' => 'DeleteNetworkInterface', 'NetworkInterfaceId' => network_interface_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_network_interface(network_interface_id) response = Excon::Response.new if self.data[:network_interfaces][network_interface_id] if self.data[:network_interfaces][network_interface_id]['attachment']['attachmentId'] raise Fog::AWS::Compute::Error.new("Interface is in use") end self.data[:network_interfaces].delete(network_interface_id) response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network interface '#{network_interface_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_placement_group.rb000066400000000000000000000016021437344660100256520ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete a placement group that you own # # ==== Parameters # * group_name<~String> - Name of the placement group. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeletePlacementGroup.html] def delete_placement_group(name) request( 'Action' => 'DeletePlacementGroup', 'GroupName' => name, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_route.rb000066400000000000000000000047631437344660100236370ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes the specified route from the specified route table. # # ==== Parameters # * RouteTableId<~String> - The ID of the route table. # * DestinationCidrBlock<~String> - The CIDR range for the route. The value you specify must match the CIDR for the route exactly. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of the request. # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteRoute.html] def delete_route(route_table_id, destination_cidr_block) request( 'Action' => 'DeleteRoute', 'RouteTableId' => route_table_id, 'DestinationCidrBlock' => destination_cidr_block, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_route(route_table_id, destination_cidr_block) route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? route_table_id } unless route_table.nil? route = route_table['routeSet'].find { |route| route["destinationCidrBlock"].eql? destination_cidr_block } if !route.nil? 
&& route['gatewayId'] != "local" route_table['routeSet'].delete(route) response = Excon::Response.new response.status = 200 response.body = { 'requestId'=> Fog::AWS::Mock.request_id, 'return' => true } response elsif route['gatewayId'] == "local" # Cannot delete the default route raise Fog::AWS::Compute::Error, "InvalidParameterValue => cannot remove local route #{destination_cidr_block} in route table #{route_table_id}" else raise Fog::AWS::Compute::NotFound.new("no route with destination-cidr-block #{destination_cidr_block} in route table #{route_table_id}") end else raise Fog::AWS::Compute::NotFound.new("no route with destination-cidr-block #{destination_cidr_block} in route table #{route_table_id}") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_route_table.rb000066400000000000000000000035001437344660100247720ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes the specified route table. # # ==== Parameters # * RouteTableId<~String> - The ID of the route table. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of request. # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteRouteTable.html] def delete_route_table(route_table_id) request( 'Action' => 'DeleteRouteTable', 'RouteTableId' => route_table_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_route_table(route_table_id) route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? route_table_id } if !route_table.nil? && route_table['associationSet'].empty? self.data[:route_tables].delete(route_table) response = Excon::Response.new response.status = 200 response.body = { 'requestId'=> Fog::AWS::Mock.request_id, 'return' => true } response elsif route_table.nil? raise Fog::AWS::Compute::NotFound.new("The routeTable ID '#{route_table_id}' does not exist") elsif !route_table['associationSet'].empty? raise Fog::AWS::Compute::Error, "DependencyViolation => The routeTable '#{route_table_id}' has dependencies and cannot be deleted." end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_security_group.rb000066400000000000000000000077111437344660100255600ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete a security group that you own # # ==== Parameters # * group_name<~String> - Name of the security group, must be nil if id is specified # * group_id<~String> - Id of the security group, must be nil if name is specified # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html] def delete_security_group(name, id = nil) if name && id raise Fog::AWS::Compute::Error.new("May not specify both group_name and group_id") end if name type_id = 'GroupName' identifier = name else type_id = 'GroupId' identifier = id end request( 'Action' => 'DeleteSecurityGroup', type_id => identifier, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_security_group(name, id = nil) if name == 'default' raise Fog::AWS::Compute::Error.new("InvalidGroup.Reserved => The security group 'default' is reserved") end if name && id raise Fog::AWS::Compute::Error.new("May not specify both group_name and group_id") end if name id, _ = self.data[:security_groups].find { |_,v| v['groupName'] == name } end unless self.data[:security_groups][id] raise Fog::AWS::Compute::NotFound.new("The security group '#{id}' does not exist") end response = Excon::Response.new used_by_groups = [] # ec2 authorizations self.region_data.each do |_, key_data| key_data[:security_groups].each do |group_id, group| next if group == self.data[:security_groups][group_id] group['ipPermissions'].each do |group_ip_permission| group_ip_permission['groups'].each do |group_group_permission| if group_group_permission['groupId'] == group_id && group_group_permission['userId'] == self.data[:owner_id] used_by_groups << "#{key_data[:owner_id]}:#{group['groupName']}" end end end end end # rds authorizations Fog::AWS::RDS::Mock.data[self.region].each do |_, data| (data[:security_groups] || []).each do |group_name, group| (group["EC2SecurityGroups"] || []).each do |ec2_group| if ec2_group["EC2SecurityGroupName"] == name used_by_groups << "#{group["OwnerId"]}:#{group_name}" end end end end active_instances = self.data[:instances].values.select do |instance| if instance['groupSet'].include?(name) && instance['instanceState'] != "terminated" instance end end unless used_by_groups.empty? raise Fog::AWS::Compute::Error.new("InvalidGroup.InUse => Group #{self.data[:owner_id]}:#{name} is used by groups: #{used_by_groups.uniq.join(" ")}") end if active_instances.any? raise Fog::AWS::Compute::Error.new("InUse => There are active instances using security group '#{name}'") end self.data[:security_groups].delete(id) response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_snapshot.rb000066400000000000000000000026061437344660100243320ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete a snapshot of an EBS volume that you own # # ==== Parameters # * snapshot_id<~String> - ID of snapshot to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
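# ==== Example
# Hypothetical call shown for illustration only; the snapshot id is a
# placeholder and must name a snapshot you own (for instance one created
# earlier with the companion create_snapshot request):
#
#   compute.delete_snapshot('snap-0123456789abcdef0').body['return'] # => true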
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSnapshot.html] def delete_snapshot(snapshot_id) request( 'Action' => 'DeleteSnapshot', 'SnapshotId' => snapshot_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_snapshot(snapshot_id) response = Excon::Response.new if snapshot = self.data[:snapshots].delete(snapshot_id) response.status = true response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The snapshot '#{snapshot_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_spot_datafeed_subscription.rb000066400000000000000000000014231437344660100300750ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete a spot datafeed subscription # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSpotDatafeedSubscription.html] def delete_spot_datafeed_subscription request( 'Action' => 'DeleteSpotDatafeedSubscription', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_subnet.rb000066400000000000000000000031231437344660100237660ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes a subnet from a VPC. You must terminate all running instances in the subnet before deleting it, otherwise Amazon # VPC returns an error # # ==== Parameters # * subnet_id<~String> - The ID of the Subnet you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/ApiReference-query-DeleteSubnet.html] def delete_subnet(subnet_id) request( 'Action' => 'DeleteSubnet', 'SubnetId' => subnet_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_subnet(subnet_id) Excon::Response.new.tap do |response| if subnet_id self.data[:subnets].reject! { |v| v['subnetId'] == subnet_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } else message = 'MissingParameter => ' message << 'The request must contain the parameter subnet_id' raise Fog::AWS::Compute::Error.new(message) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_tags.rb000066400000000000000000000041551437344660100234320ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Remove tags from resources # # ==== Parameters # * resources<~String> - One or more resources to remove tags from # * tags<~String> - hash of key value tag pairs to remove # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
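# ==== Example
# Illustrative sketch, not taken from the upstream documentation; the instance
# id is a placeholder and the tag is assumed to have been attached beforehand
# (for example with the companion create_tags request):
#
#   compute.create_tags('i-0123456789abcdef0', 'role' => 'web')
#   compute.delete_tags('i-0123456789abcdef0', 'role' => 'web').body['return'] # => true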
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteTags.html] def delete_tags(resources, tags) resources = [*resources] params = {} params.merge!(Fog::AWS.indexed_param('ResourceId', resources)) # can not rely on indexed_param because nil values should be omitted tags.keys.each_with_index do |key, index| index += 1 # should start at 1 instead of 0 params.merge!("Tag.#{index}.Key" => key) unless tags[key].nil? params.merge!("Tag.#{index}.Value" => tags[key]) end end request({ 'Action' => 'DeleteTags', :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end class Mock def delete_tags(resources, tags) tagged = tagged_resources(resources) tags.each do |key, value| self.data[:tags][key][value] = self.data[:tags][key][value] - tagged end tagged.each do |resource| tags.each do |key, value| tagset = self.data[:tag_sets][resource['resourceId']] tagset.delete(key) if tagset.key?(key) && (value.nil? || tagset[key] == value) end end response = Excon::Response.new response.status = true response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_volume.rb000066400000000000000000000032431437344660100240000ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Delete an EBS volume # # ==== Parameters # * volume_id<~String> - Id of volume to delete. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteVolume.html] def delete_volume(volume_id) request( 'Action' => 'DeleteVolume', 'VolumeId' => volume_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_volume(volume_id) response = Excon::Response.new if volume = self.data[:volumes][volume_id] if volume["attachmentSet"].any? attach = volume["attachmentSet"].first raise Fog::AWS::Compute::Error.new("Client.VolumeInUse => Volume #{volume_id} is currently attached to #{attach["instanceId"]}") end self.data[:deleted_at][volume_id] = Time.now volume['status'] = 'deleting' response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The volume '#{volume_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/delete_vpc.rb000066400000000000000000000036461437344660100232700ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Deletes a VPC. You must detach or delete all gateways or other objects # that are dependent on the VPC first. For example, you must terminate # all running instances, delete all VPC security groups (except the # default), delete all the route tables (except the default), etc. # # ==== Parameters # * vpc_id<~String> - The ID of the VPC you want to delete. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
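# ==== Example
# A sketch of the create/delete round trip against the mocked service (added
# for illustration; credentials and region are placeholders):
#
#   require 'fog/aws'
#   Fog.mock!
#   compute = Fog::AWS::Compute.new(
#     :aws_access_key_id     => 'fake',
#     :aws_secret_access_key => 'fake',
#     :region                => 'us-east-1'
#   )
#   vpc_id = compute.create_vpc('10.0.0.0/16').body['vpcSet'].first['vpcId']
#   compute.delete_vpc(vpc_id).body['return'] # => true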
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/index.html?ApiReference-query-DeleteVpc.html] def delete_vpc(vpc_id) request( 'Action' => 'DeleteVpc', 'VpcId' => vpc_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def delete_vpc(vpc_id) Excon::Response.new.tap do |response| if vpc_id response.status = 200 self.data[:vpcs].reject! { |v| v['vpcId'] == vpc_id } # Delete the default network ACL network_acl_id = self.network_acls.all('vpc-id' => vpc_id, 'default' => true).first.network_acl_id self.data[:network_acls].delete(network_acl_id) response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } else message = 'MissingParameter => ' message << 'The request must contain the parameter vpc_id' raise Fog::AWS::Compute::Error.new(message) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/deregister_image.rb000066400000000000000000000027001437344660100244430ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/deregister_image' # deregister an image # # ==== Parameters # * image_id<~String> - Id of image to deregister # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'return'<~Boolean> - Returns true if deregistration succeeded # * 'requestId'<~String> - Id of request # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DeregisterImage.html] def deregister_image(image_id) request( 'Action' => 'DeregisterImage', 'ImageId' => image_id, :parser => Fog::Parsers::AWS::Compute::DeregisterImage.new ) end end class Mock def deregister_image(image_id) response = Excon::Response.new if image_id response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => "true" } response else message = 'MissingParameter => ' if !instance_id message << 'The request must contain the parameter image_id' end raise Fog::AWS::Compute::Error.new(message) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_account_attributes.rb000066400000000000000000000030251437344660100267070ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_account_attributes' # Describe account attributes # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> = Id of request # * 'accountAttributeSet'<~Array>: # * 'attributeName'<~String> - supported-platforms # * 'attributeValueSet'<~Array>: # * 'attributeValue'<~String> - Value of attribute # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeAccountAttributes.html] def describe_account_attributes(filters = {}) params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeAccountAttributes', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeAccountAttributes.new }.merge!(params)) end end class Mock def describe_account_attributes(filters = {}) account_attributes = self.data[:account_attributes] Excon::Response.new( :status => 200, :body => { 'requestId' => Fog::AWS::Mock.request_id, 'accountAttributeSet' => account_attributes } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_addresses.rb000066400000000000000000000045031437344660100247640ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 
'fog/aws/parsers/compute/describe_addresses' # Describe all or specified IP addresses. # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'addressesSet'<~Array>: # * 'instanceId'<~String> - instance for ip address # * 'publicIp'<~String> - ip address for instance # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeAddresses.html] def describe_addresses(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_addresses with #{filters.class} param is deprecated, use describe_addresses('public-ip' => []) instead [light_black](#{caller.first})[/]") filters = {'public-ip' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeAddresses', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeAddresses.new }.merge!(params)) end end class Mock def describe_addresses(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_addresses with #{filters.class} param is deprecated, use describe_addresses('public-ip' => []) instead [light_black](#{caller.first})[/]") filters = {'public-ip' => [*filters]} end response = Excon::Response.new addresses_set = self.data[:addresses].values aliases = {'public-ip' => 'publicIp', 'instance-id' => 'instanceId', 'allocation-id' => 'allocationId'} for filter_key, filter_value in filters aliased_key = aliases[filter_key] addresses_set = addresses_set.reject{|address| ![*filter_value].include?(address[aliased_key])} end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'addressesSet' => addresses_set } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_availability_zones.rb000066400000000000000000000156031437344660100267020ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_availability_zones' # Describe all or specified availability zones # # ==== Params # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'availabilityZoneInfo'<~Array>: # * 'regionName'<~String> - Name of region # * 'zoneName'<~String> - Name of zone # * 'zoneState'<~String> - State of zone # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeAvailabilityZones.html] def describe_availability_zones(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_availability_zones with #{filters.class} param is deprecated, use describe_availability_zones('zone-name' => []) instead [light_black](#{caller.first})[/]") filters = {'zone-name' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeAvailabilityZones', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeAvailabilityZones.new }.merge!(params)) end end class Mock def describe_availability_zones(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_availability_zones with #{filters.class} param is deprecated, use describe_availability_zones('zone-name' => []) instead [light_black](#{caller.first})[/]") filters = {'zone-name' => [*filters]} end response = Excon::Response.new all_zones = [ {"messageSet" => [], "regionName" => "us-east-1", "zoneName" => "us-east-1a", "zoneState" 
=> "available"}, {"messageSet" => [], "regionName" => "us-east-1", "zoneName" => "us-east-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-1", "zoneName" => "us-east-1c", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-1", "zoneName" => "us-east-1d", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-1", "zoneName" => "us-east-1e", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-2", "zoneName" => "us-east-2a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-2", "zoneName" => "us-east-2b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-east-2", "zoneName" => "us-east-2c", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-west-1", "zoneName" => "us-west-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-west-1", "zoneName" => "us-west-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-west-1", "zoneName" => "us-west-1c", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-west-2", "zoneName" => "us-west-2a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "us-west-2", "zoneName" => "us-west-2b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "sa-east-1", "zoneName" => "sa-east-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "sa-east-1", "zoneName" => "sa-east-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-1", "zoneName" => "eu-west-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-1", "zoneName" => "eu-west-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-1", "zoneName" => "eu-west-1c", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-2", "zoneName" => "eu-west-2a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-2", "zoneName" => "eu-west-2b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-3", "zoneName" => "eu-west-3a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-west-3", "zoneName" => "eu-west-3b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-central-1", "zoneName" => "eu-central-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "eu-central-1", "zoneName" => "eu-central-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ca-central-1", "zoneName" => "ca-central-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ca-central-1", "zoneName" => "ca-central-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-northeast-1", "zoneName" => "ap-northeast-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-northeast-1", "zoneName" => "ap-northeast-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-northeast-2", "zoneName" => "ap-northeast-2a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-northeast-2", "zoneName" => "ap-northeast-2b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-southeast-1", "zoneName" => "ap-southeast-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-southeast-1", "zoneName" => "ap-southeast-1b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-southeast-2", "zoneName" => "ap-southeast-2a", "zoneState" => "available"}, {"messageSet" => 
[], "regionName" => "ap-southeast-2", "zoneName" => "ap-southeast-2b", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-south-1", "zoneName" => "ap-south-1a", "zoneState" => "available"}, {"messageSet" => [], "regionName" => "ap-south-1", "zoneName" => "ap-south-1b", "zoneState" => "available"}, ] availability_zone_info = all_zones.select { |zoneinfo| zoneinfo["regionName"] == @region } aliases = {'region-name' => 'regionName', 'zone-name' => 'zoneName', 'state' => 'zoneState'} for filter_key, filter_value in filters aliased_key = aliases[filter_key] availability_zone_info = availability_zone_info.reject{|availability_zone| ![*filter_value].include?(availability_zone[aliased_key])} end response.status = 200 response.body = { 'availabilityZoneInfo' => availability_zone_info, 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_classic_link_instances.rb000066400000000000000000000071361437344660100275210ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_classic_link_instances' # Describes one or more of your linked EC2-Classic instances. This request only returns information about EC2-Classic instances linked to a VPC through ClassicLink; you cannot use this request to return information about other instances. # # ==== Parameters # * options<~Hash> # * instance_ids<~Array> - An array of instance ids to restruct the results to # * filters<~Hash> - Filters to restrict the results to. Recognises vpc-id, group-id, instance-id in addition # to tag-key, tag-value and tag:key # * max_results # * next_token # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'instancesSet'<~Array> - array of ClassicLinkInstance # * 'vpcId'<~String> # * 'instanceId'<~String> # * 'tagSet'<~Hash> # * 'groups'<~Array> # * groupId <~String> # * groupName <~String> # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeClassicLinkInstances.html def describe_classic_link_instances(options={}) params = {} params['MaxResults'] = options[:max_results] if options[:max_results] params['NextToken'] = options[:next_token] if options[:next_token] params.merge!(Fog::AWS.indexed_param('InstanceId', options[:instance_ids])) if options[:instance_ids] params.merge!(Fog::AWS.indexed_filters(options[:filters])) if options[:filters] request({ 'Action' => 'DescribeClassicLinkInstances', :parser => Fog::Parsers::AWS::Compute::DescribeClassicLinkInstances.new }.merge(params)) end end class Mock def describe_classic_link_instances(options={}) response = Excon::Response.new instances = self.data[:instances].values.select {|instance| instance['classicLinkVpcId']} if options[:filters] instances = apply_tag_filters(instances, options[:filters], 'instanceId') instances = instances.select {|instance| instance['classicLinkVpcId'] == options[:filters]['vpc-id']} if options[:filters]['vpc-id'] instances = instances.select {|instance| instance['instanceId'] == options[:filters]['instance-id']} if options[:filters]['instance-id'] instances = instances.select {|instance| instance['classicLinkSecurityGroups'].include?(options[:filters]['group-id'])} if options[:filters]['group-id'] end instances = instances.select {|instance| options[:instance_ids].include?(instance['instanceId'])} if options[:instance_ids] response.status = 200 instance_data = instances.collect do |instance| groups = 
self.data[:security_groups].values.select {|data| instance['classicLinkSecurityGroups'].include?(data['groupId'])} { 'instanceId' => instance['instanceId'], 'vpcId' => instance['classicLinkVpcId'], 'groups' => groups.collect {|group| {'groupId' => group['groupId'], 'groupName' => group['groupName']}}, 'tagSet' => self.data[:tag_sets][instance['instanceId']] || {} } end response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'instancesSet' => instance_data } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_dhcp_options.rb000066400000000000000000000040411437344660100254750ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_dhcp_options' # Describe all or specified dhcp_options # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'DhcpOptionsSet'<~Array>: # * 'dhcpOptionsId'<~String> - The ID of the Dhcp Options # * 'dhcpConfigurationSet'<~Array>: - The list of options in the set. # * 'key'<~String> - The name of a DHCP option. # * 'valueSet'<~Array>: A set of values for a DHCP option. # * 'value'<~String> - The value of a DHCP option. # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-ItemType-DhcpOptionsType.html] def describe_dhcp_options(filters = {}) unless filters.is_a?(Hash) Fog::Logger.warning("describe_dhcp_options with #{filters.class} param is deprecated, use dhcp_options('dhcp-options-id' => []) instead [light_black](#{caller.first})[/]") filters = {'dhcp-options-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeDhcpOptions', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeDhcpOptions.new }.merge!(params)) end end class Mock def describe_dhcp_options(filters = {}) Excon::Response.new.tap do |response| response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'dhcpOptionsSet' => self.data[:dhcp_options] } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_image_attribute.rb000066400000000000000000000056011437344660100261540ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_image_attribute' # Describes an image attribute value # # ==== Parameters # * image_id<~String> - The ID of the image you want to describe an attribute of # * attribute<~String> - The attribute to describe, must be one of the following: # -'description' # -'kernel' # -'ramdisk' # -'launchPermission' # -'productCodes' # -'blockDeviceMapping' # -'sriovNetSupport' # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'description'<~String> - The description for the AMI # * 'imageId'<~String> - The ID of the image # * 'kernelId'<~String> - The kernel ID # * 'ramdiskId'<~String> - The RAM disk ID # * 'blockDeviceMapping'<~List> - The block device mapping of the image # * 'productCodes'<~List> - A list of product codes # * 'sriovNetSupport'<~String> - The value to use for a resource attribute # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImageAttribute.html] def describe_image_attribute(image_id, attribute) request( 'Action' => 
'DescribeImageAttribute', 'ImageId' => image_id, 'Attribute' => attribute, :parser => Fog::Parsers::AWS::Compute::DescribeImageAttribute.new ) end end class Mock def describe_image_attribute(image_id, attribute) response = Excon::Response.new if image = self.data[:images].values.find{ |i| i['imageId'] == image_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'imageId' => image_id } case attribute when 'kernel' response.body[attribute] = image["kernelId"] when 'ramdisk' response.body[attribute] = image["ramdiskId"] when 'sriovNetSupport' response.body[attribute] = 'simple' when 'launchPermission' if image_launch_permissions = self.data[:image_launch_permissions][image_id] response.body[attribute] = image_launch_permissions[:users] else response.body[attribute] = [] end else response.body[attribute] = image[attribute] end response else raise Fog::AWS::Compute::NotFound.new("The Image '#{image_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_images.rb000066400000000000000000000127111437344660100242540ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_images' # Describe all or specified images. # # ==== Params # * filters<~Hash> - List of filters to limit results with # * filters and/or the following # * 'ExecutableBy'<~String> - Only return images that the executable_by # user has explicit permission to launch # * 'ImageId'<~Array> - Ids of images to describe # * 'Owner'<~String> - Only return images belonging to owner. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'imagesSet'<~Array>: # * 'architecture'<~String> - Architecture of the image # * 'blockDeviceMapping'<~Array> - An array of mapped block devices # * 'description'<~String> - Description of image # * 'imageId'<~String> - Id of the image # * 'imageLocation'<~String> - Location of the image # * 'imageOwnerAlias'<~String> - Alias of the owner of the image # * 'imageOwnerId'<~String> - Id of the owner of the image # * 'imageState'<~String> - State of the image # * 'imageType'<~String> - Type of the image # * 'isPublic'<~Boolean> - Whether or not the image is public # * 'kernelId'<~String> - Kernel id associated with image, if any # * 'platform'<~String> - Operating platform of the image # * 'productCodes'<~Array> - Product codes for the image # * 'ramdiskId'<~String> - Ramdisk id associated with image, if any # * 'rootDeviceName'<~String> - Root device name, e.g. 
/dev/sda1 # * 'rootDeviceType'<~String> - Root device type, ebs or instance-store # * 'virtualizationType'<~String> - Type of virtualization # * 'creationDate'time<~Datetime> - Date and time the image was created # * 'enaSupport'<~Boolean> - whether or not the image supports enhanced networking # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeImages.html] def describe_images(filters = {}) options = {} for key in ['ExecutableBy', 'ImageId', 'Owner'] if filters.is_a?(Hash) && filters.key?(key) options.merge!(Fog::AWS.indexed_request_param(key, filters.delete(key))) end end params = Fog::AWS.indexed_filters(filters).merge!(options) request({ 'Action' => 'DescribeImages', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeImages.new }.merge!(params)) end end class Mock def describe_images(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_images with #{filters.class} param is deprecated, use describe_images('image-id' => []) instead [light_black](#{caller.first})[/]") filters = {'image-id' => [*filters]} end if filters.keys.any? {|key| key =~ /^block-device/} Fog::Logger.warning("describe_images block-device-mapping filters are not yet mocked [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end if owner = filters.delete('Owner') if owner == 'self' filters['owner-id'] = self.data[:owner_id] else filters['owner-alias'] = owner end end response = Excon::Response.new aliases = { 'architecture' => 'architecture', 'description' => 'description', 'hypervisor' => 'hypervisor', 'image-id' => 'imageId', 'image-type' => 'imageType', 'is-public' => 'isPublic', 'kernel-id' => 'kernelId', 'manifest-location' => 'manifestLocation', 'name' => 'name', 'owner-alias' => 'imageOwnerAlias', 'owner-id' => 'imageOwnerId', 'ramdisk-id' => 'ramdiskId', 'root-device-name' => 'rootDeviceName', 'root-device-type' => 'rootDeviceType', 'state' => 'imageState', 'virtualization-type' => 'virtualizationType' } image_set = visible_images.values image_set = apply_tag_filters(image_set, filters, 'imageId') for filter_key, filter_value in filters aliased_key = aliases[filter_key] image_set = image_set.reject{|image| ![*filter_value].include?(image[aliased_key])} end image_set = image_set.map do |image| case image['imageState'] when 'pending' if Time.now - image['registered'] >= Fog::Mock.delay image['imageState'] = 'available' end end image.reject { |key, value| ['registered'].include?(key) }.merge('tagSet' => self.data[:tag_sets][image['imageId']]) end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'imagesSet' => image_set } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_instance_attribute.rb000066400000000000000000000102451437344660100266760ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_instance_attribute' # Describes an instance attribute value # # ==== Parameters # * instance_id<~String> - The ID of the instance you want to describe an attribute of # * attribute<~String> - The attribute to describe, must be one of the following: # -'instanceType' # -'kernel' # -'ramdisk' # -'userData' # -'disableApiTermination' # -'instanceInitiatedShutdownBehavior' # -'rootDeviceName' # -'blockDeviceMapping' # -'productCodes' # -'sourceDestCheck' # -'groupSet' # -'ebsOptimized' # -'sriovNetSupport' # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 
'requestId'<~String> - Id of request # * 'instanceId'<~String> - The ID of the instance # * 'instanceType'<~String> - Instance type # * 'kernelId'<~String> - The kernel ID # * 'ramdiskId'<~String> - The RAM disk ID # * 'userData'<~String> - The Base64-encoded MIME user data # * 'disableApiTermination'<~Boolean> - If the value is true , you can't terminate the instance through the Amazon EC2 console, CLI, or API; otherwise, you can. # * 'instanceInitiatedShutdownBehavior'<~String> - Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown) # * 'rootDeviceName'<~String> - The name of the root device (for example, /dev/sda1 or /dev/xvda ) # * 'blockDeviceMapping'<~List> - The block device mapping of the instance # * 'productCodes'<~List> - A list of product codes # * 'ebsOptimized'<~Boolean> - Indicates whether the instance is optimized for EBS I/O # * 'sriovNetSupport'<~String> - The value to use for a resource attribute # * 'sourceDestCheck'<~Boolean> - Indicates whether source/destination checking is enabled. A value of true means checking is enabled, and false means checking is disabled. This value must be false for a NAT instance to perform NAT # * 'groupSet'<~List> - The security groups associated with the instance # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceAttribute.html] def describe_instance_attribute(instance_id, attribute) request( 'Action' => 'DescribeInstanceAttribute', 'InstanceId' => instance_id, 'Attribute' => attribute, :parser => Fog::Parsers::AWS::Compute::DescribeInstanceAttribute.new ) end end class Mock def describe_instance_attribute(instance_id, attribute) response = Excon::Response.new if instance = self.data[:instances].values.find{ |i| i['instanceId'] == instance_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'instanceId' => instance_id } case attribute when 'kernel' response.body[attribute] = instance["kernelId"] when 'ramdisk' response.body[attribute] = instance["ramdiskId"] when 'disableApiTermination' response.body[attribute] = false when 'instanceInitiatedShutdownBehavior' response.body['instanceInitiatedShutdownBehavior'] = 'stop' when 'sourceDestCheck' response.body[attribute] = true when 'sriovNetSupport' response.body[attribute] = 'simple' else response.body[attribute] = instance[attribute] end response else raise Fog::AWS::Compute::NotFound.new("The Instance '#{instance_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_instance_status.rb000066400000000000000000000031151437344660100262140ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_instance_status' # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstanceStatus.html # def describe_instance_status(filters = {}) raise ArgumentError.new("Filters must be a hash, but is a #{filters.class}.") unless filters.is_a?(Hash) next_token = filters.delete('nextToken') || filters.delete('NextToken') max_results = filters.delete('maxResults') || filters.delete('MaxResults') all_instances = filters.delete('includeAllInstances') || filters.delete('IncludeAllInstances') params = Fog::AWS.indexed_request_param('InstanceId', filters.delete('InstanceId')) params.merge!(Fog::AWS.indexed_filters(filters)) params['NextToken'] = next_token if next_token params['MaxResults'] = max_results 
if max_results params['IncludeAllInstances'] = all_instances if all_instances request({ 'Action' => 'DescribeInstanceStatus', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeInstanceStatus.new }.merge!(params)) end end class Mock def describe_instance_status(filters = {}) response = Excon::Response.new response.status = 200 response.body = { 'instanceStatusSet' => [], 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_instances.rb000066400000000000000000000343741437344660100250070ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_instances' # Describe all or specified instances # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # * Also allows for passing of optional parameters to fetch instances in batches: # * 'maxResults' - The number of instances to return for the request # * 'nextToken' - The token to fetch the next set of items. This is returned by a previous request. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'nextToken' - The token to use when requesting the next set of items when fetching items in batches. # * 'reservationSet'<~Array>: # * 'groupSet'<~Array> - Group names for reservation # * 'ownerId'<~String> - AWS Access Key ID of reservation owner # * 'reservationId'<~String> - Id of the reservation # * 'instancesSet'<~Array>: # * instance<~Hash>: # * 'architecture'<~String> - architecture of image in [i386, x86_64] # * 'amiLaunchIndex'<~Integer> - reference to instance in launch group # * 'blockDeviceMapping'<~Array> # * 'attachTime'<~Time> - time of volume attachment # * 'deleteOnTermination'<~Boolean> - whether or not to delete volume on termination # * 'deviceName'<~String> - specifies how volume is exposed to instance # * 'status'<~String> - status of attached volume # * 'volumeId'<~String> - Id of attached volume # * 'dnsName'<~String> - public dns name, blank until instance is running # * 'ebsOptimized'<~Boolean> - Whether the instance is optimized for EBS I/O # * 'imageId'<~String> - image id of ami used to launch instance # * 'instanceId'<~String> - id of the instance # * 'instanceState'<~Hash>: # * 'code'<~Integer> - current status code # * 'name'<~String> - current status name # * 'instanceType'<~String> - type of instance # * 'ipAddress'<~String> - public ip address assigned to instance # * 'kernelId'<~String> - id of kernel used to launch instance # * 'keyName'<~String> - name of key used launch instances or blank # * 'launchTime'<~Time> - time instance was launched # * 'monitoring'<~Hash>: # * 'state'<~Boolean - state of monitoring # * 'placement'<~Hash>: # * 'availabilityZone'<~String> - Availability zone of the instance # * 'platform'<~String> - Platform of the instance (e.g., Windows). 
# * 'productCodes'<~Array> - Product codes for the instance # * 'privateDnsName'<~String> - private dns name, blank until instance is running # * 'privateIpAddress'<~String> - private ip address assigned to instance # * 'rootDeviceName'<~String> - specifies how the root device is exposed to the instance # * 'rootDeviceType'<~String> - root device type used by AMI in [ebs, instance-store] # * 'ramdiskId'<~String> - Id of ramdisk used to launch instance # * 'reason'<~String> - reason for most recent state transition, or blank # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html] def describe_instances(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_instances with #{filters.class} param is deprecated, use describe_instances('instance-id' => []) instead [light_black](#{caller.first})[/]") filters = {'instance-id' => [*filters]} end params = {} next_token = filters.delete('nextToken') || filters.delete('NextToken') max_results = filters.delete('maxResults') || filters.delete('MaxResults') if filters['instance-id'] instance_ids = filters.delete('instance-id') instance_ids = [instance_ids] unless instance_ids.is_a?(Array) instance_ids.each_with_index do |id, index| params.merge!("InstanceId.#{index}" => id) end end params['NextToken'] = next_token if next_token params['MaxResults'] = max_results if max_results params.merge!(Fog::AWS.indexed_filters(filters)) request({ 'Action' => 'DescribeInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeInstances.new }.merge!(params)) end end class Mock def describe_instances(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_instances with #{filters.class} param is deprecated, use describe_instances('instance-id' => []) instead [light_black](#{caller.first})[/]") filters = {'instance-id' => [*filters]} end response = Excon::Response.new instance_set = self.data[:instances].values instance_set = apply_tag_filters(instance_set, filters, 'instanceId') aliases = { 'architecture' => 'architecture', 'availability-zone' => 'availabilityZone', 'client-token' => 'clientToken', 'dns-name' => 'dnsName', 'group-id' => 'groupId', 'image-id' => 'imageId', 'instance-id' => 'instanceId', 'instance-lifecycle' => 'instanceLifecycle', 'instance-type' => 'instanceType', 'ip-address' => 'ipAddress', 'kernel-id' => 'kernelId', 'key-name' => 'key-name', 'launch-index' => 'launchIndex', 'launch-time' => 'launchTime', 'monitoring-state' => 'monitoringState', 'owner-id' => 'ownerId', 'placement-group-name' => 'placementGroupName', 'platform' => 'platform', 'private-dns-name' => 'privateDnsName', 'private-ip-address' => 'privateIpAddress', 'product-code' => 'productCode', 'ramdisk-id' => 'ramdiskId', 'reason' => 'reason', 'requester-id' => 'requesterId', 'reservation-id' => 'reservationId', 'root-device-name' => 'rootDeviceName', 'root-device-type' => 'rootDeviceType', 'spot-instance-request-id' => 'spotInstanceRequestId', 'subnet-id' => 'subnetId', 'virtualization-type' => 'virtualizationType', 'vpc-id' => 'vpcId' } block_device_mapping_aliases = { 'attach-time' => 'attachTime', 'delete-on-termination' => 'deleteOnTermination', 'device-name' => 'deviceName', 'status' => 'status', 'volume-id' => 'volumeId', } instance_state_aliases = { 'code' => 'code', 'name' => 'name' } state_reason_aliases = { 'code' => 'code', 'message' => 'message' } for filter_key, filter_value in filters if block_device_mapping_key = 
filter_key.split('block-device-mapping.')[1] aliased_key = block_device_mapping_aliases[block_device_mapping_key] instance_set = instance_set.reject{|instance| !instance['blockDeviceMapping'].find {|block_device_mapping| [*filter_value].include?(block_device_mapping[aliased_key])}} elsif instance_state_key = filter_key.split('instance-state-')[1] aliased_key = instance_state_aliases[instance_state_key] instance_set = instance_set.reject{|instance| ![*filter_value].include?(instance['instanceState'][aliased_key])} elsif state_reason_key = filter_key.split('state-reason-')[1] aliased_key = state_reason_aliases[state_reason_key] instance_set = instance_set.reject{|instance| ![*filter_value].include?(instance['stateReason'][aliased_key])} elsif filter_key == "availability-zone" aliased_key = aliases[filter_key] instance_set = instance_set.reject{|instance| ![*filter_value].include?(instance['placement'][aliased_key])} elsif filter_key == "group-name" instance_set = instance_set.reject {|instance| !instance['groupSet'].include?(filter_value)} elsif filter_key == "group-id" group_ids = [*filter_value] security_group_names = self.data[:security_groups].values.select { |sg| group_ids.include?(sg['groupId']) }.map { |sg| sg['groupName'] } instance_set = instance_set.reject {|instance| (security_group_names & instance['groupSet']).empty?} else aliased_key = aliases[filter_key] instance_set = instance_set.reject {|instance| ![*filter_value].include?(instance[aliased_key])} end end brand_new_instances = instance_set.select do |instance| instance['instanceState']['name'] == 'pending' && Time.now - instance['launchTime'] < Fog::Mock.delay * 2 end # Error if filtering for a brand new instance directly if (filters['instance-id'] || filters['instanceId']) && !brand_new_instances.empty? 
raise Fog::AWS::Compute::NotFound.new("The instance ID '#{brand_new_instances.first['instanceId']}' does not exist") end # Otherwise don't include it in the list instance_set = instance_set.reject {|instance| brand_new_instances.include?(instance) } response.status = 200 reservation_set = {} instance_set.each do |instance| case instance['instanceState']['name'] when 'pending' if Time.now - instance['launchTime'] >= Fog::Mock.delay * 2 instance['ipAddress'] = Fog::AWS::Mock.ip_address instance['originalIpAddress'] = instance['ipAddress'] instance['dnsName'] = Fog::AWS::Mock.dns_name_for(instance['ipAddress']) instance['instanceState'] = { 'code' => 16, 'name' => 'running' } end when 'rebooting' instance['instanceState'] = { 'code' => 16, 'name' => 'running' } when 'stopping' instance['instanceState'] = { 'code' => 0, 'name' => 'stopped' } instance['stateReason'] = { 'code' => 0 } when 'shutting-down' if Time.now - self.data[:deleted_at][instance['instanceId']] >= Fog::Mock.delay * 2 self.data[:deleted_at].delete(instance['instanceId']) self.data[:instances].delete(instance['instanceId']) elsif Time.now - self.data[:deleted_at][instance['instanceId']] >= Fog::Mock.delay instance['instanceState'] = { 'code' => 48, 'name' => 'terminating' } end when 'terminating' if Time.now - self.data[:deleted_at][instance['instanceId']] >= Fog::Mock.delay self.data[:deleted_at].delete(instance['instanceId']) self.data[:instances].delete(instance['instanceId']) end end if self.data[:instances][instance['instanceId']] nics = self.data[:network_interfaces].select{|ni,ni_conf| ni_conf['attachment']['instanceId'] == instance['instanceId'] } instance['networkInterfaces'] = nics.map{|ni,ni_conf| { 'ownerId' => ni_conf['ownerId'], 'subnetId' => ni_conf['subnetId'], 'vpcId' => ni_conf['vpcId'], 'networkInterfaceId' => ni_conf['networkInterfaceId'], 'groupSet' => ni_conf['groupSet'], 'attachmentId' => ni_conf['attachment']['attachmentId'] } } if nics.count > 0 instance['privateIpAddress'] = nics.sort_by {|ni, ni_conf| ni_conf['attachment']['deviceIndex'] }.map{ |ni, ni_conf| ni_conf['privateIpAddress'] }.first instance['privateDnsName'] = Fog::AWS::Mock.private_dns_name_for(instance['privateIpAddress']) else instance['privateIpAddress'] = '' instance['privateDnsName'] = '' end reservation_set[instance['reservationId']] ||= { 'groupSet' => instance['groupSet'], 'groupIds' => instance['groupIds'], 'instancesSet' => [], 'ownerId' => instance['ownerId'], 'reservationId' => instance['reservationId'] } reservation_set[instance['reservationId']]['instancesSet'] << instance.reject{|key,value| !['amiLaunchIndex', 'architecture', 'blockDeviceMapping', 'clientToken', 'dnsName', 'ebsOptimized', 'hypervisor', 'iamInstanceProfile', 'imageId', 'instanceId', 'instanceState', 'instanceType', 'ipAddress', 'kernelId', 'keyName', 'launchTime', 'monitoring', 'networkInterfaces', 'ownerId', 'placement', 'platform', 'privateDnsName', 'privateIpAddress', 'productCodes', 'ramdiskId', 'reason', 'rootDeviceName', 'rootDeviceType', 'spotInstanceRequestId', 'stateReason', 'subnetId', 'virtualizationType'].include?(key)}.merge('tagSet' => self.data[:tag_sets][instance['instanceId']]) end end response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'reservationSet' => reservation_set.values } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_internet_gateways.rb000066400000000000000000000045021437344660100265420ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 
'fog/aws/parsers/compute/describe_internet_gateways' # Describe all or specified internet_gateways # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'InternetGatewaySet'<~Array>: # * 'internetGatewayId'<~String> - The ID of the Internet gateway. # * 'attachmentSet'<~Array>: - A list of VPCs attached to the Internet gateway # * 'vpcId'<~String> - The ID of the VPC the Internet gateway is attached to # * 'state'<~String> - The current state of the attachment # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-ItemType-InternetGatewayType.html] def describe_internet_gateways(filters = {}) unless filters.is_a?(Hash) Fog::Logger.warning("describe_internet_gateways with #{filters.class} param is deprecated, use internet_gateways('internet-gateway-id' => []) instead [light_black](#{caller.first})[/]") filters = {'internet-gateway-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeInternetGateways', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeInternetGateways.new }.merge!(params)) end end class Mock def describe_internet_gateways(filters = {}) internet_gateways = self.data[:internet_gateways].values if filters['internet-gateway-id'] internet_gateways = internet_gateways.reject {|internet_gateway| internet_gateway['internetGatewayId'] != filters['internet-gateway-id']} end Excon::Response.new( :status => 200, :body => { 'requestId' => Fog::AWS::Mock.request_id, 'internetGatewaySet' => internet_gateways } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_key_pairs.rb000066400000000000000000000045401437344660100247760ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_key_pairs' # Describe all or specified key pairs # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'keySet'<~Array>: # * 'keyName'<~String> - Name of key # * 'keyFingerprint'<~String> - Fingerprint of key # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeKeyPairs.html] def describe_key_pairs(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_key_pairs with #{filters.class} param is deprecated, use describe_key_pairs('key-name' => []) instead [light_black](#{caller.first})[/]") filters = {'key-name' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeKeyPairs', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeKeyPairs.new }.merge!(params)) end end class Mock def describe_key_pairs(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_key_pairs with #{filters.class} param is deprecated, use describe_key_pairs('key-name' => []) instead [light_black](#{caller.first})[/]") filters = {'key-name' => [*filters]} end response = Excon::Response.new key_set = self.data[:key_pairs].values aliases = {'fingerprint' => 'keyFingerprint', 'key-name' => 'keyName'} for filter_key, filter_value in filters aliased_key = aliases[filter_key] key_set = key_set.reject{|key_pair| 
![*filter_value].include?(key_pair[aliased_key])} end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'keySet' => key_set.map do |key_pair| key_pair.reject {|key,value| !['keyFingerprint', 'keyName'].include?(key)} end } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_network_acls.rb000066400000000000000000000130441437344660100255020ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_network_acls' # Describe all or specified network ACLs # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'networkAclSet'<~Array>: - A list of network ACLs # * 'networkAclId'<~String> - The ID of the network ACL # * 'vpcId'<~String> - The ID of the VPC for the network ACL # * 'default'<~Boolean> - Indicates whether this is the default network ACL for the VPC # * 'entrySet'<~Array>: - A list of entries (rules) in the network ACL # * 'ruleNumber'<~Integer> - The rule number for the entry. ACL entries are processed in ascending order by rule number # * 'protocol'<~Integer> - The protocol. A value of -1 means all protocols # * 'ruleAction'<~String> - Indicates whether to allow or deny the traffic that matches the rule # * 'egress'<~Boolean> - Indicates whether the rule is an egress rule (applied to traffic leaving the subnet) # * 'cidrBlock'<~String> - The network range to allow or deny, in CIDR notation # * 'icmpTypeCode'<~Hash> - ICMP protocol: The ICMP type and code # * 'code'<~Integer> - The ICMP code. A value of -1 means all codes for the specified ICMP type # * 'type'<~Integer> - The ICMP type. A value of -1 means all types # * 'portRange'<~Hash> - TCP or UDP protocols: The range of ports the rule applies to # * 'from'<~Integer> - The first port in the range # * 'to'<~Integer> - The last port in the range # * 'associationSet'<~Array>: - A list of associations between the network ACL and subnets # * 'networkAclAssociationId'<~String> - The ID of the association # * 'networkAclId'<~String> - The ID of the network ACL # * 'subnetId'<~String> - The ID of the subnet # * 'tagSet'<~Array>: - Tags assigned to the resource. 
# * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeNetworkAcls.html] def describe_network_acls(filters = {}) params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeNetworkAcls', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeNetworkAcls.new }.merge!(params)) end end class Mock def describe_network_acls(filters = {}) response = Excon::Response.new network_acls = self.data[:network_acls].values network_acls = apply_tag_filters(network_acls, filters, 'networkAclId') aliases = { 'vpc-id' => 'vpcId', 'network-acl-id' => 'networkAclId', 'default' => 'default', } association_aliases = { 'association-id' => 'networkAclAssociationId', 'network-acl-id' => 'networkAclId', 'subnet-id' => 'subnetId', } entry_aliases = { 'cidr' => 'cidrBlock', 'egress' => 'egress', 'rule-action' => 'ruleAction', 'rule-number' => 'ruleNumber', 'protocol' => 'protocol' } for filter_key, filter_value in filters filter_key = filter_key.to_s if association_key = filter_key.split('association.')[1] aliased_key = association_aliases[association_key] network_acls = network_acls.reject{|nacl| !nacl['associationSet'].find {|association| [*filter_value].include?(association[aliased_key])}} elsif entry_key = filter_key.split('entry.icmp.')[1] network_acls = network_acls.reject{|nacl| !nacl['entrySet'].find {|association| [*filter_value].include?(association['icmpTypeCode'][entry_key])}} elsif entry_key = filter_key.split('entry.port-range.')[1] network_acls = network_acls.reject{|nacl| !nacl['entrySet'].find {|association| [*filter_value].include?(association['portRange'][entry_key])}} elsif entry_key = filter_key.split('entry.')[1] aliased_key = entry_aliases[entry_key] network_acls = network_acls.reject{|nacl| !nacl['entrySet'].find {|association| [*filter_value].include?(association[aliased_key])}} else aliased_key = aliases[filter_key] network_acls = network_acls.reject{|nacl| ![*filter_value].include?(nacl[aliased_key])} end end network_acls.each do |acl| tags = self.data[:tag_sets][acl['networkAclId']] acl.merge!('tagSet' => tags) if tags end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'networkAclSet' => network_acls } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_network_interface_attribute.rb000066400000000000000000000057101437344660100306040ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_network_interface_attribute' # Describes a network interface attribute value # # ==== Parameters # * network_interface_id<~String> - The ID of the network interface you want to describe an attribute of # * attribute<~String> - The attribute to describe, must be one of 'description', 'groupSet', 'sourceDestCheck' or 'attachment' # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'networkInterfaceId'<~String> - The ID of the network interface # * 'description'<~String> - The description (if requested) # * 'groupSet'<~Hash> - Associated security groups (if requested) # * 'key'<~String> - ID of associated group # * 'value'<~String> - Name of associated group # * 'sourceDestCheck'<~Boolean> - Flag indicating whether traffic to or from the instance is validated (if requested) # * 'attachment'<~Hash>: - Describes the way this nic is attached (if requested) # * 'attachmentID'<~String> # * 
'instanceID'<~String>
        # * 'instanceOwnerId'<~String>
        # * 'deviceIndex'<~Integer>
        # * 'status'<~String>
        # * 'attachTime'<~String>
        # * 'deleteOnTermination'<~Boolean>
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-DescribeNetworkInterfaceAttribute.html]
        def describe_network_interface_attribute(network_interface_id, attribute)
          request(
            'Action' => 'DescribeNetworkInterfaceAttribute',
            'NetworkInterfaceId' => network_interface_id,
            'Attribute' => attribute,
            :parser => Fog::Parsers::AWS::Compute::DescribeNetworkInterfaceAttribute.new
          )
        end
      end

      class Mock
        def describe_network_interface_attribute(network_interface_id, attribute)
          response = Excon::Response.new
          network_interface = self.data[:network_interfaces][network_interface_id]
          unless network_interface
            raise Fog::AWS::Compute::NotFound.new("The network interface '#{network_interface_id}' does not exist")
          end
          response.status = 200
          response.body = {
            'requestId' => Fog::AWS::Mock.request_id,
            'networkInterfaceId' => network_interface_id
          }
          case attribute
          when 'description', 'groupSet', 'sourceDestCheck', 'attachment'
            response.body[attribute] = network_interface[attribute]
          else
            raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified")
          end
          response
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_network_interfaces.rb000066400000000000000000000102311437344660100266760ustar00rootroot00000000000000
module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/describe_network_interfaces'

        # Describe all or specified network interfaces
        #
        # ==== Parameters
        # * filters<~Hash> - List of filters to limit results with
        #
        # === Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'networkInterfaceSet'<~Array>:
        # * 'networkInterfaceId'<~String> - The ID of the network interface
        # * 'subnetId'<~String> - The ID of the subnet
        # * 'vpcId'<~String> - The ID of the VPC
        # * 'availabilityZone'<~String> - The availability zone
        # * 'description'<~String> - The description
        # * 'ownerId'<~String> - The ID of the person who created the interface
        # * 'requesterId'<~String> - The ID of the entity requesting this interface
        # * 'requesterManaged'<~String> -
        # * 'status'<~String> - "available" or "in-use"
        # * 'macAddress'<~String> -
        # * 'privateIpAddress'<~String> - IP address of the interface within the subnet
        # * 'privateDnsName'<~String> - The private DNS name
        # * 'sourceDestCheck'<~Boolean> - Flag indicating whether traffic to or from the instance is validated
        # * 'groupSet'<~Hash> - Associated security groups
        # * 'key'<~String> - ID of associated group
        # * 'value'<~String> - Name of associated group
        # * 'attachment'<~Hash>: - Describes the way this nic is attached
        # * 'attachmentID'<~String>
        # * 'instanceID'<~String>
        # * 'instanceOwnerId'<~String>
        # * 'deviceIndex'<~Integer>
        # * 'status'<~String>
        # * 'attachTime'<~String>
        # * 'deleteOnTermination'<~Boolean>
        # * 'association'<~Hash>: - Describes an eventual instance association
        # * 'attachmentID'<~String> - ID of the network interface attachment
        # * 'instanceID'<~String> - ID of the instance attached to the network interface
        # * 'publicIp'<~String> - Address of the Elastic IP address bound to the network interface
        # * 'ipOwnerId'<~String> - ID of the Elastic IP address owner
        # * 'tagSet'<~Array>: - Tags assigned to the resource.
        # * 'key'<~String> - Tag's key
        # * 'value'<~String> - Tag's value
        # * 'privateIpAddresses' <~Array>:
        # * 'privateIpAddress'<~String> - One of the additional private IP addresses
        # * 'privateDnsName'<~String> - The private DNS name associated with the IP address
        # * 'primary'<~String> - Whether this is the primary private IP address associated with the NIC (true or false)
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/index.html?ApiReference-query-DescribeNetworkInterfaces.html]
        def describe_network_interfaces(filters = {})
          params = Fog::AWS.indexed_filters(filters)
          request({
            'Action' => 'DescribeNetworkInterfaces',
            :idempotent => true,
            :parser => Fog::Parsers::AWS::Compute::DescribeNetworkInterfaces.new
          }.merge!(params))
        end
      end

      class Mock
        def describe_network_interfaces(filters = {})
          response = Excon::Response.new
          network_interface_info = self.data[:network_interfaces].values

          if subnet_filter = filters.delete('subnet-id')
            filters['subnetId'] = subnet_filter
          end

          for filter_key, filter_value in filters
            network_interface_info = network_interface_info.reject{|nic| ![*filter_value].include?(nic[filter_key])}
          end

          response.status = 200
          response.body = {
            'requestId' => Fog::AWS::Mock.request_id,
            'networkInterfaceSet' => network_interface_info
          }
          response
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_placement_groups.rb000066400000000000000000000022501437344660100263530ustar00rootroot00000000000000
module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/describe_placement_groups'

        # Describe all or specified placement groups
        #
        # ==== Parameters
        # * filters<~Hash> - List of filters to limit results with
        #
        # === Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'placementGroupSet'<~Array>:
        # * 'groupName'<~String> - Name of placement group
        # * 'strategy'<~String> - Strategy of placement group
        # * 'state'<~String> - State of placement group
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribePlacementGroups.html]
        def describe_placement_groups(filters = {})
          params = Fog::AWS.indexed_filters(filters)
          request({
            'Action' => 'DescribePlacementGroups',
            :idempotent => true,
            :parser => Fog::Parsers::AWS::Compute::DescribePlacementGroups.new
          }.merge!(params))
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_regions.rb000066400000000000000000000046431437344660100244600ustar00rootroot00000000000000
module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/describe_regions'

        # Describe all or specified regions
        #
        # ==== Params
        # * filters<~Hash> - List of filters to limit results with
        #
        # ==== Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'regionInfo'<~Array>:
        # * 'regionName'<~String> - Name of region
        # * 'regionEndpoint'<~String> - Service endpoint for region
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRegions.html]
        def describe_regions(filters = {})
          unless filters.is_a?(Hash)
            Fog::Logger.deprecation("describe_regions with #{filters.class} param is deprecated, use describe_regions('region-name' => []) instead [light_black](#{caller.first})[/]")
            filters = {'region-name' => [*filters]}
          end
          params = Fog::AWS.indexed_filters(filters)
          request({
            'Action' => 'DescribeRegions',
            :idempotent => true,
            :parser => Fog::Parsers::AWS::Compute::DescribeRegions.new
          }.merge!(params))
        end
      end

      class Mock
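        # Example (illustrative only): assumes a Fog::AWS::Compute connection, e.g.
        # Fog::AWS::Compute.new(:aws_access_key_id => '...', :aws_secret_access_key => '...').
        # Filters are passed as a hash, exactly as in the Real implementation above.
        #
        #   connection.describe_regions('region-name' => ['us-east-1']).body['regionInfo']
        #   # => [{"regionName"=>"us-east-1", "regionEndpoint"=>"us-east-1.ec2.amazonaws.com"}]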
        def describe_regions(filters = {})
          unless filters.is_a?(Hash)
            Fog::Logger.deprecation("describe_regions with #{filters.class} param is deprecated, use describe_regions('region-name' => []) instead [light_black](#{caller.first})[/]")
            filters = {'region-name' => [*filters]}
          end
          response = Excon::Response.new
          region_info = [
            {"regionName"=>"eu-west-1", "regionEndpoint"=>"eu-west-1.ec2.amazonaws.com"},
            {"regionName"=>"us-east-1", "regionEndpoint"=>"us-east-1.ec2.amazonaws.com"}
          ]
          aliases = {'region-name' => 'regionName', 'endpoint' => 'regionEndpoint'}
          for filter_key, filter_value in filters
            aliased_key = aliases[filter_key]
            region_info = region_info.reject{|region| ![*filter_value].include?(region[aliased_key])}
          end
          response.status = 200
          response.body = {
            'requestId' => Fog::AWS::Mock.request_id,
            'regionInfo' => region_info
          }
          response
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_reserved_instances.rb000066400000000000000000000056571437344660100267060ustar00rootroot00000000000000
module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/describe_reserved_instances'

        # Describe all or specified reserved instances
        #
        # ==== Parameters
        # * filters<~Hash> - List of filters to limit results with
        #
        # ==== Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'reservedInstancesSet'<~Array>:
        # * 'availabilityZone'<~String> - availability zone of the instance
        # * 'duration'<~Integer> - duration of reservation, in seconds
        # * 'fixedPrice'<~Float> - purchase price of reserved instance
        # * 'instanceType'<~String> - type of instance
        # * 'instanceCount'<~Integer> - number of reserved instances
        # * 'productDescription'<~String> - reserved instance description
        # * 'recurringCharges'<~Array>:
        # * 'frequency'<~String> - frequency of a recurring charge while the reservation is active (only Hourly at this time)
        # * 'amount'<~Float> - recurring charge amount
        # * 'reservedInstancesId'<~String> - id of the instance
        # * 'scope'<~String> - scope of the reservation (i.e. 'Availability Zone' or 'Region' - as of version 2016/11/15)
        # * 'start'<~Time> - start time for reservation
        # * 'state'<~String> - state of reserved instance purchase, in [pending-payment, active, payment-failed, retired]
        # * 'usagePrice'<~Float> - usage price of reserved instances, per hour
        # * 'end'<~Time> - time reservation stopped being applied (i.e.
sold or canceled - as of version 2013/10/01) # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeReservedInstances.html] def describe_reserved_instances(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_reserved_instances with #{filters.class} param is deprecated, use describe_reserved_instances('reserved-instances-id' => []) instead [light_black](#{caller.first})[/]") filters = {'reserved-instances-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeReservedInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeReservedInstances.new }.merge!(params)) end end class Mock def describe_reserved_instances(filters = {}) response = Excon::Response.new response.status = 200 response.body = { 'reservedInstancesSet' => self.data[:reserved_instances].values, 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_reserved_instances_offerings.rb000066400000000000000000000102761437344660100307430ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_reserved_instances_offerings' # Describe all or specified reserved instances offerings # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # * filters and/or the following # * 'AvailabilityZone'<~String> - availability zone of offering # * 'InstanceType'<~String> - instance type of offering # * 'InstanceTenancy'<~String> - tenancy of offering in ['default', 'dedicated'] # * 'OfferingType'<~String> - type of offering, in ['Heavy Utilization', 'Medium Utilization', 'Light Utilization'] # * 'ProductDescription'<~String> - description of offering, in ['Linux/UNIX', 'Linux/UNIX (Amazon VPC)', 'Windows', 'Windows (Amazon VPC)'] # * 'MaxDuration'<~Integer> - maximum duration (in seconds) of offering # * 'MinDuration'<~Integer> - minimum duration (in seconds) of offering # * 'MaxResults'<~Integer> - The maximum number of results to return for the request in a single page # * 'NextToken'<~String> - The token to retrieve the next page of results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'reservedInstancesOfferingsSet'<~Array>: # * 'availabilityZone'<~String> - availability zone of offering # * 'duration'<~Integer> - duration, in seconds, of offering # * 'fixedPrice'<~Float> - purchase price of offering # * 'includeMarketplace'<~Boolean> - whether or not to include marketplace offerings # * 'instanceType'<~String> - instance type of offering # * 'offeringType'<~String> - type of offering, in ['Heavy Utilization', 'Medium Utilization', 'Light Utilization'] # * 'productDescription'<~String> - description of offering # * 'reservedInstancesOfferingId'<~String> - id of offering # * 'usagePrice'<~Float> - usage price of offering, per hour # * 'NextToken'<~String> - The token to retrieve the next page of results # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeReservedInstancesOfferings.html] def describe_reserved_instances_offerings(filters = {}) options = {} for key in %w(AvailabilityZone InstanceType InstanceTenancy OfferingType ProductDescription MaxDuration MinDuration MaxResults NextToken) if filters.is_a?(Hash) && filters.key?(key) options[key] = filters.delete(key) end end params = 
Fog::AWS.indexed_filters(filters).merge!(options) request({ 'Action' => 'DescribeReservedInstancesOfferings', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeReservedInstancesOfferings.new }.merge!(params)) end end class Mock def describe_reserved_instances_offerings(filters = {}) response = Excon::Response.new response.status = 200 self.data[:reserved_instances_offerings] ||= [{ 'reservedInstancesOfferingId' => Fog::AWS::Mock.reserved_instances_offering_id, 'instanceType' => 'm1.small', 'availabilityZone' => 'us-east-1d', 'duration' => 31536000, 'fixedPrice' => 350.0, 'offeringType' => 'Medium Utilization', 'usagePrice' => 0.03, 'productDescription' => 'Linux/UNIX', 'instanceTenancy' => 'default', 'currencyCode' => 'USD' }] response.body = { 'reservedInstancesOfferingsSet' => self.data[:reserved_instances_offerings], 'requestId' => Fog::AWS::Mock.request_id, 'nextToken' => (0...64).map { ('a'..'z').to_a[rand(26)] }.join } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_route_tables.rb000066400000000000000000000102331437344660100254740ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_route_tables' # Describe one or more of your route tables. # # ==== Parameters # * RouteTableId<~String> - One or more route table IDs. # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of the request. # * 'routeTableSet'<~Array>: # * 'routeTableId'<~String> - The route table's ID. # * 'vpcId'<~String> - The ID of the VPC for the route table. # * 'routeSet'<~Array>: # * 'destinationCidrBlock'<~String> - The CIDR address block used for the destination match. # * 'gatewayId'<~String> - The ID of a gateway attached to your VPC. # * 'instanceId'<~String> - The ID of a NAT instance in your VPC. # * 'instanceOwnerId'<~String> - The owner of the instance. # * 'networkInterfaceId'<~String> - The network interface ID. # * 'vpcPeeringConnectionId'<~String> - The peering connection ID. # * 'natGatewayId'<~String> - The ID of a NAT gateway attached to your VPC. # * 'state'<~String> - The state of the route. The blackhole state indicates that the route's target isn't available. # * 'origin'<~String> - Describes how the route was created. # * 'associationSet'<~Array>: # * 'RouteTableAssociationId'<~String> - An identifier representing the association between a route table and a subnet. # * 'routeTableId'<~String> - The ID of the route table. # * 'subnetId'<~String> - The ID of the subnet. # * 'main'<~Boolean> - Indicates whether this is the main route table. # * 'propagatingVgwSet'<~Array>: # * 'gatewayID'<~String> - The ID of the virtual private gateway (VGW). # * 'tagSet'<~Array>: # * 'key'<~String> - The tag key. # * 'value'<~String> - The tag value. 
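        #
        # ==== Example
        # Illustrative sketch only (not from the Amazon documentation): assumes an
        # existing Fog::AWS::Compute connection named compute, and a placeholder
        # VPC id. Filter names follow the EC2 DescribeRouteTables filter syntax.
        #
        #   response = compute.describe_route_tables('vpc-id' => ['vpc-12345678'])
        #   response.body['routeTableSet'].each { |rt| puts rt['routeTableId'] }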
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeRouteTables.html] def describe_route_tables(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_route_tables with #{filters.class} param is deprecated, use describe_route_tables('route-table-id' => []) instead [light_black](#{caller.first})[/]") filters = {'route-table-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeRouteTables', :parser => Fog::Parsers::AWS::Compute::DescribeRouteTables.new }.merge!(params)) end end class Mock def describe_route_tables(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_route_tables with #{filters.class} param is deprecated, use describe_route_tables('route-table-id' => []) instead [light_black](#{caller.first})[/]") filters = {'route-table-id' => [*filters]} end display_routes = self.data[:route_tables].dup aliases = { 'route-table-id' => 'routeTableId', 'vpc-id' => 'vpcId' } for filter_key, filter_value in filters filter_attribute = aliases[filter_key] case filter_attribute when 'routeTableId', 'vpcId' display_routes.reject! { |routetable| routetable[filter_attribute] != filter_value } end end display_routes.each do |route| tags = self.data[:tag_sets][route['routeTableId']] route.merge!('tagSet' => tags) if tags end Excon::Response.new( :status => 200, :body => { 'requestId' => Fog::AWS::Mock.request_id, 'routeTableSet' => display_routes } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_security_groups.rb000066400000000000000000000133221437344660100262540ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_security_groups' # Describe all or specified security groups # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # * 'MaxResults'<~Integer> - The maximum number of results to return for the request in a single page # * 'NextToken'<~String> - The token to retrieve the next page of results # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'securityGroupInfo'<~Array>: # * 'groupDescription'<~String> - Description of security group # * 'groupId'<~String> - ID of the security group. 
# * 'groupName'<~String> - Name of security group # * 'ipPermissions'<~Array>: # * 'fromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'groups'<~Array>: # * 'groupName'<~String> - Name of security group # * 'userId'<~String> - AWS User Id of account # * 'ipProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'ipRanges'<~Array>: # * 'cidrIp'<~String> - CIDR range # * 'ipv6Ranges'<~Array>: # * 'cidrIpv6'<~String> - CIDR ipv6 range # * 'toPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # * 'ownerId'<~String> - AWS Access Key Id of the owner of the security group # * 'NextToken'<~String> - The token to retrieve the next page of results # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSecurityGroups.html] def describe_security_groups(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_security_groups with #{filters.class} param is deprecated, use describe_security_groups('group-name' => []) instead [light_black](#{caller.first})[/]") filters = {'group-name' => [*filters]} end options = {} for key in %w[MaxResults NextToken] if filters.is_a?(Hash) && filters.key?(key) options[key] = filters.delete(key) end end params = Fog::AWS.indexed_filters(filters).merge!(options) request({ 'Action' => 'DescribeSecurityGroups', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeSecurityGroups.new }.merge!(params)) end end class Mock def describe_security_groups(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_security_groups with #{filters.class} param is deprecated, use describe_security_groups('group-name' => []) instead [light_black](#{caller.first})[/]") filters = {'group-name' => [*filters]} end response = Excon::Response.new security_group_info = self.data[:security_groups].reject { |k,v| k['amazon-elb-sg'] }.values aliases = { 'description' => 'groupDescription', 'group-name' => 'groupName', 'group-id' => 'groupId', 'owner-id' => 'ownerId' } permission_aliases = { 'cidr' => 'cidrIp', 'from-port' => 'fromPort', 'protocol' => 'ipProtocol', 'to-port' => 'toPort' } security_group_groups = lambda do |security_group| (security_group['ipPermissions'] || []).map do |permission| permission['groups'] end.flatten.compact.uniq end for filter_key, filter_value in filters if permission_key = filter_key.split('ip-permission.')[1] if permission_key == 'group-name' security_group_info = security_group_info.reject do |security_group| !security_group_groups.call(security_group).find do |group| [*filter_value].include?(group['groupName']) end end elsif permission_key == 'group-id' security_group_info = security_group_info.reject do |security_group| !security_group_groups.call(security_group).find do |group| [*filter_value].include?(group['groupId']) end end elsif permission_key == 'user-id' security_group_info = security_group_info.reject do |security_group| !security_group_groups.call(security_group).find do |group| [*filter_value].include?(group['userId']) end end else aliased_key = permission_aliases[filter_key] security_group_info = security_group_info.reject do |security_group| !security_group['ipPermissions'].find do |permission| [*filter_value].include?(permission[aliased_key]) end end end else aliased_key = aliases[filter_key] security_group_info = security_group_info.reject do |security_group| ![*filter_value].include?(security_group[aliased_key]) end end end response.status = 200 response.body = { 'requestId' => 
Fog::AWS::Mock.request_id, 'securityGroupInfo' => security_group_info } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_snapshots.rb000066400000000000000000000121131437344660100250250ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_snapshots' # Describe all or specified snapshots # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # * options<~Hash>: # * 'Owner'<~String> - Owner of snapshot in ['self', 'amazon', account_id] # * 'RestorableBy'<~String> - Account id of user who can create volumes from this snapshot # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'snapshotSet'<~Array>: # * 'encrypted'<~Boolean>: The encryption status of the snapshot. # * 'progress'<~String>: The percentage progress of the snapshot # * 'snapshotId'<~String>: Id of the snapshot # * 'startTime'<~Time>: Timestamp of when snapshot was initiated # * 'status'<~String>: Snapshot state, in ['pending', 'completed'] # * 'volumeId'<~String>: Id of volume that snapshot contains # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html] def describe_snapshots(filters = {}, options = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_snapshots with #{filters.class} param is deprecated, use describe_snapshots('snapshot-id' => []) instead [light_black](#{caller.first})[/]") filters = {'snapshot-id' => [*filters]} end unless options.empty? Fog::Logger.deprecation("describe_snapshots with a second param is deprecated, use describe_snapshots(options) instead [light_black](#{caller.first})[/]") end for key in ['ExecutableBy', 'ImageId', 'Owner', 'RestorableBy'] if filters.key?(key) options[key] = filters.delete(key) end end options['RestorableBy'] ||= 'self' params = Fog::AWS.indexed_filters(filters).merge!(options) request({ 'Action' => 'DescribeSnapshots', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeSnapshots.new }.merge!(params)) end end class Mock def describe_snapshots(filters = {}, options = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_snapshots with #{filters.class} param is deprecated, use describe_snapshots('snapshot-id' => []) instead [light_black](#{caller.first})[/]") filters = {'snapshot-id' => [*filters]} end unless options.empty? 
Fog::Logger.deprecation("describe_snapshots with a second param is deprecated, use describe_snapshots(options) instead [light_black](#{caller.first})[/]") end response = Excon::Response.new snapshot_set = self.data[:snapshots].values if filters.delete('owner-alias') Fog::Logger.warning("describe_snapshots with owner-alias is not mocked [light_black](#{caller.first})[/]") end if (restorable_by = filters.delete('RestorableBy')) && restorable_by != 'self' Fog::Logger.warning("describe_snapshots with RestorableBy other than 'self' (wanted #{restorable_by.inspect}) is not mocked [light_black](#{caller.first})[/]") end snapshot_set = apply_tag_filters(snapshot_set, filters, 'snapshotId') aliases = { 'description' => 'description', 'encrypted' => 'encrypted', 'owner-id' => 'ownerId', 'progress' => 'progress', 'snapshot-id' => 'snapshotId', 'start-time' => 'startTime', 'status' => 'status', 'volume-id' => 'volumeId', 'volume-size' => 'volumeSize' } for filter_key, filter_value in filters aliased_key = aliases[filter_key] snapshot_set = snapshot_set.reject{|snapshot| ![*filter_value].include?(snapshot[aliased_key])} end snapshot_set.each do |snapshot| case snapshot['status'] when 'in progress', 'pending' if Time.now - snapshot['startTime'] >= Fog::Mock.delay * 2 snapshot['progress'] = '100%' snapshot['status'] = 'completed' elsif Time.now - snapshot['startTime'] >= Fog::Mock.delay snapshot['progress'] = '50%' snapshot['status'] = 'in progress' else snapshot['progress'] = '0%' snapshot['status'] = 'in progress' end end end snapshot_set = snapshot_set.map {|snapshot| snapshot.merge('tagSet' => self.data[:tag_sets][snapshot['snapshotId']]) } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'snapshotSet' => snapshot_set } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_spot_datafeed_subscription.rb000066400000000000000000000023321437344660100304130ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/spot_datafeed_subscription' # Describe spot datafeed subscription # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'spotDatafeedSubscription'<~Hash>: # * 'bucket'<~String> - S3 bucket where data is stored # * 'fault'<~Hash>: # * 'code'<~String> - fault code # * 'reason'<~String> - fault reason # * 'ownerId'<~String> - AWS id of account owner # * 'prefix'<~String> - prefix for datafeed items # * 'state'<~String> - state of datafeed subscription # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSpotDatafeedSubscription.html] def describe_spot_datafeed_subscription request({ 'Action' => 'DescribeSpotDatafeedSubscription', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::SpotDatafeedSubscription.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_spot_instance_requests.rb000066400000000000000000000075231437344660100276200ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/spot_instance_requests' # Describe all or specified spot instance requests # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'spotInstanceRequestSet'<~Array>: # * 'createTime'<~Time> - time of instance request creation # * 'instanceId'<~String> - instance id if one 
has been launched to fulfill request # * 'launchedAvailabilityZone'<~String> - availability zone of instance if one has been launched to fulfill request # * 'launchSpecification'<~Hash>: # * 'blockDeviceMapping'<~Hash> - list of block device mappings for instance # * 'groupSet'<~String> - security group(s) for instance # * 'keyName'<~String> - keypair name for instance # * 'imageId'<~String> - AMI for instance # * 'instanceType'<~String> - type for instance # * 'monitoring'<~Boolean> - monitoring status for instance # * 'subnetId'<~String> - VPC subnet ID for instance # * 'productDescription'<~String> - general description of AMI # * 'spotInstanceRequestId'<~String> - id of spot instance request # * 'spotPrice'<~Float> - maximum price for instances to be launched # * 'state'<~String> - spot instance request state # * 'type'<~String> - spot instance request type # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSpotInstanceRequests.html] def describe_spot_instance_requests(filters = {}) params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeSpotInstanceRequests', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::SpotInstanceRequests.new }.merge!(params)) end end class Mock def describe_spot_instance_requests(filters = {}) response = Excon::Response.new spot_requests = self.data[:spot_requests].values if id = Array(filters['spot-instance-request-id']).first spot_requests = spot_requests.select { |r| r['spotInstanceRequestId'] == id } end spot_requests.select { |r| r['instanceId'].nil? }.each do |request| run_instance_options = { 'BlockDeviceMapping' => request['launchSpecification']['blockDeviceMapping'], 'EbsOptimized' => request['launchSpecification']['ebsOptimized'], 'KeyName' => request['launchSpecification']['keyName'], 'SecurityGroupId' => request['launchSpecification']['groupSet'].first, 'SpotInstanceRequestId' => request['spotInstanceRequestId'], 'SubnetId' => request['launchSpecification']['subnetId'] } instances = run_instances(request['launchSpecification']['imageId'], 1,1, run_instance_options).body['instancesSet'] request['instanceId'] = instances.first['instanceId'] request['state'] = 'active' request['fault'] = {'code' => 'fulfilled', 'message' => 'Your Spot request is fulfilled.'} request['launchedAvailabilityZone'] = instances.first['placement']['availabilityZone'] self.data[:spot_requests][request['spotInstanceRequestId']] = request end response.body = {'spotInstanceRequestSet' => spot_requests, 'requestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_spot_price_history.rb000066400000000000000000000122261437344660100267400ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_spot_price_history' # Describe all or specified spot price history # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # * filters and/or the following # * 'AvailabilityZone'<~String> - availability zone of offering # * 'InstanceType'<~Array> - instance types of offering # * 'ProductDescription'<~Array> - basic product descriptions # * 'StartTime'<~Time> - The date and time, up to the past 90 days, from which to start retrieving the price history data # * 'EndTime'<~Time> - The date and time, up to the current date, from which to stop retrieving the price history data # * 'MaxResults'<~Integer> - The maximum number of results to return for the request in a 
single page # * 'NextToken'<~String> - The token to retrieve the next page of results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'spotPriceHistorySet'<~Array>: # * 'availabilityZone'<~String> - availability zone for instance # * 'instanceType'<~String> - the type of instance # * 'productDescription'<~String> - general description of AMI # * 'spotPrice'<~Float> - maximum price to launch one or more instances # * 'timestamp'<~Time> - date and time of request creation # * 'nextToken'<~String> - token to retrieve the next page of results # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSpotPriceHistory.html] def describe_spot_price_history(filters = {}) params = {} for key in %w(AvailabilityZone StartTime EndTime MaxResults NextToken) if filters.is_a?(Hash) && filters.key?(key) params[key] = filters.delete(key) end end if instance_types = filters.delete('InstanceType') params.merge!(Fog::AWS.indexed_param('InstanceType', [*instance_types])) end if product_descriptions = filters.delete('ProductDescription') params.merge!(Fog::AWS.indexed_param('ProductDescription', [*product_descriptions])) end params.merge!(Fog::AWS.indexed_filters(filters)) request({ 'Action' => 'DescribeSpotPriceHistory', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeSpotPriceHistory.new }.merge!(params)) end end class Mock def describe_spot_price_history(filters = {}) params = {} spot_price_history_set = [] response = Excon::Response.new response.status = 200 for key in %w(StartTime EndTime NextToken) if filters.is_a?(Hash) && filters.key?(key) Fog::Logger.warning("#{key} filters are not yet mocked [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end end for key in %w(AvailabilityZone MaxResults) if filters.is_a?(Hash) && filters.key?(key) params[key] = filters.delete(key) end end all_zones = describe_availability_zones.body['availabilityZoneInfo'].map { |z| z['zoneName'] } zones = params['AvailabilityZone'] if (!zones.nil? && !all_zones.include?([*zones].shuffle.first)) az_error = "InvalidParameterValue => Invalid availability zone: [#{zones}]" raise Fog::AWS::Compute::Error, az_error end zones = all_zones if zones.nil? max_results = params['MaxResults'] || Fog::Mock.random_numbers(3).to_i if !(max_results.is_a?(Integer) && max_results > 0) max_results_error = "InvalidParameterValue => Invalid value '#{max_results}' for maxResults" raise Fog::AWS::Compute::Error, max_results_error end all_instance_types = flavors.map { |f| f.id } instance_types = filters.delete('InstanceType') || all_instance_types product_descriptions = filters.delete('ProductDescription') || Fog::AWS::Mock.spot_product_descriptions max_results.times do spot_price_history_set << { 'instanceType' => [*instance_types].shuffle.first, 'productDescription' => [*product_descriptions].shuffle.first, 'spotPrice' => ((rand + [0 , 1].shuffle.first) * 10000).round / 10000.0, 'timestamp' => Time.now - (1 + rand(86400)), 'availabilityZone' => [*zones].shuffle.first } end spot_price_history_set.sort! 
{ |x,y| x['timestamp'] <=> y['timestamp'] } response.body = { 'spotPriceHistorySet' => spot_price_history_set, 'requestId' => Fog::AWS::Mock.request_id, 'nextToken' => nil } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_subnets.rb000066400000000000000000000054541437344660100245000ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_subnets' # Describe all or specified subnets # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'subnetSet'<~Array>: # * 'subnetId'<~String> - The Subnet's ID # * 'state'<~String> - The current state of the Subnet. ['pending', 'available'] # * 'vpcId'<~String> - The ID of the VPC the subnet is in # * 'cidrBlock'<~String> - The CIDR block the Subnet covers. # * 'availableIpAddressCount'<~Integer> - The number of unused IP addresses in the subnet (the IP addresses for any # stopped instances are considered unavailable) # * 'availabilityZone'<~String> - The Availability Zone the subnet is in. # * 'tagSet'<~Array>: Tags assigned to the resource. # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # * 'mapPublicIpOnLaunch'<~Boolean> - Indicates whether instances launched in this subnet receive a public IPv4 address. # * 'defaultForAz'<~Boolean> - Indicates whether this is the default subnet for the Availability Zone. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/index.html?ApiReference-query-DescribeSubnets.html] def describe_subnets(filters = {}) unless filters.is_a?(Hash) Fog::Logger.warning("describe_subnets with #{filters.class} param is deprecated, use describe_subnets('subnet-id' => []) instead [light_black](#{caller.first})[/]") filters = {'subnet-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeSubnets', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeSubnets.new }.merge!(params)) end end class Mock def describe_subnets(filters = {}) subnets = self.data[:subnets] # Transition from pending to available subnets.each do |subnet| case subnet['state'] when 'pending' subnet['state'] = 'available' end end if filters['subnet-id'] subnets = subnets.reject {|subnet| subnet['subnetId'] != filters['subnet-id']} end Excon::Response.new( :status => 200, :body => { 'requestId' => Fog::AWS::Mock.request_id, 'subnetSet' => subnets } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_tags.rb000066400000000000000000000063371437344660100237540ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_tags' # Describe all or specified tags # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'tagSet'<~Array>: # * 'resourceId'<~String> - id of resource tag belongs to # * 'resourceType'<~String> - type of resource tag belongs to # * 'key'<~String> - Tag's key # * 'value'<~String> - Tag's value # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeTags.html] def describe_tags(filters = {}) params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeTags', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeTags.new 
}.merge!(params)) end end class Mock def describe_tags(filters = {}) response = Excon::Response.new tag_set = deep_clone(self.data[:tags]) aliases = { 'key' => 'key', 'resource-id' => 'resourceId', 'resource-type' => 'resourceType', 'value' => 'value' } for filter_key, filter_value in filters filter_attribute = aliases[filter_key] case filter_attribute when 'key' tag_set.reject! { |k,_| k != filter_value } when 'value' tag_set.each { |k,values| values.reject! { |v, _| v != filter_value } } when 'resourceId' filter_resources(tag_set, 'resourceId', filter_value) when 'resourceType' filter_resources(tag_set, 'resourceType', filter_value) end end tagged_resources = [] tag_set.each do |key, values| values.each do |value, resources| resources.each do |resource| tagged_resources << resource.merge({ 'key' => key, 'value' => value }) end end end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'tagSet' => tagged_resources } response end private def filter_resources(tag_set, filter, value) value_hash_list = tag_set.values value_hash_list.each do |value_hash| value_hash.each do |_, resource_list| resource_list.reject! { |resource| resource[filter] != value } end end end def deep_clone(obj) case obj when Hash obj.reduce({}) { |h, pair| h[pair.first] = deep_clone(pair.last); h } when Array obj.map { |o| deep_clone(o) } else obj end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_volume_status.rb000066400000000000000000000025771437344660100257320ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_volume_status' # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumeStatus.html def describe_volume_status(filters = {}) raise ArgumentError.new("Filters must be a hash, but is a #{filters.class}.") unless filters.is_a?(Hash) next_token = filters.delete('nextToken') || filters.delete('NextToken') max_results = filters.delete('maxResults') || filters.delete('MaxResults') params = Fog::AWS.indexed_request_param('VolumeId', filters.delete('VolumeId')) params.merge!(Fog::AWS.indexed_filters(filters)) params['NextToken'] = next_token if next_token params['MaxResults'] = max_results if max_results request({ 'Action' => 'DescribeVolumeStatus', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeVolumeStatus.new }.merge!(params)) end end class Mock def describe_volume_status(filters = {}) response = Excon::Response.new response.status = 200 response.body = { 'volumeStatusSet' => [], 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_volumes.rb000066400000000000000000000117401437344660100245020ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_volumes' # Describe all or specified volumes. 
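        #
        # Example (illustrative only): assumes an existing Fog::AWS::Compute
        # connection named compute; the volume ids below are placeholders. Filters
        # are passed as a hash of EC2 filter name to a value or array of values.
        #
        #   compute.describe_volumes('status' => 'available')
        #   compute.describe_volumes('volume-id' => ['vol-11111111', 'vol-22222222'])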
# # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'volumeSet'<~Array>: # * 'availabilityZone'<~String> - Availability zone for volume # * 'createTime'<~Time> - Timestamp for creation # * 'encrypted'<~Boolean> - Indicates whether the volume will be encrypted # * 'iops'<~Integer> - Number of IOPS volume supports # * 'size'<~Integer> - Size in GiBs for volume # * 'snapshotId'<~String> - Snapshot volume was created from, if any # * 'status'<~String> - State of volume # * 'volumeId'<~String> - Reference to volume # * 'volumeType'<~String> - Type of volume # * 'attachmentSet'<~Array>: # * 'attachmentTime'<~Time> - Timestamp for attachment # * 'deleteOnTermination'<~Boolean> - Whether or not to delete volume on instance termination # * 'device'<~String> - How value is exposed to instance # * 'instanceId'<~String> - Reference to attached instance # * 'status'<~String> - Attachment state # * 'volumeId'<~String> - Reference to volume # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeVolumes.html] def describe_volumes(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_volumes with #{filters.class} param is deprecated, use describe_volumes('volume-id' => []) instead [light_black](#{caller.first})[/]") filters = {'volume-id' => [*filters]} end params = Fog::AWS.indexed_filters(filters) request({ 'Action' => 'DescribeVolumes', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeVolumes.new }.merge!(params)) end end class Mock def describe_volumes(filters = {}) unless filters.is_a?(Hash) Fog::Logger.deprecation("describe_volumes with #{filters.class} param is deprecated, use describe_volumes('volume-id' => []) instead [light_black](#{caller.first})[/]") filters = {'volume-id' => [*filters]} end response = Excon::Response.new volume_set = self.data[:volumes].values volume_set = apply_tag_filters(volume_set, filters, 'volumeId') aliases = { 'availability-zone' => 'availabilityZone', 'create-time' => 'createTime', 'encrypted' => 'encrypted', 'size' => 'size', 'snapshot-id' => 'snapshotId', 'status' => 'status', 'volume-id' => 'volumeId' } attachment_aliases = { 'attach-time' => 'attachTime', 'delete-on-termination' => 'deleteOnTermination', 'device' => 'device', 'instance-id' => 'instanceId', 'status' => 'status' } for filter_key, filter_value in filters if attachment_key = filter_key.split('attachment.')[1] aliased_key = attachment_aliases[filter_key] volume_set = volume_set.reject{|volume| !volume['attachmentSet'].find {|attachment| [*filter_value].include?(attachment[aliased_key])}} else aliased_key = aliases[filter_key] volume_set = volume_set.reject{|volume| ![*filter_value].include?(volume[aliased_key])} end end volume_set.each do |volume| case volume['status'] when 'attaching' if Time.now - volume['attachmentSet'].first['attachTime'] >= Fog::Mock.delay volume['attachmentSet'].first['status'] = 'in-use' volume['status'] = 'in-use' end when 'creating' if Time.now - volume['createTime'] >= Fog::Mock.delay volume['status'] = 'available' end when 'deleting' if Time.now - self.data[:deleted_at][volume['volumeId']] >= Fog::Mock.delay self.data[:deleted_at].delete(volume['volumeId']) self.data[:volumes].delete(volume['volumeId']) end end end volume_set = volume_set.reject {|volume| !self.data[:volumes][volume['volumeId']]} volume_set = volume_set.map {|volume| volume.merge('tagSet' => 
self.data[:tag_sets][volume['volumeId']]) } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'volumeSet' => volume_set } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_volumes_modifications.rb000066400000000000000000000076351437344660100274220ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_volumes_modifications' # Reports the current modification status of EBS volumes. # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash> # * 'volumeModificationSet'<~Array>: # * 'targetIops'<~Integer> - Target IOPS rate of the volume being modified. # * 'originalIops'<~Integer> - Original IOPS rate of the volume being modified. # * 'modificationState'<~String> - Current state of modification. Modification state is null for unmodified volumes. # * 'targetSize'<~Integer> - Target size of the volume being modified. # * 'targetVolumeType'<~String> - Target EBS volume type of the volume being modified. # * 'volumeId'<~String> - ID of the volume being modified. # * 'progress'<~Integer> - Modification progress from 0 to 100%. # * 'startTime'<~Time> - Modification start time # * 'endTime'<~Time> - Modification end time # * 'originalSize'<~Integer> - Original size of the volume being modified. # * 'originalVolumeType'<~String> - Original EBS volume type of the volume being modified. def describe_volumes_modifications(filters = {}) params = {} if volume_id = filters.delete('volume-id') params.merge!(Fog::AWS.indexed_param('VolumeId.%d', [*volume_id])) end params.merge!(Fog::AWS.indexed_filters(filters)) request({ 'Action' => 'DescribeVolumesModifications', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DescribeVolumesModifications.new }.merge(params)) end end class Mock def describe_volumes_modifications(filters = {}) response = Excon::Response.new modification_set = self.data[:volume_modifications].values aliases = { 'volume-id' => 'volumeId', 'modification-state' => 'modificationState', 'target-size' => 'targetSize', 'target-iops' => 'targetIops', 'target-volume-type' => 'targetVolumeType', 'original-size' => 'originalSize', 'original-iops' => 'originalIops', 'original-volume-type' => 'originalVolumeType', 'start-time' => 'startTime' } attribute_aliases = { 'targetSize' => 'size', 'targetVolumeType' => 'volumeType', 'targetIops' => 'iops' } for filter_key, filter_value in filters aliased_key = aliases[filter_key] modification_set = modification_set.reject { |m| ![*filter_value].include?(m[aliased_key]) } end modification_set.each do |modification| case modification['modificationState'] when 'modifying' volume = self.data[:volumes][modification['volumeId']] modification['modificationState'] = 'optimizing' %w(targetSize targetIops targetVolumeType).each do |attribute| aliased_attribute = attribute_aliases[attribute] volume[aliased_attribute] = modification[attribute] if modification[attribute] end self.data[:volumes][modification['volumeId']] = volume when 'optimizing' modification['modificationState'] = 'completed' modification['endTime'] = Time.now end end response.body = {'requestId' => Fog::AWS::Mock.request_id, 'volumeModificationSet' => modification_set} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_vpc_attribute.rb000066400000000000000000000041461437344660100256650ustar00rootroot00000000000000module Fog module AWS class Compute class 
Real require 'fog/aws/parsers/compute/describe_vpc_attribute' # Describes a vpc attribute value # # ==== Parameters # * vpc_id<~String> - The ID of the VPC you want to describe an attribute of # * attribute<~String> - The attribute to describe, must be one of 'enableDnsSupport' or 'enableDnsHostnames' # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'vpcId'<~String> - The ID of the VPC # * 'enableDnsSupport'<~Boolean> - Flag indicating whether DNS resolution is enabled for the VPC (if requested) # * 'enableDnsHostnames'<~Boolean> - Flag indicating whether the instances launched in the VPC get DNS hostnames (if requested) # # (Amazon API Reference)[http://docs.amazonwebservices.com/AWSEC2/2014-02-01/APIReference/ApiReference-query-DescribeVpcAttribute.html] def describe_vpc_attribute(vpc_id, attribute) request( 'Action' => 'DescribeVpcAttribute', 'VpcId' => vpc_id, 'Attribute' => attribute, :parser => Fog::Parsers::AWS::Compute::DescribeVpcAttribute.new ) end end class Mock def describe_vpc_attribute(vpc_id, attribute) response = Excon::Response.new if vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'vpcId' => vpc_id } case attribute when 'enableDnsSupport', 'enableDnsHostnames' response.body[attribute] = vpc[attribute] else raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end response else raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_vpc_classic_link.rb000066400000000000000000000045751437344660100263260ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_vpc_classic_link' # Describes the ClassicLink status of one or more VPCs. # # ==== Parameters # * options<~Hash> # * vpc_ids<~Array> - An array of vpc ids to restruct the results to # * filters<~Hash> - Filters to restrict the results to. 
Recognises is-classic-link-enabled in addition # to tag-key, tag-value and tag:key # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'vpcSet'<~Array> - array of VpcClassicLink # * 'vpcId'<~String> # * 'classicLinkEnabled'<~Boolean> # * 'tagSet'<~Hash> # # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcClassicLink.html def describe_vpc_classic_link(options={}) params = {} params.merge!(Fog::AWS.indexed_param('VpcId', options[:vpc_ids])) if options[:vpc_ids] params.merge!(Fog::AWS.indexed_filters(options[:filters])) if options[:filters] request({ 'Action' => 'DescribeVpcClassicLink', :parser => Fog::Parsers::AWS::Compute::DescribeVpcClassicLink.new }.merge(params)) end end class Mock def describe_vpc_classic_link(options={}) response = Excon::Response.new vpcs = self.data[:vpcs] if vpc_ids = options[:vpc_ids] vpcs = vpc_ids.collect do |vpc_id| vpc = vpcs.find{ |v| v['vpcId'] == vpc_id } raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") unless vpc vpc end end vpcs = apply_tag_filters(vpcs, options[:filters], 'vpcId') if options[:filters] response.status = 200 vpc_data = vpcs.collect do |vpc| { 'vpcId' => vpc['vpcId'], 'classicLinkEnabled' => vpc['classicLinkEnabled'], 'tagSet' => self.data[:tag_sets][vpc['vpcId']] || {} } end response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'vpcSet' => vpc_data } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_vpc_classic_link_dns_support.rb000066400000000000000000000036351437344660100307620ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_vpc_classic_link_dns_support' # escribes the ClassicLink DNS support status of one or more VPCs # # ==== Parameters # * options<~Hash> # * vpc_ids<~Array> - An array of vpc ids to restrict results to # * 'MaxResults' - Maximum number of items to return # * 'NextToken' - The token for the next set of items to return # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'vpcs'<~Array> - Information about the ClassicLink DNS support status of the VPCs # * 'vpcId'<~String> # * 'classicLinkDnsSupported'<~Boolean> # # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcClassicLinkDnsSupport.html def describe_vpc_classic_link_dns_support(options={}) params = {} params.merge!(Fog::AWS.indexed_param('VpcIds', options[:vpc_ids])) if options[:vpc_ids] request({ 'Action' => 'DescribeVpcClassicLinkDnsSupport', 'MaxResults' => options['MaxResults'], 'NextToken' => options['NextToken'], :parser => Fog::Parsers::AWS::Compute::DescribeVpcClassicLinkDnsSupport.new }.merge(params)) end end class Mock def describe_vpc_classic_link_dns_support(options={}) response = Excon::Response.new vpcs = self.data[:vpcs] if options[:vpc_ids] vpcs = vpcs.select { |v| options[:vpc_ids].include?(v['vpcId']) } end response.body = {'vpcs' => vpcs.map { |v| {"vpcId" => v['vpcId'], "classicLinkDnsSupported" => v['classicLinkDnsSupport']} } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/describe_vpcs.rb000066400000000000000000000047561437344660100237740ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/describe_vpcs' # Describe all or specified vpcs # # ==== Parameters # * filters<~Hash> - List of filters to limit results with # # === Returns # * 
response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'vpcSet'<~Array>:
        # * 'vpcId'<~String> - The VPC's ID
        # * 'state'<~String> - The current state of the VPC. ['pending', 'available']
        # * 'cidrBlock'<~String> - The CIDR block the VPC covers.
        # * 'dhcpOptionsId'<~String> - The ID of the set of DHCP options.
        # * 'tagSet'<~Array>: Tags assigned to the resource.
        # * 'key'<~String> - Tag's key
        # * 'value'<~String> - Tag's value
        # * 'instanceTenancy'<~String> - The allowed tenancy of instances launched into the VPC.
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2011-07-15/APIReference/index.html?ApiReference-query-DescribeVpcs.html]
        def describe_vpcs(filters = {})
          unless filters.is_a?(Hash)
            Fog::Logger.warning("describe_vpcs with #{filters.class} param is deprecated, use describe_vpcs('vpc-id' => []) instead [light_black](#{caller.first})[/]")
            filters = {'vpc-id' => [*filters]}
          end
          params = Fog::AWS.indexed_filters(filters)
          request({
            'Action' => 'DescribeVpcs',
            :idempotent => true,
            :parser => Fog::Parsers::AWS::Compute::DescribeVpcs.new
          }.merge!(params))
        end
      end

      class Mock
        def describe_vpcs(filters = {})
          vpcs = self.data[:vpcs]
          vpcs = apply_tag_filters(vpcs, filters, 'vpcId')

          # Transition from pending to available
          vpcs.each do |vpc|
            case vpc['state']
            when 'pending'
              vpc['state'] = 'available'
            end
          end

          if filters['vpc-id']
            vpcs = vpcs.reject {|vpc| vpc['vpcId'] != filters['vpc-id']}
          end

          vpcs.each do |vpc|
            tags = self.data[:tag_sets][vpc['vpcId']]
            vpc.merge!('tagSet' => tags) if tags
          end

          Excon::Response.new(
            :status => 200,
            :body => {
              'requestId' => Fog::AWS::Mock.request_id,
              'vpcSet' => vpcs
            }
          )
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/detach_classic_link_vpc.rb000066400000000000000000000043311437344660100257640ustar00rootroot00000000000000module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/basic'

        # Unlinks (detaches) a linked EC2-Classic instance from a ClassicLink-enabled VPC
        #
        # ==== Parameters
        # * vpc_id<~String> - The ID of the VPC to which the instance is linked.
        # * instance_id<~String> - The ID of the EC2-Classic instance to unlink from the VPC.
        # * dry_run<~Boolean> - defaults to false
        #
        # === Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'return'<~Boolean> - Whether the request succeeded
        #
        # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DetachClassicLinkVpc.html]
        def detach_classic_link_vpc(instance_id, vpc_id, dry_run=false)
          request(
            'Action' => 'DetachClassicLinkVpc',
            'VpcId' => vpc_id,
            'InstanceId'=> instance_id,
            'DryRun' => dry_run,
            :parser => Fog::Parsers::AWS::Compute::Basic.new
          )
        end
      end

      class Mock
        def detach_classic_link_vpc(instance_id, vpc_id, dry_run=false)
          response = Excon::Response.new

          vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id }
          instance = self.data[:instances][instance_id]
          if vpc && instance
            response.status = 200
            response.body = {
              'requestId' => Fog::AWS::Mock.request_id,
              'return' => true
            }
            unless dry_run
              instance['classicLinkSecurityGroups'] = nil
              instance['classicLinkVpcId'] = nil
            end
            response
          elsif !instance
            raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist.")
          elsif !vpc
            raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist.")
          end
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/detach_internet_gateway.rb000066400000000000000000000035051437344660100260310ustar00rootroot00000000000000module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/basic'

        # Detaches an Internet gateway from a VPC, disabling connectivity between the Internet and the VPC
        #
        # ==== Parameters
        # * internet_gateway_id<~String> - The ID of the Internet gateway to detach
        # * vpc_id<~String> - The ID of the VPC
        #
        # ==== Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'return'<~Boolean> - Returns true if the request succeeds.
        #
        # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DetachInternetGateway.html]
        def detach_internet_gateway(internet_gateway_id, vpc_id)
          request(
            'Action' => 'DetachInternetGateway',
            'InternetGatewayId' => internet_gateway_id,
            'VpcId' => vpc_id,
            :idempotent => true,
            :parser => Fog::Parsers::AWS::Compute::Basic.new
          )
        end
      end

      class Mock
        def detach_internet_gateway(internet_gateway_id, vpc_id)
          response = Excon::Response.new
          if internet_gateway_id && vpc_id
            response.status = 200
            response.body = {
              'requestId' => Fog::AWS::Mock.request_id,
              'return' => true
            }
            response
          else
            message = 'MissingParameter => '
            if !internet_gateway_id
              message << 'The request must contain the parameter internet_gateway_id'
            elsif !vpc_id
              message << 'The request must contain the parameter vpc_id'
            end
            raise Fog::AWS::Compute::Error.new(message)
          end
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/compute/detach_network_interface.rb000066400000000000000000000033151437344660100261700ustar00rootroot00000000000000module Fog
  module AWS
    class Compute
      class Real
        require 'fog/aws/parsers/compute/basic'

        # Detaches a network interface.
        #
        # ==== Parameters
        # * attachment_id<~String> - ID of the attachment to detach
        # * force<~Boolean> - Set to true to force a detachment
        #
        # === Returns
        # * response<~Excon::Response>:
        # * body<~Hash>:
        # * 'requestId'<~String> - Id of request
        # * 'return'<~Boolean> - Returns true if the request succeeds.
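        #
        # ==== Example (illustrative; the attachment id below is hypothetical)
        #   compute.detach_network_interface('eni-attach-0123456789abcdef0', true)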
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-DetachNetworkInterface.html] def detach_network_interface(attachment_id, force = false) request( 'Action' => 'DetachNetworkInterface', 'AttachmentId' => attachment_id, 'Force' => force, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def detach_network_interface(attachment_id, force = false) response = Excon::Response.new nic_id = self.data[:network_interfaces].select { |k,v| v['attachment']['attachmentId'] == attachment_id} .first.first if nic_id self.data[:network_interfaces][nic_id]["attachment"] = {} response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network interface '#{network_interface_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/detach_volume.rb000066400000000000000000000045541437344660100237740ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/detach_volume' # Detach an Amazon EBS volume from a running instance # # ==== Parameters # * volume_id<~String> - Id of amazon EBS volume to associate with instance # * options<~Hash>: # * 'Device'<~String> - Specifies how the device is exposed to the instance (e.g. "/dev/sdh") # * 'Force'<~Boolean> - If true forces detach, can cause data loss/corruption # * 'InstanceId'<~String> - Id of instance to associate volume with # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'attachTime'<~Time> - Time of attachment was initiated at # * 'device'<~String> - Device as it is exposed to the instance # * 'instanceId'<~String> - Id of instance for volume # * 'requestId'<~String> - Id of request # * 'status'<~String> - Status of volume # * 'volumeId'<~String> - Reference to volume # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DetachVolume.html] def detach_volume(volume_id, options = {}) request({ 'Action' => 'DetachVolume', 'VolumeId' => volume_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::DetachVolume.new }.merge!(options)) end end class Mock def detach_volume(volume_id, options = {}) response = Excon::Response.new response.status = 200 if (volume = self.data[:volumes][volume_id]) if !volume['attachmentSet'].empty? 
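              # Mock behaviour: drop the most recent attachment and mark the volume
              # available, mirroring the data a real DetachVolume call would return.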
data = volume['attachmentSet'].pop volume['status'] = 'available' response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response else # real response has spacing issue below raise Fog::AWS::Compute::Error.new("IncorrectState => Volume '#{volume_id}'is in the 'available' state.") end else raise Fog::AWS::Compute::NotFound.new("The volume '#{volume_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/disable_vpc_classic_link.rb000066400000000000000000000030541437344660100261400ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # disavbles classic link for a vpc # # ==== Parameters # * vpc_id<~String> - The ID of the VPC you want to describe an attribute of # * dry_run<~Boolean> - defaults to false # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Whether the request succeeded # # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisableVpcClassicLink.html] def disable_vpc_classic_link(vpc_id, dry_run=false) request( 'Action' => 'DisableVpcClassicLink', 'VpcId' => vpc_id, 'DryRun' => dry_run, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def disable_vpc_classic_link(vpc_id, dry_run=false) response = Excon::Response.new if vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } vpc['classicLinkEnabled'] = false unless dry_run response else raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/disable_vpc_classic_link_dns_support.rb000066400000000000000000000025711437344660100306030ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Disables DNS hostname resolution for ClassicLink # # ==== Parameters # * vpc_id<~String> - The ID of the ClassicLink-enabled VPC. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'return'<~Boolean> - Whether the request succeeded # # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisableVpcClassicLinkDnsSupport.html def disable_vpc_classic_link_dns_support(vpc_id) request( 'Action' => 'DisableVpcClassicLinkDnsSupport', 'VpcId' => vpc_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def disable_vpc_classic_link_dns_support(vpc_id) response = Excon::Response.new unless vpc = self.data[:vpcs].find { |v| v['vpcId'] == vpc_id } raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") end vpc['classicLinkDnsSupport'] = false response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/disassociate_address.rb000066400000000000000000000042771437344660100253370ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Disassociate an elastic IP address from its instance (if any) # # ==== Parameters # * public_ip<~String> - Public ip to assign to instance # * association_id<~String> - Id associating eip to an network interface # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
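        #
        # ==== Example (illustrative; the address and association id are hypothetical)
        #   compute.disassociate_address('203.0.113.10')                     # EC2-Classic
        #   compute.disassociate_address(nil, 'eipassoc-0123456789abcdef0')  # VPC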
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DisassociateAddress.html] def disassociate_address(public_ip=nil, association_id=nil) request( 'Action' => 'DisassociateAddress', 'PublicIp' => public_ip, 'AssociationId' => association_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def disassociate_address(public_ip, association_id=nil) response = Excon::Response.new response.status = 200 if address = self.data[:addresses][public_ip] if address['allocationId'] && association_id.nil? raise Fog::AWS::Compute::Error.new("InvalidParameterValue => You must specify an association id when unmapping an address from a VPC instance") end instance_id = address['instanceId'] if instance = self.data[:instances][instance_id] instance['ipAddress'] = instance['originalIpAddress'] instance['dnsName'] = Fog::AWS::Mock.dns_name_for(instance['ipAddress']) end address['instanceId'] = nil response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::Error.new("AuthFailure => The address '#{public_ip}' does not belong to you.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/disassociate_route_table.rb000066400000000000000000000040721437344660100262100ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Disassociates a subnet from a route table. # # ==== Parameters # * AssociationId<~String> - The association ID representing the current association between the route table and subnet. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - The ID of the request. # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DisassociateRouteTable.html] def disassociate_route_table(association_id) request( 'Action' => 'DisassociateRouteTable', 'AssociationId' => association_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def disassociate_route_table(association_id) assoc_array = nil routetable = self.data[:route_tables].find { |routetable| assoc_array = routetable["associationSet"].find { |association| association['routeTableAssociationId'].eql? association_id } } if !assoc_array.nil? && assoc_array['main'] == false routetable['associationSet'].delete(assoc_array) response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response elsif assoc_array.nil? 
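            # No route table holds an association with this id, so report it as missing.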
raise Fog::AWS::Compute::NotFound.new("The association ID '#{association_id}' does not exist") elsif assoc_array['main'] == true raise Fog::AWS::Compute::Error, "InvalidParameterValue => cannot disassociate the main route table association #{association_id}" end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/enable_vpc_classic_link.rb000066400000000000000000000030451437344660100257630ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # enables classic link for a vpc # # ==== Parameters # * vpc_id<~String> - The ID of the VPC you want to describe an attribute of # * dry_run<~Boolean> - defaults to false # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Whether the request succeeded # # (Amazon API Reference)[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EnableVpcClassicLink.html] def enable_vpc_classic_link(vpc_id, dry_run=false) request( 'Action' => 'EnableVpcClassicLink', 'VpcId' => vpc_id, 'DryRun' => dry_run, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def enable_vpc_classic_link(vpc_id, dry_run=false) response = Excon::Response.new if vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } vpc['classicLinkEnabled'] = true unless dry_run response else raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/enable_vpc_classic_link_dns_support.rb000066400000000000000000000026041437344660100304230ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Enables a VPC to support DNS hostname resolution for ClassicLink # # ==== Parameters # * vpc_id<~String> - The ID of the ClassicLink-enabled VPC. 
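        #
        # ==== Example (illustrative; the vpc id is hypothetical)
        #   compute.enable_vpc_classic_link_dns_support('vpc-0123456789abcdef0')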
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'return'<~Boolean> - Whether the request succeeded # # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EnableVpcClassicLinkDnsSupport.html def enable_vpc_classic_link_dns_support(vpc_id) request( 'Action' => 'EnableVpcClassicLinkDnsSupport', 'VpcId' => vpc_id, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def enable_vpc_classic_link_dns_support(vpc_id) response = Excon::Response.new unless vpc = self.data[:vpcs].find { |v| v['vpcId'] == vpc_id } raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist") end vpc['classicLinkDnsSupport'] = true response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/get_console_output.rb000066400000000000000000000033621437344660100250720ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/get_console_output' # Retrieve console output for specified instance # # ==== Parameters # * instance_id<~String> - Id of instance to get console output from # # ==== Returns # # * response<~Excon::Response>: # * body<~Hash>: # * 'instanceId'<~String> - Id of instance # * 'output'<~String> - Console output # * 'requestId'<~String> - Id of request # * 'timestamp'<~Time> - Timestamp of last update to output # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-GetConsoleOutput.html] def get_console_output(instance_id) request( 'Action' => 'GetConsoleOutput', 'InstanceId' => instance_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::GetConsoleOutput.new ) end end class Mock def get_console_output(instance_id) response = Excon::Response.new if instance = self.data[:instances][instance_id] response.status = 200 response.body = { 'instanceId' => instance_id, 'output' => (Time.now - instance['launchTime'] >= Fog::Mock.delay) ? nil : Fog::AWS::Mock.console_output, 'requestId' => Fog::AWS::Mock.request_id, 'timestamp' => Time.now } response else; raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/get_password_data.rb000066400000000000000000000035271437344660100246460ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/get_password_data' # Retrieves the encrypted administrator password for an instance running Windows. # # ==== Parameters # * instance_id<~String> - A Windows instance ID # # ==== Returns # # * response<~Excon::Response>: # * body<~Hash>: # * 'instanceId'<~String> - Id of instance # * 'passwordData'<~String> - The encrypted, base64-encoded password of the instance. 
# * 'requestId'<~String> - Id of request # * 'timestamp'<~Time> - Timestamp of last update to output # # See http://docs.amazonwebservices.com/AWSEC2/2010-08-31/APIReference/index.html?ApiReference-query-GetPasswordData.html # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-GetPasswordData.html] def get_password_data(instance_id) request( 'Action' => 'GetPasswordData', 'InstanceId' => instance_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::GetPasswordData.new ) end end class Mock def get_password_data(instance_id) response = Excon::Response.new if instance = self.data[:instances][instance_id] response.status = 200 response.body = { 'instanceId' => instance_id, 'passwordData' => nil, 'requestId' => Fog::AWS::Mock.request_id, 'timestamp' => Time.now } response else; raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/import_key_pair.rb000066400000000000000000000034321437344660100243440ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/import_key_pair' # Import an existing public key to create a new key pair # # ==== Parameters # * key_name<~String> - Unique name for key pair. # * public_key_material<~String> - RSA public key # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'keyFingerprint'<~String> - SHA-1 digest of DER encoded private key # * 'keyName'<~String> - Name of key # * 'requestId'<~String> - Id of request # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ImportKeyPair.html] def import_key_pair(key_name, public_key_material) request( 'Action' => 'ImportKeyPair', 'KeyName' => key_name, 'PublicKeyMaterial' => Base64::encode64(public_key_material), :parser => Fog::Parsers::AWS::Compute::ImportKeyPair.new ) end end class Mock def import_key_pair(key_name, public_key_material) response = Excon::Response.new unless self.data[:key_pairs][key_name] response.status = 200 data = { 'keyFingerprint' => Fog::AWS::Mock.key_fingerprint, 'keyName' => key_name } self.data[:key_pairs][key_name] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response else raise Fog::AWS::Compute::Error.new("InvalidKeyPair.Duplicate => The keypair '#{key_name}' already exists.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_image_attribute.rb000066400000000000000000000057111437344660100256650ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modify image attributes # # ==== Parameters # * image_id<~String> - Id of machine image to modify # * attributes<~Hash>: # * 'Add.Group'<~Array> - One or more groups to grant launch permission to # * 'Add.UserId'<~Array> - One or more account ids to grant launch permission to # * 'Description.Value' - New description for image # * 'ProductCode'<~Array> - One or more product codes to add to image (these can not be removed) # * 'Remove.Group'<~Array> - One or more groups to revoke launch permission from # * 'Remove.UserId'<~Array> - One or more account ids to revoke launch permission from # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ModifyImageAttribute.html] # def modify_image_attribute(image_id, attributes) raise ArgumentError.new("image_id is required") unless image_id params = {} 
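          # Each attribute is expanded into EC2's indexed query parameters below;
          # empty arrays simply contribute no parameters to the request.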
params.merge!(Fog::AWS.indexed_param('LaunchPermission.Add.%d.Group', attributes['Add.Group'] || [])) params.merge!(Fog::AWS.indexed_param('LaunchPermission.Add.%d.UserId', attributes['Add.UserId'] || [])) params.merge!(Fog::AWS.indexed_param('LaunchPermission.Remove.%d.Group', attributes['Remove.Group'] || [])) params.merge!(Fog::AWS.indexed_param('LaunchPermission.Remove.%d.UserId', attributes['Remove.UserId'] || [])) params.merge!(Fog::AWS.indexed_param('ProductCode', attributes['ProductCode'] || [])) request({ 'Action' => 'ModifyImageAttribute', 'ImageId' => image_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end class Mock def modify_image_attribute(image_id, attributes) raise ArgumentError.new("image_id is required") unless image_id unless self.data[:images][image_id] raise Fog::AWS::Compute::NotFound.new("The AMI ID '#{image_id}' does not exist") end (attributes['Add.UserId'] || []).each do |user_id| if image_launch_permissions = self.data[:image_launch_permissions][image_id] image_launch_permissions[:users].push(user_id) end end (attributes['Remove.UserId'] || []).each do |user_id| if image_launch_permissions = self.data[:image_launch_permissions][image_id] image_launch_permissions[:users].delete(user_id) end end response = Excon::Response.new response.status = 200 response.body = { 'return' => true, 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_instance_attribute.rb000066400000000000000000000034271437344660100264110ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modify instance attributes # # ==== Parameters # * instance_id<~String> - Id of instance to modify # * attributes<~Hash>: # 'InstanceType.Value'<~String> - New instance type # 'Kernel.Value'<~String> - New kernel value # 'Ramdisk.Value'<~String> - New ramdisk value # 'UserData.Value'<~String> - New userdata value # 'DisableApiTermination.Value'<~Boolean> - Change api termination value # 'InstanceInitiatedShutdownBehavior.Value'<~String> - New instance initiated shutdown behaviour, in ['stop', 'terminate'] # 'SourceDestCheck.Value'<~Boolean> - New sourcedestcheck value # 'GroupId'<~Array> - One or more groups to add instance to (VPC only) # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ModifyInstanceAttribute.html] # def modify_instance_attribute(instance_id, attributes) params = {} params.merge!(Fog::AWS.indexed_param('GroupId', attributes.delete('GroupId') || [])) params.merge!(attributes) request({ 'Action' => 'ModifyInstanceAttribute', 'InstanceId' => instance_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end def modify_instance_attributes(instance_id, attributes) Fog::Logger.deprecation("modify_instance_attributes method is deprecated, use 'modify_instance_attribute' instead") modify_instance_attribute(instance_id, attributes) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_instance_placement.rb000066400000000000000000000023231437344660100263500ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modify instance placement # # ==== Parameters # * instance_id<~String> - Id of instance to modify # * attributes<~Hash>: # 'Affinity.Value'<~String> - The affinity setting for the instance, in ['default', 'host'] # 'GroupName.Value'<~String> - 
The name of the placement group in which to place the instance # 'HostId.Value'<~String> - The ID of the Dedicated Host with which to associate the instance # 'Tenancy.Value'<~String> - The tenancy for the instance, in ['dedicated', 'host'] # # {Amazon API Reference}[https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstancePlacement.html] # def modify_instance_placement(instance_id, attributes) params = {} params.merge!(attributes) request({ 'Action' => 'ModifyInstancePlacement', 'InstanceId' => instance_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_network_interface_attribute.rb000066400000000000000000000076261437344660100303230ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modifies a network interface attribute value # # ==== Parameters # * network_interface_id<~String> - The ID of the network interface you want to describe an attribute of # * attribute<~String> - The attribute to modify, must be one of 'description', 'groupSet', 'sourceDestCheck' or 'attachment' # * value<~Object> - New value of attribute, the actual tyep depends on teh attribute: # description - a string # groupSet - a list of group id's # sourceDestCheck - a boolean value # attachment - a hash with: # attachmentid - the attachment to change # deleteOnTermination - a boolean # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-ModifyNetworkInterfaceAttribute.html] def modify_network_interface_attribute(network_interface_id, attribute, value) params = {} case attribute when 'description' params['Description.Value'] = value when 'groupSet' params.merge!(Fog::AWS.indexed_param('SecurityGroupId.%d', value)) when 'sourceDestCheck' params['SourceDestCheck.Value'] = value when 'attachment' params['Attachment.AttachmentId'] = value['attachmentId'] params['Attachment.DeleteOnTermination'] = value['deleteOnTermination'] else raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end request({ 'Action' => 'ModifyNetworkInterfaceAttribute', 'NetworkInterfaceId' => network_interface_id, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end class Mock def modify_network_interface_attribute(network_interface_id, attribute, value) response = Excon::Response.new if self.data[:network_interfaces][network_interface_id] nic = self.data[:network_interfaces][network_interface_id] case attribute when 'description' nic['description'] = value.clone when 'groupSet' groups = {} value.each do |group_id| security_group = self.data[:security_groups][group_id] if security_group.nil? raise Fog::AWS::Compute::Error.new("Unknown security group '#{group_id}' specified") end groups[group_id] = security_group['groupName'] end nic['groupSet'] = groups when 'sourceDestCheck' nic['sourceDestCheck'] = value when 'attachment' if nic['attachment'].nil? 
|| value['attachmentId'] != nic['attachment']['attachmentId'] raise Fog::AWS::Compute::Error.new("Illegal attachment '#{value['attachmentId']}' specified") end nic['attachment']['deleteOnTermination'] = value['deleteOnTermination'] else raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network interface '#{network_interface_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_snapshot_attribute.rb000066400000000000000000000032401437344660100264350ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modify snapshot attributes # # ==== Parameters # * snapshot_id<~String> - Id of snapshot to modify # * attributes<~Hash>: # * 'Add.Group'<~Array> - One or more groups to grant volume create permission to # * 'Add.UserId'<~Array> - One or more account ids to grant volume create permission to # * 'Remove.Group'<~Array> - One or more groups to revoke volume create permission from # * 'Remove.UserId'<~Array> - One or more account ids to revoke volume create permission from # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ModifySnapshotAttribute.html] # def modify_snapshot_attribute(snapshot_id, attributes) params = {} params.merge!(Fog::AWS.indexed_param('CreateVolumePermission.Add.%d.Group', attributes['Add.Group'] || [])) params.merge!(Fog::AWS.indexed_param('CreateVolumePermission.Add.%d.UserId', attributes['Add.UserId'] || [])) params.merge!(Fog::AWS.indexed_param('CreateVolumePermission.Remove.%d.Group', attributes['Remove.Group'] || [])) params.merge!(Fog::AWS.indexed_param('CreateVolumePermission.Remove.%d.UserId', attributes['Remove.UserId'] || [])) request({ 'Action' => 'ModifySnapshotAttribute', 'SnapshotId' => snapshot_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_subnet_attribute.rb000066400000000000000000000042751437344660100261070ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/modify_subnet_attribute' # Modifies a subnet attribute. # # ==== Parameters # * SubnetId<~String> - The id of the subnet to modify # * options<~Hash>: # * MapPublicIpOnLaunch<~Boolean> - Modifies the public IP addressing behavior for the subnet. # Specify true to indicate that instances launched into the specified subnet should be assigned a public IP address. # If set to true, the instance receives a public IP address only if the instance is launched with a single, # new network interface with the device index of 0. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. 
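        #
        # ==== Example (illustrative; the subnet id is hypothetical)
        #   compute.modify_subnet_attribute('subnet-0123456789abcdef0', 'MapPublicIpOnLaunch' => true)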
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ModifySubnetAttribute.html def modify_subnet_attribute(subnet_id, options = {}) params = {} params['MapPublicIpOnLaunch.Value'] = options.delete 'MapPublicIpOnLaunch' if options['MapPublicIpOnLaunch'] request({ 'Action' => 'ModifySubnetAttribute', 'SubnetId' => subnet_id, :parser => Fog::Parsers::AWS::Compute::ModifySubnetAttribute.new }.merge(params)) end end class Mock def modify_subnet_attribute(subnet_id, options={}) Excon::Response.new.tap do |response| subnet = self.data[:subnets].detect { |v| v['subnetId'] == subnet_id } if subnet subnet['mapPublicIpOnLaunch'] = options['MapPublicIpOnLaunch'] response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } else response.status = 404 response.body = { 'Code' => 'InvalidParameterValue' } end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_volume.rb000066400000000000000000000065101437344660100240250ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/modify_volume' # Modifies a volume # # ==== Parameters # * volume_id<~String> - The ID of the volume # * options<~Hash>: # * 'VolumeType'<~String> - Type of volume # * 'Size'<~Integer> - Size in GiBs fo the volume # * 'Iops'<~Integer> - Number of IOPS the volume supports # # ==== Response # * response<~Excon::Response>: # * body<~Hash>: # * 'targetIops'<~Integer> - Target IOPS rate of the volume being modified. # * 'originalIops'<~Integer> - Original IOPS rate of the volume being modified. # * 'modificationState'<~String> - Current state of modification. Modification state is null for unmodified volumes. # * 'targetSize'<~Integer> - Target size of the volume being modified. # * 'targetVolumeType'<~String> - Target EBS volume type of the volume being modified. # * 'volumeId'<~String> - ID of the volume being modified. # * 'progress'<~Integer> - Modification progress from 0 to 100%. # * 'startTime'<~Time> - Modification start time # * 'endTime'<~Time> - Modification end time # * 'originalSize'<~Integer> - Original size of the volume being modified. # * 'originalVolumeType'<~String> - Original EBS volume type of the volume being modified. 
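        #
        # ==== Example (illustrative; the volume id and target values are hypothetical)
        #   compute.modify_volume('vol-0123456789abcdef0', 'Size' => 200, 'VolumeType' => 'gp2')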
def modify_volume(volume_id, options={}) request({ 'Action' => "ModifyVolume", 'VolumeId' => volume_id, :parser => Fog::Parsers::AWS::Compute::ModifyVolume.new }.merge(options)) end end class Mock def modify_volume(volume_id, options={}) response = Excon::Response.new volume = self.data[:volumes][volume_id] if volume["volumeType"] == 'standard' && options['VolumeType'] raise Fog::AWS::Compute::Error.new("InvalidParameterValue => Volume type EBS Magnetic is not supported.") end volume_modification = { 'modificationState' => 'modifying', 'progress' => 0, 'startTime' => Time.now, 'volumeId' => volume_id } if options['Size'] volume_modification.merge!( 'originalSize' => volume['size'], 'targetSize' => options['Size'] ) end if options['Iops'] volume_modification.merge!( 'originalIops' => volume['iops'], 'targetIops' => options['Iops'] ) end if options['VolumeType'] if options["VolumeType"] == 'standard' raise Fog::AWS::Compute::Error.new("InvalidParameterValue => Volume type EBS Magnetic is not supported.") end volume_modification.merge!( 'originalVolumeType' => volume['volumeType'], 'targetVolumeType' => options['VolumeType'] ) end self.data[:volume_modifications][volume_id] = volume_modification response.body = {'volumeModification' => volume_modification, 'requestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_volume_attribute.rb000066400000000000000000000032051437344660100261060ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modifies a volume attribute. # # ==== Parameters # * volume_id<~String> - The ID of the volume. # * auto_enable_io_value<~Boolean> - This attribute exists to auto-enable the I/O operations to the volume. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ModifyVolumeAttribute.html] def modify_volume_attribute(volume_id=nil, auto_enable_io_value=false) request( 'Action' => 'ModifyVolumeAttribute', 'VolumeId' => volume_id, 'AutoEnableIO.Value' => auto_enable_io_value, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def modify_volume_attribute(volume_id=nil, auto_enable_io_value=false) response = Excon::Response.new if volume = self.data[:volumes][volume_id] response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The volume '#{volume_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/modify_vpc_attribute.rb000066400000000000000000000054641437344660100254000ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Modifies the specified attribute of the specified VPC. # # ==== Parameters # * vpc_id<~String> - The ID of the VPC. # * options<~Hash>: # * enableDnsSupport<~Boolean> - Indicates whether DNS resolution is supported for the VPC. If this attribute is true, the Amazon DNS # server resolves DNS hostnames for your instances to their corresponding IP addresses; otherwise, it does not. # * enableDnsHostnames<~Boolean> - Indicates whether the instances launched in the VPC get DNS hostnames. If this attribute is true, # instances in the VPC get DNS hostnames; otherwise, they do not. 
You can only set enableDnsHostnames to true if you also set the # EnableDnsSupport attribute to true. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ModifyVpcAttribute.html] def modify_vpc_attribute(vpc_id, options = {}) request({ 'Action' => 'ModifyVpcAttribute', 'VpcId' => vpc_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def modify_vpc_attribute(vpc_id, options = {}) response = Excon::Response.new if options.size == 0 raise Fog::AWS::Compute::Error.new("InvalidParameterCombination => No attributes specified.") elsif options.size > 1 raise Fog::AWS::Compute::Error.new("InvalidParameterCombination => InvalidParameterCombination => Fields for multiple attribute types specified: #{options.keys.join(', ')}") elsif vpc = self.data[:vpcs].find{ |v| v['vpcId'] == vpc_id } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } attribute = options.keys.first case attribute when 'EnableDnsSupport.Value' vpc['enableDnsSupport'] = options[attribute] when 'EnableDnsHostnames.Value' vpc['enableDnsHostnames'] = options[attribute] else raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end response else raise Fog::AWS::Compute::NotFound.new("The VPC '#{vpc_id}' does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/monitor_instances.rb000066400000000000000000000036611437344660100247110ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/monitor_unmonitor_instances' # Monitor specified instance # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-MonitorInstances.html # # ==== Parameters # * instance_ids<~Array> - Arrays of instances Ids to monitor # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'instancesSet': http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-ItemType-MonitorInstancesResponseSetItemType.html # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-MonitorInstances.html] def monitor_instances(instance_ids) params = Fog::AWS.indexed_param('InstanceId', instance_ids) request({ 'Action' => 'MonitorInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::MonitorUnmonitorInstances.new }.merge!(params)) end end class Mock def monitor_instances(instance_ids) response = Excon::Response.new response.status = 200 [*instance_ids].each do |instance_id| if instance = self.data[:instances][instance_id] instance['monitoring']['state'] = 'enabled' else raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_ids}' does not exist") end end instances_set = [*instance_ids].reduce([]) { |memo, id| memo << {'instanceId' => id, 'monitoring' => 'enabled'} } response.body = {'requestId' => 'some_request_id', 'instancesSet' => instances_set} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/move_address_to_vpc.rb000066400000000000000000000027711437344660100252010ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/move_address_to_vpc' # Move address to VPC scope # # === Returns # * response<~Excon::Response>: # * body<~: # * 
'allocationId'<~String> - The allocation ID for the Elastic IP address # * 'requestId'<~String> - Id of the request # * 'status'<~String> - The status of the move of the IP address (MoveInProgress | InVpc | InClassic) def move_address_to_vpc(public_ip) request( 'Action' => 'MoveAddressToVpc', 'PublicIp' => public_ip, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::MoveAddressToVpc.new ) end end class Mock def move_address_to_vpc(public_ip) response = Excon::Response.new allocation_id = "eip-#{Fog::Mock.random_hex(8)}" address = self.data[:addresses][public_ip] if address address['domain'] = 'vpc' address['allocationId'] = allocation_id response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'allocationId' => allocation_id, 'status' => "InVpc" } response else raise Fog::AWS::Compute::NotFound.new("Address does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/purchase_reserved_instances_offering.rb000066400000000000000000000053371437344660100306140ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/purchase_reserved_instances_offering' # Purchases a Reserved Instance for use with your account. # # ==== Parameters # * reserved_instances_offering_id<~String> - ID of the Reserved Instance offering you want to purchase. # * instance_count<~Integer> - The number of Reserved Instances to purchase. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'reservedInstancesId'<~String> - Id of the purchased reserved instances. # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-PurchaseReservedInstancesOffering.html] def purchase_reserved_instances_offering(reserved_instances_offering_id, instance_count = 1) request({ 'Action' => 'PurchaseReservedInstancesOffering', 'ReservedInstancesOfferingId' => reserved_instances_offering_id, 'InstanceCount' => instance_count, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::PurchaseReservedInstancesOffering.new }) end end class Mock def purchase_reserved_instances_offering(reserved_instances_offering_id, instance_count = 1) response = Excon::Response.new response.status = 200 # Need to implement filters in the mock to find this there instead of here # Also there's no information about what to do when the specified reserved_instances_offering_id doesn't exist raise unless reserved_instance_offering = describe_reserved_instances_offerings.body["reservedInstancesOfferingsSet"].find { |offering| offering["reservedInstancesOfferingId"] == reserved_instances_offering_id } reserved_instances_id = Fog::AWS::Mock.reserved_instances_id reserved_instance_offering.delete('reservedInstancesOfferingId') self.data[:reserved_instances][reserved_instances_id] = reserved_instance_offering.merge({ 'reservedInstancesId' => reserved_instances_id, 'start' => Time.now, 'end' => Time.now, 'instanceCount' => instance_count, 'state' => 'payment-pending', 'tagSet' => [] }) response.body = { 'reservedInstancesId' => reserved_instances_id, 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/reboot_instances.rb000066400000000000000000000031551437344660100245120ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Reboot specified instances # # ==== Parameters # * instance_id<~Array> - Ids of instances to reboot # # ==== 
Returns # # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RebootInstances.html] def reboot_instances(instance_id = []) params = Fog::AWS.indexed_param('InstanceId', instance_id) request({ 'Action' => 'RebootInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(params)) end end class Mock def reboot_instances(instance_id = []) response = Excon::Response.new instance_id = [*instance_id] if (self.data[:instances].keys & instance_id).length == instance_id.length for instance_id in instance_id self.data[:instances][instance_id]['status'] = 'rebooting' end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The instance ID #{instance_id.inspect} does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/register_image.rb000066400000000000000000000116241437344660100241370ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/register_image' # register an image # # ==== Parameters # * Name<~String> - Name of the AMI to be registered # * Description<~String> - AMI description # * Location<~String> - S3 manifest location (for S3 backed AMIs) # or # * RootDeviceName<~String> - Name of Root Device (for EBS snapshot backed AMIs) # * BlockDevices<~Array>: # * BlockDeviceOptions<~Hash>: # * DeviceName<~String> - Name of the Block Device # * VirtualName<~String> - Name of the Virtual Device # * SnapshotId<~String> - id of the EBS Snapshot # * VolumeSize<~Integer> - Size of the snapshot (optional) # * NoDevice<~Boolean> - Do not use an ebs device (def: true) # * DeleteOnTermation<~Boolean> - Delete EBS volume on instance term (def: true) # * Options<~Hash>: # * Architecture<~String> - i386 or x86_64 # * KernelId<~String> - kernelId # * RamdiskId<~String> - ramdiskId # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'imageId'<~String> - Id of newly created AMI # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RegisterImage.html] def register_image(name, description, location, block_devices=[], options={}) common_options = { 'Action' => 'RegisterImage', 'Name' => name, 'Description' => description, :parser => Fog::Parsers::AWS::Compute::RegisterImage.new } # This determines if we are doing a snapshot or a S3 backed AMI. if(location =~ /^\/dev\/(xvd|sd)[a-p]\d{0,2}$/) common_options['RootDeviceName'] = location else common_options['ImageLocation'] = location end block_devices.each_with_index do |bd, index| index += 1 ["DeviceName","VirtualName"].each do |n| common_options["BlockDeviceMapping.#{index}.#{n}"] = bd[n] if bd[n] end ["SnapshotId","VolumeSize","NoDevice","DeleteOnTermination"].each do |n| common_options["BlockDeviceMapping.#{index}.Ebs.#{n}"] = bd[n] if bd[n] end end request(common_options.merge!(options)) end end class Mock def register_image(name, description, location, block_devices=[], options={}) unless name.empty? 
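            # Build an AMI record shaped like a DescribeImages entry so later mock calls
            # (e.g. describe_images, deregister_image) can find it in self.data[:images].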
image = { 'imageId' => Fog::AWS::Mock.image_id, 'imageLocation' => '', 'imageState' => 'pending', 'imageOwnerId' => self.data[:owner_id], 'isPublic' => false, 'productCodes' => [], 'architecture' => options['Architecture'] || 'i386', 'imageType' => 'machine', 'kernelId' => options['KernelId'] || Fog::AWS::Mock.kernel_id, 'ramdiskId' => options['RamdiskId'] || Fog::AWS::Mock.ramdisk_id, 'platform' => 'Linux', 'stateReason' => {}, 'imageOwnerAlias' => self.data[:owner_id], 'name' => name, 'description' => description, 'rootDeviceType' => '', 'rootDeviceName' => '', 'blockDeviceMapping' => [], 'virtualizationType' => 'paravirtual', 'hypervisor' => 'xen', 'registered' => Time.now } if location[/^\/dev\/(xvd|sd)[a-p]\d{0,2}$/] image['rootDeviceName'] = location image['rootDeviceType'] = 'ebs' else image['imageLocation'] = location end block_devices.each do |bd| block_device_mapping = { 'ebs' => {} } ["DeviceName","VirtualName"].each do |n| block_device_mapping[n] = bd[n] if bd[n] end ["SnapshotId","VolumeSize","NoDevice","DeleteOnTermination"].each do |n| block_device_mapping['ebs'][n] = bd[n] if bd[n] end image['blockDeviceMapping'] << block_device_mapping end self.data[:images][image['imageId']] = image response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'imageId' => image['imageId'] } response else message = 'MissingParameter => ' if name.empty? message << 'The request must contain the parameter name' end raise Fog::AWS::Compute::Error.new(message) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/release_address.rb000066400000000000000000000040121437344660100242670ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Release an elastic IP address. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
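        # ==== Examples
        # Illustrative calls only; the IP address and allocation id below are
        # placeholders, and `compute` is assumed to be an existing
        # Fog::AWS::Compute connection.
        #
        #   compute.release_address('203.0.113.10')       # EC2-Classic address, by public IP
        #   compute.release_address('eipalloc-0a1b2c3d')  # VPC address, by allocation id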
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ReleaseAddress.html] # # non-VPC: requires public_ip only # VPC: requires allocation_id only def release_address(ip_or_allocation) field = if ip_or_allocation.to_s =~ /^(\d|\.)+$/ "PublicIp" else "AllocationId" end request( 'Action' => 'ReleaseAddress', field => ip_or_allocation, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def release_address(public_ip_or_allocation_id) response = Excon::Response.new address = self.data[:addresses][public_ip_or_allocation_id] || self.data[:addresses].values.find {|a| a['allocationId'] == public_ip_or_allocation_id } if address if address['allocationId'] && public_ip_or_allocation_id == address['publicIp'] raise Fog::AWS::Compute::Error, "InvalidParameterValue => You must specify an allocation id when releasing a VPC elastic IP address" end self.data[:addresses].delete(address['publicIp']) response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::Error.new("AuthFailure => The address '#{public_ip_or_allocation_id}' does not belong to you.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/replace_network_acl_association.rb000066400000000000000000000051061437344660100275460ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/replace_network_acl_association' # Replace the network ACL for a subnet with a # # ==== Parameters # * association_id<~String> - The ID of the current association between the original network ACL and the subnet # * network_acl_id<~String> - The ID of the network ACL # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
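        # ==== Examples
        # Illustrative ids only, assuming `compute` is an existing
        # Fog::AWS::Compute connection; points the subnet's association at a
        # different network ACL.
        #
        #   compute.replace_network_acl_association('aclassoc-11111111', 'acl-22222222')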
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ReplaceNetworkAclAssociation.html] def replace_network_acl_association(association_id, network_acl_id) request({ 'Action' => 'ReplaceNetworkAclAssociation', 'AssociationId' => association_id, 'NetworkAclId' => network_acl_id, :parser => Fog::Parsers::AWS::Compute::ReplaceNetworkAclAssociation.new }) end end class Mock def replace_network_acl_association(association_id, network_acl_id) response = Excon::Response.new if self.data[:network_acls][network_acl_id] # find the old assoc old_nacl = self.data[:network_acls].values.find do |n| n['associationSet'].find { |assoc| assoc['networkAclAssociationId'] == association_id } end unless old_nacl raise Fog::AWS::Compute::Error.new("Invalid association_id #{association_id}") end subnet_id = old_nacl['associationSet'].find { |assoc| assoc['networkAclAssociationId'] == association_id }['subnetId'] old_nacl['associationSet'].delete_if { |assoc| assoc['networkAclAssociationId'] == association_id } id = Fog::AWS::Mock.network_acl_association_id self.data[:network_acls][network_acl_id]['associationSet'] << { 'networkAclAssociationId' => id, 'networkAclId' => network_acl_id, 'subnetId' => subnet_id, } response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'newAssociationId' => id } response else raise Fog::AWS::Compute::NotFound.new("The network ACL '#{network_acl_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/replace_network_acl_entry.rb000066400000000000000000000075121437344660100263760ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Replaces a Network ACL entry with the same rule number # # ==== Parameters # * network_acl_id<~String> - The ID of the ACL to add this entry to # * rule_number<~Integer> - The rule number for the entry, between 100 and 32766 # * protocol<~Integer> - The IP protocol to which the rule applies. You can use -1 to mean all protocols. # * rule_action<~String> - Allows or denies traffic that matches the rule. (either allow or deny) # * cidr_block<~String> - The CIDR range to allow or deny # * egress<~Boolean> - Indicates whether this rule applies to egress traffic from the subnet (true) or ingress traffic to the subnet (false). # * options<~Hash>: # * 'Icmp.Code' - ICMP code, required if protocol is 1 # * 'Icmp.Type' - ICMP type, required if protocol is 1 # * 'PortRange.From' - The first port in the range, required if protocol is 6 (TCP) or 17 (UDP) # * 'PortRange.To' - The last port in the range, required if protocol is 6 (TCP) or 17 (UDP) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - Returns true if the request succeeds. 
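        # ==== Examples
        # A minimal sketch with placeholder ids, assuming `compute` is an existing
        # Fog::AWS::Compute connection. Protocol 6 is TCP, so a port range is
        # required as described above.
        #
        #   # Replace ingress rule 100 with one allowing HTTPS from anywhere:
        #   compute.replace_network_acl_entry('acl-22222222', 100, 6, 'allow', '0.0.0.0/0', false,
        #     'PortRange.From' => 443, 'PortRange.To' => 443)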
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ReplaceNetworkAclEntry.html] def replace_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options = {}) request({ 'Action' => 'ReplaceNetworkAclEntry', 'NetworkAclId' => network_acl_id, 'RuleNumber' => rule_number, 'Protocol' => protocol, 'RuleAction' => rule_action, 'Egress' => egress, 'CidrBlock' => cidr_block, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def replace_network_acl_entry(network_acl_id, rule_number, protocol, rule_action, cidr_block, egress, options = {}) response = Excon::Response.new if self.data[:network_acls][network_acl_id] unless self.data[:network_acls][network_acl_id]['entrySet'].find { |r| r['ruleNumber'] == rule_number && r['egress'] == egress } raise Fog::AWS::Compute::Error.new("No rule with that number") end self.data[:network_acls][network_acl_id]['entrySet'].delete_if { |r| r['ruleNumber'] == rule_number && r['egress'] == egress } data = { 'ruleNumber' => rule_number, 'protocol' => protocol, 'ruleAction' => rule_action, 'egress' => egress, 'cidrBlock' => cidr_block, 'icmpTypeCode' => {}, 'portRange' => {} } data['icmpTypeCode']['code'] = options['Icmp.Code'] if options['Icmp.Code'] data['icmpTypeCode']['type'] = options['Icmp.Type'] if options['Icmp.Type'] data['portRange']['from'] = options['PortRange.From'] if options['PortRange.From'] data['portRange']['to'] = options['PortRange.To'] if options['PortRange.To'] self.data[:network_acls][network_acl_id]['entrySet'] << data response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response else raise Fog::AWS::Compute::NotFound.new("The network ACL '#{network_acl_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/replace_route.rb000066400000000000000000000103371437344660100240020ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Replaces a route in a route table within a VPC. # # ==== Parameters # * RouteTableId<~String> - The ID of the route table for the route. # * options<~Hash>: # * DestinationCidrBlock<~String> - The CIDR address block used for the destination match. Routing decisions are based on the most specific match. # * GatewayId<~String> - The ID of an Internet gateway attached to your VPC. # * InstanceId<~String> - The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached. # * NetworkInterfaceId<~String> - The ID of a network interface. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of the request # * 'return'<~Boolean> - Returns true if the request succeeds. Otherwise, returns an error. 
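        # ==== Examples
        # Illustrative ids only, assuming `compute` is an existing
        # Fog::AWS::Compute connection; repoints the default route at an
        # Internet gateway.
        #
        #   compute.replace_route('rtb-33333333', '0.0.0.0/0', 'GatewayId' => 'igw-44444444')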
# # {Amazon API Reference}[http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-ReplaceRoute.html] def replace_route(route_table_id, destination_cidr_block, options = {}) options['DestinationCidrBlock'] ||= destination_cidr_block request({ 'Action' => 'ReplaceRoute', 'RouteTableId' => route_table_id, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def replace_route(route_table_id, destination_cidr_block, options = {}) options['instanceOwnerId'] ||= nil options['DestinationCidrBlock'] ||= destination_cidr_block route_table = self.data[:route_tables].find { |routetable| routetable["routeTableId"].eql? route_table_id } if !route_table.nil? && destination_cidr_block if !options['gatewayId'].nil? || !options['instanceId'].nil? || !options['networkInterfaceId'].nil? if !options['gatewayId'].nil? && self.internet_gateways.all('internet-gateway-id'=>options['gatewayId']).first.nil? raise Fog::AWS::Compute::NotFound.new("The gateway ID '#{options['gatewayId']}' does not exist") elsif !options['instanceId'].nil? && self.servers.all('instance-id'=>options['instanceId']).first.nil? raise Fog::AWS::Compute::NotFound.new("The instance ID '#{options['instanceId']}' does not exist") elsif !options['networkInterfaceId'].nil? && self.network_interfaces.all('networkInterfaceId'=>options['networkInterfaceId']).first.nil? raise Fog::AWS::Compute::NotFound.new("The networkInterface ID '#{options['networkInterfaceId']}' does not exist") elsif route_table['routeSet'].find { |route| route['destinationCidrBlock'].eql? destination_cidr_block }.nil? raise Fog::AWS::Compute::Error, "RouteAlreadyExists => The route identified by #{destination_cidr_block} doesn't exist." else response = Excon::Response.new route_set = route_table['routeSet'].find { |routeset| routeset['destinationCidrBlock'].eql? destination_cidr_block } route_set.merge!(options) route_set['state'] = 'pending' route_set['origin'] = 'ReplaceRoute' response.status = 200 response.body = { 'requestId'=> Fog::AWS::Mock.request_id, 'return' => true } response end else message = 'MissingParameter => ' message << 'The request must contain either a gateway id, a network interface id, or an instance id' raise Fog::AWS::Compute::Error.new(message) end elsif route_table.nil? raise Fog::AWS::Compute::NotFound.new("The routeTable ID '#{route_table_id}' does not exist") elsif destination_cidr_block.empty? raise Fog::AWS::Compute::InvalidParameterValue.new("Value () for parameter destinationCidrBlock is invalid. 
This is not a valid CIDR block.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/request_spot_instances.rb000066400000000000000000000226431437344660100257600ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/spot_instance_requests' # Launch specified instances # # ==== Parameters # * 'image_id'<~String> - Id of machine image to load on instances # * 'instance_type'<~String> - Type of instance # * 'spot_price'<~Float> - maximum hourly price for instances launched # * options<~Hash>: # * 'AvailabilityZoneGroup'<~String> - specify whether or not to launch all instances in the same availability group # * 'InstanceCount'<~Integer> - maximum number of instances to launch # * 'LaunchGroup'<~String> - whether or not to launch/shutdown instances as a group # * 'LaunchSpecification.BlockDeviceMapping'<~Array>: array of hashes # * 'DeviceName'<~String> - where the volume will be exposed to instance # * 'VirtualName'<~String> - volume virtual device name # * 'Ebs.SnapshotId'<~String> - id of snapshot to boot volume from # * 'Ebs.NoDevice'<~String> - specifies that no device should be mapped # * 'Ebs.VolumeSize'<~String> - size of volume in GiBs required unless snapshot is specified # * 'Ebs.DeleteOnTermination'<~String> - specifies whether or not to delete the volume on instance termination # * 'LaunchSpecification.KeyName'<~String> - Name of a keypair to add to booting instances # * 'LaunchSpecification.Monitoring.Enabled'<~Boolean> - Enables monitoring, defaults to disabled # * 'LaunchSpecification.SubnetId'<~String> - VPC subnet ID in which to launch the instance # * 'LaunchSpecification.Placement.AvailabilityZone'<~String> - Placement constraint for instances # * 'LaunchSpecification.SecurityGroup'<~Array> or <~String> - Name of security group(s) for instances, not supported in VPC # * 'LaunchSpecification.SecurityGroupId'<~Array> or <~String> - Id of security group(s) for instances, use this or LaunchSpecification.SecurityGroup # * 'LaunchSpecification.UserData'<~String> - Additional data to provide to booting instances # * 'LaunchSpecification.EbsOptimized'<~Boolean> - Whether the instance is optimized for EBS I/O # * 'Type'<~String> - spot instance request type in ['one-time', 'persistent'] # * 'ValidFrom'<~Time> - start date for request # * 'ValidUntil'<~Time> - end date for request # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'spotInstanceRequestSet'<~Array>: # * 'createTime'<~Time> - time of instance request creation # * 'instanceId'<~String> - instance id if one has been launched to fulfill request # * 'launchedAvailabilityZone'<~String> - availability zone of instance if one has been launched to fulfill request # * 'launchSpecification'<~Hash>: # * 'blockDeviceMapping'<~Hash> - list of block device mappings for instance # * 'groupSet'<~String> - security group(s) for instance # * 'keyName'<~String> - keypair name for instance # * 'imageId'<~String> - AMI for instance # * 'instanceType'<~String> - type for instance # * 'monitoring'<~Boolean> - monitoring status for instance # * 'subnetId'<~String> - VPC subnet ID for instance # * 'productDescription'<~String> - general description of AMI # * 'spotInstanceRequestId'<~String> - id of spot instance request # * 'spotPrice'<~Float> - maximum price for instances to be launched # * 'state'<~String> - spot instance request state # * 'type'<~String> - spot instance request type # # {Amazon API 
Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RequestSpotInstances.html] def request_spot_instances(image_id, instance_type, spot_price, options = {}) if block_device_mapping = options.delete('LaunchSpecification.BlockDeviceMapping') block_device_mapping.each_with_index do |mapping, index| for key, value in mapping options.merge!({ format("LaunchSpecification.BlockDeviceMapping.%d.#{key}", index) => value }) end end end if security_groups = options.delete('LaunchSpecification.SecurityGroup') options.merge!(Fog::AWS.indexed_param('LaunchSpecification.SecurityGroup', [*security_groups])) end if security_group_ids = options.delete('LaunchSpecification.SecurityGroupId') options.merge!(Fog::AWS.indexed_param('LaunchSpecification.SecurityGroupId', [*security_group_ids])) end if options['LaunchSpecification.UserData'] options['LaunchSpecification.UserData'] = Base64.encode64(options['LaunchSpecification.UserData']).chomp! end if options['ValidFrom'] && options['ValidFrom'].is_a?(Time) options['ValidFrom'] = options['ValidFrom'].iso8601 end if options['ValidUntil'] && options['ValidUntil'].is_a?(Time) options['ValidUntil'] = options['ValidUntil'].iso8601 end request({ 'Action' => 'RequestSpotInstances', 'LaunchSpecification.ImageId' => image_id, 'LaunchSpecification.InstanceType' => instance_type, 'SpotPrice' => spot_price, :parser => Fog::Parsers::AWS::Compute::SpotInstanceRequests.new }.merge!(options)) end end class Mock def request_spot_instances(image_id, instance_type, spot_price, options = {}) response = Excon::Response.new id = Fog::AWS::Mock.spot_instance_request_id if (image_id && instance_type && spot_price) response.status = 200 all_instance_types = flavors.map { |f| f.id } if !all_instance_types.include?(instance_type) message = "InvalidParameterValue => Invalid value '#{instance_type}' for InstanceType." raise Fog::AWS::Compute::Error.new(message) end spot_price = spot_price.to_f if !(spot_price > 0) message = "InvalidParameterValue => Value (#{spot_price}) for parameter price is invalid." 
message << " \"#{spot_price}\" is an invalid spot instance price" raise Fog::AWS::Compute::Error.new(message) end if !image_id.match(/^ami-[a-f0-9]{8,17}$/) message = "The image id '[#{image_id}]' does not exist" raise Fog::AWS::Compute::NotFound.new(message) end else message = 'MissingParameter => ' message << 'The request must contain the parameter ' if !image_id message << 'image_id' elsif !instance_type message << 'instance_type' else message << 'spot_price' end raise Fog::AWS::Compute::Error.new(message) end for key in %w(AvailabilityZoneGroup LaunchGroup) if options.is_a?(Hash) && options.key?(key) Fog::Logger.warning("#{key} filters are not yet mocked [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end end launch_spec = { 'iamInstanceProfile' => {}, 'blockDeviceMapping' => options['LaunchSpecification.BlockDeviceMapping'] || [], 'groupSet' => options['LaunchSpecification.SecurityGroupId'] || ['default'], 'imageId' => image_id, 'instanceType' => instance_type, 'monitoring' => options['LaunchSpecification.Monitoring.Enabled'] || false, 'subnetId' => options['LaunchSpecification.SubnetId'] || nil, 'ebsOptimized' => options['LaunchSpecification.EbsOptimized'] || false, 'keyName' => options['LaunchSpecification.KeyName'] || nil } if iam_arn = options['LaunchSpecification.IamInstanceProfile.Arn'] launch_spec['iamInstanceProfile'].merge!('Arn' => iam_arn) end if iam_name = options['LaunchSpecification.IamInstanceProfile.Name'] launch_spec['iamInstanceProfile'].merge!('Name' => iam_name) end spot_request = { 'launchSpecification' => launch_spec, 'spotInstanceRequestId' => id, 'spotPrice' => spot_price, 'type' => options['Type'] || 'one-time', 'state' => 'open', 'fault' => { 'code' => 'pending-evaluation', 'message' => 'Your Spot request has been submitted for review, and is pending evaluation.' }, 'createTime' => Time.now, 'productDescription' => 'Linux/UNIX' } self.data[:spot_requests][id] = spot_request response.body = { 'spotInstanceRequestSet' => [spot_request], 'requestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/reset_network_interface_attribute.rb000066400000000000000000000041561437344660100301510ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Resets a network interface attribute value # # ==== Parameters # * network_interface_id<~String> - The ID of the network interface you want to describe an attribute of # * attribute<~String> - The attribute to reset, only 'sourceDestCheck' is supported. # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? 
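        # ==== Examples
        # Illustrative id only, assuming `compute` is an existing
        # Fog::AWS::Compute connection; 'sourceDestCheck' is the only attribute
        # this call accepts.
        #
        #   compute.reset_network_interface_attribute('eni-55555555', 'sourceDestCheck')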
# # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/2012-03-01/APIReference/ApiReference-query-DescribeNetworkInterfaceAttribute.html] def reset_network_interface_attribute(network_interface_id, attribute) if attribute != 'sourceDestCheck' raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end request( 'Action' => 'ResetNetworkInterfaceAttribute', 'NetworkInterfaceId' => network_interface_id, 'Attribute' => attribute, :parser => Fog::Parsers::AWS::Compute::Basic.new ) end end class Mock def reset_network_interface_attribute(network_interface_id, attribute) response = Excon::Response.new if self.data[:network_interfaces][network_interface_id] response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } if attribute == 'sourceDestCheck' self.data[:network_interfaces][network_interface_id]['sourceDestCheck'] = true else raise Fog::AWS::Compute::Error.new("Illegal attribute '#{attribute}' specified") end response else raise Fog::AWS::Compute::NotFound.new("The network interface '#{network_interface_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/restore_address_to_classic.rb000066400000000000000000000032351437344660100265430ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/restore_address_to_classic' # Move address from VPC to Classic # # === Returns # * response<~Excon::Response>: # * body<~: # * 'publicIp'<~String> - IP address # * 'requestId'<~String> - Id of the request # * 'status'<~String> - The status of the move of the IP address (MoveInProgress | InVpc | InClassic) def restore_address_to_classic(public_ip) request( 'Action' => 'RestoreAddressToClassic', 'PublicIp' => public_ip, :idempotent => true, :parser => Fog::Parsers::AWS::Compute::RestoreAddressToClassic.new ) end end class Mock def restore_address_to_classic(public_ip) response = Excon::Response.new address = self.data[:addresses][public_ip] if address if address[:origin] == 'vpc' raise Fog::AWS::Compute::Error.new("InvalidState => You cannot migrate an Elastic IP address that was originally allocated for use in EC2-VPC to EC2-Classic.") end address['domain'] = 'standard' address.delete("allocationId") response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'publicIp' => public_ip, 'status' => "InClassic" } response else raise Fog::AWS::Compute::NotFound.new("Address does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/revoke_security_group_egress.rb000066400000000000000000000073171437344660100271630ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Remove permissions from a security group # # ==== Parameters # * group_name<~String> - Name of group, optional (can also be specifed as GroupName in options) # * options<~Hash>: # * 'GroupName'<~String> - Name of security group to modify # * 'GroupId'<~String> - Id of security group to modify # * 'SourceSecurityGroupName'<~String> - Name of security group to authorize # * 'SourceSecurityGroupOwnerId'<~String> - Name of owner to authorize # or # * 'CidrIp'<~String> - CIDR range # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # or # * 'IpPermissions'<~Array>: # * permission<~Hash>: # * 'FromPort'<~Integer> - 
Start of port range (or -1 for ICMP wildcard) # * 'Groups'<~Array>: # * group<~Hash>: # * 'GroupName'<~String> - Name of security group to authorize # * 'UserId'<~String> - Name of owner to authorize # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'IpRanges'<~Array>: # * ip_range<~Hash>: # * 'CidrIp'<~String> - CIDR range # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RevokeSecurityGroupEgress.html] def revoke_security_group_egress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) if ip_permissions = options.delete('IpPermissions') options.merge!(indexed_ip_permissions_params(ip_permissions)) end request({ 'Action' => 'RevokeSecurityGroupEgress', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def revoke_security_group_egress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) group = self.data[:security_groups].values.find { |v| v['groupName'] == group_name } group || raise(Fog::AWS::Compute::NotFound.new("The security group '#{group_name}' does not exist")) response = Excon::Response.new verify_permission_options(options, group['vpcId'] != nil) normalized_permissions = normalize_permissions(options) normalized_permissions.each do |permission| if matching_permission = find_matching_permission_egress(group, permission) matching_permission['ipRanges'] -= permission['ipRanges'] matching_permission['groups'] -= permission['groups'] if matching_permission['ipRanges'].empty? && matching_permission['groups'].empty? 
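              # Nothing references this permission any more, so drop the whole
              # entry from the group's egress rules.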
group['ipPermissionsEgress'].delete(matching_permission) end end end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/revoke_security_group_ingress.rb000066400000000000000000000073051437344660100273420ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/basic' # Remove permissions from a security group # # ==== Parameters # * group_name<~String> - Name of group, optional (can also be specifed as GroupName in options) # * options<~Hash>: # * 'GroupName'<~String> - Name of security group to modify # * 'GroupId'<~String> - Id of security group to modify # * 'SourceSecurityGroupName'<~String> - Name of security group to authorize # * 'SourceSecurityGroupOwnerId'<~String> - Name of owner to authorize # or # * 'CidrIp'<~String> - CIDR range # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # or # * 'IpPermissions'<~Array>: # * permission<~Hash>: # * 'FromPort'<~Integer> - Start of port range (or -1 for ICMP wildcard) # * 'Groups'<~Array>: # * group<~Hash>: # * 'GroupName'<~String> - Name of security group to authorize # * 'UserId'<~String> - Name of owner to authorize # * 'IpProtocol'<~String> - Ip protocol, must be in ['tcp', 'udp', 'icmp'] # * 'IpRanges'<~Array>: # * ip_range<~Hash>: # * 'CidrIp'<~String> - CIDR range # * 'ToPort'<~Integer> - End of port range (or -1 for ICMP wildcard) # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'return'<~Boolean> - success? # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RevokeSecurityGroupIngress.html] def revoke_security_group_ingress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) if ip_permissions = options.delete('IpPermissions') options.merge!(indexed_ip_permissions_params(ip_permissions)) end request({ 'Action' => 'RevokeSecurityGroupIngress', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::Basic.new }.merge!(options)) end end class Mock def revoke_security_group_ingress(group_name, options = {}) options = Fog::AWS.parse_security_group_options(group_name, options) group = self.data[:security_groups].values.find { |v| v['groupName'] == group_name } group || raise(Fog::AWS::Compute::NotFound.new("The security group '#{group_name}' does not exist")) response = Excon::Response.new verify_permission_options(options, group['vpcId'] != nil) normalized_permissions = normalize_permissions(options) normalized_permissions.each do |permission| if matching_permission = find_matching_permission(group, permission) matching_permission['ipRanges'] -= permission['ipRanges'] matching_permission['groups'] -= permission['groups'] if matching_permission['ipRanges'].empty? && matching_permission['groups'].empty? 
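              # The revoked ranges/groups were the last ones on this permission,
              # so remove the ingress entry entirely.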
group['ipPermissions'].delete(matching_permission) end end end response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'return' => true } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/run_instances.rb000066400000000000000000000470331437344660100240270ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/run_instances' # Launch specified instances # # ==== Parameters # * image_id<~String> - Id of machine image to load on instances # * min_count<~Integer> - Minimum number of instances to launch. If this # exceeds the count of available instances, no instances will be # launched. Must be between 1 and maximum allowed for your account # (by default the maximum for an account is 20) # * max_count<~Integer> - Maximum number of instances to launch. If this # exceeds the number of available instances, the largest possible # number of instances above min_count will be launched instead. Must # be between 1 and maximum allowed for you account # (by default the maximum for an account is 20) # * options<~Hash>: # * 'Placement.AvailabilityZone'<~String> - Placement constraint for instances # * 'Placement.GroupName'<~String> - Name of existing placement group to launch instance into # * 'Placement.Tenancy'<~String> - Tenancy option in ['dedicated', 'default'], defaults to 'default' # * 'BlockDeviceMapping'<~Array>: array of hashes # * 'DeviceName'<~String> - where the volume will be exposed to instance # * 'VirtualName'<~String> - volume virtual device name # * 'Ebs.SnapshotId'<~String> - id of snapshot to boot volume from # * 'Ebs.VolumeSize'<~String> - size of volume in GiBs required unless snapshot is specified # * 'Ebs.DeleteOnTermination'<~Boolean> - specifies whether or not to delete the volume on instance termination # * 'Ebs.Encrypted'<~Boolean> - specifies whether or not the volume is to be encrypted unless snapshot is specified # * 'Ebs.VolumeType'<~String> - Type of EBS volue. Valid options in ['standard', 'io1'] default is 'standard'. # * 'Ebs.Iops'<~String> - The number of I/O operations per second (IOPS) that the volume supports. Required when VolumeType is 'io1' # * 'HibernationOptions'<~Array>: array of hashes # * 'Configured'<~Boolean> - specifies whether or not the instance is configued for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. # * 'NetworkInterfaces'<~Array>: array of hashes # * 'NetworkInterfaceId'<~String> - An existing interface to attach to a single instance # * 'DeviceIndex'<~String> - The device index. Applies both to attaching an existing network interface and creating a network interface # * 'SubnetId'<~String> - The subnet ID. Applies only when creating a network interface # * 'Description'<~String> - A description. Applies only when creating a network interface # * 'PrivateIpAddress'<~String> - The primary private IP address. Applies only when creating a network interface # * 'SecurityGroupId'<~Array> or <~String> - ids of security group(s) for network interface. Applies only when creating a network interface. # * 'DeleteOnTermination'<~String> - Indicates whether to delete the network interface on instance termination. # * 'PrivateIpAddresses.PrivateIpAddress'<~String> - The private IP address. This parameter can be used multiple times to specify explicit private IP addresses for a network interface, but only one private IP address can be designated as primary. 
# * 'PrivateIpAddresses.Primary'<~Bool> - Indicates whether the private IP address is the primary private IP address. # * 'SecondaryPrivateIpAddressCount'<~Bool> - The number of private IP addresses to assign to the network interface. # * 'AssociatePublicIpAddress'<~String> - Indicates whether to assign a public IP address to an instance in a VPC. The public IP address is assigned to a specific network interface # * 'TagSpecifications'<~Array>: array of hashes # * 'ResourceType'<~String> - Type of resource to apply tags on, e.g: instance or volume # * 'Tags'<~Array> - List of hashs reprensenting tag to be set # * 'Key'<~String> - Tag name # * 'Value'<~String> - Tag value # * 'ClientToken'<~String> - unique case-sensitive token for ensuring idempotency # * 'DisableApiTermination'<~Boolean> - specifies whether or not to allow termination of the instance from the api # * 'SecurityGroup'<~Array> or <~String> - Name of security group(s) for instances (not supported for VPC) # * 'SecurityGroupId'<~Array> or <~String> - id's of security group(s) for instances, use this or SecurityGroup # * 'InstanceInitiatedShutdownBehaviour'<~String> - specifies whether volumes are stopped or terminated when instance is shutdown, in [stop, terminate] # * 'InstanceType'<~String> - Type of instance to boot. Valid options # in ['t1.micro', 't2.nano', 't2.micro', 't2.small', 't2.medium', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'g2.2xlarge', 'hs1.8xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge', 'm3.xlarge', 'm3.2xlarge', 'hi1.4xlarge', 'cc1.4xlarge', 'cc2.8xlarge', 'cg1.4xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge'] # default is 'm1.small' # * 'KernelId'<~String> - Id of kernel with which to launch # * 'KeyName'<~String> - Name of a keypair to add to booting instances # * 'Monitoring.Enabled'<~Boolean> - Enables monitoring, defaults to # disabled # * 'PrivateIpAddress<~String> - VPC option to specify ip address within subnet # * 'RamdiskId'<~String> - Id of ramdisk with which to launch # * 'SubnetId'<~String> - VPC option to specify subnet to launch instance into # * 'UserData'<~String> - Additional data to provide to booting instances # * 'EbsOptimized'<~Boolean> - Whether the instance is optimized for EBS I/O # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'groupSet'<~Array>: groups the instances are members in # * 'groupName'<~String> - Name of group # * 'instancesSet'<~Array>: returned instances # * instance<~Hash>: # * 'amiLaunchIndex'<~Integer> - reference to instance in launch group # * 'architecture'<~String> - architecture of image in [i386, x86_64] # * 'blockDeviceMapping'<~Array> # * 'attachTime'<~Time> - time of volume attachment # * 'deleteOnTermination'<~Boolean> - whether or not to delete volume on termination # * 'deviceName'<~String> - specifies how volume is exposed to instance # * 'status'<~String> - status of attached volume # * 'volumeId'<~String> - Id of attached volume # * 'hibernationOptions'<~Array> # * 'configured'<~Boolean> - whether or not the instance is enabled for hibernation # * 'dnsName'<~String> - public dns name, blank until instance is running # * 'imageId'<~String> - image id of ami used to launch instance # * 'instanceId'<~String> - id of the instance # * 'instanceState'<~Hash>: # * 'code'<~Integer> - current status code # * 'name'<~String> - current status name # * 'instanceType'<~String> - type of instance # * 
'ipAddress'<~String> - public ip address assigned to instance # * 'kernelId'<~String> - Id of kernel used to launch instance # * 'keyName'<~String> - name of key used launch instances or blank # * 'launchTime'<~Time> - time instance was launched # * 'monitoring'<~Hash>: # * 'state'<~Boolean - state of monitoring # * 'placement'<~Hash>: # * 'availabilityZone'<~String> - Availability zone of the instance # * 'privateDnsName'<~String> - private dns name, blank until instance is running # * 'privateIpAddress'<~String> - private ip address assigned to instance # * 'productCodes'<~Array> - Product codes for the instance # * 'ramdiskId'<~String> - Id of ramdisk used to launch instance # * 'reason'<~String> - reason for most recent state transition, or blank # * 'rootDeviceName'<~String> - specifies how the root device is exposed to the instance # * 'rootDeviceType'<~String> - root device type used by AMI in [ebs, instance-store] # * 'ebsOptimized'<~Boolean> - Whether the instance is optimized for EBS I/O # * 'ownerId'<~String> - Id of owner # * 'requestId'<~String> - Id of request # * 'reservationId'<~String> - Id of reservation # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-RunInstances.html] def run_instances(image_id, min_count, max_count, options = {}) if block_device_mapping = options.delete('BlockDeviceMapping') block_device_mapping.each_with_index do |mapping, index| for key, value in mapping options.merge!({ format("BlockDeviceMapping.%d.#{key}", index) => value }) end end end if hibernation_options = options.delete('HibernationOptions') hibernation_options.each_with_index do |mapping, index| for key, value in mapping options.merge!({ format("HibernationOptions.%d.#{key}", index) => value }) end end end if security_groups = options.delete('SecurityGroup') options.merge!(Fog::AWS.indexed_param('SecurityGroup', [*security_groups])) end if security_group_ids = options.delete('SecurityGroupId') options.merge!(Fog::AWS.indexed_param('SecurityGroupId', [*security_group_ids])) end if options['UserData'] options['UserData'] = Base64.encode64(options['UserData']) end if network_interfaces = options.delete('NetworkInterfaces') network_interfaces.each_with_index do |mapping, index| iface = format("NetworkInterface.%d", index) for key, value in mapping case key when "SecurityGroupId" options.merge!(Fog::AWS.indexed_param("#{iface}.SecurityGroupId", [*value])) else options.merge!({ "#{iface}.#{key}" => value }) end end end end if tag_specifications = options.delete('TagSpecifications') # From https://docs.aws.amazon.com/sdk-for-ruby/v2/api/Aws/EC2/Client.html#run_instances-instance_method # And https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html # Discussed at https://github.com/fog/fog-aws/issues/603 # # Example # # TagSpecifications: [ # { # ResourceType: "instance", # Tags: [ # { # Key: "Project", # Value: "MyProject", # }, # ], # }, # { # ResourceType: "volume", # Tags: [ # { # Key: "Project", # Value: "MyProject", # }, # ], # }, # ] tag_specifications.each_with_index do |val, idx| resource_type = val["ResourceType"] tags = val["Tags"] options["TagSpecification.#{idx}.ResourceType"] = resource_type tags.each_with_index do |tag, tag_idx| aws_tag_key = "TagSpecification.#{idx}.Tag.#{tag_idx}.Key" aws_tag_value = "TagSpecification.#{idx}.Tag.#{tag_idx}.Value" options[aws_tag_key] = tag["Key"] options[aws_tag_value] = tag["Value"] end end end idempotent = !(options['ClientToken'].nil? || options['ClientToken'].empty?) 
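        # Only mark the request as idempotent when the caller supplied a
        # ClientToken; EC2 uses that token to de-duplicate repeated launches.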
request({ 'Action' => 'RunInstances', 'ImageId' => image_id, 'MinCount' => min_count, 'MaxCount' => max_count, :idempotent => idempotent, :parser => Fog::Parsers::AWS::Compute::RunInstances.new }.merge!(options)) end end class Mock def run_instances(image_id, min_count, max_count, options = {}) response = Excon::Response.new response.status = 200 group_set = [ (options['SecurityGroup'] || 'default') ].flatten instances_set = [] reservation_id = Fog::AWS::Mock.reservation_id if options['KeyName'] && describe_key_pairs('key-name' => options['KeyName']).body['keySet'].empty? raise Fog::AWS::Compute::NotFound.new("The key pair '#{options['KeyName']}' does not exist") end min_count.times do |i| instance_id = Fog::AWS::Mock.instance_id availability_zone = options['Placement.AvailabilityZone'] || Fog::AWS::Mock.availability_zone(@region) block_device_mapping = (options['BlockDeviceMapping'] || []).reduce([]) do |mapping, device| device_name = device.fetch("DeviceName", "/dev/sda1") volume_size = device.fetch("Ebs.VolumeSize", 15) # @todo should pull this from the image delete_on_termination = device.fetch("Ebs.DeleteOnTermination", true) # @todo should pull this from the image volume_id = create_volume(availability_zone, volume_size).data[:body]["volumeId"] self.data[:volumes][volume_id].merge!("DeleteOnTermination" => delete_on_termination) mapping << { "deviceName" => device_name, "volumeId" => volume_id, "status" => "attached", "attachTime" => Time.now, "deleteOnTermination" => delete_on_termination, } end hibernation_options = (options['HibernationOptions'] || []).reduce([]) do |mapping, device| configure = device.fetch("Configure", true) mapping << { "Configure" => configure, } end if options['SubnetId'] if options['PrivateIpAddress'] ni_options = {'PrivateIpAddress' => options['PrivateIpAddress']} else ni_options = {} end network_interface_id = create_network_interface(options['SubnetId'], ni_options).body['networkInterface']['networkInterfaceId'] end network_interfaces = (options['NetworkInterfaces'] || []).reduce([]) do |mapping, device| device_index = device.fetch("DeviceIndex", 0) subnet_id = device.fetch("SubnetId", options[:subnet_id] || Fog::AWS::Mock.subnet_id) private_ip_address = device.fetch("PrivateIpAddress", options[:private_ip_address] || Fog::AWS::Mock.private_ip_address) delete_on_termination = device.fetch("DeleteOnTermination", true) description = device.fetch("Description", "mock_network_interface") security_group_id = device.fetch("SecurityGroupId", self.data[:security_groups]['default']['groupId']) interface_options = { "PrivateIpAddress" => private_ip_address, "GroupSet" => device.fetch("GroupSet", [security_group_id]), "Description" => description } interface_id = device.fetch("NetworkInterfaceId", create_network_interface(subnet_id, interface_options)) mapping << { "networkInterfaceId" => interface_id, "subnetId" => subnet_id, "status" => "attached", "attachTime" => Time.now, "deleteOnTermination" => delete_on_termination, } end instance = { 'amiLaunchIndex' => i, 'associatePublicIP' => options['associatePublicIP'] || false, 'architecture' => 'i386', 'blockDeviceMapping' => block_device_mapping, 'hibernationOptions' => hibernation_options, 'networkInterfaces' => network_interfaces, 'clientToken' => options['clientToken'], 'dnsName' => nil, 'ebsOptimized' => options['EbsOptimized'] || false, 'hypervisor' => 'xen', 'imageId' => image_id, 'instanceId' => instance_id, 'instanceState' => { 'code' => 0, 'name' => 'pending' }, 'instanceType' => options['InstanceType'] || 
'm1.small', 'kernelId' => options['KernelId'] || Fog::AWS::Mock.kernel_id, 'keyName' => options['KeyName'], 'launchTime' => Time.now, 'monitoring' => { 'state' => options['Monitoring.Enabled'] || false }, 'placement' => { 'availabilityZone' => availability_zone, 'groupName' => nil, 'tenancy' => options['Placement.Tenancy'] || 'default' }, 'privateDnsName' => nil, 'productCodes' => [], 'reason' => nil, 'rootDeviceName' => block_device_mapping.first && block_device_mapping.first["deviceName"], 'rootDeviceType' => 'instance-store', 'spotInstanceRequestId' => options['SpotInstanceRequestId'], 'subnetId' => options['SubnetId'], 'virtualizationType' => 'paravirtual' } instances_set << instance self.data[:instances][instance_id] = instance.merge({ 'groupIds' => [], 'groupSet' => group_set, 'iamInstanceProfile' => {}, 'ownerId' => self.data[:owner_id], 'reservationId' => reservation_id, 'stateReason' => {} }) if options['SubnetId'] self.data[:instances][instance_id]['vpcId'] = self.data[:subnets].find{|subnet| subnet['subnetId'] == options['SubnetId'] }['vpcId'] attachment_id = attach_network_interface(network_interface_id, instance_id, '0').data[:body]['attachmentId'] modify_network_interface_attribute(network_interface_id, 'attachment', {'attachmentId' => attachment_id, 'deleteOnTermination' => 'true'}) end end response.body = { 'groupSet' => group_set, 'instancesSet' => instances_set, 'ownerId' => self.data[:owner_id], 'requestId' => Fog::AWS::Mock.request_id, 'reservationId' => reservation_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/start_instances.rb000066400000000000000000000042041437344660100243510ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/start_stop_instances' # Start specified instance # # ==== Parameters # * instance_id<~Array> - Id of instance to start # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * TODO: fill in the blanks # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-StartInstances.html] def start_instances(instance_id) params = Fog::AWS.indexed_param('InstanceId', instance_id) request({ 'Action' => 'StartInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::StartStopInstances.new }.merge!(params)) end end class Mock def start_instances(instance_id) instance_ids = Array(instance_id) instance_set = self.data[:instances].values instance_set = apply_tag_filters(instance_set, {'instance_id' => instance_ids}, 'instanceId') instance_set = instance_set.select {|x| instance_ids.include?(x["instanceId"]) } if instance_set.empty? 
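          # None of the requested ids matched: mirror the live API by raising
          # NotFound, quoting the first requested instance id.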
raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_ids.first}' does not exist") else response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'instancesSet' => instance_set.reduce([]) do |ia, instance| ia << {'currentState' => { 'code' => 0, 'name' => 'pending' }, 'previousState' => instance['instanceState'], 'instanceId' => instance['instanceId'] } instance['instanceState'] = {'code'=>0, 'name'=>'pending'} ia end } response end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/stop_instances.rb000066400000000000000000000054121437344660100242030ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/start_stop_instances' # Stop specified instance # # ==== Parameters # * instance_id<~Array> - Id of instance to start # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * TODO: fill in the blanks # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-StopInstances.html] def stop_instances(instance_id, options = {}) params = Fog::AWS.indexed_param('InstanceId', instance_id) unless options.is_a?(Hash) Fog::Logger.warning("stop_instances with #{options.class} param is deprecated, use stop_instances('force' => boolean) instead [light_black](#{caller.first})[/]") options = {'force' => options} end params.merge!('Force' => 'true') if options['force'] if options['hibernate'] params.merge!('Hibernate' => 'true') params.merge!('Force' => 'false') end request({ 'Action' => 'StopInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::StartStopInstances.new }.merge!(params)) end end class Mock def stop_instances(instance_id, options = {}) instance_ids = Array(instance_id) instance_set = self.data[:instances].values instance_set = apply_tag_filters(instance_set, {'instance_id' => instance_ids}, 'instanceId') instance_set = instance_set.select {|x| instance_ids.include?(x["instanceId"]) } if instance_set.empty? 
raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_ids.first}' does not exist") else response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'instancesSet' => instance_set.reduce([]) do |ia, instance| instance['classicLinkSecurityGroups'] = nil instance['classicLinkVpcId'] = nil ia << {'currentState' => { 'code' => 0, 'name' => 'stopping' }, 'previousState' => instance['instanceState'], 'instanceId' => instance['instanceId'] } instance['instanceState'] = {'code'=>0, 'name'=>'stopping'} ia end } response end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/terminate_instances.rb000066400000000000000000000066101437344660100252070ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/terminate_instances' # Terminate specified instances # # ==== Parameters # * instance_id<~Array> - Ids of instances to terminates # # ==== Returns # # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'instancesSet'<~Array>: # * 'instanceId'<~String> - id of the terminated instance # * 'previousState'<~Hash>: previous state of instance # * 'code'<~Integer> - previous status code # * 'name'<~String> - name of previous state # * 'shutdownState'<~Hash>: shutdown state of instance # * 'code'<~Integer> - current status code # * 'name'<~String> - name of current state # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-TerminateInstances.html] def terminate_instances(instance_id) params = Fog::AWS.indexed_param('InstanceId', instance_id) request({ 'Action' => 'TerminateInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::TerminateInstances.new }.merge!(params)) end end class Mock def terminate_instances(instance_id) response = Excon::Response.new instance_id = [*instance_id] if (self.data[:instances].keys & instance_id).length == instance_id.length response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'instancesSet' => [] } response.status = 200 for id in instance_id instance = self.data[:instances][id] instance['classicLinkSecurityGroups'] = nil instance['classicLinkVpcId'] = nil self.data[:deleted_at][id] = Time.now code = case instance['instanceState']['name'] when 'pending' 0 when 'running' 16 when 'shutting-down' 32 when 'terminated' 48 when 'stopping' 64 when 'stopped' 80 end state = { 'name' => 'shutting-down', 'code' => 32} response.body['instancesSet'] << { 'instanceId' => id, 'previousState' => instance['instanceState'], 'currentState' => state } instance['instanceState'] = state end describe_addresses.body['addressesSet'].each do |address| if instance_id.include?(address['instanceId']) disassociate_address(address['publicIp'], address['associationId']) end end describe_volumes.body['volumeSet'].each do |volume| if volume['attachmentSet'].first && instance_id.include?(volume['attachmentSet'].first['instanceId']) detach_volume(volume['volumeId']) end end response else raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_id}' does not exist") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/compute/unmonitor_instances.rb000066400000000000000000000036761437344660100252620ustar00rootroot00000000000000module Fog module AWS class Compute class Real require 'fog/aws/parsers/compute/monitor_unmonitor_instances' # UnMonitor specified instance # 
http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-UnmonitorInstances.html # # ==== Parameters # * instance_ids<~Array> - Arrays of instances Ids to monitor # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'requestId'<~String> - Id of request # * 'instancesSet': http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-ItemType-MonitorInstancesResponseSetItemType.html # # {Amazon API Reference}[http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-UnmonitorInstances.html] def unmonitor_instances(instance_ids) params = Fog::AWS.indexed_param('InstanceId', instance_ids) request({ 'Action' => 'UnmonitorInstances', :idempotent => true, :parser => Fog::Parsers::AWS::Compute::MonitorUnmonitorInstances.new }.merge!(params)) end end class Mock def unmonitor_instances(instance_ids) response = Excon::Response.new response.status = 200 [*instance_ids].each do |instance_id| if instance = self.data[:instances][instance_id] instance['monitoring']['state'] = 'enabled' else raise Fog::AWS::Compute::NotFound.new("The instance ID '#{instance_ids}' does not exist") end end instances_set = [*instance_ids].reduce([]) { |memo, id| memo << {'instanceId' => id, 'monitoring' => 'disabled'} } response.body = {'requestId' => 'some_request_id', 'instancesSet' => instances_set} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/000077500000000000000000000000001437344660100217425ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/activate_pipeline.rb000066400000000000000000000016051437344660100257560ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Activate a pipeline # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_ActivatePipeline.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline to activate # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def activate_pipeline(id) params = { 'pipelineId' => id } response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.ActivatePipeline' } }) end end class Mock def activate_pipeline(id) response = Excon::Response.new pipeline = find_pipeline(id) pipeline[:active] = true response.body = {} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/create_pipeline.rb000066400000000000000000000037651437344660100254320ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Create a pipeline # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_CreatePipeline.html # ==== Parameters # * UniqueId <~String> - A unique ID for of the pipeline # * Name <~String> - The name of the pipeline # * Tags <~Hash> - Key/value string pairs to categorize the pipeline # * Description <~String> - Description of the pipeline # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_pipeline(unique_id, name, description=nil, tags=nil) params = { 'uniqueId' => unique_id, 'name' => name, } params['tags'] = tags.map {|k,v| {"key" => k.to_s, "value" => v.to_s}} unless tags.nil? || tags.empty? 
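          # The Data Pipeline API expects tags as an array of
          # {"key" => ..., "value" => ...} hashes, so the plain Ruby Hash passed
          # in is converted above before being sent.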
params['Description'] = description if description response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.CreatePipeline' }, }) end end class Mock def create_pipeline(unique_id, name, description=nil, tags=nil) response = Excon::Response.new if existing_pipeline = self.data[:pipelines][unique_id] {"pipelineId" => existing_pipeline["pipelineId"]} else pipeline_id = Fog::AWS::Mock.data_pipeline_id mapped_tags = if tags tags.map { |k,v| {"key" => k.to_s, "value" => v.to_s}} else [] end pipeline = { "name" => name, "description" => description, "fields" => mapped_tags, "pipelineId" => pipeline_id, } self.data[:pipelines][unique_id] = pipeline response.body = {"pipelineId" => pipeline_id} end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/deactivate_pipeline.rb000066400000000000000000000023201437344660100262620ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Activate a pipeline # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DectivatePipeline.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline to activate # ' cancelActive <~Boolean> - Indicates whether to cancel any running objects. The default is true, which sets the state of any running objects to CANCELED. If this value is false, the pipeline is deactivated after all running objects finish. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def deactivate_pipeline(id, cancel_active=true) params = { 'pipelineId' => id, 'cancelActive' => cancel_active } response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.DectivatePipeline' } }) end end class Mock def deactivate_pipeline(id, cancel_active=true) response = Excon::Response.new pipeline = find_pipeline(id) pipeline[:active] = false response.body = {} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/delete_pipeline.rb000066400000000000000000000015731437344660100254240ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Delete a pipeline # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DeletePipeline.html # ==== Parameters # * PipelineId <~String> - The id of the pipeline to delete # ==== Returns # * success<~Boolean> - Whether the delete was successful def delete_pipeline(id) params = { 'pipelineId' => id } response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.DeletePipeline' }, }) 200 == response.status end end class Mock def delete_pipeline(id) response = Excon::Response.new pipeline = find_pipeline(id) pipeline[:deleted] = true true end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/describe_objects.rb000066400000000000000000000032751437344660100255670ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Queries a pipeline for the names of objects that match a specified set of conditions. # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DescribeObjects.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline # * ObjectIds <~Array> - Identifiers of the pipeline objects that contain the definitions # to be described. You can pass as many as 25 identifiers in a # single call to DescribeObjects. # * Options <~Hash> - A Hash of additional options desrcibed in the API docs. 
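        # ==== Examples
        # Illustrative pipeline/object ids only, assuming `data_pipeline` is an
        # existing Fog::AWS::DataPipeline connection.
        #
        #   data_pipeline.describe_objects('df-0123456789ABC', ['Default', 'Schedule'])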
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_objects(id, objectIds, options={}) params = options.merge({ 'pipelineId' => id, 'objectIds' => objectIds, }) response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.DescribeObjects' }, }) end end class Mock def describe_objects(id, objects, options={}) response = Excon::Response.new find_pipeline(id) pipeline_objects = self.data[:pipeline_definitions][id]["pipelineObjects"].select { |o| objects.include?(o["id"]) } response.body = { "hasMoreResults" => false, "marker" => options[:marker], "pipelineObjects" => [ { "fields" => pipeline_objects } ] } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/describe_pipelines.rb000066400000000000000000000017211437344660100261200ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Describe pipelines # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_DescribePipelines.html # ==== Parameters # * PipelineIds <~String> - ID of pipeline to retrieve information for # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_pipelines(ids) params = {} params['pipelineIds'] = ids response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.DescribePipelines' }, }) end end class Mock def describe_pipelines(ids) response = Excon::Response.new response.body = {"pipelineDescriptionList" => self.data[:pipelines].values.select { |p| !p[:deleted] && ids.include?(p["pipelineId"]) } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/get_pipeline_definition.rb000066400000000000000000000017141437344660100271460ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Get pipeline definition JSON # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_GetPipelineDefinition.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def get_pipeline_definition(id) params = { 'pipelineId' => id, } response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.GetPipelineDefinition' }, }) end end class Mock def get_pipeline_definition(id) response = Excon::Response.new pipeline = find_pipeline(id) response.body = self.data[:pipeline_definitions][id] || {"pipelineObjects" => []} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/list_pipelines.rb000066400000000000000000000017431437344660100253170ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # List all pipelines # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_ListPipelines.html # ==== Parameters # * Marker <~String> - The starting point for the results to be returned. 
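        # ==== Example
        #   # Hypothetical sketch; pass :marker only when fetching a subsequent page.
        #   data_pipeline.list_pipelines
        #   data_pipeline.list_pipelines(:marker => 'marker-from-previous-response')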
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def list_pipelines(options={}) params = {} params['Marker'] = options[:marker] if options[:marker] response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.ListPipelines' }, }) end end class Mock def list_pipelines(options={}) response = Excon::Response.new response.body = {"pipelineIdList" => self.data[:pipelines].values.map { |p| {"id" => p["pipelineId"], "name" => p["name"]} } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/put_pipeline_definition.rb000066400000000000000000000056541437344660100272060ustar00rootroot00000000000000module Fog module AWS class DataPipeline module Shared class JSONObject def initialize(object) @json_fields = object.clone @id = @json_fields.delete('id') @name = @json_fields.delete('name') || @id end def to_api { 'id' => @id, 'name' => @name, 'fields' => fields } end private def fields @json_fields.map{|k,v| field_for_kv(k,v)}.flatten end def field_for_kv(key, value) if value.is_a?(Hash) { 'key' => key, 'refValue' => value['ref'], 'stringValue' => value['stringValue'] } elsif value.is_a?(Array) value.map { |subvalue| field_for_kv(key, subvalue) } else { 'key' => key, 'stringValue' => value } end end end # Take a list of pipeline object hashes as specified in the Data Pipeline JSON format # and transform it into the format expected by the API def transform_objects(objects) objects.map { |object| JSONObject.new(object).to_api } end end class Real include Shared # Put raw pipeline definition JSON # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline # * PipelineObjects <~String> - Objects in the pipeline # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def put_pipeline_definition(id, pipeline_objects, options={}) params = { 'pipelineId' => id, 'pipelineObjects' => transform_objects(pipeline_objects), }.merge(options) response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.PutPipelineDefinition' }, }) end end class Mock include Shared def put_pipeline_definition(id, pipeline_objects, _options={}) response = Excon::Response.new options = _options.dup pipeline = find_pipeline(id) stringified_objects = if pipeline_objects.any? transform_objects(stringify_keys(pipeline_objects)) else options.each { |k,v| options[k] = transform_objects(stringify_keys(v)) } end if stringified_objects.is_a?(Array) stringified_objects = {"pipelineObjects" => stringified_objects} end self.data[:pipeline_definitions][id] = stringified_objects response.body = {"errored" => false, "validationErrors" => [], "validationWarnings" => []} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/data_pipeline/query_objects.rb000066400000000000000000000025161437344660100251510ustar00rootroot00000000000000module Fog module AWS class DataPipeline class Real # Queries a pipeline for the names of objects that match a specified set of conditions. # http://docs.aws.amazon.com/datapipeline/latest/APIReference/API_QueryObjects.html # ==== Parameters # * PipelineId <~String> - The ID of the pipeline # * Sphere <~String> - Specifies whether the query applies to components or instances. # Allowable values: COMPONENT, INSTANCE, ATTEMPT. # * Marker <~String> - The starting point for the results to be returned. 
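        # ==== Example
        #   # Hypothetical sketch; the pipeline id is made up.
        #   data_pipeline.query_objects('df-0123456789ABCDEFGHIJ', 'INSTANCE')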
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def query_objects(id, sphere, options={}) params = { 'pipelineId' => id, 'sphere' => sphere, } params['marker'] = options[:marker] if options[:marker] response = request({ :body => Fog::JSON.encode(params), :headers => { 'X-Amz-Target' => 'DataPipeline.QueryObjects' }, }) end end class Mock def query_objects(id, sphere, options={}) response = Excon::Response.new find_pipeline(id) response.body = {"hasMoreResults" => false, "ids" => ["Default"]} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/000077500000000000000000000000001437344660100177305ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/dns/change_resource_record_sets.rb000066400000000000000000000210701437344660100260050ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/change_resource_record_sets' # Use this action to create or change your authoritative DNS information for a zone # http://docs.amazonwebservices.com/Route53/latest/DeveloperGuide/RRSchanges.html#RRSchanges_API # # ==== Parameters # * zone_id<~String> - ID of the zone these changes apply to # * options<~Hash> # * comment<~String> - Any comments you want to include about the change. # * change_batch<~Array> - The information for a change request # * changes<~Hash> - # * action<~String> - 'CREATE' or 'DELETE' # * name<~String> - This must be a fully-specified name, ending with a final period # * type<~String> - A | AAAA | CNAME | MX | NS | PTR | SOA | SPF | SRV | TXT # * ttl<~Integer> - Time-to-live value - omit if using an alias record # * weight<~Integer> - Time-to-live value - omit if using an alias record # * set_identifier<~String> - An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. # * region<~String> - The Amazon EC2 region where the resource that is specified in this resource record set resides. (Latency only) # * failover<~String> - To configure failover, you add the Failover element to two resource record sets. For one resource record set, you specify PRIMARY as the value for Failover; for the other resource record set, you specify SECONDARY. # * geo_location<~String XML> - A complex type currently requiring XML that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. # * health_check_id<~String> - If you want Amazon Route 53 to return this resource record set in response to a DNS query only when a health check is passing, include the HealthCheckId element and specify the ID of the applicable health check. # * resource_records<~Array> - Omit if using an alias record # * alias_target<~Hash> - Information about the domain to which you are redirecting traffic (Alias record sets only) # * dns_name<~String> - The Elastic Load Balancing domain to which you want to reroute traffic # * hosted_zone_id<~String> - The ID of the hosted zone that contains the Elastic Load Balancing domain to which you want to reroute traffic # * evaluate_target_health<~Boolean> - Applies only to alias, weighted alias, latency alias, and failover alias resource record sets: If you set the value of EvaluateTargetHealth to true, the alias resource record sets inherit the health of the referenced resource record sets. 
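        #
        # For an alias record set the :ttl and :resource_records keys are omitted and an
        # :alias_target hash is supplied instead, for example (hedged sketch; the hosted
        # zone id and DNS name are placeholders):
        #   {
        #     :action       => "CREATE",
        #     :name         => "www.example.com.",
        #     :type         => "A",
        #     :alias_target => {
        #       :hosted_zone_id         => "Z0000000000000",
        #       :dns_name               => "my-elb-1234567890.us-east-1.elb.amazonaws.com.",
        #       :evaluate_target_health => false
        #     }
        #   }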
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ChangeInfo'<~Hash> # * 'Id'<~String> - The ID of the request # * 'Status'<~String> - status of the request - PENDING | INSYNC # * 'SubmittedAt'<~String> - The date and time the change was made # * status<~Integer> - 200 when successful # # ==== Examples # # Example changing a CNAME record: # # change_batch_options = [ # { # :action => "DELETE", # :name => "foo.example.com.", # :type => "CNAME", # :ttl => 3600, # :resource_records => [ "baz.example.com." ] # }, # { # :action => "CREATE", # :name => "foo.example.com.", # :type => "CNAME", # :ttl => 3600, # :resource_records => [ "bar.example.com." ] # } # ] # # change_resource_record_sets("ABCDEFGHIJKLMN", change_batch_options) # def change_resource_record_sets(zone_id, change_batch, options = {}) body = Fog::AWS::DNS.change_resource_record_sets_data(zone_id, change_batch, @version, options) request({ :body => body, :idempotent => true, :parser => Fog::Parsers::AWS::DNS::ChangeResourceRecordSets.new, :expects => 200, :method => 'POST', :path => "hostedzone/#{zone_id}/rrset" }) end end class Mock SET_PREFIX = 'SET_' def record_exist?(zone,change,change_name) return false if zone[:records][change[:type]].nil? current_records = zone[:records][change[:type]][change_name] return false if current_records.nil? if !change[:set_identifier].empty? !current_records[change[:SetIdentifier]].nil? else !current_records.empty? end end def change_resource_record_sets(zone_id, change_batch, options = {}) response = Excon::Response.new errors = [] if (zone = self.data[:zones][zone_id]) response.status = 200 change_id = Fog::AWS::Mock.change_id change_batch.each do |change| change_name = change[:name] change_name = change_name + "." unless change_name.end_with?(".") case change[:action] when "CREATE" if zone[:records][change[:type]].nil? zone[:records][change[:type]] = {} end if !record_exist?(zone, change, change_name) # raise change.to_s if change[:resource_records].nil? new_record = if change[:alias_target] record = { :alias_target => change[:alias_target] } else record = { :ttl => change[:ttl].to_s, } end new_record = { :change_id => change_id, :resource_records => change[:resource_records] || [], :name => change_name, :type => change[:type], :set_identifier => change[:set_identifier], :weight => change[:weight] }.merge(record) if change[:set_identifier].nil? zone[:records][change[:type]][change_name] = new_record else zone[:records][change[:type]][change_name] = {} if zone[:records][change[:type]][change_name].nil? zone[:records][change[:type]][change_name][SET_PREFIX + change[:set_identifier]] = new_record end else errors << "Tried to create resource record set #{change[:name]}. type #{change[:type]}, but it already exists" end when "DELETE" action_performed = false if !zone[:records][change[:type]].nil? && !zone[:records][change[:type]][change_name].nil? && !change[:set_identifier].nil? action_performed = true unless zone[:records][change[:type]][change_name].delete(SET_PREFIX + change[:set_identifier]).nil? zone[:records][change[:type]].delete(change_name) if zone[:records][change[:type]][change_name].empty? elsif !zone[:records][change[:type]].nil? action_performed = true unless zone[:records][change[:type]].delete(change_name).nil? end if !action_performed errors << "Tried to delete resource record set #{change[:name]}. type #{change[:type]}, but it was not found" end end end if errors.empty? 
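              # All changes applied cleanly: record a PENDING ChangeInfo entry so the
              # get_change mock can report it later, mirroring the real API response.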
change = { :id => change_id, :status => 'PENDING', :submitted_at => Time.now.utc.iso8601 } self.data[:changes][change[:id]] = change response.body = { 'Id' => change[:id], 'Status' => change[:status], 'SubmittedAt' => change[:submitted_at] } response else raise Fog::AWS::DNS::Error.new("InvalidChangeBatch => #{errors.join(", ")}") end else raise Fog::AWS::DNS::NotFound.new("NoSuchHostedZone => A hosted zone with the specified hosted zone ID does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/create_health_check.rb000066400000000000000000000074061437344660100242110ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/health_check' # This action creates a new health check. # http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html # # ==== Parameters (options as symbols Hash) # * ip_address<~String> - (optional if fqdn) The IPv4 IP address of the endpoint on which you want Amazon Route 53 to perform health checks # * port<~Integer> - The port on the endpoint on which you want Amazon Route 53 to perform health checks # * type<~String> - HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP # * resource_path<~Stringy> - (required for all types except TCP) The path that you want Amazon Route 53 to request when performing health checks. The path can be any value for which your endpoint will return an HTTP status code of 2xx or 3xx when the endpoint is healthy # * fqdn<~String> - (optional if ip_address) The value that you want Amazon Route 53 to pass in the Host header in all health checks except TCP health checks # * search_string<~String> - If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that you want Amazon Route 53 to search for in the response body from the specified resource # * request_interval<~String> - 10 | 30 (optional) The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request # * failure_threshold<~Integer> - 1-10 (optional) The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HealthCheck'<~Hash> # * 'Id'<~String> - The ID of the request # * 'CallerReference'<~String> - A unique string that identifies the request and that allows failed CreateHealthCheck requests to be retried without the risk of executing the operation twice. # * 'HealthCheckConfig'<~Hash> # * 'IPAddress' # * 'Port' # * 'Type' # * 'ResourcePath' # * 'FullyQualifiedDomainName' # * 'RequestInterval' # * 'FailureThreshold' # * status<~Integer> - 201 when successful def create_health_check(ip_address, port, type, options = {}) version = @version builder = Nokogiri::XML::Builder.new(:encoding => 'UTF-8') do |xml| xml.CreateHealthCheckRequest(:xmlns => "https://route53.amazonaws.com/doc/#{version}/") do xml.CallerReference options[:caller_reference] || "#{Time.now.to_i.to_s}-#{SecureRandom.hex(6)}" xml.HealthCheckConfig do xml.IPAddress ip_address unless ip_address.nil? 
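                # Port and Type are always emitted; the elements below are only written
                # when the corresponding option was supplied.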
xml.Port port xml.Type type xml.ResourcePath options[:resource_path] if options.has_key?(:resource_path) xml.FullyQualifiedDomainName options[:fqdn] if options.has_key?(:fqdn) xml.SearchString options[:search_string] if options.has_key?(:search_string) xml.RequestInterval options[:request_interval] if options.has_key?(:request_interval) xml.FailureThreshold options[:failure_threshold] if options.has_key?(:failure_threshold) end end end request({ :body => builder.to_xml.to_s, :expects => 201, :method => 'POST', :path => 'healthcheck', :parser => Fog::Parsers::AWS::DNS::HealthCheck.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/create_hosted_zone.rb000066400000000000000000000111711437344660100241220ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/create_hosted_zone' # Creates a new hosted zone # # ==== Parameters # * name<~String> - The name of the domain. Must be a fully-specified domain that ends with a period # * options<~Hash> # * caller_ref<~String> - unique string that identifies the request & allows failed # calls to be retried without the risk of executing the operation twice # * comment<~String> - # * vpc_id<~String> - specify both a VPC's ID and its region to create a private zone for that VPC # * vpc_region<~String> - specify both a VPC's ID and its region to create a private zone for that VPC # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HostedZone'<~Hash>: # * 'Id'<~String> - # * 'Name'<~String> - # * 'CallerReference'<~String> # * 'Comment'<~String> - # * 'ChangeInfo'<~Hash> - # * 'Id'<~String> # * 'Status'<~String> # * 'SubmittedAt'<~String> # * 'NameServers'<~Array> # * 'NameServer'<~String> # * status<~Integer> - 201 when successful def create_hosted_zone(name, options = {}) optional_tags = '' if options[:caller_ref] optional_tags += "#{options[:caller_ref]}" else #make sure we have a unique call reference caller_ref = "ref-#{rand(1000000).to_s}" optional_tags += "#{caller_ref}" end if options[:comment] optional_tags += "#{options[:comment]}" end if options[:vpc_id] and options[:vpc_region] optional_tags += "#{options[:vpc_id]}#{options[:vpc_region]}" end request({ :body => %Q{#{name}#{optional_tags}}, :parser => Fog::Parsers::AWS::DNS::CreateHostedZone.new, :expects => 201, :method => 'POST', :path => "hostedzone" }) end end class Mock require 'time' def create_hosted_zone(name, options = {}) # Append a trailing period to the name if absent. name = name + "." 
unless name.end_with?(".") response = Excon::Response.new if list_hosted_zones.body['HostedZones'].select {|z| z['Name'] == name}.size < self.data[:limits][:duplicate_domains] response.status = 201 if options[:caller_ref] caller_ref = options[:caller_ref] else #make sure we have a unique call reference caller_ref = "ref-#{rand(1000000).to_s}" end zone_id = "/hostedzone/#{Fog::AWS::Mock.zone_id}" self.data[:zones][zone_id] = { :id => zone_id, :name => name, :reference => caller_ref, :comment => options[:comment], :records => {} } change = { :id => Fog::AWS::Mock.change_id, :status => 'PENDING', :submitted_at => Time.now.utc.iso8601 } self.data[:changes][change[:id]] = change response.body = { 'HostedZone' => { 'Id' => zone_id, 'Name' => name, 'CallerReference' => caller_ref, 'Comment' => options[:comment] }, 'ChangeInfo' => { 'Id' => change[:id], 'Status' => change[:status], 'SubmittedAt' => change[:submitted_at] }, 'NameServers' => Fog::AWS::Mock.nameservers } response else raise Fog::AWS::DNS::Error.new("DelegationSetNotAvailable => Amazon Route 53 allows some duplication, but Amazon Route 53 has a maximum threshold of duplicated domains. This error is generated when you reach that threshold. In this case, the error indicates that too many hosted zones with the given domain name exist. If you want to create a hosted zone and Amazon Route 53 generates this error, contact Customer Support.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/delete_health_check.rb000066400000000000000000000011331437344660100241770ustar00rootroot00000000000000module Fog module AWS class DNS class Real # This action deletes a health check. # http://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteHealthCheck.html # # ==== Parameters # * id<~String> - Health check ID # === Returns # * response<~Excon::Response>: # * status<~Integer> - 200 when successful def delete_health_check(id) request({ :expects => 200, :method => 'DELETE', :path => "healthcheck/#{id}" }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/delete_hosted_zone.rb000066400000000000000000000040141437344660100241170ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/delete_hosted_zone' # Delete a hosted zone # # ==== Parameters # * zone_id<~String> - # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ChangeInfo'<~Hash> - # * 'Id'<~String> The ID of the request # * 'Status'<~String> The current state of the hosted zone # * 'SubmittedAt'<~String> The date and time the change was made # * status<~Integer> - 200 when successful def delete_hosted_zone(zone_id) # AWS methods return zone_ids that looks like '/hostedzone/id'. Let the caller either use # that form or just the actual id (which is what this request needs) zone_id = zone_id.sub('/hostedzone/', '') request({ :expects => 200, :parser => Fog::Parsers::AWS::DNS::DeleteHostedZone.new, :method => 'DELETE', :path => "hostedzone/#{zone_id}" }) end end class Mock require 'time' def delete_hosted_zone(zone_id) response = Excon::Response.new key = [zone_id, "/hostedzone/#{zone_id}"].find { |k| !self.data[:zones][k].nil? 
} || raise(Fog::AWS::DNS::NotFound.new("NoSuchHostedZone => A hosted zone with the specified hosted zone does not exist.")) change = { :id => Fog::AWS::Mock.change_id, :status => 'INSYNC', :submitted_at => Time.now.utc.iso8601 } self.data[:changes][change[:id]] = change response.status = 200 response.body = { 'ChangeInfo' => { 'Id' => change[:id], 'Status' => change[:status], 'SubmittedAt' => change[:submitted_at] } } self.data[:zones].delete(key) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/get_change.rb000066400000000000000000000034041437344660100223420ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/get_change' # returns the current state of a change request # # ==== Parameters # * change_id<~String> # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Id'<~String> # * 'Status'<~String> # * 'SubmittedAt'<~String> # * status<~Integer> - 200 when successful def get_change(change_id) # AWS methods return change_ids that looks like '/change/id'. Let the caller either use # that form or just the actual id (which is what this request needs) change_id = change_id.sub('/change/', '') request({ :expects => 200, :parser => Fog::Parsers::AWS::DNS::GetChange.new, :method => 'GET', :path => "change/#{change_id}" }) end end class Mock def get_change(change_id) response = Excon::Response.new # find the record with matching change_id # records = data[:zones].values.map{|z| z[:records].values.map{|r| r.values}}.flatten change = self.data[:changes][change_id] || raise(Fog::AWS::DNS::NotFound.new("NoSuchChange => Could not find resource with ID: #{change_id}")) response.status = 200 submitted_at = Time.parse(change[:submitted_at]) response.body = { 'Id' => change[:id], # set as insync after some time 'Status' => (submitted_at + Fog::Mock.delay) < Time.now ? 'INSYNC' : change[:status], 'SubmittedAt' => change[:submitted_at] } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/get_health_check.rb000066400000000000000000000025261437344660100235230ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/health_check' # This action gets information about a specified health check. 
#http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetHealthCheck.html # # ==== Parameters # * id<~String> - The ID of the health check # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HealthCheck'<~Hash>: # * 'Id'<~String> - # * 'CallerReference'<~String> # * 'HealthCheckConfig'<~Hash>: # * 'IPAddress'<~String> - # * 'Port'<~String> - # * 'Type'<~String> - # * 'ResourcePath'<~String> - # * 'FullyQualifiedDomainName'<~String> - # * 'SearchString'<~String> - # * 'RequestInterval'<~Integer> - # * 'FailureThreshold'<~String> - # * 'HealthCheckVersion'<~Integer> - # * status<~Integer> - 200 when successful def get_health_check(id) request({ :expects => 200, :parser => Fog::Parsers::AWS::DNS::HealthCheck.new, :method => 'GET', :path => "healthcheck/#{id}" }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/get_hosted_zone.rb000066400000000000000000000036171437344660100234440ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/get_hosted_zone' # retrieve information about a hosted zone # # ==== Parameters # * zone_id<~String> - The ID of the hosted zone # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HostedZone'<~Hash>: # * 'Id'<~String> - # * 'Name'<~String> - # * 'CallerReference'<~String> # * 'Comment'<~String> - # * 'NameServers'<~Array> # * 'NameServer'<~String> # * status<~Integer> - 200 when successful def get_hosted_zone(zone_id) # AWS methods return zone_ids that looks like '/hostedzone/id'. Let the caller either use # that form or just the actual id (which is what this request needs) zone_id = zone_id.sub('/hostedzone/', '') request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::DNS::GetHostedZone.new, :path => "hostedzone/#{zone_id}" }) end end class Mock def get_hosted_zone(zone_id) response = Excon::Response.new if (zone = self.data[:zones][zone_id]) response.status = 200 response.body = { 'HostedZone' => { 'Id' => zone[:id], 'Name' => zone[:name], 'CallerReference' => zone[:reference], 'Comment' => zone[:comment] }, 'NameServers' => Fog::AWS::Mock.nameservers } response else raise Fog::AWS::DNS::NotFound.new("NoSuchHostedZone => A hosted zone with the specified hosted zone ID does not exist.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/list_health_checks.rb000066400000000000000000000021211437344660100240710ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/list_health_checks' # This action gets a list of the health checks that are associated with the current AWS account. 
# http://docs.aws.amazon.com/Route53/latest/APIReference/API_ListHealthChecks.html # # === Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HealthChecks'<~Array>: # * 'HealthCheck'<~Hash>: # * 'Id'<~String> - # * 'CallerReference'<~String> # * 'HealthCheckVersion'<~Integer> - # * 'Marker'<~String> - # * 'MaxItems'<~Integer> - # * 'IsTruncated'<~String> - # * 'NextMarker'<~String> # * status<~Integer> - 200 when successful def list_health_checks request({ :expects => 200, :method => 'GET', :path => "healthcheck", :parser => Fog::Parsers::AWS::DNS::ListHealthChecks.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/list_hosted_zones.rb000066400000000000000000000051031437344660100240130ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/list_hosted_zones' # Describe all or specified instances # # ==== Parameters # * options<~Hash> # * marker<~String> - Indicates where to begin in your list of hosted zones. # * max_items<~Integer> - The maximum number of hosted zones to be included in the response body # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'HostedZones'<~Array>: # * 'HostedZone'<~Hash>: # * 'Id'<~String> - # * 'Name'<~String> - # * 'CallerReference'<~String> # * 'Comment'<~String> - # * 'Marker'<~String> - # * 'MaxItems'<~Integer> - # * 'IsTruncated'<~String> - # * 'NextMarker'<~String> # * status<~Integer> - 200 when successful def list_hosted_zones(options = {}) parameters = {} options.each do |option, value| case option when :marker parameters[option] = value when :max_items parameters[:maxitems] = value end end request({ :query => parameters, :parser => Fog::Parsers::AWS::DNS::ListHostedZones.new, :expects => 200, :method => 'GET', :path => "hostedzone" }) end end class Mock def list_hosted_zones(options = {}) maxitems = [options[:max_items]||100,100].min if options[:marker].nil? start = 0 else start = self.data[:zones].find_index {|z| z[:id] == options[:marker]} end zones = self.data[:zones].values[start, maxitems] next_zone = self.data[:zones].values[start + maxitems] truncated = !next_zone.nil? 
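          # Shape the reply like the real ListHostedZones response: at most maxitems
          # zones, with NextMarker included only when the listing was truncated.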
response = Excon::Response.new response.status = 200 response.body = { 'HostedZones' => zones.map do |z| { 'Id' => z[:id], 'Name' => z[:name], 'CallerReference' => z[:reference], 'Comment' => z[:comment], } end, 'Marker' => options[:marker].to_s, 'MaxItems' => maxitems, 'IsTruncated' => truncated } if truncated response.body['NextMarker'] = next_zone[:id] end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dns/list_resource_record_sets.rb000066400000000000000000000117431437344660100255410ustar00rootroot00000000000000module Fog module AWS class DNS class Real require 'fog/aws/parsers/dns/list_resource_record_sets' # list your resource record sets # # ==== Parameters # * zone_id<~String> - # * options<~Hash> # * type<~String> - # * name<~String> - # * identifier<~String> - # * max_items<~Integer> - # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResourceRecordSet'<~Array>: # * 'Name'<~String> - # * 'Type'<~String> - # * 'TTL'<~Integer> - # * 'AliasTarget'<~Hash> - # * 'HostedZoneId'<~String> - # * 'DNSName'<~String> - # * 'ResourceRecords'<~Array> # * 'Value'<~String> - # * 'IsTruncated'<~String> - # * 'MaxItems'<~String> - # * 'NextRecordName'<~String> # * 'NextRecordType'<~String> # * 'NextRecordIdentifier'<~String> # * status<~Integer> - 201 when successful def list_resource_record_sets(zone_id, options = {}) # AWS methods return zone_ids that looks like '/hostedzone/id'. Let the caller either use # that form or just the actual id (which is what this request needs) zone_id = zone_id.sub('/hostedzone/', '') parameters = {} options.each do |option, value| case option when :type, :name, :identifier parameters[option] = "#{value}" when :max_items parameters['maxitems'] = "#{value}" end end request({ :expects => 200, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::DNS::ListResourceRecordSets.new, :path => "hostedzone/#{zone_id}/rrset", :query => parameters }) end end class Mock def list_all_records(record, zone, name) [].tap do |tmp_records| tmp_records.push(record) if !record[:name].nil? && ( name.nil? || record[:name].gsub(zone[:name],"") >= name) record.each do |key,subr| if subr.is_a?(Hash) && key.is_a?(String) && key.start_with?(Fog::AWS::DNS::Mock::SET_PREFIX) if name.nil? tmp_records.append(subr) else tmp_records.append(subr) if !subr[:name].nil? && subr[:name].gsub(zone[:name],"") >= name end end end end end def list_resource_record_sets(zone_id, options = {}) maxitems = [options[:max_items]||100,100].min response = Excon::Response.new zone = self.data[:zones][zone_id] || raise(Fog::AWS::DNS::NotFound.new("NoSuchHostedZone => A hosted zone with the specified hosted zone ID does not exist.")) records = if options[:type] records_type = zone[:records][options[:type]] records_type.values if records_type else zone[:records].values.map{|r| r.values}.flatten end records ||= [] tmp_records = [] if options[:name] name = options[:name].gsub(zone[:name],"") records.each do |r| tmp_records += list_all_records(r, zone, name) end else records.each do |r| tmp_records += list_all_records(r, zone, nil) end end records = tmp_records # sort for pagination records.sort! { |a,b| a[:name].gsub(zone[:name],"") <=> b[:name].gsub(zone[:name],"") } next_record = records[maxitems] records = records[0, maxitems] truncated = !next_record.nil? 
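          # Alias record sets are rendered with an AliasTarget hash and plain record sets
          # with a TTL, matching the shape the ListResourceRecordSets parser produces.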
response.status = 200 response.body = { 'ResourceRecordSets' => records.map do |r| if r[:alias_target] record = { 'AliasTarget' => { 'HostedZoneId' => r[:alias_target][:hosted_zone_id], 'DNSName' => r[:alias_target][:dns_name] } } else record = { 'TTL' => r[:ttl] } end { 'ResourceRecords' => r[:resource_records], 'Name' => r[:name], 'Type' => r[:type], 'SetIdentifier' => r[:set_identifier], 'Weight' => r[:weight] }.merge(record) end, 'MaxItems' => maxitems, 'IsTruncated' => truncated } if truncated response.body['NextRecordName'] = next_record[:name] response.body['NextRecordType'] = next_record[:type] end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/000077500000000000000000000000001437344660100207415ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/batch_get_item.rb000066400000000000000000000023571437344660100242330ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Get DynamoDB items # # ==== Parameters # * 'request_items'<~Hash>: # * 'table_name'<~Hash>: # * 'Keys'<~Array>: array of keys # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Responses'<~Hash>: # * 'table_name'<~Array> - array of all elements # * 'UnprocessedKeys':<~Hash> - tables and keys in excess of per request limit, pass this to subsequent batch get for pseudo-pagination # * 'ConsumedCapacity':<~Hash>: # * 'TableName'<~String> - the name of the table # * 'CapacityUnits'<~Float> - Capacity units used in read # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchGetItem.html # def batch_get_item(request_items) body = { 'RequestItems' => request_items } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.BatchGetItem'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/batch_write_item.rb000066400000000000000000000016431437344660100246030ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real def batch_put_item(request_items) Fog::Logger.deprecation("batch_put_item is deprecated, use batch_write_item instead") batch_write_item(request_items) end # request_items has form: # # {"table_name"=> # [{"PutRequest"=> # {"Item"=> # {"hi" => {"N" => 99}} # } # }] # } # # See DynamoDB Documentation: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/API_BatchWriteItems.html # def batch_write_item(request_items) body = { 'RequestItems' => request_items } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.BatchWriteItem'} ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/create_table.rb000066400000000000000000000040241437344660100237000ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Create DynamoDB table # # ==== Parameters # * 'table_name'<~String> - name of table to create # * 'key_schema'<~Array>: # * 'AttributeName'<~String> - name of attribute # * 'KeyType'<~String> - type of attribute, in %w{N NS S SS} for number, number set, string, string set # * 'ProvisionedThroughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TableDescription'<~Hash> # * 'CreationDateTime'<~Float> - Unix epoch time of table creation # * 'KeySchema'<~Array> - schema for table # * 'AttributeName'<~String> - name of attribute # * 
'KeyType'<~String> - type of attribute, in %w{N NS S SS} for number, number set, string, string set # * 'ProvisionedThroughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # * 'TableName'<~String> - name of table # * 'TableStatus'<~String> - status of table # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_CreateTable.html # def create_table(table_name, key_schema, provisioned_throughput) body = { 'KeySchema' => key_schema, 'ProvisionedThroughput' => provisioned_throughput, 'TableName' => table_name } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.CreateTable'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/delete_item.rb000066400000000000000000000020171437344660100235460ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Delete DynamoDB item # # ==== Parameters # * 'table_name'<~String> - name of table for item # * 'key'<~Hash> - hash of attributes # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # varies based on ReturnValues param, see: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/API_UpdateItem.html # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html # def delete_item(table_name, key, options = {}) body = { 'Key' => key, 'TableName' => table_name }.merge(options) request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.DeleteItem'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/delete_table.rb000066400000000000000000000022041437344660100236750ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Delete DynamoDB table # # ==== Parameters # * 'table_name'<~String> - name of table to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TableDescription'<~Hash> # * 'ProvisionedThroughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # * 'TableName'<~String> - name of table # * 'TableStatus'<~String> - status of table # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteTable.html # def delete_table(table_name) body = { 'TableName' => table_name } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.DeleteTable'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/describe_table.rb000066400000000000000000000030141437344660100242130ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Describe DynamoDB table # # ==== Parameters # * 'table_name'<~String> - name of table to describe # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Table'<~Hash> # * 'CreationDateTime'<~Float> - Unix epoch time of table creation # * 'KeySchema'<~Array> - schema for table # * 'AttributeName'<~String> - name of attribute # * 'KeyType'<~String> - type of attribute, in %w{N NS S SS} for number, number set, string, string set # * 'ProvisionedThroughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # * 'TableName'<~String> - name of 
table # * 'TableSizeBytes'<~Integer> - size of table in bytes # * 'TableStatus'<~String> - status of table # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html # def describe_table(table_name) body = { 'TableName' => table_name } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.DescribeTable'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/get_item.rb000066400000000000000000000025421437344660100230660ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Get DynamoDB item # # ==== Parameters # * 'table_name'<~String> - name of table for item # * 'key'<~Hash>: # { # "ForumName": { # "S": "Amazon DynamoDB" # } # } # * 'options'<~Hash>: # * 'AttributesToGet'<~Array>: list of array names to return, defaults to returning all # * 'ConsistentRead'<~Boolean>: whether to wait for updates, defaults to false # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ConsumedCapacityUnits'<~Float> - Capacity units used in read # * 'Item':<~Hash>: # * 'AttributeName'<~Hash>: in form of {"type":value} # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_GetItem.html # def get_item(table_name, key, options = {}) body = { 'Key' => key, 'TableName' => table_name }.merge(options) request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.GetItem'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/list_tables.rb000066400000000000000000000017311437344660100235750ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # List DynamoDB tables # # ==== Parameters # * 'options'<~Hash> - options, defaults to {} # * 'ExclusiveStartTableName'<~String> - name of table to begin listing with # * 'Limit'<~Integer> - limit number of tables to return # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'LastEvaluatedTableName'<~String> - last table name, for pagination # * 'TableNames'<~Array> - table names # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ListTables.html # def list_tables(options = {}) request( :body => Fog::JSON.encode(options), :headers => {'x-amz-target' => 'DynamoDB_20120810.ListTables'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/put_item.rb000066400000000000000000000021011437344660100231060ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Update DynamoDB item # # ==== Parameters # * 'table_name'<~String> - name of table for item # * 'item'<~Hash>: data to update, must include primary key # { # "LastPostDateTime": {"S": "201303190422"} # } # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # varies based on ReturnValues param, see: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/API_UpdateItem.html # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html # def put_item(table_name, item, options = {}) body = { 'Item' => item, 'TableName' => table_name }.merge(options) request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.PutItem'} ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/query.rb000066400000000000000000000044741437344660100224440ustar00rootroot00000000000000module Fog module AWS class DynamoDB 
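      # Hypothetical query sketch: `dynamodb` stands for a Fog::AWS::DynamoDB connection
      # and the table name, key names and values are all made up.
      #   dynamodb.query('my-table',
      #     'KeyConditionExpression'    => 'ForumName = :name',
      #     'ExpressionAttributeValues' => { ':name' => { 'S' => 'Amazon DynamoDB' } },
      #     'Limit'                     => 10)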
class Real # Query DynamoDB items # # ==== Parameters # * 'table_name'<~String> - name of table to query # * options<~Hash>: # * 'AttributesToGet'<~Array> - Array of attributes to get for each item, defaults to all # * 'ConsistentRead'<~Boolean> - Whether to wait for consistency, defaults to false # * 'Count'<~Boolean> - If true, returns only a count of such items rather than items themselves, defaults to false # * 'Limit'<~Integer> - limit of total items to return # * 'KeyConditionExpression'<~String> - the condition elements need to match # * 'ExpressionAttributeValues'<~Hash> - values to be used in the key condition expression # * 'ScanIndexForward'<~Boolean>: Whether to scan from start or end of index, defaults to start # * 'ExclusiveStartKey'<~Hash>: Key to start listing from, can be taken from LastEvaluatedKey in response # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ConsumedCapacityUnits'<~Integer> - number of capacity units used for query # * 'Count'<~Integer> - number of items in response # * 'Items'<~Array> - array of items returned # * 'LastEvaluatedKey'<~Hash> - last key scanned, can be passed to ExclusiveStartKey for pagination # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html # def query(table_name, options = {}, hash_key_deprecated = nil) if hash_key_deprecated || (options.keys.length == 1 && [:S, :N, :B].include?(options.keys.first.to_sym)) Fog::Logger.deprecation("The `20111205` API version is deprecated. You need to use `KeyConditionExpression` instead of `HashKey`.") apiVersion = '20111205' hash_key = options options = hash_key_deprecated end body = { 'TableName' => table_name, 'HashKeyValue' => hash_key }.merge(options) request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => "DynamoDB_#{apiVersion || '20120810'}.Query"} ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/scan.rb000066400000000000000000000037431437344660100222210ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Scan DynamoDB items # # ==== Parameters # * 'table_name'<~String> - name of table to query # * options<~Hash>: # * 'AttributesToGet'<~Array> - Array of attributes to get for each item, defaults to all # * 'ConsistentRead'<~Boolean> - Whether to wait for consistency, defaults to false # * 'Count'<~Boolean> - If true, returns only a count of such items rather than items themselves, defaults to false # * 'Limit'<~Integer> - limit of total items to return # * 'KeyConditionExpression'<~String> - the condition elements need to match # * 'ExpressionAttributeValues'<~Hash> - values to be used in the key condition expression # * 'ScanIndexForward'<~Boolean>: Whether to scan from start or end of index, defaults to start # * 'ExclusiveStartKey'<~Hash>: Key to start listing from, can be taken from LastEvaluatedKey in response # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ConsumedCapacityUnits'<~Integer> - number of capacity units used for scan # * 'Count'<~Integer> - number of items in response # * 'Items'<~Array> - array of items returned # * 'LastEvaluatedKey'<~Hash> - last key scanned, can be passed to ExclusiveStartKey for pagination # * 'ScannedCount'<~Integer> - number of items scanned before applying filters # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html # def scan(table_name, options = {}) body = { 'TableName' => table_name }.merge(options) request( :body => 
Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.Scan'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/update_item.rb000066400000000000000000000036701437344660100235740ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real class DeprecatedAttributeUpdates < Exception; end # Update DynamoDB item # # ==== Parameters # * 'table_name'<~String> - name of table for item # * 'key'<~Hash> - list of Key attributes # { # "ForumName": {"S": "Amazon DynamoDB"}, # "Subject": {"S": "Maximum number of items?"} # } # # * 'options'<~Hash>: # * 'UpdateExpression'<~String> - the expression that will update the item # * 'ExpressionAttributeValues'<~Hash> - values to be used in the update expression # * 'ReturnValues'<~String> - data to return in %w{ALL_NEW ALL_OLD NONE UPDATED_NEW UPDATED_OLD}, defaults to NONE # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # varies based on ReturnValues param, see: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/API_UpdateItem.html # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html # def update_item(table_name, key, options = {}, deprecated_attribute_updates = nil) if deprecated_attribute_updates raise DeprecatedAttributeUpdates, "The `20111205` DynamoDB API is deprecated. You need to use `ExpressionAttributeValues` instead of `AttributeUpdates`." attribute_updates = options options = deprecated_attribute_updates.merge( 'AttributeUpdates' => attribute_updates, ) end body = { 'Key' => key, 'TableName' => table_name, }.merge(options) request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.UpdateItem'} ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/dynamodb/update_table.rb000066400000000000000000000032641437344660100237240ustar00rootroot00000000000000module Fog module AWS class DynamoDB class Real # Update DynamoDB table throughput # # ==== Parameters # * 'table_name'<~String> - name of table to describe # * 'provisioned_throughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Table'<~Hash> # * 'KeySchema'<~Array> - schema for table # * 'AttributeName'<~String> - name of attribute # * 'KeyType'<~String> - type of attribute, in %w{N NS S SS} for number, number set, string, string set # * 'ProvisionedThroughput'<~Hash>: # * 'ReadCapacityUnits'<~Integer> - read capacity for table, in 5..10000 # * 'WriteCapacityUnits'<~Integer> - write capacity for table, in 5..10000 # * 'TableName'<~String> - name of table # * 'TableStatus'<~String> - status of table # # See DynamoDB Documentation: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html # def update_table(table_name, provisioned_throughput) body = { 'ProvisionedThroughput' => provisioned_throughput, 'TableName' => table_name } request( :body => Fog::JSON.encode(body), :headers => {'x-amz-target' => 'DynamoDB_20120810.UpdateTable'}, :idempotent => true ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/000077500000000000000000000000001437344660100177165ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/ecs/create_cluster.rb000066400000000000000000000041271437344660100232530ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 
'fog/aws/parsers/ecs/create_cluster' # Creates a new Amazon ECS cluster # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCluster.html # ==== Parameters # * clusterName <~String> - The name of your cluster. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Cluster' <~Hash> - The full description of your new cluster def create_cluster(params={}) request({ 'Action' => 'CreateCluster', :parser => Fog::Parsers::AWS::ECS::CreateCluster.new }.merge(params)) end end class Mock def create_cluster(params={}) response = Excon::Response.new response.status = 200 params.has_key?('clusterName') || params['clusterName'] = 'default' owner_id = Fog::AWS::Mock.owner_id cluster_name = params['clusterName'] cluster_path = "cluster/#{cluster_name}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) cluster = {} search_cluster_result = self.data[:clusters].select { |c| c['clusterName'].eql?(cluster_name) } if search_cluster_result.empty? cluster = { 'clusterName' => cluster_name, 'clusterArn' => cluster_arn, 'status' => 'ACTIVE', 'registeredContainerInstancesCount' => 0, 'runningTasksCount' => 0, 'pendingTasksCount' => 0 } self.data[:clusters] << cluster else cluster = search_cluster_result.first end response.body = { 'CreateClusterResult' => { 'cluster' => cluster }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/create_service.rb000066400000000000000000000115521437344660100232320ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/create_service' # Runs and maintains a desired number of tasks from a specified task definition. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html # ==== Parameters # * clientToken <~String> - unique, case-sensitive identifier you provide to ensure the idempotency of the request. # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that you want to run your service on. # * desiredCount <~Integer> - number of instantiations of the specified task definition that you would like to place and keep running on your cluster. # * loadBalancers <~Array> - list of load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. # * role <~String> - name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. # * serviceName <~String> - name of your service # * taskDefinition <~String> - family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition that you want to run in your service # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Service' <~Hash> - The full description of your new service def create_service(params={}) if load_balancers = params.delete('loadBalancers') params.merge!(Fog::AWS.indexed_param('loadBalancers.member', [*load_balancers])) end request({ 'Action' => 'CreateService', :parser => Fog::Parsers::AWS::ECS::CreateService.new }.merge(params)) end end class Mock def create_service(params={}) response = Excon::Response.new response.status = 200 e = Fog::AWS::ECS::Error msg = 'ClientException => desiredCount cannot be empty.' raise e, msg unless desired_count = params['desiredCount'] msg = 'ClientException => serviceName cannot be empty.' 
raise e unless service_name = params['serviceName'] msg = 'ClientException => taskDefinition cannot be empty.' raise e unless task_definition = params['taskDefinition'] owner_id = Fog::AWS::Mock.owner_id service_path = "service/#{service_name}" service_arn = Fog::AWS::Mock.arn('ecs', owner_id, service_path, region) cluster = params['cluster'] || 'default' if !cluster.match(/^arn:aws:ecs:.+:.+:cluster\/(.+)$/) cluster_path = "cluster/#{cluster}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) else cluster_arn = cluster end if params['role'] role = params['role'] if params['role'] if !role.match(/^arn:aws:iam:.*:.*:role\/(.+)$/) role_path = "role/#{role}" role_arn = Fog::AWS::Mock.arn('iam', owner_id, role_path, region) else role_arn = role end end if !task_definition.match(/^arn:aws:ecs:.+:.+:task-definition\/.+$/) task_def_path = "task-definition\/#{task_definition}" task_def_arn = Fog::AWS::Mock.arn('ecs', owner_id, task_def_path, region) else task_def_arn = task_definition end load_balancers = params['loadBalancers'] || [] service = { 'events' => [], 'serviceName' => service_name, 'serviceArn' => service_arn, 'taskDefinition' => task_def_arn, 'clusterArn' => cluster_arn, 'status' => 'ACTIVE', 'roleArn' => role_arn, 'loadBalancers' => [*load_balancers], 'deployments' => [], 'desiredCount' => desired_count, 'pendingCount' => 0, 'runningCount' => 0 } service['deployments'] << { 'updatedAt' => Time.now.utc, 'id' => "ecs-svc/#{Fog::Mock.random_numbers(19)}", 'taskDefinition' => task_def_arn, 'status' => 'PRIMARY', 'desiredCount' => desired_count, 'createdAt' => Time.now.utc, 'pendingCount' => 0, 'runningCount' => 0 } self.data[:services] << service response.body = { 'CreateServiceResult' => { 'service' => service, }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/delete_cluster.rb000066400000000000000000000035361437344660100232550ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/delete_cluster' # Deletes the specified cluster # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteCluster.html # ==== Parameters # * cluster <~String> - The short name or full Amazon Resource Name (ARN) of the cluster that you want to delete # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Cluster'<~Hash> - The full description of the deleted cluster def delete_cluster(params={}) request({ 'Action' => 'DeleteCluster', :parser => Fog::Parsers::AWS::ECS::DeleteCluster.new }.merge(params)) end end class Mock def delete_cluster(params={}) response = Excon::Response.new response.status = 200 cluster_id = params.delete('cluster') if !cluster_id message = 'ClientException => Cluster can not be blank.' raise Fog::AWS::ECS::Error, message end if match = cluster_id.match(/^arn:aws:ecs:.+:\d{1,12}:cluster\/(.+)$/) i = self.data[:clusters].index { |c| c['clusterArn'].eql?(cluster_id) } else i = self.data[:clusters].index { |c| c['clusterName'].eql?(cluster_id) } end if i cluster = self.data[:clusters].delete_at(i) else raise Fog::AWS::ECS::NotFound, 'Cluster not found.' 
end cluster['status'] = 'INACTIVE' response.body = { 'DeleteClusterResult' => { 'cluster' => cluster }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/delete_service.rb000066400000000000000000000045521437344660100232330ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/delete_service' # Deletes a specified service within a cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteService.html # ==== Parameters # * cluster <~String> - name of the cluster that hosts the service you want to delete. # * service <~String> - name of the service you want to delete. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Service'<~Hash> - The full description of the deleted service def delete_service(params={}) request({ 'Action' => 'DeleteService', :parser => Fog::Parsers::AWS::ECS::DeleteService.new }.merge(params)) end end class Mock def delete_service(params={}) response = Excon::Response.new response.status = 200 service_id = params.delete('service') msg = 'ClientException => Service cannot be empty.' raise Fog::AWS::ECS::Error, msg unless service_id owner_id = Fog::AWS::Mock.owner_id cluster = params.delete('cluster') || 'default' if !cluster.match(/^arn:aws:ecs:.+:.+:cluster\/(.+)$/) cluster_path = "cluster/#{cluster}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) else cluster_arn = cluster end if match = service_id.match(/^arn:aws:ecs:.+:\d{1,12}:service\/(.+)$/) i = self.data[:services].index do |s| s['clusterArn'].eql?(cluster_arn) && s['serviceArn'].eql?(service_id) end else i = self.data[:services].index do |s| s['clusterArn'].eql?(cluster_arn) && s['serviceName'].eql?(service_id) end end msg = "ServiceNotFoundException => Service not found." raise Fog::AWS::ECS::Error, msg unless i service = self.data[:services][i] self.data[:services].delete_at(i) response.body = { 'DeleteServiceResult' => { 'service' => service }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/deregister_container_instance.rb000066400000000000000000000047031437344660100263320ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/deregister_container_instance' # Deregisters an Amazon ECS container instance from the specified cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterContainerInstance.html # ==== Parameters # * cluster <~String> - short name or full ARN of the cluster that hosts the container instance you want to deregister. # * containerInstance <~String> - container instance UUID or full Amazon Resource Name (ARN) of the container instance you want to deregister. # * force <~Boolean> - Force the deregistration of the container instance. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ContainerInstance' <~Hash> - full description of the deregistered container instance def deregister_container_instance(params={}) request({ 'Action' => 'DeregisterContainerInstance', :parser => Fog::Parsers::AWS::ECS::DeregisterContainerInstance.new }.merge(params)) end end class Mock def deregister_container_instance(params={}) response = Excon::Response.new response.status = 200 instance_id = params.delete('containerInstance') instance_error = "ClientException => Container instance can not be blank." 
raise Fog::AWS::ECS::Error, instance_error unless instance_id if match = instance_id.match(/^arn:aws:ecs:.+:\d{1,12}:container-instance\/(.+)$/) i = self.data[:container_instances].index do |inst| inst['containerInstanceArn'].eql?(instance_id) end else i = self.data[:container_instances].index do |inst| inst['containerInstanceArn'].match(/#{instance_id}$/) end end msg = "ClientException => Referenced container instance #{instance_id} not found." raise Fog::AWS::ECS::Error, msg unless i instance = self.data[:container_instances][i] self.data[:container_instances].delete_at(i) response.body = { 'DeregisterContainerInstanceResult' => { 'containerInstance' => instance }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/deregister_task_definition.rb000066400000000000000000000041731437344660100256370ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/deregister_task_definition' # Deregisters the specified task definition. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeregisterTaskDefinition.html # ==== Parameters # * taskDefinition <~String> - The family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition that you want to deregister. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TaskDefinition' <~Hash> - full description of the deregistered task def deregister_task_definition(params={}) request({ 'Action' => 'DeregisterTaskDefinition', :parser => Fog::Parsers::AWS::ECS::DeregisterTaskDefinition.new }.merge(params)) end end class Mock def deregister_task_definition(params={}) response = Excon::Response.new response.status = 200 taskdef_error = "ClientException => Task Definition can not be blank." raise Fog::AWS::ECS::Error, taskdef_error unless params['taskDefinition'] task_def_name = params['taskDefinition'] case task_def_name when /^arn:aws:ecs:.+:\d{1,12}:task-definition\/(.+:.+)$/ i = self.data[:task_definitions].index { |t| t['taskDefinitionArn'].eql?(task_def_name) } when /^(.+:.+)$/ i = self.data[:task_definitions].index { |t| t['taskDefinitionArn'].match(/task-definition\/#{task_def_name}$/) } else raise Fog::AWS::ECS::Error, 'Invalid task definition' end raise Fog::AWS::ECS::NotFound, 'Task definition not found.' unless i task_definition = self.data[:task_definitions].delete_at(i) response.body = { 'DeregisterTaskDefinitionResult' => { 'taskDefinition' => task_definition }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/describe_clusters.rb000066400000000000000000000050751437344660100237560ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/describe_clusters' # Describes one or more of your clusters. 
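# Example (illustrative; `ecs` stands for a configured Fog::AWS::ECS connection):
#   ecs.describe_clusters('clusters' => ['default', 'my-cluster'])
# A single cluster name or ARN may also be passed; the request splats it into a member list.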
# http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeClusters.html # ==== Parameters # * clusters <~Array> - list of cluster names or full cluster Amazon Resource Name (ARN) entries # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'clusters' <~Array> - The list of clusters # * 'failures' <~Array> - The list of failures (if any) def describe_clusters(params={}) if members = params.delete('clusters') params.merge!(Fog::AWS.indexed_param('clusters.member', [*members])) end request({ 'Action' => 'DescribeClusters', :parser => Fog::Parsers::AWS::ECS::DescribeClusters.new }.merge(params)) end end class Mock def describe_clusters(params={}) response = Excon::Response.new response.status = 200 members = params.delete('clusters') members = 'default' unless members clusters = [] failures = [] [*members].each do |c| if match = c.match(/^arn:aws:ecs:.+:\d{1,12}:cluster\/(.+)$/) result = self.data[:clusters].select { |cl| cl['clusterArn'].eql?(c) } else result = self.data[:clusters].select { |cl| cl['clusterName'].eql?(c) } end if result.empty? cluster_name = match[1] if match cluster_name = c unless match failures << { 'name' => cluster_name } else clusters << result.first end end owner_id = Fog::AWS::Mock.owner_id failures.map! do |f| { 'arn' => Fog::AWS::Mock.arn('ecs', owner_id, "cluster/#{f['name']}", region), 'reason' => 'MISSING' } end clusters.map! do |c| { 'clusterName' => c['clusterName'], 'clusterArn' => c['clusterArn'], 'status' => c['status'] } end response.body = { 'DescribeClustersResult' => { 'failures' => failures, 'clusters' => clusters }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/describe_container_instances.rb000066400000000000000000000046021437344660100261360ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/describe_container_instances' # Describes Amazon EC2 Container Service container instances. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeContainerInstances.html # ==== Parameters # * cluster <~String> - short name or full ARN of the cluster that hosts the container instances you want to describe. # * containerInstances <~Array> - list of container instance UUIDs or full Amazon Resource Name (ARN) entries. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'containerInstances' <~Array> - list of container instances. # * 'failures' <~Array> - list of failures (if any) def describe_container_instances(params={}) if instances = params.delete('containerInstances') params.merge!(Fog::AWS.indexed_param('containerInstances.member', [*instances])) end request({ 'Action' => 'DescribeContainerInstances', :parser => Fog::Parsers::AWS::ECS::DescribeContainerInstances.new }.merge(params)) end end class Mock def describe_container_instances(params={}) response = Excon::Response.new response.status = 200 cluster = params.delete('cluster') || 'default' instances_id = params.delete('containerInstances') msg = 'ClientException => Container instance cannot be empty.' 
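# Usage sketch (illustrative identifiers; assumes `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.describe_container_instances(
#     'cluster'            => 'default',
#     'containerInstances' => ['arn:aws:ecs:us-east-1:123456789012:container-instance/0000aaaa-1111-2222-3333-444455556666']
#   )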
raise Fog::AWS::ECS::Error, msg unless instances_id result = [] [*instances_id].each do |inst| if match = inst.match(/^arn:aws:ecs:.+:\d{1,12}:container-instance\/(.+)$/) result = self.data[:container_instances].select { |i| i['containerInstanceArn'].eql?(inst) } else result = self.data[:container_instances].select { |i| i['containerInstanceArn'].match(/#{inst}$/) } end end instances = result response.body = { 'DescribeContainerInstancesResult' => { 'containerInstances' => instances, 'failures' => [] }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/describe_services.rb000066400000000000000000000050551437344660100237330ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/describe_services' # Describes the specified services running in your cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeServices.html # ==== Parameters # * cluster <~String> - name of the cluster that hosts the service you want to describe. # * services <~Array> - list of services you want to describe. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'services' <~Array> - The list of services described. # * 'failures' <~Array> - The list of failures associated with the call (if any). def describe_services(params={}) if services = params.delete('services') params.merge!(Fog::AWS.indexed_param('services.member', [*services])) end request({ 'Action' => 'DescribeServices', :parser => Fog::Parsers::AWS::ECS::DescribeServices.new }.merge(params)) end end class Mock def describe_services(params={}) response = Excon::Response.new response.status = 200 cluster = params.delete('cluster') || 'default' services = params.delete('services') msg = 'InvalidParameterException => Services cannot be empty.' raise Fog::AWS::ECS::Error, msg unless services owner_id = Fog::AWS::Mock.owner_id if !cluster.match(/^arn:aws:ecs:.+:.+:cluster\/(.+)$/) cluster_path = "cluster/#{cluster}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) else cluster_arn = cluster end result = [] ([*services].select { |s| s.match(/^arn:/) }).each do |ds| result.concat(self.data[:services].select do |sv| sv['serviceArn'].eql?(ds) && sv['clusterArn'].eql?(cluster_arn) end) end ([*services].select { |s| !s.match(/^arn:/) }).each do |ds| result.concat(self.data[:services].select do |sv| sv['serviceName'].eql?(ds) && sv['clusterArn'].eql?(cluster_arn) end) end response.body = { 'DescribeServicesResult' => { 'services' => result, 'failures' => [] }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/describe_task_definition.rb000066400000000000000000000045241437344660100252620ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/describe_task_definition' # Describes a task definition # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTaskDefinition.html # ==== Parameters # * taskDefinition <~String> - The family for the latest revision, family and revision (family:revision) for a specific revision in the family, or full Amazon Resource Name (ARN) of the task definition that you want to describe. 
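# For example (illustrative names; `ecs` is a configured Fog::AWS::ECS connection), any of these identify a task definition:
#   ecs.describe_task_definition('taskDefinition' => 'my-family')       # latest revision of the family
#   ecs.describe_task_definition('taskDefinition' => 'my-family:3')     # a specific revision
#   ecs.describe_task_definition('taskDefinition' => 'arn:aws:ecs:us-east-1:123456789012:task-definition/my-family:3')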
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'taskDefinition' <~Hash> - full task definition description def describe_task_definition(params={}) request({ 'Action' => 'DescribeTaskDefinition', :parser => Fog::Parsers::AWS::ECS::DescribeTaskDefinition.new }.merge(params)) end end class Mock def describe_task_definition(params={}) response = Excon::Response.new response.status = 200 taskdef_error = "ClientException => Task Definition can not be blank." raise Fog::AWS::ECS::Error, taskdef_error unless params['taskDefinition'] task_def_name = params['taskDefinition'] case task_def_name when /^arn:aws:ecs:.+:\d{1,12}:task-definition\/(.+:.+)$/ result = self.data[:task_definitions].select { |t| t['taskDefinitionArn'].eql?(task_def_name) } when /^(.+:.+)$/ result = self.data[:task_definitions].select { |t| t['taskDefinitionArn'].match(/task-definition\/#{task_def_name}/) } else result = self.data[:task_definitions].select { |t| t['family'].eql?(task_def_name) } if !result.empty? result = [] << (result.max_by { |t| t['revision'] }) end end if result.empty? raise Fog::AWS::ECS::Error, 'ClientException => Unable to describe task definition.' end task_definition = result.first response.body = { 'DescribeTaskDefinitionResult' => { 'taskDefinition' => task_definition }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/describe_tasks.rb000066400000000000000000000041221437344660100232270ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/describe_tasks' # Describes a specified task or tasks. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that hosts the task you want to describe # * tasks <~Array> - space-separated list of task UUIDs or full Amazon Resource Name (ARN) entries # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'tasks' <~Array> - The list of tasks # * 'failures' <~Array> - The list of failures (if any) def describe_tasks(params={}) if tasks = params.delete('tasks') params.merge!(Fog::AWS.indexed_param('tasks.member', [*tasks])) end request({ 'Action' => 'DescribeTasks', :parser => Fog::Parsers::AWS::ECS::DescribeTasks.new }.merge(params)) end end class Mock def describe_tasks(params={}) response = Excon::Response.new response.status = 200 unless tasks = params.delete('tasks') msg = 'InvalidParameterException => Tasks cannot be empty.' 
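# Usage sketch (illustrative identifiers; assumes `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.describe_tasks('cluster' => 'default', 'tasks' => ['arn:aws:ecs:us-east-1:123456789012:task/0000aaaa-1111-2222-3333-444455556666'])
# Bare task UUIDs are also accepted and matched by the fallback regexp below.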
raise Fog::AWS::ECS::Error, msg end cluster = params.delete('cluster') || 'default' result = [] [*tasks].each do |tid| if match = tid.match(/^arn:aws:ecs:.+:\d{1,12}:task\/(.+)$/) result = self.data[:tasks].select { |t| t['taskArn'].eql?(tid) } else result = self.data[:tasks].select { |t| t['taskArn'].match(/#{tid}$/) } end end tasks = result response.body = { 'DescribeTasksResult' => { 'failures' => [], 'tasks' => tasks }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_clusters.rb000066400000000000000000000030431437344660100231420ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_clusters' # Returns a list of existing clusters # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListClusters.html # ==== Parameters # * maxResults <~Integer> - The maximum number of cluster results returned by ListClusters in paginated output. # * nextToken <~String> - The nextToken value returned from a previous paginated ListClusters request where maxResults was used. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ClusterArns' <~Array> - list of full Amazon Resource Name (ARN) entries for each cluster associated with your account. # * 'NextToken' <~String> - nextToken value to include in a future ListClusters request. def list_clusters(params={}) request({ 'Action' => 'ListClusters', :parser => Fog::Parsers::AWS::ECS::ListClusters.new }.merge(params)) end end class Mock def list_clusters(params={}) response = Excon::Response.new response.status = 200 cluster_arns = self.data[:clusters].map { |c| c['clusterArn'] } response.body = { 'ListClustersResult' => { 'clusterArns' => cluster_arns }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_container_instances.rb000066400000000000000000000035641437344660100253370ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_container_instances' # Returns a list of container instances in a specified cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListContainerInstances.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances you want to list. # * maxResults <~Integer> - maximum number of container instance results returned by ListContainerInstances in paginated output. # * nextToken <~String> - nextToken value returned from a previous paginated ListContainerInstances request where maxResults was used. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ContainerInstanceArns' <~Array> - list of container instance full ARN entries for each container instance associated with the specified cluster. # * 'NextToken' <~String> - nextToken value to include in a future ListContainerInstances request. 
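# Example (illustrative; `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.list_container_instances('cluster' => 'default', 'maxResults' => 50)
# Note that the mock implementation below returns every known instance and ignores the cluster and pagination parameters.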
def list_container_instances(params={}) request({ 'Action' => 'ListContainerInstances', :parser => Fog::Parsers::AWS::ECS::ListContainerInstances.new }.merge(params)) end end class Mock def list_container_instances(params={}) response = Excon::Response.new response.status = 200 instance_arns = self.data[:container_instances].map { |i| i['containerInstanceArn'] } response.body = { 'ListContainerInstancesResult' => { 'containerInstanceArns' => instance_arns }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_services.rb000066400000000000000000000042531437344660100231250ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_services' # Lists the services that are running in a specified cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListServices.html # ==== Parameters # * cluster <~String> - The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services you want to list. # * maxResults <~Integer> - The maximum number of container instance results returned by ListServices in paginated output. # * nextToken <~String> - The nextToken value returned from a previous paginated ListServices request where maxResults was used. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ServiceArns' <~Array> - list of full Amazon Resource Name (ARN) entries for each service associated with the specified cluster. # * 'NextToken' <~String> - nextToken value to include in a future ListServices request. def list_services(params={}) request({ 'Action' => 'ListServices', :parser => Fog::Parsers::AWS::ECS::ListServices.new }.merge(params)) end end class Mock def list_services(params={}) response = Excon::Response.new response.status = 200 owner_id = Fog::AWS::Mock.owner_id cluster = params.delete('cluster') || 'default' if !cluster.match(/^arn:aws:ecs:.+:.+:cluster\/(.+)$/) cluster_path = "cluster/#{cluster}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) else cluster_arn = cluster end result = self.data[:services].select do |s| s['clusterArn'].eql?(cluster_arn) end service_arns = result.map { |s| s['serviceArn'] } response.body = { 'ListServicesResult' => { 'serviceArns' => service_arns }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_task_definition_families.rb000066400000000000000000000041611437344660100263230ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_task_definition_families' # Returns a list of task definition families that are registered to your account. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTaskDefinitionFamilies.html # ==== Parameters # * familyPrefix <~String> - familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. # * maxResults <~Integer> - maximum number of task definition family results returned by ListTaskDefinitionFamilies in paginated output. # * nextToken <~String> - nextToken value returned from a previous paginated ListTaskDefinitionFamilies request where maxResults was used. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Families' <~Array> - list of task definition family names that match the ListTaskDefinitionFamilies request. 
# * 'NextToken' <~String> - nextToken value to include in a future ListTaskDefinitionFamilies request. def list_task_definition_families(params={}) request({ 'Action' => 'ListTaskDefinitionFamilies', :parser => Fog::Parsers::AWS::ECS::ListTaskDefinitionFamilies.new }.merge(params)) end end class Mock def list_task_definition_families(params={}) response = Excon::Response.new response.status = 200 family_prefix = params['familyPrefix'] if family_prefix result = self.data[:task_definitions].select do |t| t['family'].match(/^#{family_prefix}/) end else result = self.data[:task_definitions].dup end result.map! { |t| t['family'] } result.uniq! response.body = { 'ListTaskDefinitionFamiliesResult' => { 'families' => result }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_task_definitions.rb000066400000000000000000000041011437344660100246270ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_task_definitions' # Returns a list of task definitions that are registered to your account # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTaskDefinitions.html # ==== Parameters # * familyPrefix <~String> - The full family name that you want to filter the ListTaskDefinitions results with. # * maxResults <~Integer> - The maximum number of task definition results returned by ListTaskDefinitions in paginated output. # * nextToken <~String> - The nextToken value returned from a previous paginated ListTaskDefinitions request. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TaskDefinitionArns' <~Array> - list of task definition Amazon Resource Name (ARN) entries for the ListTaskDefintions request. # * 'NextToken' <~String> - nextToken value to include in a future ListTaskDefinitions request def list_task_definitions(params={}) request({ 'Action' => 'ListTaskDefinitions', :parser => Fog::Parsers::AWS::ECS::ListTaskDefinitions.new }.merge(params)) end end class Mock def list_task_definitions(params={}) if %w( familyPrefix maxResults nextToken ).any? { |k| params.has_key?(k) } Fog::Logger.warning("list_task_definitions filters are not yet mocked [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end response = Excon::Response.new response.status = 200 taskdef_arns = self.data[:task_definitions].map { |c| c['taskDefinitionArn'] } response.body = { 'ListTaskDefinitionsResult' => { 'taskDefinitionArns' => taskdef_arns }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/list_tasks.rb000066400000000000000000000041131437344660100224220ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/list_tasks' # Returns a list of tasks for a specified cluster. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ListTasks.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks you want to list. # * containerInstance <~String> - container instance UUID or full Amazon Resource Name (ARN) of the container instance that you want to filter the ListTasks results with. # * family <~String> - name of the family that you want to filter the ListTasks results with. # * maxResults <~Integer> - maximum number of task results returned by ListTasks in paginated output. 
# * nextToken <~String> - nextToken value returned from a previous paginated ListTasks request where maxResults was used. # * serviceName <~String> - name of the service that you want to filter the ListTasks results with. # * startedBy <~String> - startedBy value that you want to filter the task results with. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TaskArns' <~Array> - list of task Amazon Resource Name (ARN) entries for the ListTasks request. # * 'NextToken' <~String> - nextToken value to include in a future ListTasks request. def list_tasks(params={}) request({ 'Action' => 'ListTasks', :parser => Fog::Parsers::AWS::ECS::ListTasks.new }.merge(params)) end end class Mock def list_tasks(params={}) response = Excon::Response.new response.status = 200 task_arns = self.data[:tasks].map { |t| t['taskArn'] } response.body = { 'ListTasksResult' => { 'taskArns' => task_arns }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/register_task_definition.rb000066400000000000000000000053601437344660100253250ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/register_task_definition' # Registers a new task definition from the supplied family and containerDefinitions. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RegisterTaskDefinition.html # ==== Parameters # * containerDefinitions <~Array> - list of container definitions in JSON format that describe the different containers that make up your task. # * family <~String> - family for a task definition, which allows you to track multiple versions of the same task definition. # * volumes <~String> - list of volume definitions in JSON format that containers in your task may use. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'TaskDefinition' <~Array> - full task definition description registered def register_task_definition(params={}) serialized_params = {} params.each_pair do |k,v| serialized_params.merge!(Fog::AWS.serialize_keys(k, v)) end request({ 'Action' => 'RegisterTaskDefinition', :parser => Fog::Parsers::AWS::ECS::RegisterTaskDefinition.new }.merge(serialized_params)) end end class Mock def register_task_definition(params={}) response = Excon::Response.new response.status = 200 family_error = 'ClientException => Family can not be blank.' container_error = 'ClientException => Container list cannot be empty.' 
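# Usage sketch (illustrative container definition; assumes `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.register_task_definition(
#     'family'               => 'my-family',
#     'containerDefinitions' => [{ 'name' => 'web', 'image' => 'nginx', 'cpu' => 128, 'memory' => 256, 'essential' => true }]
#   )
# The Real implementation serializes this nested structure with Fog::AWS.serialize_keys before issuing the request.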
raise Fog::AWS::ECS::Error, family_error unless params['family'] raise Fog::AWS::ECS::Error, container_error unless params['containerDefinitions'] owner_id = Fog::AWS::Mock.owner_id taskdef_name = params['family'] taskdef_rev = (1..9).to_a.shuffle.first taskdef_path = "task-definition/#{taskdef_name}:#{taskdef_rev}" taskdef_arn = Fog::AWS::Mock.arn('ecs', owner_id, taskdef_path, region) task_definition = { 'revision' => taskdef_rev, 'taskDefinitionArn' => taskdef_arn, 'family' => params['family'], 'containerDefinitions' => params['containerDefinitions'] } task_definition['volumes'] = params['volumes'] if params['volumes'] self.data[:task_definitions] << task_definition response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'RegisterTaskDefinitionResult' => { 'taskDefinition' => task_definition } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/run_task.rb000066400000000000000000000105211437344660100220700ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/run_task' # Start a task using random placement and the default Amazon ECS scheduler. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that you want to run your task on. # * count <~Integer> - number of instantiations of the specified task that you would like to place on your cluster. # * overrides <~Hash> - list of container overrides. # * startedBy <~String> - optional tag specified when a task is started # * taskDefinition <~String> - family and revision (family:revision) or full ARN of the task definition that you want to run. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'tasks' <~Array> - full description of the tasks that were run. # * 'failures' <~Array> - Any failed tasks from your RunTask action are listed here. def run_task(params={}) if overrides = params.delete('overrides') serialized_overrides = {} if overrides.is_a?(Hash) overrides.each_pair do |k,v| serialized_overrides.merge!(Fog::AWS.serialize_keys(k, v)) end end params.merge!('overrides' => serialized_overrides) end request({ 'Action' => 'RunTask', :parser => Fog::Parsers::AWS::ECS::RunTask.new }.merge(params)) end end class Mock def run_task(params={}) response = Excon::Response.new response.status = 200 unless task_def_id = params.delete('taskDefinition') msg = 'ClientException => TaskDefinition cannot be empty.' raise Fog::AWS::ECS::Error, msg end begin result = describe_task_definition('taskDefinition' => task_def_id).body task_def = result["DescribeTaskDefinitionResult"]["taskDefinition"] task_def_arn = task_def["taskDefinitionArn"] rescue Fog::AWS::ECS::Error => e msg = 'ClientException => TaskDefinition not found.' raise Fog::AWS::ECS::Error, msg end if %w(count overrides).any? 
{ |k| params.has_key?(k) } Fog::Logger.warning("you used parameters not mocked yet [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end cluster_id = params.delete('cluster') || 'default' cluster_arn = nil owner_id = Fog::AWS::Mock.owner_id if cluster_id.match(/^arn:aws:ecs:.+:\d{1,12}:cluster\/(.+)$/) cluster_arn = cluster_id else cluster_path = "cluster/#{cluster_id}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) end task_path = "task/#{UUID.uuid}" task_arn = Fog::AWS::Mock.arn('ecs', owner_id, task_path, region) instance_path = "container-instance/#{UUID.uuid}" container_instance_arn = Fog::AWS::Mock.arn('ecs', owner_id, instance_path, region) containers = [] task_def["containerDefinitions"].each do |c| container_path = "container/#{UUID.uuid}" containers << { 'name' => c['name'], 'taskArn' => task_arn, 'lastStatus' => 'PENDING', 'containerArn' => Fog::AWS::Mock.arn('ecs', owner_id, container_path, region) } end task = { 'clusterArn' => cluster_arn, 'desiredStatus' => 'RUNNING', 'taskDefinitionArn' => task_def_arn, 'lastStatus' => 'PENDING', 'taskArn' => task_arn, 'containerInstanceArn' => container_instance_arn, 'containers' => containers } self.data[:tasks] << task response.body = { 'RunTaskResult' => { 'failures' => [], 'tasks' => [] << task }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/start_task.rb000066400000000000000000000121101437344660100224150ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/start_task' # Starts a new task from the specified task definition on the specified container instance or instances. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_StartTask.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that you want to start your task on. # * containerInstances <~Array> - container instance UUIDs or full ARN entries for the container instances on which you would like to place your task. # * overrides <~Hash> - list of container overrides. # * startedBy <~String> - optional tag specified when a task is started # * taskDefinition <~String> - family and revision (family:revision) or full ARN of the task definition that you want to start. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'tasks' <~Array> - full description of the tasks that were started. # * 'failures' <~Array> - Any failed tasks from your StartTask action are listed here. def start_task(params={}) if container_instances = params.delete('containerInstances') params.merge!(Fog::AWS.indexed_param('containerInstances.member', [*container_instances])) end if overrides = params.delete('overrides') serialized_overrides = {} if overrides.is_a?(Hash) overrides.each_pair do |k,v| serialized_overrides.merge!(Fog::AWS.serialize_keys(k, v)) end end params.merge!('overrides' => serialized_overrides) end request({ 'Action' => 'StartTask', :parser => Fog::Parsers::AWS::ECS::StartTask.new }.merge(params)) end end class Mock def start_task(params={}) response = Excon::Response.new response.status = 200 unless task_def_id = params.delete('taskDefinition') msg = 'ClientException => TaskDefinition cannot be empty.' raise Fog::AWS::ECS::Error, msg end unless instances_id = params.delete('containerInstances') msg = 'ClientException => Container instances cannot be empty.' 
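# Usage sketch (illustrative identifiers; assumes `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.start_task(
#     'cluster'            => 'default',
#     'taskDefinition'     => 'my-family:1',
#     'containerInstances' => ['arn:aws:ecs:us-east-1:123456789012:container-instance/0000aaaa-1111-2222-3333-444455556666']
#   )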
raise Fog::AWS::ECS::Error, msg end begin result = describe_task_definition('taskDefinition' => task_def_id).body task_def = result["DescribeTaskDefinitionResult"]["taskDefinition"] task_def_arn = task_def["taskDefinitionArn"] rescue Fog::AWS::ECS::Error => e msg = 'ClientException => TaskDefinition not found.' raise Fog::AWS::ECS::Error, msg end if %w(startedBy overrides).any? { |k| params.has_key?(k) } Fog::Logger.warning("you used parameters not mocked yet [light_black](#{caller.first})[/]") Fog::Mock.not_implemented end cluster_id = params.delete('cluster') || 'default' cluster_arn = nil owner_id = Fog::AWS::Mock.owner_id if cluster_id.match(/^arn:aws:ecs:.+:\d{1,12}:cluster\/(.+)$/) cluster_arn = cluster_id else cluster_path = "cluster/#{cluster_id}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) end task_path = "task/#{UUID.uuid}" task_arn = Fog::AWS::Mock.arn('ecs', owner_id, task_path, region) instance_path = "container-instance/#{UUID.uuid}" instance_id = [*instances_id].first if instance_id.match(/^arn:aws:ecs:.+:\d{1,12}:container-instance\/(.+)$/) container_instance_arn = instance_id else instance_path = "container-instance/#{instance_id}" container_instance_arn = Fog::AWS::Mock.arn('ecs', owner_id, instance_path, region) end containers = [] task_def["containerDefinitions"].each do |c| container_path = "container/#{UUID.uuid}" containers << { 'name' => c['name'], 'taskArn' => task_arn, 'lastStatus' => 'PENDING', 'containerArn' => Fog::AWS::Mock.arn('ecs', owner_id, container_path, region) } end task = { 'clusterArn' => cluster_arn, 'desiredStatus' => 'RUNNING', 'taskDefinitionArn' => task_def_arn, 'lastStatus' => 'PENDING', 'taskArn' => task_arn, 'containerInstanceArn' => container_instance_arn, 'containers' => containers } self.data[:tasks] << task response.body = { 'StartTaskResult' => { 'failures' => [], 'tasks' => [] << task }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/stop_task.rb000066400000000000000000000041131437344660100222510ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/stop_task' # Stops a running task. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_StopTask.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that hosts the task you want to stop. # * task <~String> - task UUIDs or full Amazon Resource Name (ARN) entry of the task you would like to stop. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Task' <~Hash> - The full description of the stopped task. def stop_task(params={}) request({ 'Action' => 'StopTask', :parser => Fog::Parsers::AWS::ECS::StopTask.new }.merge(params)) end end class Mock def stop_task(params={}) response = Excon::Response.new response.status = 200 unless task_id = params.delete('task') msg = "InvalidParameterException => Task can not be blank." raise Fog::AWS::ECS::Error, msg end if cluster = params.delete('cluster') Fog::Logger.warning("you used parameters not mocked yet [light_black](#{caller.first})[/]") end if match = task_id.match(/^arn:aws:ecs:.+:\d{1,12}:task\/(.+)$/) i = self.data[:tasks].index { |t| t['taskArn'].eql?(task_id) } else i = self.data[:tasks].index { |t| t['taskArn'].match(/#{task_id}$/) } end msg = "ClientException => The referenced task was not found." 
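# Usage sketch (illustrative identifiers; assumes `ecs` is a configured Fog::AWS::ECS connection):
#   ecs.stop_task('cluster' => 'default', 'task' => 'arn:aws:ecs:us-east-1:123456789012:task/0000aaaa-1111-2222-3333-444455556666')
# A bare task UUID is also accepted, as the lookup below falls back to a suffix match.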
raise Fog::AWS::ECS::Error, msg unless i task = self.data[:tasks][i] task['desiredStatus'] = 'STOPPED' self.data[:tasks].delete_at(i) response.body = { 'StopTaskResult' => { 'task' => task }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ecs/update_service.rb000066400000000000000000000056171437344660100232560ustar00rootroot00000000000000module Fog module AWS class ECS class Real require 'fog/aws/parsers/ecs/update_service' # Modify the desired count or task definition used in a service. # http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_UpdateService.html # ==== Parameters # * cluster <~String> - short name or full Amazon Resource Name (ARN) of the cluster that your service is running on. # * desiredCount <~Integer> - number of instantiations of the task that you would like to place and keep running in your service. # * service <~String> - name of the service that you want to update. # * taskDefinition <~String> - family and revision (family:revision) or full Amazon Resource Name (ARN) of the task definition that you want to run in your service. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Service'<~Hash> - The full description of the updated cluster def update_service(params={}) request({ 'Action' => 'UpdateService', :parser => Fog::Parsers::AWS::ECS::UpdateService.new }.merge(params)) end end class Mock def update_service(params={}) response = Excon::Response.new response.status = 200 service_id = params.delete('service') msg = 'ClientException => Service cannot be empty.' raise Fog::AWS::ECS::Error, msg unless service_id owner_id = Fog::AWS::Mock.owner_id cluster = params.delete('cluster') || 'default' if !cluster.match(/^arn:aws:ecs:.+:.+:cluster\/(.+)$/) cluster_path = "cluster/#{cluster}" cluster_arn = Fog::AWS::Mock.arn('ecs', owner_id, cluster_path, region) else cluster_arn = cluster end if match = service_id.match(/^arn:aws:ecs:.+:\d{1,12}:service\/(.+)$/) i = self.data[:services].index do |s| s['clusterArn'].eql?(cluster_arn) && s['serviceArn'].eql?(service_id) end else i = self.data[:services].index do |s| s['clusterArn'].eql?(cluster_arn) && s['serviceName'].eql?(service_id) end end msg = "ServiceNotFoundException => Service not found." raise Fog::AWS::ECS::Error, msg unless i service = self.data[:services][i] if desired_count = params.delete('desiredCount') # ignore end if task_definition = params.delete('taskDefinition') service['taskDefinition'] = task_definition end response.body = { 'UpdateServiceResult' => { 'service' => service }, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/000077500000000000000000000000001437344660100177215ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/efs/create_file_system.rb000066400000000000000000000072751437344660100241270ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Create a new, empty file system # http://docs.aws.amazon.com/efs/latest/ug/API_CreateFileSystem.html # ==== Parameters # * CreationToken <~String> - String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent creation. # * PerformanceMode <~String> - (Optional) The PerformanceMode of the file system. We recommend generalPurpose performance mode for most file systems. 
File systems using the maxIO performance mode can scale to higher levels of aggregate throughput and operations per second with a tradeoff of slightly higher latencies for most file operations. This can't be changed after the file system has been created. # * Encrypted <~Boolean> - (Optional) A Boolean value that, if true, creates an encrypted file system. When creating an encrypted file system, you have the option of specifying a CreateFileSystem:KmsKeyId for an existing AWS Key Management Service (AWS KMS) customer master key (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem, is used to protect the encrypted file system. # * KmsKeyId <~String> - (Optional) The ID of the AWS KMS CMK to be used to protect the encrypted file system. This parameter is only required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for Amazon EFS is used. This ID can be in one of the following formats: # - Key ID - A unique identifier of the key, for example, 1234abcd-12ab-34cd-56ef-1234567890ab. # - ARN - An Amazon Resource Name (ARN) for the key, for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab. # - Key alias - A previously created display name for a key. For example, alias/projectKey1. # - Key alias ARN - An ARN for a key alias, for example, arn:aws:kms:us-west-2:444455556666:alias/projectKey1. # If KmsKeyId is specified, the CreateFileSystem:Encrypted parameter must be set to true. # ==== Returns # * response<~Excon::Response> # * body<~Hash> def create_file_system(creation_token, options={}) params = { :path => "file-systems", :method => 'POST', :expects => 201, 'CreationToken' => creation_token, 'PerformanceMode' => options[:peformance_mode] || 'generalPurpose', 'Encrypted' => options[:encrypted] || false } params[:kms_key_id] = options[:kms_key_id] if options.key?(:kms_key_id) request(params) end end class Mock def create_file_system(creation_token, options={}) response = Excon::Response.new id = "fs-#{Fog::Mock.random_letters(8)}" file_system = { "OwnerId" => Fog::AWS::Mock.owner_id, "CreationToken" => creation_token, "PerformanceMode" => options[:performance_mode] || "generalPurpose", "Encrypted" => options[:encrypted] || false, "FileSystemId" => id, "CreationTime" => Time.now.to_i.to_f, "LifeCycleState" => "creating", "NumberOfMountTargets" => 0, "SizeInBytes" => { "Value" => 1024, "Timestamp" => Time.now.to_i.to_f } } file_system[:kms_key_id] = options[:kms_key_id] if options.key?(:kms_key_id) self.data[:file_systems][id] = file_system response.body = file_system response.status = 201 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/create_mount_target.rb000066400000000000000000000055771437344660100243170ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Create a mount target for a specified file system # http://docs.aws.amazon.com/efs/latest/ug/API_CreateMountTarget.html # ==== Parameters # * FileSystemId <~String> - ID of the file system for which to create the mount target. # * IpAddress <~String> - Valid IPv4 address within the address range of the specified subnet. # * SecurityGroups <~Array> - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. # * SubnetId <~String> - ID of the subnet to add the mount target in. 
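# Example (illustrative IDs; `efs` is a configured Fog::AWS::EFS connection):
#   efs.create_mount_target('fs-12345678', 'subnet-12345678', 'SecurityGroups' => ['sg-12345678'])
# The target file system must already be in the 'available' lifecycle state.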
# ==== Returns # * response<~Excon::Response> # * body<~Hash> def create_mount_target(file_system_id, subnet_id, options={}) request({ :path => "mount-targets", :method => "POST", 'FileSystemId' => file_system_id, 'SubnetId' => subnet_id }.merge(options)) end end class Mock def create_mount_target(file_system_id, subnet_id, options={}) response = Excon::Response.new default_security_group = mock_compute.data[:security_groups].find do |_, sg| sg['groupDescription'] == 'default_elb security group' end security_groups = options["SecurityGroups"] || [default_security_group.first] unless file_system = self.data[:file_systems][file_system_id] raise Fog::AWS::EFS::NotFound.new("invalid file system ID: #{file_system_id}") end unless file_system["LifeCycleState"] == 'available' # this error doesn't include a message for some reason raise Fog::AWS::EFS::IncorrectFileSystemLifeCycleState.new("") end unless subnet = mock_compute.subnets.get(subnet_id) raise Fog::AWS::EFS::InvalidSubnet.new("invalid subnet ID: #{subnet_id}") end security_groups.each do |group_id| unless mock_compute.data[:security_groups][group_id] raise Fog::AWS::EFS::NotFound.new("invalid security group ID: #{group_id}") end end id = "fsmt-#{Fog::Mock.random_letters(8)}" mount_target = { 'MountTargetId' => id, 'FileSystemId' => file_system_id, 'IpAddress' => Fog::AWS::Mock.ip_address, 'OwnerId' => Fog::AWS::Mock.owner_id, 'LifeCycleState' => 'creating', 'NetworkInterfaceId' => "eni-#{Fog::Mock.random_hex(8)}", 'SubnetId' => subnet.identity, } self.data[:mount_targets][id] = mount_target self.data[:security_groups][id] = security_groups response.body = mount_target response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/delete_file_system.rb000066400000000000000000000021411437344660100241110ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Delete a file system # http://docs.aws.amazon.com/efs/latest/ug/API_DeleteFileSystem.html # ==== Parameters # * FileSystemId <~String> - ID of the file system you want to delete. 
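# Example (illustrative ID; `efs` is a configured Fog::AWS::EFS connection):
#   efs.delete_file_system('fs-12345678')
# All mount targets must be removed first; a file system with mount targets is reported as in use.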
# ==== Returns # * response<~Excon::Response> # * body - Empty # * status - 204 def delete_file_system(id) request({ :path => "file-systems/#{id}", :method => 'DELETE', :expects => 204, }) end end class Mock def delete_file_system(id) unless file_system = self.data[:file_systems][id] raise Fog::AWS::EFS::NotFound.new("invalid file system ID: #{id}") end if file_system["NumberOfMountTargets"] > 0 raise Fog::AWS::EFS::FileSystemInUse.new("") end self.data[:file_systems].delete(id) response = Excon::Response.new response.status = 204 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/delete_mount_target.rb000066400000000000000000000017021437344660100243000ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Delete a mount target # http://docs.aws.amazon.com/efs/latest/ug/API_DeleteMountTarget.html # ==== Parameters # * MountTargetId <~String> - ID of the mount target you want to delete # ==== Returns # * response<~Excon::Response> # * body - Empty # * status - 204 def delete_mount_target(id) request( :path => "mount-targets/#{id}", :method => "DELETE", :expects => 204 ) end end class Mock def delete_mount_target(id) response = Excon::Response.new unless self.data[:mount_targets][id] raise Fog::AWS::EFS::NotFound.new("invalid mount target ID: #{id}") end self.data[:mount_targets].delete(id) response.status = 204 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/describe_file_systems.rb000066400000000000000000000043471437344660100246240ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Describe all or specified elastic file systems # http://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html # ==== Parameters # * CreationToken <~String> - (Optional) Restricts the list to the file system with this creation token (String). You specify a creation token when you create an Amazon EFS file system. # * FileSystemId <~String> - (Optional) ID of the file system whose description you want to retrieve (String). 
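# Examples (illustrative values; `efs` is a configured Fog::AWS::EFS connection):
#   efs.describe_file_systems                                 # all file systems
#   efs.describe_file_systems(:id => 'fs-12345678')           # a single file system by ID
#   efs.describe_file_systems(:creation_token => 'my-token')  # filter by creation token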
# ==== Returns # * response<~Excon::Response>: # * body<~Hash> def describe_file_systems(options={}) params = {} if options[:marker] params['Marker'] = options[:marker] end if options[:max_records] params['MaxRecords'] = options[:max_records] end if options[:id] params['FileSystemId'] = options[:id] end if options[:creation_token] params['CreationToken'] = options[:creation_token] end request({ :path => "file-systems" }.merge(params)) end end class Mock def describe_file_systems(options={}) response = Excon::Response.new file_systems = if id = options[:id] if fs = self.data[:file_systems][id] [fs] else raise Fog::AWS::EFS::NotFound.new("invalid file system ID: #{id}") end elsif creation_token = options[:creation_token] fs = self.data[:file_systems].values.detect { |file_system| file_system["CreationToken"] == creation_token } [fs] else self.data[:file_systems].values end file_systems.each do |file_system| file_system['LifeCycleState'] = 'available' self.data[:file_systems][file_system['FileSystemId']] = file_system end response.body = {"FileSystems" => file_systems} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/describe_mount_target_security_groups.rb000066400000000000000000000020271437344660100301450ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Describe mount target security groups # http://docs.aws.amazon.com/efs/latest/ug/API_DescribeMountTargetSecurityGroups.html # ==== Parameters # * MountTargetId - Id of the mount target for which you want to describe security groups # ==== Returns # * response<~Excon::Response> # * body<~Hash> def describe_mount_target_security_groups(mount_target_id) request( :path => "mount-targets/#{mount_target_id}/security-groups" ) end end class Mock def describe_mount_target_security_groups(mount_target_id) response = Excon::Response.new unless self.data[:mount_targets][mount_target_id] raise Fog::AWS::EFS::NotFound.new("invalid mount target ID: #{mount_target_id}") end response.body = {"SecurityGroups" => self.data[:security_groups][mount_target_id]} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/describe_mount_targets.rb000066400000000000000000000043441437344660100250060ustar00rootroot00000000000000module Fog module AWS class EFS class Real # Describe all mount targets for a filesystem, or specified mount target # http://docs.aws.amazon.com/efs/latest/ug/API_DescribeMountTargets.html # ==== Parameters # * FileSystemId<~String> - Id of file system to describe mount targets for. Required unless MountTargetId is specified # * MountTargetId<~String> - Specific mount target to describe. 
Required if FileSystemId is not specified # ==== Returns # * response<~Excon::Response> # * body<~Hash> def describe_mount_targets(options={}) params = {} if options[:marker] params['Marker'] = options[:marker] end if options[:max_records] params['MaxRecords'] = options[:max_records] end if options[:id] params['MountTargetId'] = options[:id] end if options[:file_system_id] params['FileSystemId'] = options[:file_system_id] end request({ :path => "mount-targets" }.merge(params)) end end class Mock def describe_mount_targets(options={}) response = Excon::Response.new mount_targets = if id = options[:id] if mount_target = self.data[:mount_targets][id] [mount_target] else raise Fog::AWS::EFS::NotFound.new("Mount target does not exist.") end elsif file_system_id = options[:file_system_id] self.data[:mount_targets].values.select { |mt| mt["FileSystemId"] == file_system_id } else raise Fog::AWS::EFS::Error.new("file system ID or mount target ID must be specified") end mount_targets.each do |mount_target| mount_target['LifeCycleState'] = 'available' self.data[:mount_targets][mount_target["MountTargetId"]] = mount_target end response.body = {"MountTargets" => mount_targets} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/efs/modify_mount_target_security_groups.rb000066400000000000000000000022401437344660100276510ustar00rootroot00000000000000module Fog module AWS class EFS class Real def modify_mount_target_security_groups(id, security_groups) request({ :path => "mount-targets/#{id}/security-groups", :method => "PUT", :expects => 204, 'SecurityGroups' => security_groups }) end end class Mock def modify_mount_target_security_groups(id, security_groups) if security_groups.nil? || security_groups.empty? raise Fog::AWS::EFS::Error.new("Must provide at least one security group.") end response = Excon::Response.new unless self.data[:mount_targets][id] raise Fog::AWS::EFS::NotFound.new("invalid mount target ID: #{id}") end security_groups.each do |sg| raise Fog::AWS::EFS::NotFound.new("invalid security group ID: #{sg}") unless mock_compute.data[:security_groups].values.detect { |sgd| sgd["groupId"] == sg } end self.data[:security_groups][id] = security_groups response.status = 204 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/000077500000000000000000000000001437344660100214115ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/elasticache/authorize_cache_security_group_ingress.rb000066400000000000000000000041521437344660100317720ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_security_group' # Authorize ingress to a CacheSecurityGroup using EC2 Security Groups # # === Parameters # * name <~String> - The name of the cache security group # * ec2_name <~String> - The name of the EC2 security group to authorize # * ec2_owner_id <~String> - The AWS Account Number of the EC2 security group # === Returns # * response <~Excon::Response>: # * body <~Hash> def authorize_cache_security_group_ingress(name, ec2_name, ec2_owner_id) request({ 'Action' => 'AuthorizeCacheSecurityGroupIngress', 'CacheSecurityGroupName' => name, 'EC2SecurityGroupName' => ec2_name, 'EC2SecurityGroupOwnerId' => ec2_owner_id, :parser => Fog::Parsers::AWS::Elasticache::SingleSecurityGroup.new }) end end class Mock def authorize_cache_security_group_ingress(name, ec2_name, ec2_owner_id) opts = { 'EC2SecurityGroupName' => ec2_name, 'EC2SecurityGroupOwnerId' => ec2_owner_id } if sec_group = 
self.data[:security_groups][name] if sec_group['EC2SecurityGroups'].find{|h| h['EC2SecurityGroupName'] == opts['EC2SecurityGroupName']} raise Fog::AWS::Elasticache::AuthorizationAlreadyExists.new("AuthorizationAlreadyExists => #{opts['EC2SecurityGroupName']} is alreay defined") end sec_group['EC2SecurityGroups'] << opts.merge({'Status' => 'authorizing'}) Excon::Response.new( { :status => 200, :body => { 'ResponseMetadata'=>{ 'RequestId'=> Fog::AWS::Mock.request_id }, 'CacheSecurityGroup' => sec_group } } ) else raise Fog::AWS::Elasticache::NotFound.new("CacheSecurityGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/create_cache_cluster.rb000066400000000000000000000112741437344660100260720ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_cache_cluster' # creates a cache cluster # # === Required Parameters # * id <~String> - A unique cluster ID - 20 characters max. # === Optional Parameters # * options <~Hash> - All optional parameters should be set in this Hash: # * :node_type <~String> - The size (flavor) of the cache Nodes # * :security_group_names <~Array> - Array of Elasticache::SecurityGroup names # * :vpc_security_groups <~Array> - Array # * :num_nodes <~Integer> - The number of nodes in the Cluster # * :auto_minor_version_upgrade <~TrueFalseClass> # * :parameter_group_name <~String> - Name of the Cluster's ParameterGroup # * :engine <~String> - The Cluster's caching software (memcached) # * :engine_version <~String> - The Cluster's caching software version # * :notification_topic_arn <~String> - Amazon SNS Resource Name # * :port <~Integer> - The memcached port number # * :preferred_availablility_zone <~String> # * :preferred_maintenance_window <~String> # * :cache_subnet_group_name <~String> # * :s3_snapshot_location <~String> - Amazon resource location for snapshot # === Returns # * response <~Excon::Response>: # * body <~Hash> def create_cache_cluster(id, options = {}) req_options = { 'Action' => 'CreateCacheCluster', 'CacheClusterId' => id.strip, 'CacheNodeType' => options[:node_type] || 'cache.m1.large', 'Engine' => options[:engine] || 'memcached', 'NumCacheNodes' => options[:num_nodes] || 1, 'AutoMinorVersionUpgrade' => options[:auto_minor_version_upgrade], 'CacheParameterGroupName' => options[:parameter_group_name], 'CacheSubnetGroupName' => options[:cache_subnet_group_name], 'EngineVersion' => options[:engine_version], 'NotificationTopicArn' => options[:notification_topic_arn], 'Port' => options[:port], 'PreferredAvailabilityZone' => options[:preferred_availablility_zone], 'PreferredMaintenanceWindow' => options[:preferred_maintenance_window], :parser => Fog::Parsers::AWS::Elasticache::SingleCacheCluster.new } if s3_snapshot_location = options.delete(:s3_snapshot_location) req_options.merge!(Fog::AWS.indexed_param('SnapshotArns.member.%d', [*s3_snapshot_location])) end if cache_security_groups = options.delete(:security_group_names) req_options.merge!(Fog::AWS.indexed_param('CacheSecurityGroupNames.member.%d', [*cache_security_groups])) end if vpc_security_groups = options.delete(:vpc_security_groups) req_options.merge!(Fog::AWS.indexed_param('SecurityGroupIds.member.%d', [*vpc_security_groups])) end request( req_options ) end end class Mock def create_cache_cluster(id, options = {}) response = Excon::Response.new cluster = { # create an in-memory representation of this cluster 'CacheClusterId' => id.strip, 'NumCacheNodes' => options[:num_nodes] 
|| 1, 'CacheNodeType' => options[:node_type] || 'cache.m1.large', 'Engine' => options[:engine] || 'memcached', 'EngineVersion' => options[:engine_version] || '1.4.5', 'CacheClusterStatus' => 'available', 'CacheNodes' => create_cache_nodes(id.strip, options[:num_nodes]), 'CacheSecurityGroups' => [], 'SecurityGroups' => [], 'CacheParameterGroup' => { 'CacheParameterGroupName' => options[:parameter_group_name] || 'default.memcached1.4' }, 'CacheSubnetGroupName' => options[:cache_subnet_group_name], 'PendingModifiedValues' => {}, 'AutoMinorVersionUpgrade' => options[:auto_minor_version_upgrade] || 'true', 'PreferredMaintenanceWindow' => options[:preferred_maintenance_window] || 'sun:05:00-sun:09:00', } self.data[:clusters][id] = cluster # store the in-memory cluster response.body = { 'CacheCluster' => cluster.merge({'CacheClusterStatus' => 'creating'}), 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/create_cache_parameter_group.rb000066400000000000000000000034721437344660100276060ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_parameter_group' # creates a cache parameter group # # === Parameters # * name <~String> - The name for the Cache Parameter Group # === Optional Parameters # * description <~String> - The description for the Cache Parameter Group # * family <~String> - The description for the Cache Parameter Group # === Returns # * response <~Excon::Response>: # * body <~Hash> def create_cache_parameter_group(name, description = name, family = 'memcached1.4') request({ 'Action' => 'CreateCacheParameterGroup', 'CacheParameterGroupName' => name, 'Description' => description, 'CacheParameterGroupFamily' => family, :parser => Fog::Parsers::AWS::Elasticache::SingleParameterGroup.new }) end end class Mock def create_cache_parameter_group(name, description = name, family = 'memcached1.4') response = Excon::Response.new if self.data[:parameter_groups] and self.data[:parameter_groups][name] raise Fog::AWS::Elasticache::IdentifierTaken.new("Parameter group #{name} already exists") end data = { 'CacheParameterGroupName' => name, 'CacheParameterGroupFamily' => family.downcase, 'Description' => description } self.data[:parameter_groups][name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CreateCacheParameterGroupResult"=> {"CacheParameterGroup"=> data} } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/create_cache_security_group.rb000066400000000000000000000031231437344660100274660ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_security_group' # creates a cache security group # # === Parameters # * name <~String> - The name for the Cache Security Group # * description <~String> - The description for the Cache Security Group # === Returns # * response <~Excon::Response>: # * body <~Hash> def create_cache_security_group(name, description = name) request({ 'Action' => 'CreateCacheSecurityGroup', 'CacheSecurityGroupName' => name, 'Description' => description, :parser => Fog::Parsers::AWS::Elasticache::SingleSecurityGroup.new }) end end class Mock def create_cache_security_group(name, description = name) if self.data[:security_groups][name] raise Fog::AWS::Elasticache::IdentifierTaken.new("CacheClusterAlreadyExists => The security group '#{name}' already exists") 
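# Usage sketch (illustrative names; assumes `elasticache` is a configured Fog::AWS::Elasticache connection):
#   elasticache.create_cache_security_group('my-cache-sg', 'security group for my cache cluster')
# The description defaults to the group name when omitted.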
end data = { 'CacheSecurityGroupName' => name, 'Description' => description, 'EC2SecurityGroups' => [], 'OwnerId' => '0123456789' } self.data[:security_groups][name] = data Excon::Response.new( { :body => { 'ResponseMetadata'=>{ 'RequestId'=> Fog::AWS::Mock.request_id }, 'CacheSecurityGroup' => data } } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/create_cache_subnet_group.rb000066400000000000000000000045231437344660100271240ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/create_cache_subnet_group' # Creates a cache subnet group # http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html # # ==== Parameters # * CacheSubnetGroupName <~String> - A name for the cache subnet group. This value is stored as a lowercase string. Must contain no more than 255 alphanumeric characters or hyphens. # * SubnetIds <~Array> - The VPC subnet IDs for the cache subnet group. # * CacheSubnetGroupDescription <~String> - A description for the cache subnet group. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_cache_subnet_group(name, subnet_ids, description = name) params = { 'Action' => 'CreateCacheSubnetGroup', 'CacheSubnetGroupName' => name, 'CacheSubnetGroupDescription' => description, :parser => Fog::Parsers::AWS::Elasticache::CreateCacheSubnetGroup.new } params.merge!(Fog::AWS.indexed_param("SubnetIds.member", Array(subnet_ids))) request(params) end end class Mock def create_cache_subnet_group(name, subnet_ids, description = name) response = Excon::Response.new if self.data[:subnet_groups] && self.data[:subnet_groups][name] raise Fog::AWS::Elasticache::IdentifierTaken.new("CacheSubnetGroupAlreadyExists => The subnet group '#{name}' already exists") end collection = Fog::Compute[:aws] collection.region = @region subnets = collection.subnets subnets = subnet_ids.map { |snid| subnets.get(snid) } vpc_id = subnets.first.vpc_id data = { 'CacheSubnetGroupName' => name, 'CacheSubnetGroupDescription' => description, 'SubnetGroupStatus' => 'Complete', 'Subnets' => subnet_ids, 'VpcId' => vpc_id } self.data[:subnet_groups][name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, 'CreateCacheSubnetGroupResult' => { 'CacheSubnetGroup' => data } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/delete_cache_cluster.rb000066400000000000000000000022171437344660100260660ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_cache_clusters' # Deletes a Cache Cluster # # === Parameter (required): # * id <~String> - The ID of the cache cluster to delete # === Returns # * response <~Excon::Response>: # * body <~Hash> def delete_cache_cluster(cluster_id) request( 'Action' => 'DeleteCacheCluster', 'CacheClusterId' => cluster_id, :parser => Fog::Parsers::AWS::Elasticache::DescribeCacheClusters.new ) end end class Mock def delete_cache_cluster(cluster_id) response = Excon::Response.new cluster = self.data[:clusters][cluster_id] cluster['CacheClusterStatus'] = 'deleting' response.body = { 'CacheClusters' => self.data[:clusters].values, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } self.data[:clusters].delete(cluster_id) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/delete_cache_parameter_group.rb000066400000000000000000000021411437344660100275750ustar00rootroot00000000000000module Fog 
module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/base' # deletes a cache parameter group # # === Parameters # * name <~String> - The name for the Cache Parameter Group # === Returns # * response <~Excon::Response>: # * body <~Hash> def delete_cache_parameter_group(name) request({ 'Action' => 'DeleteCacheParameterGroup', 'CacheParameterGroupName' => name, :parser => Fog::Parsers::AWS::Elasticache::Base.new }) end end class Mock def delete_cache_parameter_group(name) response = Excon::Response.new if self.data[:parameter_groups].delete(name) response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, } response else raise Fog::AWS::Elasticache::NotFound.new("CacheParameterGroup not found: #{name}") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/delete_cache_security_group.rb000066400000000000000000000021261437344660100274670ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/base' # deletes a cache security group # # === Parameters # * name <~String> - The name for the Cache Security Group # === Returns # * response <~Excon::Response>: # * body <~Hash> def delete_cache_security_group(name) request({ 'Action' => 'DeleteCacheSecurityGroup', 'CacheSecurityGroupName' => name, :parser => Fog::Parsers::AWS::Elasticache::Base.new }) end end class Mock def delete_cache_security_group(name) if self.data[:security_groups].delete(name) Excon::Response.new( { :status => 200, :body => { 'ResponseMetadata'=>{ 'RequestId'=> Fog::AWS::Mock.request_id } } } ) else raise Fog::AWS::RDS::NotFound.new("DBSecurityGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/delete_cache_subnet_group.rb000066400000000000000000000021211437344660100271130ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/base' # deletes a cache subnet group # # === Parameters # * name <~String> - The name for the Cache Subnet Group # === Returns # * response <~Excon::Response>: # * body <~Hash> def delete_cache_subnet_group(name) request({ 'Action' => 'DeleteCacheSubnetGroup', 'CacheSubnetGroupName' => name, :parser => Fog::Parsers::AWS::Elasticache::Base.new }) end end class Mock def delete_cache_subnet_group(name) if self.data[:subnet_groups].delete(name) Excon::Response.new( { :status => 200, :body => { 'ResponseMetadata'=>{ 'RequestId'=> Fog::AWS::Mock.request_id } } } ) else raise Fog::AWS::Elasticache::NotFound.new("CacheSubnetGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_cache_clusters.rb000066400000000000000000000035031437344660100265660ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_cache_clusters' # Returns a list of Cache Cluster descriptions # # === Parameters (optional) # * id - The ID of an existing cache cluster # * options <~Hash> (optional): # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include # * :show_node_info <~Boolean> - whether to show node info # === Returns # * response <~Excon::Response>: # * body <~Hash> def describe_cache_clusters(id = nil, options = {}) request({ 'Action' => 'DescribeCacheClusters', 'CacheClusterId' => id, 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], 
'ShowCacheNodeInfo' => options[:show_node_info], :parser => Fog::Parsers::AWS::Elasticache::DescribeCacheClusters.new }) end end class Mock def describe_cache_clusters(id = nil, options = {}) response = Excon::Response.new all_clusters = self.data[:clusters].values.map do |cluster| cluster.merge!(options[:show_node_info] ? { 'CacheClusterCreateTime' => DateTime.now - 60, 'PreferredAvailabilityZone' => 'us-east-1a' } : {}) end if (id != nil) && (all_clusters.empty?) raise Fog::AWS::Elasticache::NotFound end response.body = { 'CacheClusters' => all_clusters, 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_cache_parameter_groups.rb000066400000000000000000000031441437344660100303020ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_parameter_groups' # Returns a list of CacheParameterGroup descriptions # # === Parameters (optional) # * name <~String> - The name of an existing cache parameter group # * options <~Hash> (optional): # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include def describe_cache_parameter_groups(name = nil, options = {}) request({ 'Action' => 'DescribeCacheParameterGroups', 'CacheParameterGroupName' => name, 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], :parser => Fog::Parsers::AWS::Elasticache::DescribeParameterGroups.new }.merge(options)) end end class Mock def describe_cache_parameter_groups(name = nil, options = {}) response = Excon::Response.new parameter_set = [] if name if server = self.data[:parameter_groups][name] parameter_set << server else raise Fog::AWS::Elasticache::NotFound.new("CacheParameterGroup #{name} not found") end else parameter_set = self.data[:parameter_groups].values end response.status = 200 response.body = { "CacheParameterGroups" => parameter_set } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_cache_parameters.rb000066400000000000000000000023201437344660100270610ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_cache_parameters' # Returns a list of CacheParameterGroup descriptions # # === Parameters (optional) # * name <~String> - The name of an existing cache parameter group # * options <~Hash> (optional): # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include # * :source <~String> - the parameter types to return. 
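        #
        # === Example
        # A minimal usage sketch (the parameter group name and option values are
        # illustrative; assumes AWS credentials are supplied to the connection):
        #
        #   elasticache = Fog::AWS::Elasticache.new
        #   elasticache.describe_cache_parameters('default.memcached1.4', :max_records => 20)
        #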
def describe_cache_parameters(name = nil, options = {}) request({ 'Action' => 'DescribeCacheParameters', 'CacheParameterGroupName' => name, 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], 'Source' => options[:source], :parser => Fog::Parsers::AWS::Elasticache::DescribeCacheParameters.new }) end end class Mock def describe_cache_parameters(name = nil, options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_cache_security_groups.rb000066400000000000000000000050171437344660100301720ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_security_groups' # Returns a list of CacheSecurityGroup descriptions # # === Parameters (optional) # * name <~String> - The name of an existing cache security group # * options <~Hash> (optional): # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include def describe_cache_security_groups(name = nil, options = {}) request({ 'Action' => 'DescribeCacheSecurityGroups', 'CacheSecurityGroupName' => name, 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], :parser => Fog::Parsers::AWS::Elasticache::DescribeSecurityGroups.new }.merge(options)) end end class Mock def describe_cache_security_groups(name = nil, opts={}) if name sec_group_set = [self.data[:security_groups][name]].compact raise Fog::AWS::Elasticache::NotFound.new("Security Group #{name} not found") if sec_group_set.empty? else sec_group_set = self.data[:security_groups].values end # TODO: refactor to not delete items that we're iterating over. Causes # model tests to fail (currently pending) sec_group_set.each do |sec_group| # TODO: refactor to not delete items that we're iterating over. Causes # model tests to fail (currently pending) sec_group["EC2SecurityGroups"].each do |ec2_secg| if ec2_secg["Status"] == "authorizing" || ec2_secg["Status"] == "revoking" ec2_secg[:tmp] ||= Time.now + Fog::Mock.delay * 2 if ec2_secg[:tmp] <= Time.now ec2_secg["Status"] = "authorized" if ec2_secg["Status"] == "authorizing" ec2_secg.delete(:tmp) sec_group["EC2SecurityGroups"].delete(ec2_secg) if ec2_secg["Status"] == "revoking" end end end end Excon::Response.new( { :status => 200, :body => { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CacheSecurityGroups" => sec_group_set } } ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_cache_subnet_groups.rb000066400000000000000000000037701437344660100276270ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_cache_subnet_groups' # This API returns a list of CacheSubnetGroup descriptions. If a CacheSubnetGroupName is specified, the list will contain only # the descriptions of the specified CacheSubnetGroup # http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeCacheSubnetGroups.html # ==== Parameters # * CacheSubnetGroupName <~String> - The name of a specific database subnet group to return details for. 
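        # * opts <~Hash> - (optional) additional request options:
        #   * :marker <~String> - marker provided in the previous request
        #   * :max_records <~Integer> - the maximum number of records to include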
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_cache_subnet_groups(name = nil, opts = {}) params = {} if opts[:marker] params['Marker'] = opts[:marker] end if name params['CacheSubnetGroupName'] = name end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeCacheSubnetGroups', :parser => Fog::Parsers::AWS::Elasticache::DescribeCacheSubnetGroups.new }.merge(params)) end end class Mock def describe_cache_subnet_groups(name = nil, opts = {}) response = Excon::Response.new subnet_group_set = [] if name if subnet_group = self.data[:subnet_groups][name] subnet_group_set << subnet_group else raise Fog::AWS::Elasticache::NotFound.new("Subnet Group #{name} not found") end else subnet_group_set = self.data[:subnet_groups].values end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeCacheSubnetGroupsResult" => { "CacheSubnetGroups" => subnet_group_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_engine_default_parameters.rb000066400000000000000000000022661437344660100310000ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_engine_default_parameters' # Returns the default engine and system parameter information # for the specified cache engine. # # === Parameters (optional) # * options <~Hash>: # * :engine <~String> - the engine whose parameters are requested # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include def describe_engine_default_parameters(options = {}) request({ 'Action' => 'DescribeEngineDefaultParameters', 'CacheParameterGroupFamily' => options[:engine] || 'memcached1.4', 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], :parser => Fog::Parsers::AWS::Elasticache::DescribeEngineDefaultParameters.new }) end end class Mock def describe_engine_defalut_parameters(options = {}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_events.rb000066400000000000000000000033771437344660100251140ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/event_list' # Returns a list of service events # # For more information see: # http://docs.amazonwebservices.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html # # === Parameters (optional) # * options <~Hash> (optional): # * :start_time <~DateTime> - starting time for event records # * :end_time <~DateTime> - ending time for event records # * :duration <~Integer> - time span for event records # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include # * :source_identifier <~String> - identifier of the event source # * :source_type <~String> - event type, one of: # (cache-cluster | cache-parameter-group | cache-security-group) # === Returns # * response <~Excon::Response>: # * body <~Hash> def describe_events(options = {}) request( 'Action' => 'DescribeEvents', 'StartTime' => options[:start_time], 'EndTime' => options[:end_time], 'Duration' => options[:duration], 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], 'SourceIdentifier' => options[:source_identifier], 'SourceType' => options[:source_type], :parser => Fog::Parsers::AWS::Elasticache::EventListParser.new ) end end class Mock 
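        # The mock backend does not implement DescribeEvents; calling it raises
        # Fog::Errors::MockNotImplemented.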
def describe_events Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/describe_reserved_cache_nodes.rb000066400000000000000000000024011437344660100277250ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/describe_reserved_cache_nodes' # Describe all or specified reserved Elasticache nodes # http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeReservedCacheNodes.html # ==== Parameters # * ReservedCacheNodeId <~String> - ID of node to retrieve information for. If absent, information for all nodes is returned. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_reserved_cache_nodes(identifier=nil, opts={}) params = {} params['ReservedCacheNodeId'] = identifier if identifier if opts[:marker] params['Marker'] = opts[:marker] end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeReservedCacheNodes', :parser => Fog::Parsers::AWS::Elasticache::DescribeReservedCacheNodes.new }.merge(params)) end end class Mock def describe_db_reserved_instances(identifier=nil, opts={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/modify_cache_cluster.rb000066400000000000000000000113051437344660100261110ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_cache_cluster' # Modifies an existing cache cluster # Returns a cache cluster description # # === Required Parameters # * id <~String> - The ID of the existing cluster to be modified # === Optional Parameters # * options <~Hash> - All optional parameters should be set in this Hash: # * :apply_immediately <~TrueFalseClass> - whether to apply changes now # * :auto_minor_version_upgrade <~TrueFalseClass> # * :num_nodes <~Integer> - The number of nodes in the Cluster # * :nodes_to_remove <~Array> - Array of node IDs to delete # * :security_group_names <~Array> - Array of Elasticache::SecurityGroup names # * :parameter_group_name <~String> - Name of the Cluster's ParameterGroup # * :engine_version <~String> - The Cluster's caching software version # * :notification_topic_arn <~String> - Amazon SNS Resource Name # * :notification_topic_status <~String> - Amazon SNS Topic status # * :preferred_maintenance_window <~String> # === Returns # * response <~Excon::Response>: # * body <~Hash> def modify_cache_cluster(id, options = {}) # Construct Cache Security Group parameters in the format: # CacheSecurityGroupNames.member.N => "security_group_name" group_names = options[:security_group_names] || [] sec_group_params = group_names.reduce({}) do |group_hash, name| index = group_names.index(name) + 1 group_hash["CacheSecurityGroupNames.member.#{index}"] = name group_hash end # Construct CacheNodeIdsToRemove parameters in the format: # CacheNodeIdsToRemove.member.N => "node_id" node_ids = options[:nodes_to_remove] || [] node_id_params = node_ids.reduce({}) do |node_hash, node_id| index = node_ids.index(node_id) + 1 node_hash["CacheNodeIdsToRemove.member.#{index}"] = node_id node_hash end # Merge the Cache Security Group parameters with the normal options request(node_id_params.merge(sec_group_params.merge( 'Action' => 'ModifyCacheCluster', 'CacheClusterId' => id.strip, 'ApplyImmediately' => options[:apply_immediately], 'NumCacheNodes' => options[:num_nodes], 'AutoMinorVersionUpgrade' => options[:auto_minor_version_upgrade], 'CacheParameterGroupName' => 
options[:parameter_group_name], 'EngineVersion' => options[:engine_version], 'NotificationTopicArn' => options[:notification_topic_arn], 'NotificationTopicStatus' => options[:notification_topic_status], 'PreferredMaintenanceWindow' => options[:preferred_maintenance_window], :parser => Fog::Parsers::AWS::Elasticache::SingleCacheCluster.new ))) end end class Mock def modify_cache_cluster(id, options = {}) response = Excon::Response.new cluster = self.data[:clusters][id] pending_values = Hash.new # For any given option, update the cluster's corresponding value { :auto_minor_version_upgrade => 'AutoMinorVersionUpgrade', :preferred_maintenance_window => 'PreferredMaintenanceWindow', :engine_version => 'EngineVersion', :num_nodes => 'NumCacheNodes', }.each do |option, cluster_key| if options[option] != nil cluster[cluster_key] = options[option].to_s pending_values[cluster_key] = options[option] end end cache['CacheParameterGroup'] = { 'CacheParameterGroupName' => options[:parameter_group_name] } if options[:parameter_group_name] if options[:num_nodes] || options[:engine_version] cluster['CacheNodes'] = create_cache_nodes(cluster['CacheClusterId'], options[:num_nodes]) cluster['NumCacheNodes'] = cluster['CacheNodes'].size end if options[:nodes_to_remove] pending_values['CacheNodeId'] = options[:nodes_to_remove].join(',') end response.body = { 'CacheCluster' => cluster.merge({ 'PendingModifiedValues' => pending_values }), 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/modify_cache_parameter_group.rb000066400000000000000000000031641437344660100276300ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/modify_parameter_group' # Modifies an existing cache parameter group # Returns a the name of the modified parameter group # # === Required Parameters # * id <~String> - The ID of the parameter group to be modified # * new_parameters <~Hash> - The parameters to modify, and their values # === Returns # * response <~Excon::Response>: # * body <~Hash> def modify_cache_parameter_group(id, new_parameters) # Construct Parameter Modifications in the format: # ParameterNameValues.member.N.ParameterName => "param_name" # ParameterNameValues.member.N.ParameterValue => "param_value" n = 0 # n is the parameter index parameter_changes = new_parameters.reduce({}) do |new_args,pair| n += 1 new_args["ParameterNameValues.member.#{n}.ParameterName"] = pair[0] new_args["ParameterNameValues.member.#{n}.ParameterValue"] = pair[1] new_args end # Merge the Cache Security Group parameters with the normal options request(parameter_changes.merge( 'Action' => 'ModifyCacheParameterGroup', 'CacheParameterGroupName' => id, :parser => Fog::Parsers::AWS::Elasticache::ModifyParameterGroup.new )) end end class Mock def modify_cache_parameter_group(id, new_parameters) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/reboot_cache_cluster.rb000066400000000000000000000033571437344660100261240ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_cache_cluster' # Reboots some or all of an existing cache cluster's nodes # Returns a cache cluster description # # === Required Parameters # * id <~String> - The ID of the existing cluster to be rebooted # === Optional Parameters # * nodes_to_reboot <~Array> - Array of node IDs to reboot # === Returns # * response 
<~Excon::Response>: # * body <~Hash> def reboot_cache_cluster(id, nodes_to_reboot) # Construct CacheNodeIdsToReboot parameters in the format: # CacheNodeIdsToReboot.member.N => "node_id" node_ids = nodes_to_reboot || [] node_id_params = node_ids.reduce({}) do |node_hash, node_id| index = node_ids.index(node_id) + 1 node_hash["CacheNodeIdsToReboot.member.#{index}"] = node_id node_hash end # Merge the CacheNodeIdsToReboot parameters with the normal options request(node_id_params.merge( 'Action' => 'RebootCacheCluster', 'CacheClusterId' => id, :parser => Fog::Parsers::AWS::Elasticache::SingleCacheCluster.new )) end end class Mock def reboot_cache_cluster(id, nodes_to_reboot) response = Excon::Response.new response.body = { 'CacheCluster' => self.data[:clusters][id].merge({ 'CacheClusterStatus' => 'rebooting cache cluster nodes' }), 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/reset_cache_parameter_group.rb000066400000000000000000000030661437344660100274640ustar00rootroot00000000000000module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/reset_parameter_group' # Resets an existing cache parameter group # Returns a the name of the modified parameter group # # === Required Parameters # * id <~String> - The ID of the parameter group to be modified # === Optional Parameters # * parameter_names <~Array> - The parameters to reset # === Returns # * response <~Excon::Response>: # * body <~Hash> def reset_cache_parameter_group(id, parameter_names = []) # Construct Parameter resets in the format: # ParameterNameValues.member.N => "param_name" parameter_changes = parameter_names.reduce({}) do |new_args, param| index = parameter_names.index(param) + 1 new_args["ParameterNameValues.member.#{index}"] = param new_args end if parameter_changes.empty? 
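            # No parameter names were supplied, so ask the API to reset every
            # parameter in the group.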
parameter_changes = {'ResetAllParameters' => 'true'} end # Merge the Cache Security Group parameters with the normal options request(parameter_changes.merge( 'Action' => 'ResetCacheParameterGroup', 'CacheParameterGroupName' => id, :parser => Fog::Parsers::AWS::Elasticache::ResetParameterGroup.new )) end end class Mock def reset_cache_parameter_group(id, parameter_names) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elasticache/revoke_cache_security_group_ingress.rb000066400000000000000000000021451437344660100312530ustar00rootroot00000000000000 module Fog module AWS class Elasticache class Real require 'fog/aws/parsers/elasticache/single_security_group' # Revoke ingress to a CacheSecurityGroup using EC2 Security Groups # # === Parameters # * name <~String> - The name of the cache security group # * ec2_name <~String> - The name of the EC2 security group to revoke # * ec2_owner_id <~String> - The AWS Account Number of the EC2 security group # === Returns # * response <~Excon::Response>: # * body <~Hash> def revoke_cache_security_group_ingress(name, ec2_name, ec2_owner_id) request({ 'Action' => 'RevokeCacheSecurityGroupIngress', 'CacheSecurityGroupName' => name, 'EC2SecurityGroupName' => ec2_name, 'EC2SecurityGroupOwnerId' => ec2_owner_id, :parser => Fog::Parsers::AWS::Elasticache::SingleSecurityGroup.new }) end end class Mock def revoke_cache_security_group_ingress Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/000077500000000000000000000000001437344660100177065ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/elb/add_tags.rb000066400000000000000000000027151437344660100220060ustar00rootroot00000000000000module Fog module AWS class ELB class Real # adds tags to a load balancer instance # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_AddTags.html # ==== Parameters # * elb_id <~String> - name of the ELB instance to be tagged # * tags <~Hash> A Hash of (String) key-value pairs # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def add_tags(elb_id, tags) keys = tags.keys.sort values = keys.map {|key| tags[key]} request({ 'Action' => 'AddTags', 'LoadBalancerNames.member.1' => elb_id, :parser => Fog::Parsers::AWS::ELB::Empty.new, }.merge(Fog::AWS.indexed_param('Tags.member.%d.Key', keys)). merge(Fog::AWS.indexed_param('Tags.member.%d.Value', values))) end end class Mock def add_tags(elb_id, tags) response = Excon::Response.new if server = self.data[:load_balancers][elb_id] self.data[:tags][elb_id].merge! 
tags response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound.new("Elastic load balancer #{elb_id} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/apply_security_groups_to_load_balancer.rb000066400000000000000000000041001437344660100302310ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/apply_security_groups_to_load_balancer' # Sets the security groups for an ELB in VPC # # ==== Parameters # * security_group_ids<~Array> - List of security group ids to enable on ELB # * lb_name<~String> - Load balancer to disable availability zones on # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'ApplySecurityGroupsToLoadBalancer'<~Hash>: # * 'SecurityGroups'<~Array> - array of strings describing the security group ids currently enabled def apply_security_groups_to_load_balancer(security_group_ids, lb_name) params = Fog::AWS.indexed_param('SecurityGroups.member', [*security_group_ids]) request({ 'Action' => 'ApplySecurityGroupsToLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::ApplySecurityGroupsToLoadBalancer.new }.merge!(params)) end alias_method :apply_security_groups, :apply_security_groups_to_load_balancer end class Mock def apply_security_groups_to_load_balancer(security_group_ids, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['SecurityGroups'] << security_group_ids load_balancer['SecurityGroups'].flatten!.uniq! response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DetachLoadBalancerFromSubnetsResult' => { 'SecurityGroups' => load_balancer['SecurityGroups'] } } response end alias_method :apply_security_groups, :apply_security_groups_to_load_balancer end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/attach_load_balancer_to_subnets.rb000066400000000000000000000036311437344660100265750ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/attach_load_balancer_to_subnets' # Enable a subnet for an existing ELB # # ==== Parameters # * subnet_ids<~Array> - List of subnet ids to enable on ELB # * lb_name<~String> - Load balancer to enable availability zones on # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'AttachLoadBalancerToSubnetsResult'<~Hash>: # * 'Subnets'<~Array> - array of strings describing the subnet ids currently enabled def attach_load_balancer_to_subnets(subnet_ids, lb_name) params = Fog::AWS.indexed_param('Subnets.member', [*subnet_ids]) request({ 'Action' => 'AttachLoadBalancerToSubnets', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::AttachLoadBalancerToSubnets.new }.merge!(params)) end alias_method :enable_subnets, :attach_load_balancer_to_subnets end class Mock def attach_load_balancer_to_subnets(subnet_ids, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['Subnets'] << subnet_ids load_balancer['Subnets'].flatten!.uniq! 
response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'AttachLoadBalancerToSubnetsResult' => { 'Subnets' => load_balancer['Subnets'] } } response end alias_method :enable_subnets, :attach_load_balancer_to_subnets end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/configure_health_check.rb000066400000000000000000000047171437344660100247070ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/configure_health_check' # Enables the client to define an application healthcheck for the instances. # See http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/index.html?API_ConfigureHealthCheck.html # # ==== Parameters # * lb_name<~String> - Name of the ELB # * health_check<~Hash> - A hash of parameters describing the health check # * 'HealthyThreshold'<~Integer> - Specifies the number of consecutive # health probe successes required before moving the instance to the Healthy state. # * 'Interval'<~Integer> - Specifies the approximate interval, in seconds, # between health checks of an individual instance. # * 'Target'<~String> - Specifies the instance being checked. # The protocol is either TCP or HTTP. The range of valid ports is one (1) through 65535. # * 'Timeout'<~Integer> - Specifies the amount of time, in seconds, # during which no response means a failed health probe. # * 'UnhealthyThreshold'<~Integer> - Specifies the number of consecutive # health probe failures required before moving the instance to the Unhealthy state. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def configure_health_check(lb_name, health_check) params = {'LoadBalancerName' => lb_name} health_check.each {|key, value| params["HealthCheck.#{key}"] = value } request({ 'Action' => 'ConfigureHealthCheck', :parser => Fog::Parsers::AWS::ELB::ConfigureHealthCheck.new }.merge!(params)) end end class Mock def configure_health_check(lb_name, health_check) if load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['HealthCheck'] = health_check response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'ConfigureHealthCheckResult' => { 'HealthCheck' => load_balancer['HealthCheck'] } } response else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/create_app_cookie_stickiness_policy.rb000066400000000000000000000033121437344660100275040ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Create an app cookie stickiness policy # # ==== Parameters # * lb_name<~String> - Name of the ELB # * policy_name<~String> - The name of the policy being created. # The name must be unique within the set of policies for this Load Balancer. # * cookie_name<~String> - Name of the application cookie used for stickiness. 
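        #
        # ==== Example
        # A minimal sketch (load balancer, policy, and cookie names are hypothetical;
        # assumes the load balancer already exists):
        #
        #   elb = Fog::AWS::ELB.new
        #   elb.create_app_cookie_stickiness_policy('my-elb', 'my-app-cookie-policy', 'SESSIONID')
        #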
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def create_app_cookie_stickiness_policy(lb_name, policy_name, cookie_name) params = {'CookieName' => cookie_name, 'PolicyName' => policy_name} request({ 'Action' => 'CreateAppCookieStickinessPolicy', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def create_app_cookie_stickiness_policy(lb_name, policy_name, cookie_name) if load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 create_load_balancer_policy(lb_name, policy_name, 'AppCookieStickinessPolicyType', {'CookieName' => cookie_name}) response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/create_lb_cookie_stickiness_policy.rb000066400000000000000000000037261437344660100273320ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Create a Load Balancer Cookie Stickiness Policy # # ==== Parameters # * lb_name<~String> - Name of the ELB # * policy_name<~String> - The name of the policy being created. The name # must be unique within the set of policies for this Load Balancer. # * cookie_expiration_period<~Integer> - The time period in seconds after # which the cookie should be considered stale. Not specifying this # parameter indicates that the sticky session will last for the duration of the browser session. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def create_lb_cookie_stickiness_policy(lb_name, policy_name, cookie_expiration_period=nil) params = {'PolicyName' => policy_name, 'CookieExpirationPeriod' => cookie_expiration_period} request({ 'Action' => 'CreateLBCookieStickinessPolicy', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def create_lb_cookie_stickiness_policy(lb_name, policy_name, cookie_expiration_period=nil) if load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 create_load_balancer_policy(lb_name, policy_name, 'LBCookieStickinessPolicyType', {'CookieExpirationPeriod' => cookie_expiration_period}) response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/create_load_balancer.rb000066400000000000000000000213111437344660100243220ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/create_load_balancer' # Create a new Elastic Load Balancer # # ==== Parameters # * availability_zones<~Array> - List of availability zones for the ELB # * lb_name<~String> - Name for the new ELB -- must be unique # * listeners<~Array> - Array of Hashes describing ELB listeners to assign to the ELB # * 'Protocol'<~String> - Protocol to use. Either HTTP, HTTPS, TCP or SSL. # * 'LoadBalancerPort'<~Integer> - The port that the ELB will listen to for outside traffic # * 'InstancePort'<~Integer> - The port on the instance that the ELB will forward traffic to # * 'InstanceProtocol'<~String> - Protocol for sending traffic to an instance. Either HTTP, HTTPS, TCP or SSL. 
# * 'SSLCertificateId'<~String> - ARN of the server certificate # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'CreateLoadBalancerResult'<~Hash>: # * 'DNSName'<~String> - DNS name for the newly created ELB def create_load_balancer(availability_zones, lb_name, listeners, options = {}) params = Fog::AWS.indexed_param('AvailabilityZones.member', [*availability_zones]) params.merge!(Fog::AWS.indexed_param('Subnets.member.%d', options[:subnet_ids])) params.merge!(Fog::AWS.serialize_keys('Scheme', options[:scheme])) params.merge!(Fog::AWS.indexed_param('SecurityGroups.member.%d', options[:security_groups])) listener_protocol = [] listener_lb_port = [] listener_instance_port = [] listener_instance_protocol = [] listener_ssl_certificate_id = [] listeners.each do |listener| listener_protocol.push(listener['Protocol']) listener_lb_port.push(listener['LoadBalancerPort']) listener_instance_port.push(listener['InstancePort']) listener_instance_protocol.push(listener['InstanceProtocol']) listener_ssl_certificate_id.push(listener['SSLCertificateId']) end params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.Protocol', listener_protocol)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.LoadBalancerPort', listener_lb_port)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.InstancePort', listener_instance_port)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.InstanceProtocol', listener_instance_protocol)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.SSLCertificateId', listener_ssl_certificate_id)) request({ 'Action' => 'CreateLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::CreateLoadBalancer.new }.merge!(params)) end end class Mock def create_load_balancer(availability_zones, lb_name, listeners = [], options = {}) response = Excon::Response.new response.status = 200 raise Fog::AWS::ELB::IdentifierTaken if self.data[:load_balancers].key? lb_name certificate_ids = Fog::AWS::IAM::Mock.data[@aws_access_key_id][:server_certificates].map {|n, c| c['Arn'] } listeners = [*listeners].map do |listener| if listener['SSLCertificateId'] and !certificate_ids.include? listener['SSLCertificateId'] raise Fog::AWS::IAM::NotFound.new('CertificateNotFound') end {'Listener' => listener, 'PolicyNames' => []} end dns_name = Fog::AWS::ELB::Mock.dns_name(lb_name, @region) availability_zones = [*availability_zones].compact subnet_ids = options[:subnet_ids] || [] region = if availability_zones.any? availability_zones.first.gsub(/[a-z]$/, '') elsif subnet_ids.any? # using Hash here for Rubt 1.8.7 support. Hash[ Fog::AWS::Compute::Mock.data.select do |_, region_data| region_data[@aws_access_key_id][:subnets].any? do |region_subnets| subnet_ids.include? region_subnets['subnetId'] end end ].keys[0] else 'us-east-1' end supported_platforms = Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:account_attributes].find { |h| h["attributeName"] == "supported-platforms" }["values"] subnets = Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:subnets].select {|e| subnet_ids.include?(e["subnetId"]) } # http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html elb_location = if supported_platforms.include?("EC2") if subnet_ids.empty? 'EC2-Classic' else 'EC2-VPC' end else if subnet_ids.empty? 
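                             # VPC-only accounts (no EC2-Classic support) fall back to the
                             # default-VPC location when no subnet ids are supplied.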
'EC2-VPC-Default' else 'VPC' end end security_group = case elb_location when 'EC2-Classic' Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:security_groups]['amazon-elb-sg'] when 'EC2-VPC-Default' compute = Fog::AWS::Compute::new(:aws_access_key_id => @aws_access_key_id, :aws_secret_access_key => @aws_secret_access_key) vpc = compute.vpcs.all.first || compute.vpcs.create('cidr_block' => '10.0.0.0/24') Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:security_groups].values.find { |sg| sg['groupName'] =~ /^default_elb/ && sg["vpcId"] == vpc.id } when 'EC2-VPC' vpc_id = subnets.first["vpcId"] Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:security_groups].values.find { |sg| sg['groupName'] == 'default' && sg["vpcId"] == vpc_id } end self.data[:tags] ||= {} self.data[:tags][lb_name] = {} self.data[:load_balancers][lb_name] = { 'AvailabilityZones' => availability_zones, 'BackendServerDescriptions' => [], # Hack to facilitate not updating the local data structure # (BackendServerDescriptions) until we do a subsequent # describe as that is how AWS behaves. 'BackendServerDescriptionsRemote' => [], 'Subnets' => options[:subnet_ids] || [], 'Scheme' => options[:scheme].nil? ? 'internet-facing' : options[:scheme], 'SecurityGroups' => options[:security_groups].nil? ? [] : options[:security_groups], 'CanonicalHostedZoneName' => '', 'CanonicalHostedZoneNameID' => '', 'CreatedTime' => Time.now, 'DNSName' => dns_name, 'HealthCheck' => { 'HealthyThreshold' => 10, 'Timeout' => 5, 'UnhealthyThreshold' => 2, 'Interval' => 30, 'Target' => 'TCP:80' }, 'Instances' => [], 'ListenerDescriptions' => listeners, 'LoadBalancerAttributes' => { 'ConnectionDraining' => {'Enabled' => false, 'Timeout' => 300}, 'CrossZoneLoadBalancing' => {'Enabled' => false}, 'ConnectionSettings' => {'IdleTimeout' => 60} }, 'LoadBalancerName' => lb_name, 'Policies' => { 'AppCookieStickinessPolicies' => [], 'LBCookieStickinessPolicies' => [], 'OtherPolicies' => [], 'Proper' => [] }, 'SourceSecurityGroup' => { 'GroupName' => security_group['groupName'], 'OwnerAlias' => '' } } response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'CreateLoadBalancerResult' => { 'DNSName' => dns_name } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/create_load_balancer_listeners.rb000066400000000000000000000074261437344660100264250ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Create Elastic Load Balancer Listeners # # ==== Parameters # * lb_name<~String> - Name for the new ELB -- must be unique # * listeners<~Array> - Array of Hashes describing ELB listeners to add to the ELB # * 'Protocol'<~String> - Protocol to use. Either HTTP, HTTPS, TCP or SSL. # * 'LoadBalancerPort'<~Integer> - The port that the ELB will listen to for outside traffic # * 'InstancePort'<~Integer> - The port on the instance that the ELB will forward traffic to # * 'InstanceProtocol'<~String> - Protocol for sending traffic to an instance. Either HTTP, HTTPS, TCP or SSL. 
# * 'SSLCertificateId'<~String> - ARN of the server certificate # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def create_load_balancer_listeners(lb_name, listeners) params = {} listener_protocol = [] listener_lb_port = [] listener_instance_port = [] listener_instance_protocol = [] listener_ssl_certificate_id = [] listeners.each do |listener| listener_protocol.push(listener['Protocol']) listener_lb_port.push(listener['LoadBalancerPort']) listener_instance_port.push(listener['InstancePort']) listener_instance_protocol.push(listener['InstanceProtocol']) listener_ssl_certificate_id.push(listener['SSLCertificateId']) end params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.Protocol', listener_protocol)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.LoadBalancerPort', listener_lb_port)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.InstancePort', listener_instance_port)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.InstanceProtocol', listener_instance_protocol)) params.merge!(Fog::AWS.indexed_param('Listeners.member.%d.SSLCertificateId', listener_ssl_certificate_id)) request({ 'Action' => 'CreateLoadBalancerListeners', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def create_load_balancer_listeners(lb_name, listeners) load_balancer = data[:load_balancers][lb_name] raise Fog::AWS::ELB::NotFound unless load_balancer response = Excon::Response.new certificate_ids = Fog::AWS::IAM::Mock.data[@aws_access_key_id][:server_certificates].map { |_n, c| c['Arn'] } listeners.each do |listener| if listener['SSLCertificateId'] && !certificate_ids.include?(listener['SSLCertificateId']) raise Fog::AWS::IAM::NotFound, 'CertificateNotFound' end if listener['Protocol'] && listener['InstanceProtocol'] if ( %w[HTTP HTTPS].include?(listener['Protocol']) && !%w[HTTP HTTPS].include?(listener['InstanceProtocol']) ) || ( %w[TCP SSL].include?(listener['Protocol']) && !%w[TCP SSL].include?(listener['InstanceProtocol']) ) raise Fog::AWS::ELB::ValidationError end end load_balancer['ListenerDescriptions'] << { 'Listener' => listener, 'PolicyNames' => [] } end response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/create_load_balancer_policy.rb000066400000000000000000000064211437344660100257060ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Create Elastic Load Balancer Policy # # ==== Parameters # * lb_name<~String> - The name associated with the LoadBalancer for which the policy is being created. This name must be unique within the client AWS account. # * attributes<~Hash> - A list of attributes associated with the policy being created. # * 'AttributeName'<~String> - The name of the attribute associated with the policy. # * 'AttributeValue'<~String> - The value of the attribute associated with the policy. # * name<~String> - The name of the LoadBalancer policy being created. The name must be unique within the set of policies for this LoadBalancer. # * type_name<~String> - The name of the base policy type being used to create this policy. To get the list of policy types, use the DescribeLoadBalancerPolicyTypes action. 
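        #
        # ==== Example
        # A minimal sketch (names are hypothetical; the policy type and attribute shown
        # are the standard proxy-protocol settings, but any type returned by
        # DescribeLoadBalancerPolicyTypes may be used):
        #
        #   elb = Fog::AWS::ELB.new
        #   elb.create_load_balancer_policy('my-elb', 'my-proxy-policy', 'ProxyProtocolPolicyType', 'ProxyProtocol' => true)
        #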
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def create_load_balancer_policy(lb_name, name, type_name, attributes = {}) params = {} attribute_name = [] attribute_value = [] attributes.each do |name, value| attribute_name.push(name) attribute_value.push(value) end params.merge!(Fog::AWS.indexed_param('PolicyAttributes.member.%d.AttributeName', attribute_name)) params.merge!(Fog::AWS.indexed_param('PolicyAttributes.member.%d.AttributeValue', attribute_value)) request({ 'Action' => 'CreateLoadBalancerPolicy', 'LoadBalancerName' => lb_name, 'PolicyName' => name, 'PolicyTypeName' => type_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def create_load_balancer_policy(lb_name, name, type_name, attributes = {}) if load_balancer = self.data[:load_balancers][lb_name] raise Fog::AWS::ELB::DuplicatePolicyName, name if policy = load_balancer['Policies']['Proper'].find { |p| p['PolicyName'] == name } raise Fog::AWS::ELB::PolicyTypeNotFound, type_name unless policy_type = self.data[:policy_types].find { |pt| pt['PolicyTypeName'] == type_name } response = Excon::Response.new attributes = attributes.map do |key, value| if key == "CookieExpirationPeriod" && !value value = 0 end {"AttributeName" => key, "AttributeValue" => value.to_s} end load_balancer['Policies']['Proper'] << { 'PolicyAttributeDescriptions' => attributes, 'PolicyName' => name, 'PolicyTypeName' => type_name } response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/delete_load_balancer.rb000066400000000000000000000025771437344660100243360ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/delete_load_balancer' # Delete an existing Elastic Load Balancer # # Note that this API call, as defined by Amazon, is idempotent. # That is, it will not return an error if you try to delete an # ELB that does not exist. 
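        # For example (the name below is hypothetical):
        #
        #   elb = Fog::AWS::ELB.new
        #   elb.delete_load_balancer('my-elb') # succeeds even if 'my-elb' was already deleted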
# # ==== Parameters # * lb_name<~String> - Name of the ELB to be deleted # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'DeleteLoadBalancerResponse'<~nil> # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def delete_load_balancer(lb_name) request({ 'Action' => 'DeleteLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DeleteLoadBalancer.new }) end end class Mock def delete_load_balancer(lb_name) response = Excon::Response.new response.status = 200 self.data[:load_balancers].delete(lb_name) response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DeleteLoadBalancerResult' => nil } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/delete_load_balancer_listeners.rb000066400000000000000000000030541437344660100264150ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Delet Elastic Load Balancer Listeners # # ==== Parameters # * lb_name<~String> - Name for the new ELB -- must be unique # * load_balancer_ports<~Array> - Array of client port numbers of the LoadBalancerListeners to remove # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def delete_load_balancer_listeners(lb_name, load_balancer_ports) params = Fog::AWS.indexed_param('LoadBalancerPorts.member.%d', load_balancer_ports) request({ 'Action' => 'DeleteLoadBalancerListeners', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def delete_load_balancer_listeners(lb_name, load_balancer_ports) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['ListenerDescriptions'].delete_if { |listener| load_balancer_ports.include? 
listener['Listener']['LoadBalancerPort'] } response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/delete_load_balancer_policy.rb000066400000000000000000000027561437344660100257140ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Delete a Load Balancer Stickiness Policy # # ==== Parameters # * lb_name<~String> - Name of the ELB # * policy_name<~String> - The name of the policy to delete # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def delete_load_balancer_policy(lb_name, policy_name) params = {'PolicyName' => policy_name} request({ 'Action' => 'DeleteLoadBalancerPolicy', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def delete_load_balancer_policy(lb_name, policy_name) if load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['Policies'].each do |name, policies| policies.delete_if { |policy| policy['PolicyName'] == policy_name } end response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/deregister_instances_from_load_balancer.rb000066400000000000000000000044321437344660100303130ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/deregister_instances_from_load_balancer' # Deregister an instance from an existing ELB # # ==== Parameters # * instance_ids<~Array> - List of instance IDs to remove from ELB # * lb_name<~String> - Load balancer to remove instances from # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DeregisterInstancesFromLoadBalancerResult'<~Hash>: # * 'Instances'<~Array> - array of hashes describing instances currently enabled # * 'InstanceId'<~String> def deregister_instances_from_load_balancer(instance_ids, lb_name) params = Fog::AWS.indexed_param('Instances.member.%d.InstanceId', [*instance_ids]) request({ 'Action' => 'DeregisterInstancesFromLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DeregisterInstancesFromLoadBalancer.new }.merge!(params)) end alias_method :deregister_instances, :deregister_instances_from_load_balancer end class Mock def deregister_instances_from_load_balancer(instance_ids, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] instance_ids = [*instance_ids] instance_ids.each do |instance| raise Fog::AWS::ELB::InvalidInstance unless Fog::AWS::Compute::Mock.data[@region][@aws_access_key_id][:instances][instance] end response = Excon::Response.new response.status = 200 load_balancer['Instances'].delete_if { |i| instance_ids.include? 
i['InstanceId'] } response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DeregisterInstancesFromLoadBalancerResult' => { 'Instances' => load_balancer['Instances'].dup } } response end alias_method :deregister_instances, :deregister_instances_from_load_balancer end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_instance_health.rb000066400000000000000000000045341437344660100252320ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/describe_instance_health' # Get health status for one or more instances on an existing ELB # # ==== Parameters # * lb_name<~String> - Load balancer to check instances health on # * instance_ids<~Array> - Optional list of instance IDs to check # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeInstanceHealthResult'<~Hash>: # * 'InstanceStates'<~Array> - array of hashes describing instance health # * 'Description'<~String> # * 'State'<~String> # * 'InstanceId'<~String> # * 'ReasonCode'<~String> def describe_instance_health(lb_name, instance_ids = []) params = Fog::AWS.indexed_param('Instances.member.%d.InstanceId', [*instance_ids]) request({ 'Action' => 'DescribeInstanceHealth', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DescribeInstanceHealth.new }.merge!(params)) end end class Mock def describe_instance_health(lb_name, instance_ids = []) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] instance_ids = [*instance_ids] instance_ids = load_balancer['Instances'].map { |i| i['InstanceId'] } unless instance_ids.any? data = instance_ids.map do |id| unless Fog::AWS::Compute::Mock.data[@region][@aws_access_key_id][:instances][id] raise Fog::AWS::ELB::InvalidInstance end { 'Description' => "", 'InstanceId' => id, 'ReasonCode' => "", 'State' => 'OutOfService' } end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeInstanceHealthResult' => { 'InstanceStates' => data } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_load_balancer_attributes.rb000066400000000000000000000043761437344660100271210ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/describe_load_balancer_attributes' # Describe the load balancer attributes # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DescribeLoadBalancerAttributes.html # ==== Parameters # * lb_name<~String> - The mnemonic name associated with the LoadBalancer. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLoadBalancerAttributesResult'<~Hash>: # * 'LoadBalancerAttributes'<~Hash> # * 'ConnectionDraining'<~Hash> # * 'Enabled'<~Boolean> - whether connection draining is enabled # * 'Timeout'<~Integer> - max time (in seconds) to keep existing conns open before deregistering instances. # * 'CrossZoneLoadBalancing'<~Hash> # * 'Enabled'<~Boolean> - whether crosszone load balancing is enabled # * 'ConnectionSettings'<~Hash> # * 'IdleTimeout'<~Integer> - time (in seconds) the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer. 
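        #
        # ==== Example
        # A minimal sketch (the load balancer name is hypothetical):
        #
        #   elb = Fog::AWS::ELB.new
        #   attrs = elb.describe_load_balancer_attributes('my-elb').body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes']
        #   attrs['ConnectionDraining']['Enabled']
        #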
def describe_load_balancer_attributes(lb_name) request({ 'Action' => 'DescribeLoadBalancerAttributes', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DescribeLoadBalancerAttributes.new }) end end class Mock def describe_load_balancer_attributes(lb_name = nil, names = []) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] attributes = load_balancer['LoadBalancerAttributes'] response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeLoadBalancerAttributesResult' => { 'LoadBalancerAttributes' => attributes } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_load_balancer_policies.rb000066400000000000000000000053371437344660100265400ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/describe_load_balancer_policies' # Describe all or specified load balancer policies # # ==== Parameters # * lb_name<~String> - The mnemonic name associated with the LoadBalancer. If no name is specified, the operation returns the attributes of either all the sample policies pre-defined by Elastic Load Balancing or the specified sample policies. # * names<~Array> - The names of LoadBalancer policies you've created or Elastic Load Balancing sample policy names. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLoadBalancerPoliciesResult'<~Hash>: # * 'PolicyDescriptions'<~Array> # * 'PolicyAttributeDescriptions'<~Array> # * 'AttributeName'<~String> - The name of the attribute associated with the policy. # * 'AttributeValue'<~String> - The value of the attribute associated with the policy. # * 'PolicyName'<~String> - The name of the policy associated with the LoadBalancer. # * 'PolicyTypeName'<~String> - The name of the policy type. def describe_load_balancer_policies(lb_name = nil, names = []) params = Fog::AWS.indexed_param('PolicyNames.member', [*names]) request({ 'Action' => 'DescribeLoadBalancerPolicies', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DescribeLoadBalancerPolicies.new }.merge!(params)) end end class Mock def describe_load_balancer_policies(lb_name = nil, names = []) if lb_name raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] names = [*names] policies = if names.any? names.map do |name| raise Fog::AWS::ELB::PolicyNotFound unless policy = load_balancer['Policies']['Proper'].find { |p| p['PolicyName'] == name } policy.dup end.compact else load_balancer['Policies']['Proper'] end else policies = [] end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeLoadBalancerPoliciesResult' => { 'PolicyDescriptions' => policies } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_load_balancer_policy_types.rb000066400000000000000000000053171437344660100274520ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/describe_load_balancer_policy_types' # Describe all or specified load balancer policy types # # ==== Parameters # * type_names<~Array> - Specifies the names of the policy types. If no names are specified, returns the description of all the policy types defined by Elastic Load Balancing service. 
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLoadBalancerPolicyTypesResult'<~Hash>: # * 'PolicyTypeDescriptions'<~Array> # * 'Description'<~String> - A human-readable description of the policy type. # * 'PolicyAttributeTypeDescriptions'<~Array> # * 'AttributeName'<~String> - The name of the attribute associated with the policy type. # * 'AttributeValue'<~String> - The type of attribute. For example, Boolean, Integer, etc. # * 'Cardinality'<~String> - The cardinality of the attribute. # * 'DefaultValue'<~String> - The default value of the attribute, if applicable. # * 'Description'<~String> - A human-readable description of the attribute. # * 'PolicyTypeName'<~String> - The name of the policy type. def describe_load_balancer_policy_types(type_names = []) params = Fog::AWS.indexed_param('PolicyTypeNames.member', [*type_names]) request({ 'Action' => 'DescribeLoadBalancerPolicyTypes', :parser => Fog::Parsers::AWS::ELB::DescribeLoadBalancerPolicyTypes.new }.merge!(params)) end end class Mock def describe_load_balancer_policy_types(type_names = []) type_names = [*type_names] policy_types = if type_names.any? type_names.map do |type_name| policy_type = self.data[:policy_types].find { |pt| pt['PolicyTypeName'] == type_name } raise Fog::AWS::ELB::PolicyTypeNotFound unless policy_type policy_type[1].dup end.compact else self.data[:policy_types].map { |policy_type| policy_type.dup } end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeLoadBalancerPolicyTypesResult' => { 'PolicyTypeDescriptions' => policy_types } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_load_balancers.rb000066400000000000000000000161071437344660100250310ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/describe_load_balancers' # Describe all or specified load balancers # # ==== Parameters # * options<~Hash> # * 'LoadBalancerNames'<~Array> - List of load balancer names to describe, defaults to all # * 'Marker' - Indicates where to begin in your list of load balancers # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLoadBalancersResult'<~Hash>: # * 'LoadBalancerDescriptions'<~Array> # * 'AvailabilityZones'<~Array> - list of availability zones covered by this load balancer # * 'BackendServerDescriptions'<~Array>: # * 'InstancePort'<~Integer> - the port on which the back-end server is listening # * 'PolicyNames'<~Array> - list of policy names enabled for the back-end server # * 'CanonicalHostedZoneName'<~String> - name of the Route 53 hosted zone associated with the load balancer # * 'CanonicalHostedZoneNameID'<~String> - ID of the Route 53 hosted zone associated with the load balancer # * 'CreatedTime'<~Time> - time load balancer was created # * 'DNSName'<~String> - external DNS name of load balancer # * 'HealthCheck'<~Hash>: # * 'HealthyThreshold'<~Integer> - number of consecutive health probe successes required before moving the instance to the Healthy state # * 'Timeout'<~Integer> - number of seconds after which no response means a failed health probe # * 'Interval'<~Integer> - interval (in seconds) between health checks of an individual instance # * 'UnhealthyThreshold'<~Integer> - number of consecutive health probe failures that move the 
instance to the unhealthy state # * 'Target'<~String> - string describing protocol type, port and URL to check # * 'Instances'<~Array> - list of instances that the load balancer balances between # * 'ListenerDescriptions'<~Array> # * 'PolicyNames'<~Array> - list of policies enabled # * 'Listener'<~Hash>: # * 'InstancePort'<~Integer> - port on instance that requests are sent to # * 'Protocol'<~String> - transport protocol used for routing in [TCP, HTTP] # * 'LoadBalancerPort'<~Integer> - port that load balancer listens on for requests # * 'LoadBalancerName'<~String> - name of load balancer # * 'Policies'<~Hash>: # * 'LBCookieStickinessPolicies'<~Array> - list of Load Balancer Generated Cookie Stickiness policies for the LoadBalancer # * 'AppCookieStickinessPolicies'<~Array> - list of Application Generated Cookie Stickiness policies for the LoadBalancer # * 'OtherPolicies'<~Array> - list of policy names other than the stickiness policies # * 'SourceSecurityGroup'<~Hash>: # * 'GroupName'<~String> - Name of the source security group to use with inbound security group rules # * 'OwnerAlias'<~String> - Owner of the source security group # * 'NextMarker'<~String> - Marker to specify for next page def describe_load_balancers(options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("describe_load_balancers with #{options.class} is deprecated, use all('LoadBalancerNames' => []) instead [light_black](#{caller.first})[/]") options = { 'LoadBalancerNames' => [options].flatten } end if names = options.delete('LoadBalancerNames') options.update(Fog::AWS.indexed_param('LoadBalancerNames.member', [*names])) end request({ 'Action' => 'DescribeLoadBalancers', :parser => Fog::Parsers::AWS::ELB::DescribeLoadBalancers.new }.merge!(options)) end end class Mock def describe_load_balancers(options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("describe_load_balancers with #{options.class} is deprecated, use all('LoadBalancerNames' => []) instead [light_black](#{caller.first})[/]") options = { 'LoadBalancerNames' => [options].flatten } end lb_names = options['LoadBalancerNames'] || [] lb_names = [*lb_names] load_balancers = if lb_names.any? 
lb_names.map do |lb_name| lb = self.data[:load_balancers].find { |name, data| name == lb_name } raise Fog::AWS::ELB::NotFound unless lb lb[1].dup end.compact else self.data[:load_balancers].map { |lb, values| values.dup } end marker = options.fetch('Marker', 0).to_i if load_balancers.count - marker > 400 next_marker = marker + 400 load_balancers = load_balancers[marker...next_marker] else next_marker = nil end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeLoadBalancersResult' => { 'LoadBalancerDescriptions' => load_balancers.map do |lb| lb['Instances'] = lb['Instances'].map { |i| i['InstanceId'] } lb['Policies'] = lb['Policies']['Proper'].reduce({'AppCookieStickinessPolicies' => [], 'LBCookieStickinessPolicies' => [], 'OtherPolicies' => []}) { |m, policy| case policy['PolicyTypeName'] when 'AppCookieStickinessPolicyType' cookie_name = policy['PolicyAttributeDescriptions'].find{|h| h['AttributeName'] == 'CookieName'}['AttributeValue'] m['AppCookieStickinessPolicies'] << { 'PolicyName' => policy['PolicyName'], 'CookieName' => cookie_name } when 'LBCookieStickinessPolicyType' cookie_expiration_period = policy['PolicyAttributeDescriptions'].find{|h| h['AttributeName'] == 'CookieExpirationPeriod'}['AttributeValue'].to_i lb_policy = { 'PolicyName' => policy['PolicyName'] } lb_policy['CookieExpirationPeriod'] = cookie_expiration_period if cookie_expiration_period > 0 m['LBCookieStickinessPolicies'] << lb_policy else m['OtherPolicies'] << policy['PolicyName'] end m } lb['BackendServerDescriptions'] = lb.delete('BackendServerDescriptionsRemote') lb end } } if next_marker response.body['DescribeLoadBalancersResult']['NextMarker'] = next_marker.to_s end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/describe_tags.rb000066400000000000000000000035601437344660100230350ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/tag_list_parser' # returns a Hash of tags for a load balancer # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DescribeTags.html # ==== Parameters # * elb_id <~String> - name(s) of the ELB instance whose tags are to be retrieved (allows 1-20 of them) # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_tags(elb_ids) request({ 'Action' => 'DescribeTags', :parser => Fog::Parsers::AWS::ELB::TagListParser.new }.merge(Fog::AWS.indexed_param('LoadBalancerNames.member.%d', elb_ids)) ) end # def describe_tags(filters = {}) # params = Fog::AWS.indexed_filters(filters) # request({ # 'Action' => 'DescribeTags', # :idempotent => true, # :parser => Fog::Parsers::AWS::Compute::DescribeTags.new # }.merge!(params)) # end end class Mock def describe_tags(elb_id) response = Excon::Response.new if server = self.data[:load_balancers][elb_id] response.status = 200 ##{"DescribeTagsResult"=>{"LoadBalancers"=>[{"Tags"=>{"Name"=>"2esakowski-test-opsworks-elb"}, "LoadBalancerName"=>"esakowski-test-opsworks"}]}} response.body = {"DescribeTagsResult"=>{"LoadBalancers"=>[{"Tags"=>self.data[:tags][elb_id], "LoadBalancerName"=>elb_id}]}} # response.body = { # "DescribeTagsResult" => # {"TagDescriptions" => self.data[:tags][elb_id]} # } response else raise Fog::AWS::ELB::NotFound.new("Elastic load balancer #{elb_id} not found") end end end end end end 
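        # ==== Example
        # An illustrative sketch of the request defined above (all names are placeholders):
        # assuming `elb` is a Fog::AWS::ELB connection and 'my-elb' is an existing load
        # balancer, the mock implementation above answers with the tag set keyed by load
        # balancer name:
        #
        #   elb.describe_tags('my-elb').body
        #   # => { "DescribeTagsResult" => { "LoadBalancers" => [{ "Tags" => { "Name" => "my-elb" }, "LoadBalancerName" => "my-elb" }] } }
        #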
fog-aws-3.18.0/lib/fog/aws/requests/elb/detach_load_balancer_from_subnets.rb000066400000000000000000000036571437344660100271120ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/detach_load_balancer_from_subnets' # Disable a subnet for an existing ELB # # ==== Parameters # * subnet_ids<~Array> - List of subnet ids to enable on ELB # * lb_name<~String> - Load balancer to disable availability zones on # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DetachLoadBalancerFromSubnetsResult'<~Hash>: # * 'Subnets'<~Array> - array of strings describing the subnet ids currently enabled def detach_load_balancer_from_subnets(subnet_ids, lb_name) params = Fog::AWS.indexed_param('Subnets.member', [*subnet_ids]) request({ 'Action' => 'DetachLoadBalancerFromSubnets', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DetachLoadBalancerFromSubnets.new }.merge!(params)) end alias_method :disable_subnets, :detach_load_balancer_from_subnets end class Mock def detach_load_balancer_from_subnets(subnet_ids, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['Subnets'] << subnet_ids load_balancer['Subnets'].flatten!.uniq! response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DetachLoadBalancerFromSubnetsResult' => { 'Subnets' => load_balancer['Subnets'] } } response end alias_method :disable_subnets, :detach_load_balancer_from_subnets end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/disable_availability_zones_for_load_balancer.rb000066400000000000000000000041411437344660100313020ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/disable_availability_zones_for_load_balancer' # Disable an availability zone for an existing ELB # # ==== Parameters # * availability_zones<~Array> - List of availability zones to disable on ELB # * lb_name<~String> - Load balancer to disable availability zones on # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DisableAvailabilityZonesForLoadBalancerResult'<~Hash>: # * 'AvailabilityZones'<~Array> - A list of updated Availability Zones for the LoadBalancer. def disable_availability_zones_for_load_balancer(availability_zones, lb_name) params = Fog::AWS.indexed_param('AvailabilityZones.member', [*availability_zones]) request({ 'Action' => 'DisableAvailabilityZonesForLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::DisableAvailabilityZonesForLoadBalancer.new }.merge!(params)) end alias_method :disable_zones, :disable_availability_zones_for_load_balancer end class Mock def disable_availability_zones_for_load_balancer(availability_zones, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['AvailabilityZones'].delete_if { |az| availability_zones.include? 
az } response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DisableAvailabilityZonesForLoadBalancerResult' => { 'AvailabilityZones' => load_balancer['AvailabilityZones'] } } response end alias_method :disable_zones, :disable_availability_zones_for_load_balancer end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/enable_availability_zones_for_load_balancer.rb000066400000000000000000000041601437344660100311260ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/enable_availability_zones_for_load_balancer' # Enable an availability zone for an existing ELB # # ==== Parameters # * availability_zones<~Array> - List of availability zones to enable on ELB # * lb_name<~String> - Load balancer to enable availability zones on # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'EnableAvailabilityZonesForLoadBalancerResult'<~Hash>: # * 'AvailabilityZones'<~Array> - array of strings describing instances currently enabled def enable_availability_zones_for_load_balancer(availability_zones, lb_name) params = Fog::AWS.indexed_param('AvailabilityZones.member', [*availability_zones]) request({ 'Action' => 'EnableAvailabilityZonesForLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::EnableAvailabilityZonesForLoadBalancer.new }.merge!(params)) end alias_method :enable_zones, :enable_availability_zones_for_load_balancer end class Mock def enable_availability_zones_for_load_balancer(availability_zones, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] response = Excon::Response.new response.status = 200 load_balancer['AvailabilityZones'] << availability_zones load_balancer['AvailabilityZones'].flatten!.uniq! response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'EnableAvailabilityZonesForLoadBalancerResult' => { 'AvailabilityZones' => load_balancer['AvailabilityZones'] } } response end alias_method :enable_zones, :enable_availability_zones_for_load_balancer end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/modify_load_balancer_attributes.rb000066400000000000000000000046451437344660100266270ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Sets attributes of the load balancer # # The following attributes can be set: # * CrossZoneLoadBalancing (enable/disable) # * ConnectionDraining (enable/disable and timeout) # * Idle Connection Timeouts # # Still requires: AccessLog configuration # # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_ModifyLoadBalancerAttributes.html # ==== Parameters # * lb_name<~String> - Name of the ELB # * options<~Hash> # * 'ConnectionDraining'<~Hash>: # * 'Enabled'<~Boolean> whether to enable connection draining # * 'Timeout'<~Integer> max time to keep existing conns open before deregistering instances # * 'CrossZoneLoadBalancing'<~Hash>: # * 'Enabled'<~Boolean> whether to enable cross zone load balancing # * 'ConnectionSettings'<~Hash>: # * 'IdleTimeout'<~Integer> time (in seconds) the connection is allowed to be idle (no data has been sent over the connection) before it is closed by the load balancer. 
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def modify_load_balancer_attributes(lb_name, options) attributes = Fog::AWS.serialize_keys 'LoadBalancerAttributes', options request(attributes.merge( 'Action' => 'ModifyLoadBalancerAttributes', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new )) end end class Mock def modify_load_balancer_attributes(lb_name, attributes) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] if attributes['CrossZoneLoadBalancing'] || attributes['ConnectionDraining'] || attributes['ConnectionSettings'] load_balancer['LoadBalancerAttributes'].merge! attributes end response = Excon::Response.new response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/register_instances_with_load_balancer.rb000066400000000000000000000044251437344660100300140ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/register_instances_with_load_balancer' # Register an instance with an existing ELB # # ==== Parameters # * instance_ids<~Array> - List of instance IDs to associate with ELB # * lb_name<~String> - Load balancer to assign instances to # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'RegisterInstancesWithLoadBalancerResult'<~Hash>: # * 'Instances'<~Array> - array of hashes describing instances currently enabled # * 'InstanceId'<~String> def register_instances_with_load_balancer(instance_ids, lb_name) params = Fog::AWS.indexed_param('Instances.member.%d.InstanceId', [*instance_ids]) request({ 'Action' => 'RegisterInstancesWithLoadBalancer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::RegisterInstancesWithLoadBalancer.new }.merge!(params)) end alias_method :register_instances, :register_instances_with_load_balancer end class Mock def register_instances_with_load_balancer(instance_ids, lb_name) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] instance_ids = [*instance_ids] instances = instance_ids.map do |instance| raise Fog::AWS::ELB::InvalidInstance unless Fog::AWS::Compute::Mock.data[@region][@aws_access_key_id][:instances][instance] {'InstanceId' => instance} end response = Excon::Response.new response.status = 200 load_balancer['Instances'] = load_balancer['Instances'] | instances.dup response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'RegisterInstancesWithLoadBalancerResult' => { 'Instances' => instances } } response end alias_method :register_instances, :register_instances_with_load_balancer end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/remove_tags.rb000066400000000000000000000031141437344660100225450ustar00rootroot00000000000000module Fog module AWS class ELB class Real # removes tags from an elastic load balancer instance # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_RemoveTags.html # ==== Parameters # * elb_id <~String> - name of the ELB instance whose tags are to be retrieved # * keys <~Array> A list of String keys for the tags to remove # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def remove_tags(elb_id, keys) request( { 'Action' => 'RemoveTags', # Note: there is a discrepancy in the API docs in the ID 
parameter name between the write-up and the example. # Who knows which way the fix will go, if any is ever made? In any case, this works. 'LoadBalancerNames.member.1' => elb_id, :parser => Fog::Parsers::AWS::ELB::Empty.new, }.merge(Fog::AWS.indexed_param('Tags.member.%d.Key', keys)) ) end end class Mock def remove_tags(elb_id, keys) response = Excon::Response.new if server = self.data[:load_balancers][elb_id] keys.each {|key| self.data[:tags][elb_id].delete key} response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELB::NotFound.new("Elastic load balancer #{elb_id} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/set_load_balancer_listener_ssl_certificate.rb000066400000000000000000000052421437344660100310070ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Sets the certificate that terminates the specified listener's SSL # connections. The specified certificate replaces any prior certificate # that was used on the same LoadBalancer and port. # # ==== Parameters # * lb_name<~String> - Name of the ELB # * load_balancer_port<~Integer> - The external port of the LoadBalancer # with which this policy has to be associated. # * ssl_certificate_id<~String> - ID of the SSL certificate chain to use # example: arn:aws:iam::322191361670:server-certificate/newCert # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def set_load_balancer_listener_ssl_certificate(lb_name, load_balancer_port, ssl_certificate_id) request({ 'Action' => 'SetLoadBalancerListenerSSLCertificate', 'LoadBalancerName' => lb_name, 'LoadBalancerPort' => load_balancer_port, 'SSLCertificateId' => ssl_certificate_id, :parser => Fog::Parsers::AWS::ELB::Empty.new }) end end class Mock def set_load_balancer_listener_ssl_certificate(lb_name, load_balancer_port, ssl_certificate_id) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] certificate_ids = Fog::AWS::IAM::Mock.data[@aws_access_key_id][:server_certificates].map {|n, c| c['Arn'] } if !certificate_ids.include? ssl_certificate_id raise Fog::AWS::IAM::NotFound.new('CertificateNotFound') end response = Excon::Response.new unless listener = load_balancer['ListenerDescriptions'].find { |listener| listener['Listener']['LoadBalancerPort'] == load_balancer_port } response.status = 400 response.body = "ListenerNotFoundLoadBalancer does not have a listnener configured at the given port.#{Fog::AWS::Mock.request_id}" raise Excon::Errors.status_error({:expects => 200}, response) end listener['Listener']['SSLCertificateId'] = ssl_certificate_id response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/set_load_balancer_policies_for_backend_server.rb000066400000000000000000000053341437344660100314530ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Replaces the current set of policies associated with a port on which the back-end server is listening with a new set of policies. # After the policies have been created using CreateLoadBalancerPolicy, they can be applied here as a list. 
# # ==== Parameters # * lb_name<~String> - Name of the ELB # * instance_port<~Integer> - The port on the instance that the ELB will forward traffic to # * policy_names<~Array> - Array of Strings listing the policies to set for the backend port # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def set_load_balancer_policies_for_backend_server(lb_name, instance_port, policy_names) params = {'InstancePort' => instance_port} if policy_names.any? params.merge!(Fog::AWS.indexed_param('PolicyNames.member', policy_names)) else params['PolicyNames'] = '' end request({ 'Action' => 'SetLoadBalancerPoliciesForBackendServer', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def set_load_balancer_policies_for_backend_server(lb_name, instance_port, policy_names) if load_balancer = self.data[:load_balancers][lb_name] # Ensure policies exist policy_names.each do |policy_name| unless load_balancer['Policies']['Proper'].find { |p| p['PolicyName'] == policy_name } raise Fog::AWS::ELB::PolicyNotFound, "There is no policy with name #{policy_name} for load balancer #{lb_name}" end end # Update backend policies: description = load_balancer['BackendServerDescriptionsRemote'].find{|d| d["InstancePort"] == instance_port } || {} description["InstancePort"] = instance_port description["PolicyNames"] = policy_names load_balancer['BackendServerDescriptionsRemote'].delete_if{|d| d["InstancePort"] == instance_port } load_balancer['BackendServerDescriptionsRemote'] << description Excon::Response.new.tap do |response| response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } end else raise Fog::AWS::ELB::NotFound end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elb/set_load_balancer_policies_of_listener.rb000066400000000000000000000067671437344660100301540ustar00rootroot00000000000000module Fog module AWS class ELB class Real require 'fog/aws/parsers/elb/empty' # Associates, updates, or disables a policy with a listener on the # load balancer. Currently only zero (0) or one (1) policy can be # associated with a listener. # # ==== Parameters # * lb_name<~String> - Name of the ELB # * load_balancer_port<~Integer> - The external port of the LoadBalancer # with which this policy has to be associated. # * policy_names<~Array> - List of policies to be associated with the # listener. Currently this list can have at most one policy. If the # list is empty, the current policy is removed from the listener. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def set_load_balancer_policies_of_listener(lb_name, load_balancer_port, policy_names) params = {'LoadBalancerPort' => load_balancer_port} if policy_names.any? 
params.merge!(Fog::AWS.indexed_param('PolicyNames.member', policy_names)) else params['PolicyNames'] = '' end request({ 'Action' => 'SetLoadBalancerPoliciesOfListener', 'LoadBalancerName' => lb_name, :parser => Fog::Parsers::AWS::ELB::Empty.new }.merge!(params)) end end class Mock def set_load_balancer_policies_of_listener(lb_name, load_balancer_port, policy_names) raise Fog::AWS::ELB::NotFound unless load_balancer = self.data[:load_balancers][lb_name] policy_names = [*policy_names] response = Excon::Response.new if policy_names.size > 1 response.status = 409 response.body = "InvalidConfigurationRequestRequested configuration change is invalid.#{Fog::AWS::Mock.request_id}" raise Excon::Errors.status_error({:expects => 200}, response) end unless listener = load_balancer['ListenerDescriptions'].find { |listener| listener['Listener']['LoadBalancerPort'] == load_balancer_port } response.status = 400 response.body = "ListenerNotFoundLoadBalancer does not have a listnener configured at the given port.#{Fog::AWS::Mock.request_id}" raise Excon::Errors.status_error({:expects => 200}, response) end unless load_balancer['Policies']['Proper'].find { |policy| policy['PolicyName'] == policy_names.first } response.status = 400 response.body = "PolicyNotFoundOne or more specified policies were not found.#{Fog::AWS::Mock.request_id}" raise Excon::Errors.status_error({:expects => 200}, response) end if policy_names.any? listener['PolicyNames'] = policy_names response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/000077500000000000000000000000001437344660100201565ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/elbv2/add_tags.rb000066400000000000000000000030531437344660100222520ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/empty' # adds tags to a load balancer instance # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_AddTags.html # ==== Parameters # * resource_arn <~String> - The Amazon Resource Name (ARN) of the resource # * tags <~Hash> A Hash of (String) key-value pairs # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def add_tags(resource_arn, tags) keys = tags.keys.sort values = keys.map {|key| tags[key]} request({ 'Action' => 'AddTags', 'ResourceArns.member.1' => resource_arn, :parser => Fog::Parsers::AWS::ELBV2::Empty.new, }.merge(Fog::AWS.indexed_param('Tags.member.%d.Key', keys)) .merge(Fog::AWS.indexed_param('Tags.member.%d.Value', values))) end end class Mock def add_tags(resource_arn, tags) response = Excon::Response.new if self.data[:load_balancers_v2][resource_arn] self.data[:tags][resource_arn].merge! tags response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELBV2::NotFound.new("Elastic load balancer #{resource_arn} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/create_load_balancer.rb000066400000000000000000000206571437344660100246060ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/create_load_balancer' # Create a new Elastic Load Balancer # # ==== Parameters # * name<~String> - The name of the load balancer. 
# This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, # must not begin or end with a hyphen, and must not begin with "internal-". # - Required: Yes # * options<~Hash>: # * ip_address_type<~String> - [Application Load Balancers] The type of IP addresses used by the subnets for your load balancer. # The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). # Internal load balancers must use ipv4. # - Required: No # * scheme<~String> - The default is an Internet-facing load balancer. Valid Values: internet-facing | internal # - Required: No # * security_groups<~Array> - The IDs of the security groups for the load balancer. # - Required: No # * subnet_mappings<~Array> - The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. # - [Application Load Balancers] You must specify subnets from at least two Availability Zones. # You cannot specify Elastic IP addresses for your subnets. # - [Network Load Balancers] You can specify subnets from one or more Availability Zones. # You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. # For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. # - Required: No # * subnets<~Array> - The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. # - [Application Load Balancers] You must specify subnets from at least two Availability Zones. # - [Network Load Balancers] You can specify subnets from one or more Availability Zones. # - Required: No # * tags<~Hash> - One or more tags to assign to the load balancer. # - Required: No # * type<~String> - The type of load balancer. The default is application. Valid Values: application | network # - Required: No # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'CreateLoadBalancerResult'<~Hash>: # * 'LoadBalancers'<~Array> # * 'AvailabilityZones'<~Array>: # * 'SubnetId'<~String> - ID of the subnet # * 'ZoneName'<~String> - Name of the Availability Zone # * 'LoadBalancerAddresses'<~Array>: # * 'IpAddress'<~String> - IP address # * 'AllocationId'<~String> - ID of the AWS allocation # * 'CanonicalHostedZoneName'<~String> - name of the Route 53 hosted zone associated with the load balancer # * 'CanonicalHostedZoneNameID'<~String> - ID of the Route 53 hosted zone associated with the load balancer # * 'CreatedTime'<~Time> - time load balancer was created # * 'DNSName'<~String> - external DNS name of load balancer # * 'LoadBalancerName'<~String> - name of load balancer # * 'SecurityGroups'<~Array> - array of security group id def create_load_balancer(name, options = {}) params = {} params.merge!(Fog::AWS.indexed_param('Subnets.member.%d', options[:subnets])) params.merge!(Fog::AWS.indexed_param('SecurityGroups.member.%d', options[:security_groups])) params.merge!(Fog::AWS.serialize_keys('Scheme', options[:scheme])) params.merge!(Fog::AWS.serialize_keys('Type', options[:type])) params.merge!(Fog::AWS.serialize_keys('IpAddressType', options[:ip_address_type])) unless options[:tags].nil? 
tag_keys = options[:tags].keys.sort tag_values = tag_keys.map { |key| options[:tags][key] } params.merge!(Fog::AWS.indexed_param('Tags.member.%d.Key', tag_keys)) params.merge!(Fog::AWS.indexed_param('Tags.member.%d.Value', tag_values)) end unless options[:subnet_mappings].nil? subnet_ids = [] allocation_ids = [] private_ipv4_address = [] options[:subnet_mappings].each do |subnet_mapping| subnet_ids.push(subnet_mapping[:subnet_id]) allocation_ids.push(subnet_mapping[:allocation_id]) private_ipv4_address.push(subnet_mapping[:private_ipv4_address]) end params.merge!(Fog::AWS.indexed_param('SubnetMappings.member.%d.SubnetId', subnet_ids)) params.merge!(Fog::AWS.indexed_param('SubnetMappings.member.%d.AllocationId', allocation_ids)) params.merge!(Fog::AWS.indexed_param('SubnetMappings.member.%d.PrivateIPv4Address', private_ipv4_address)) end request({ 'Action' => 'CreateLoadBalancer', 'Name' => name, :parser => Fog::Parsers::AWS::ELBV2::CreateLoadBalancer.new }.merge!(params)) end end class Mock def create_load_balancer(name, options = {}) response = Excon::Response.new response.status = 200 raise Fog::AWS::ELBV2::IdentifierTaken if self.data[:load_balancers_v2].key? name dns_name = Fog::AWS::ELBV2::Mock.dns_name(name, @region) type = options[:type] || 'application' load_balancer_arn = Fog::AWS::Mock.arn('elasticloadbalancing', self.data[:owner_id], "loadbalancer/#{type[0..2]}/#{name}/#{Fog::AWS::Mock.key_id}") subnet_ids = options[:subnets] || [] region = if subnet_ids.any? # using Hash here for Ruby 1.8.7 support. Hash[ Fog::AWS::Compute::Mock.data.select do |_, region_data| unless region_data[@aws_access_key_id].nil? region_data[@aws_access_key_id][:subnets].any? do |region_subnets| subnet_ids.include? region_subnets['subnetId'] end end end ].keys[0] else 'us-east-1' end subnets = Fog::AWS::Compute::Mock.data[region][@aws_access_key_id][:subnets].select {|e| subnet_ids.include?(e["subnetId"]) } availability_zones = subnets.map do |subnet| { "LoadBalancerAddresses"=>[], "SubnetId"=>subnet["subnetId"], "ZoneName"=>subnet["availabilityZone"]} end vpc_id = subnets.first['vpcId'] self.data[:tags] ||= {} self.data[:tags][load_balancer_arn] = options[:tags] || {} load_balancer = { 'AvailabilityZones' => availability_zones || [], 'Scheme' => options[:scheme] || 'internet-facing', 'SecurityGroups' => options[:security_groups] || [], 'CanonicalHostedZoneId' => '', 'CreatedTime' => Time.now, 'DNSName' => dns_name, 'VpcId' => vpc_id, 'Type' => type, 'State' => {'Code' => 'provisioning'}, 'LoadBalancerArn' => load_balancer_arn, 'LoadBalancerName' => name } self.data[:load_balancers_v2][load_balancer_arn] = load_balancer response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'CreateLoadBalancerResult' => { 'LoadBalancers' => [load_balancer] } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/describe_listeners.rb000066400000000000000000000032051437344660100243530ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/describe_listeners' # Describe the listeners for the specified load balancer # # ==== Parameters # * 'LoadBalancerArn'<~String> - The Amazon Resource Name (ARN) of the load balancer # * options<~Hash> # * 'Marker' - Indicates where to begin in your list of load balancers # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeListenersResult'<~Hash>: # * 'Listeners'<~Array> # * 'LoadBalancerArn'<~String> - The 
Amazon Resource Name (ARN) of the load balancer # * 'Protocol'<~String> - The protocol for connections from clients to the load balancer # * 'Port'<~String> - The port on which the load balancer is listening # * 'DefaultActions'<~Array> - The default actions for the listener # * 'Type'<~String> - The type of action # * 'TargetGroupArn'<~String> - The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward # * 'NextMarker'<~String> - Marker to specify for next page def describe_listeners(load_balancer_arn, options = {}) request({ 'Action' => 'DescribeListeners', 'LoadBalancerArn' => load_balancer_arn, :parser => Fog::Parsers::AWS::ELBV2::DescribeListeners.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/describe_load_balancers.rb000066400000000000000000000077001437344660100253000ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/describe_load_balancers' # Describe all or specified load balancers # # ==== Parameters # * options<~Hash> # * 'LoadBalancerNames'<~Array> - List of load balancer names to describe, defaults to all # * 'Marker' - Indicates where to begin in your list of load balancers # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request # * 'DescribeLoadBalancersResult'<~Hash>: # * 'LoadBalancers'<~Array> # * 'AvailabilityZones'<~Array>: # * 'SubnetId'<~String> - ID of the subnet # * 'ZoneName'<~String> - Name of the Availability Zone # * 'LoadBalancerAddresses'<~Array>: # * 'IpAddress'<~String> - IP address # * 'AllocationId'<~String> - ID of the AWS allocation # * 'CanonicalHostedZoneName'<~String> - name of the Route 53 hosted zone associated with the load balancer # * 'CanonicalHostedZoneNameID'<~String> - ID of the Route 53 hosted zone associated with the load balancer # * 'CreatedTime'<~Time> - time load balancer was created # * 'DNSName'<~String> - external DNS name of load balancer # * 'LoadBalancerName'<~String> - name of load balancer # * 'SecurityGroups'<~Array> - array of security group id # * 'NextMarker'<~String> - Marker to specify for next page def describe_load_balancers(options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("describe_load_balancers with #{options.class} is deprecated, use all('LoadBalancerNames' => []) instead [light_black](#{caller.first})[/]") options = { 'LoadBalancerNames' => [options].flatten } end if names = options.delete('LoadBalancerNames') options.update(Fog::AWS.indexed_param('LoadBalancerNames.member', [*names])) end request({ 'Action' => 'DescribeLoadBalancers', :parser => Fog::Parsers::AWS::ELBV2::DescribeLoadBalancers.new }.merge!(options)) end end class Mock def describe_load_balancers(options = {}) unless options.is_a?(Hash) Fog::Logger.deprecation("describe_load_balancers with #{options.class} is deprecated, use all('LoadBalancerNames' => []) instead [light_black](#{caller.first})[/]") options = { 'LoadBalancerNames' => [options].flatten } end lb_names = options['LoadBalancerNames'] || [] lb_names = [*lb_names] load_balancers = if lb_names.any? 
lb_names.map do |lb_name| lb = self.data[:load_balancers_v2].find { |name, data| name == lb_name } raise Fog::AWS::ELBV2::NotFound unless lb lb[1].dup end.compact else self.data[:load_balancers_v2].map { |lb, values| values.dup } end marker = options.fetch('Marker', 0).to_i if load_balancers.count - marker > 400 next_marker = marker + 400 load_balancers = load_balancers[marker...next_marker] else next_marker = nil end response = Excon::Response.new response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeLoadBalancersResult' => { 'LoadBalancers' => load_balancers } } if next_marker response.body['DescribeLoadBalancersResult']['NextMarker'] = next_marker.to_s end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/describe_tags.rb000066400000000000000000000031021437344660100232750ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/describe_tags' # returns a Hash of tags for a load balancer # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DescribeTags.html # ==== Parameters # * resource_arns <~Array> - ARN(s) of the ELB instance whose tags are to be retrieved # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_tags(resource_arns) request({ 'Action' => 'DescribeTags', :parser => Fog::Parsers::AWS::ELBV2::DescribeTags.new }.merge!(Fog::AWS.indexed_param('ResourceArns.member.%d', [*resource_arns])) ) end end class Mock def describe_tags(resource_arns) response = Excon::Response.new resource_arns = [*resource_arns] tag_descriptions = resource_arns.map do |resource_arn| if self.data[:load_balancers_v2][resource_arn] { "Tags"=>self.data[:tags][resource_arn], "ResourceArn"=>resource_arn } else raise Fog::AWS::ELBV2::NotFound.new("Elastic load balancer #{resource_arns} not found") end end response.status = 200 response.body = { "ResponseMetadata"=>{"RequestId"=> Fog::AWS::Mock.request_id }, "DescribeTagsResult"=>{"TagDescriptions"=> tag_descriptions} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/elbv2/remove_tags.rb000066400000000000000000000026761437344660100230250ustar00rootroot00000000000000module Fog module AWS class ELBV2 class Real require 'fog/aws/parsers/elbv2/empty' # removes tags from an elastic load balancer instance # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_RemoveTags.html # ==== Parameters # * resource_arn <~String> - ARN of the ELB instance whose tags are to be removed # * keys <~Array> A list of String keys for the tags to remove # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def remove_tags(resource_arn, keys) request( { 'Action' => 'RemoveTags', 'ResourceArns.member.1' => resource_arn, :parser => Fog::Parsers::AWS::ELBV2::Empty.new, }.merge(Fog::AWS.indexed_param('TagKeys.member.%d', keys)) ) end end class Mock def remove_tags(resource_arn, keys) response = Excon::Response.new if self.data[:load_balancers_v2][resource_arn] keys.each {|key| self.data[:tags][resource_arn].delete key} response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id } } response else raise Fog::AWS::ELBV2::NotFound.new("Elastic load balancer #{resource_arn} not found") end end end end end end 
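        # ==== Example
        # A hypothetical end-to-end sketch of the ELBV2 tagging requests defined in this
        # directory (add_tags, describe_tags, remove_tags); `elbv2` and `arn` are placeholders
        # for a Fog::AWS::ELBV2 connection and the ARN of an existing load balancer:
        #
        #   elbv2 = Fog::AWS::ELBV2.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')
        #   arn   = 'arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-app/0123456789abcdef'
        #   elbv2.add_tags(arn, 'Name' => 'my-app', 'Env' => 'staging')
        #   elbv2.describe_tags(arn).body['DescribeTagsResult']['TagDescriptions']
        #   # => [{ "Tags" => { "Name" => "my-app", "Env" => "staging" }, "ResourceArn" => arn }]
        #   elbv2.remove_tags(arn, ['Env'])
        #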
fog-aws-3.18.0/lib/fog/aws/requests/emr/000077500000000000000000000000001437344660100177275ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/emr/add_instance_groups.rb000066400000000000000000000034211437344660100242670ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/add_instance_groups' # adds an instance group to a running cluster # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_AddInstanceGroups.html # ==== Parameters # * JobFlowId <~String> - Job flow in which to add the instance groups # * InstanceGroups<~Array> - Instance Groups to add # * 'BidPrice'<~String> - Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD. # * 'InstanceCount'<~Integer> - Target number of instances for the instance group # * 'InstanceRole'<~String> - MASTER | CORE | TASK The role of the instance group in the cluster # * 'InstanceType'<~String> - The Amazon EC2 instance type for all instances in the instance group # * 'MarketType'<~String> - ON_DEMAND | SPOT Market type of the Amazon EC2 instances used to create a cluster node # * 'Name'<~String> - Friendly name given to the instance group. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def add_instance_groups(job_flow_id, options={}) if instance_groups = options.delete('InstanceGroups') options.merge!(Fog::AWS.indexed_param('InstanceGroups.member.%d', [*instance_groups])) end request({ 'Action' => 'AddInstanceGroups', 'JobFlowId' => job_flow_id, :parser => Fog::Parsers::AWS::EMR::AddInstanceGroups.new, }.merge(options)) end end class Mock def add_instance_groups(job_flow_id, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/add_job_flow_steps.rb000066400000000000000000000040551437344660100241070ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/add_job_flow_steps' # adds new steps to a running job flow. # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_AddJobFlowSteps.html # ==== Parameters # * JobFlowId <~String> - A string that uniquely identifies the job flow # * Steps <~Array> - A list of steps to be executed by the job flow # * 'ActionOnFailure'<~String> - TERMINATE_JOB_FLOW | CANCEL_AND_WAIT | CONTINUE Specifies the action to take if the job flow step fails # * 'HadoopJarStep'<~Array> - Specifies the JAR file used for the job flow step # * 'Args'<~String list> - A list of command line arguments passed to the JAR file's main function when executed. # * 'Jar'<~String> - A path to a JAR file run during the step. # * 'MainClass'<~String> - The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file # * 'Properties'<~Array> - A list of Java properties that are set when the step runs. 
You can use these properties to pass key value pairs to your main function # * 'Key'<~String> - The unique identifier of a key value pair # * 'Value'<~String> - The value part of the identified key # * 'Name'<~String> - The name of the job flow step # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def add_job_flow_steps(job_flow_id, options={}) if steps = options.delete('Steps') options.merge!(Fog::AWS.serialize_keys('Steps', steps)) end request({ 'Action' => 'AddJobFlowSteps', 'JobFlowId' => job_flow_id, :parser => Fog::Parsers::AWS::EMR::AddJobFlowSteps.new, }.merge(options)) end end class Mock def add_job_flow_steps(db_name, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/describe_job_flows.rb000066400000000000000000000201461437344660100241030ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/describe_job_flows' # returns a list of job flows that match all of the supplied parameters. # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_DescribeJobFlows.html # ==== Parameters # * CreatedAfter <~DateTime> - Return only job flows created after this date and time # * CreatedBefore <~DateTime> - Return only job flows created before this date and time # * JobFlowIds <~String list> - Return only job flows whose job flow ID is contained in this list # * JobFlowStates <~String list> - RUNNING | WAITING | SHUTTING_DOWN | STARTING Return only job flows whose state is contained in this list # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * JobFlows <~Array> - A list of job flows matching the parameters supplied. # * AmiVersion <~String> - A list of bootstrap actions that will be run before Hadoop is started on the cluster nodes. # * 'BootstrapActions'<~Array> - A list of the bootstrap actions run by the job flow # * 'BootstrapConfig <~Array> - A description of the bootstrap action # * 'Name' <~String> - The name of the bootstrap action # * 'ScriptBootstrapAction' <~Array> - The script run by the bootstrap action. # * 'Args' <~String list> - A list of command line arguments to pass to the bootstrap action script. # * 'Path' <~String> - Location of the script to run during a bootstrap action. # * 'ExecutionStatusDetail'<~Array> - Describes the execution status of the job flow # * 'CreationDateTime <~DateTime> - The creation date and time of the job flow. # * 'EndDateTime <~DateTime> - The completion date and time of the job flow. # * 'LastStateChangeReason <~String> - Description of the job flow last changed state. # * 'ReadyDateTime <~DateTime> - The date and time when the job flow was ready to start running bootstrap actions. # * 'StartDateTime <~DateTime> - The start date and time of the job flow. # * 'State <~DateTime> - COMPLETED | FAILED | TERMINATED | RUNNING | SHUTTING_DOWN | STARTING | WAITING | BOOTSTRAPPING The state of the job flow. # * Instances <~Array> - A specification of the number and type of Amazon EC2 instances on which to run the job flow. # * 'Ec2KeyName'<~String> - Specifies the name of the Amazon EC2 key pair that can be used to ssh to the master node as the user called "hadoop. 
# * 'HadoopVersion'<~String> - "0.18" | "0.20" Specifies the Hadoop version for the job flow # * 'InstanceCount'<~Integer> - The number of Amazon EC2 instances used to execute the job flow # * 'InstanceGroups'<~Array> - Configuration for the job flow's instance groups # * 'BidPrice' <~String> - Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD. # * 'CreationDateTime' <~DateTime> - The date/time the instance group was created. # * 'EndDateTime' <~DateTime> - The date/time the instance group was terminated. # * 'InstanceGroupId' <~String> - Unique identifier for the instance group. # * 'InstanceRequestCount'<~Integer> - Target number of instances for the instance group # * 'InstanceRole'<~String> - MASTER | CORE | TASK The role of the instance group in the cluster # * 'InstanceRunningCount'<~Integer> - Actual count of running instances # * 'InstanceType'<~String> - The Amazon EC2 instance type for all instances in the instance group # * 'LastStateChangeReason'<~String> - Details regarding the state of the instance group # * 'Market'<~String> - ON_DEMAND | SPOT Market type of the Amazon EC2 instances used to create a cluster # * 'Name'<~String> - Friendly name for the instance group # * 'ReadyDateTime'<~DateTime> - The date/time the instance group was available to the cluster # * 'StartDateTime'<~DateTime> - The date/time the instance group was started # * 'State'<~String> - PROVISIONING | STARTING | BOOTSTRAPPING | RUNNING | RESIZING | ARRESTED | SHUTTING_DOWN | TERMINATED | FAILED | ENDED State of instance group # * 'KeepJobFlowAliveWhenNoSteps' <~Boolean> - Specifies whether the job flow should terminate after completing all steps # * 'MasterInstanceId'<~String> - The Amazon EC2 instance identifier of the master node # * 'MasterInstanceType'<~String> - The EC2 instance type of the master node # * 'MasterPublicDnsName'<~String> - The DNS name of the master node # * 'NormalizedInstanceHours'<~Integer> - An approximation of the cost of the job flow, represented in m1.small/hours. # * 'Placement'<~Array> - Specifies the Availability Zone the job flow will run in # * 'AvailabilityZone' <~String> - The Amazon EC2 Availability Zone for the job flow. # * 'SlaveInstanceType'<~String> - The EC2 instance type of the slave nodes # * 'TerminationProtected'<~Boolean> - Specifies whether to lock the job flow to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job flow error # * LogUri <~String> - Specifies the location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created # * Name <~String> - The name of the job flow # * Steps <~Array> - A list of steps to be executed by the job flow # * 'ExecutionStatusDetail'<~Array> - Describes the execution status of the job flow # * 'CreationDateTime <~DateTime> - The creation date and time of the job flow. # * 'EndDateTime <~DateTime> - The completion date and time of the job flow. # * 'LastStateChangeReason <~String> - Description of the job flow last changed state. # * 'ReadyDateTime <~DateTime> - The date and time when the job flow was ready to start running bootstrap actions. # * 'StartDateTime <~DateTime> - The start date and time of the job flow. # * 'State <~DateTime> - COMPLETED | FAILED | TERMINATED | RUNNING | SHUTTING_DOWN | STARTING | WAITING | BOOTSTRAPPING The state of the job flow. 
# * StepConfig <~Array> - The step configuration # * 'ActionOnFailure'<~String> - TERMINATE_JOB_FLOW | CANCEL_AND_WAIT | CONTINUE Specifies the action to take if the job flow step fails # * 'HadoopJarStep'<~Array> - Specifies the JAR file used for the job flow step # * 'Args'<~String list> - A list of command line arguments passed to the JAR file's main function when executed. # * 'Jar'<~String> - A path to a JAR file run during the step. # * 'MainClass'<~String> - The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file # * 'Properties'<~Array> - A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function # * 'Key'<~String> - The unique identifier of a key value pair # * 'Value'<~String> - The value part of the identified key # * 'Name'<~String> - The name of the job flow step def describe_job_flows(options={}) if job_ids = options.delete('JobFlowIds') options.merge!(Fog::AWS.serialize_keys('JobFlowIds', job_ids)) end if job_states = options.delete('JobFlowStates') options.merge!(Fog::AWS.serialize_keys('JobFlowStates', job_states)) end request({ 'Action' => 'DescribeJobFlows', :parser => Fog::Parsers::AWS::EMR::DescribeJobFlows.new, }.merge(options)) end end class Mock def describe_job_flows(db_name, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/modify_instance_groups.rb000066400000000000000000000023041437344660100250250ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/modify_instance_groups' # modifies the number of nodes and configuration settings of an instance group.. # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_ModifyInstanceGroups.html # ==== Parameters # * InstanceGroups <~InstanceGroupModifyConfig list> - Instance groups to change # * InstanceCount <~Integer> - Target size for instance group # * InstanceGroupId <~String> - Unique ID of the instance group to expand or shrink # # ==== Returns # * response<~Excon::Response>: # * body<~Hash> def modify_instance_groups(options={}) if job_ids = options.delete('InstanceGroups') options.merge!(Fog::AWS.serialize_keys('InstanceGroups', job_ids)) end request({ 'Action' => 'ModifyInstanceGroups', :parser => Fog::Parsers::AWS::EMR::ModifyInstanceGroups.new, }.merge(options)) end end class Mock def modify_instance_groups(options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/run_job_flow.rb000066400000000000000000000143341437344660100227460ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/run_job_flow' # creates and starts running a new job flow # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_RunJobFlow.html # ==== Parameters # * AdditionalInfo <~String> - A JSON string for selecting additional features. # * BootstrapActions <~Array> - A list of bootstrap actions that will be run before Hadoop is started on the cluster nodes. # * 'Name'<~String> - The name of the bootstrap action # * 'ScriptBootstrapAction'<~Array> - The script run by the bootstrap action # * 'Args' <~Array> - A list of command line arguments to pass to the bootstrap action script # * 'Path' <~String> - Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system. 
# * Instances <~Array> - A specification of the number and type of Amazon EC2 instances on which to run the job flow. # * 'Ec2KeyName'<~String> - Specifies the name of the Amazon EC2 key pair that can be used to ssh to the master node as the user called "hadoop. # * 'HadoopVersion'<~String> - "0.18" | "0.20" Specifies the Hadoop version for the job flow # * 'InstanceCount'<~Integer> - The number of Amazon EC2 instances used to execute the job flow # * 'InstanceGroups'<~Array> - Configuration for the job flow's instance groups # * 'BidPrice' <~String> - Bid price for each Amazon EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD. # * 'InstanceCount'<~Integer> - Target number of instances for the instance group # * 'InstanceRole'<~String> - MASTER | CORE | TASK The role of the instance group in the cluster # * 'InstanceType'<~String> - The Amazon EC2 instance type for all instances in the instance group # * 'MarketType'<~String> - ON_DEMAND | SPOT Market type of the Amazon EC2 instances used to create a cluster node # * 'Name'<~String> - Friendly name given to the instance group. # * 'KeepJobFlowAliveWhenNoSteps' <~Boolean> - Specifies whether the job flow should terminate after completing all steps # * 'MasterInstanceType'<~String> - The EC2 instance type of the master node # * 'Placement'<~Array> - Specifies the Availability Zone the job flow will run in # * 'AvailabilityZone' <~String> - The Amazon EC2 Availability Zone for the job flow. # * 'SlaveInstanceType'<~String> - The EC2 instance type of the slave nodes # * 'TerminationProtected'<~Boolean> - Specifies whether to lock the job flow to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job flow error # * LogUri <~String> - Specifies the location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created # * Name <~String> - The name of the job flow # * Steps <~Array> - A list of steps to be executed by the job flow # * 'ActionOnFailure'<~String> - TERMINATE_JOB_FLOW | CANCEL_AND_WAIT | CONTINUE Specifies the action to take if the job flow step fails # * 'HadoopJarStep'<~Array> - Specifies the JAR file used for the job flow step # * 'Args'<~String list> - A list of command line arguments passed to the JAR file's main function when executed. # * 'Jar'<~String> - A path to a JAR file run during the step. # * 'MainClass'<~String> - The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file # * 'Properties'<~Array> - A list of Java properties that are set when the step runs. 
You can use these properties to pass key value pairs to your main function # * 'Key'<~String> - The unique identifier of a key value pair # * 'Value'<~String> - The value part of the identified key # * 'Name'<~String> - The name of the job flow step # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def run_job_flow(name, options={}) if bootstrap_actions = options.delete('BootstrapActions') options.merge!(Fog::AWS.serialize_keys('BootstrapActions', bootstrap_actions)) end if instances = options.delete('Instances') options.merge!(Fog::AWS.serialize_keys('Instances', instances)) end if steps = options.delete('Steps') options.merge!(Fog::AWS.serialize_keys('Steps', steps)) end request({ 'Action' => 'RunJobFlow', 'Name' => name, :parser => Fog::Parsers::AWS::EMR::RunJobFlow.new, }.merge(options)) end def run_hive(name, options={}) steps = [] steps << { 'Name' => 'Setup Hive', 'HadoopJarStep' => { 'Jar' => 's3://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar', 'Args' => ['s3://us-east-1.elasticmapreduce/libs/hive/hive-script', '--base-path', 's3://us-east-1.elasticmapreduce/libs/hive/', '--install-hive']}, 'ActionOnFailure' => 'TERMINATE_JOB_FLOW' } # To add a configuration step to the Hive flow, see the step below # steps << { # 'Name' => 'Install Hive Site Configuration', # 'HadoopJarStep' => { # 'Jar' => 's3://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar', # 'Args' => ['s3://us-east-1.elasticmapreduce/libs/hive/hive-script', '--base-path', 's3://us-east-1.elasticmapreduce/libs/hive/', '--install-hive-site', '--hive-site=s3://my.bucket/hive/hive-site.xml']}, # 'ActionOnFailure' => 'TERMINATE_JOB_FLOW' # } options['Steps'] = steps if not options['Instances'].nil? options['Instances']['KeepJobFlowAliveWhenNoSteps'] = true end run_job_flow name, options end end class Mock def run_job_flow(db_name, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/set_termination_protection.rb000066400000000000000000000023671437344660100257360ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/set_termination_protection' # locks a job flow so the Amazon EC2 instances in the cluster cannot be terminated by user intervention. # http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_SetTerminationProtection.html # ==== Parameters # * JobFlowIds <~String list> - list of strings that uniquely identify the job flows to protect # * TerminationProtected <~Boolean> - indicates whether to protect the job flow # # ==== Returns # * response<~Excon::Response>: # * body<~Hash> def set_termination_protection(is_protected, options={}) if job_ids = options.delete('JobFlowIds') options.merge!(Fog::AWS.serialize_keys('JobFlowIds', job_ids)) end request({ 'Action' => 'SetTerminationProtection', 'TerminationProtected' => is_protected, :parser => Fog::Parsers::AWS::EMR::SetTerminationProtection.new, }.merge(options)) end end class Mock def set_termination_protection(db_name, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/emr/terminate_job_flows.rb000066400000000000000000000017541437344660100243170ustar00rootroot00000000000000module Fog module AWS class EMR class Real require 'fog/aws/parsers/emr/terminate_job_flows' # shuts a list of job flows down. 
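        #
        # A minimal usage sketch, assuming AWS credentials are configured for the
        # connection; the job flow id below is purely illustrative:
        #
        #   emr = Fog::AWS::EMR.new(
        #     :aws_access_key_id     => 'YOUR_KEY',
        #     :aws_secret_access_key => 'YOUR_SECRET'
        #   )
        #   emr.terminate_job_flows('JobFlowIds' => ['j-EXAMPLEID'])
        #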
# http://docs.amazonwebservices.com/ElasticMapReduce/latest/API/API_TerminateJobFlows.html # ==== Parameters # * JobFlowIds <~String list> - list of strings that uniquely identify the job flows to protect # # ==== Returns # * response<~Excon::Response>: # * body<~Hash> def terminate_job_flows(options={}) if job_ids = options.delete('JobFlowIds') options.merge!(Fog::AWS.serialize_keys('JobFlowIds', job_ids)) end request({ 'Action' => 'TerminateJobFlows', :parser => Fog::Parsers::AWS::EMR::TerminateJobFlows.new, }.merge(options)) end end class Mock def terminate_job_flows(db_name, options={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/federation/000077500000000000000000000000001437344660100212645ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/federation/get_signin_token.rb000066400000000000000000000005701437344660100251410ustar00rootroot00000000000000module Fog module AWS class Federation class Real def get_signin_token(session) request('getSigninToken', CGI.escape(Fog::JSON.encode(session))) end end class Mock def get_signin_token(session) { 'SigninToken' => Fog::Mock.random_base64(752) } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/000077500000000000000000000000001437344660100205525ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/glacier/abort_multipart_upload.rb000066400000000000000000000020331437344660100256510ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Abort an upload # # ==== Parameters # * name<~String> Name of the vault to upload to # * upload_id<~String> The id of the upload to complete # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-multipart-abort-upload.html # def abort_multipart_upload(vault_name, upload_id, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/multipart-uploads/#{upload_id}" request( :expects => 204, :idempotent => true, :headers => {}, :method => :delete, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/complete_multipart_upload.rb000066400000000000000000000024741437344660100263630ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Complete an upload # # ==== Parameters # * name<~String> Name of the vault to upload to # * upload_id<~String> The id of the upload to complete # * total_size<~Integer> The total archive size # * tree_hash<~String> the treehash for the archive # * options<~Hash> # * account_id<~String> - The AWS account id. 
Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-multipart-complete-upload.html # def complete_multipart_upload(vault_name, upload_id, total_size, tree_hash, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/multipart-uploads/#{upload_id}" headers = { 'x-amz-archive-size' => total_size.to_s, 'x-amz-sha256-tree-hash' => tree_hash } request( :expects => 201, :idempotent => true, :headers => headers, :method => :post, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/create_archive.rb000066400000000000000000000026051437344660100240460ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Upload an archive # # ==== Parameters # * name<~String> Name of the vault to upload to # * body<~String> The data to upload # * options<~Hash> # * description<~String> - The archive description # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-archive-post.html # def create_archive(vault_name, body, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/archives" headers = { 'Content-Length' => body.bytesize.to_s, 'x-amz-content-sha256' => OpenSSL::Digest::SHA256.hexdigest(body), 'x-amz-sha256-tree-hash' => Fog::AWS::Glacier::TreeHash.digest(body) } headers['x-amz-archive-description'] = Fog::AWS.escape(options['description']) if options['description'] request( :expects => 201, :headers => headers, :method => :post, :path => path, :body => body ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/create_vault.rb000066400000000000000000000020121437344660100235500ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # This operation creates a new vault with the specified name. . # # ==== Parameters # * name<~String> 1-255 characters. must be unique within a region for an AWS account # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-put.html # def create_vault(name,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}" request(options.merge({ :expects => 201, :idempotent => true, :headers => {}, :method => :put, :path => path, })) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/delete_archive.rb000066400000000000000000000020061437344660100240400ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Delete an archive # # ==== Parameters # * name<~String> Name of the vault to delete # * options<~Hash> # * account_id<~String> - The AWS account id. 
Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-delete.html # def delete_archive(name,archive_id,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/archives/#{archive_id}" request( :expects => 204, :idempotent => true, :headers => {}, :method => :delete, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/delete_vault.rb000066400000000000000000000021021437344660100235470ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Delete a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as per the last inventory # and there have been no writes to the vault since the last inventory # # ==== Parameters # * name<~String> Name of the vault to delete # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-delete.html # def delete_vault(name,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}" request( :expects => 204, :idempotent => true, :headers => {}, :method => :delete, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/delete_vault_notification_configuration.rb000066400000000000000000000017401437344660100312530ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Delete vault's notification configuration # # ==== Parameters # * name<~String> Name of the vault # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-notifications-delete.html # def delete_vault_notification_configuration(name,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/notification-configuration" request( :expects => 204, :idempotent => true, :headers => {}, :method => :delete, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/describe_job.rb000066400000000000000000000017311437344660100235130ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Complete an upload # # ==== Parameters # * name<~String> Name of the vault # * job_id<~String> The id of the job # * options<~Hash> # * account_id<~String> - The AWS account id. 
Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-describe-job-get.html # def describe_job(vault_name, job_id, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/jobs/#{job_id}" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/describe_vault.rb000066400000000000000000000016631437344660100241000ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # This operation returns information about a vault # # ==== Parameters # * name<~String> Vault name # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-get.html # def describe_vault(name,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/get_job_output.rb000066400000000000000000000024461437344660100241360ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Get the output from a job # # ==== Parameters # * name<~String> Name of the vault # * job_id<~String> The id of the job # * options<~Hash> # * Range<~Range> The range to retrieve # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # * response_block<~Proc> Proc to use for streaming the response # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-job-output-get.html # def get_job_output(vault_name, job_id, options={}) account_id = options.delete('account_id') || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/jobs/#{job_id}/output" headers = {} if range = options.delete('Range') headers['Range'] = "bytes=#{range.begin}-#{range.end}" end request( options.merge( :expects => [200,206], :idempotent => true, :headers => headers, :method => :get, :path => path )) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/get_vault_notification_configuration.rb000066400000000000000000000017611437344660100305730ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Get a vault's notification configuration # # ==== Parameters # * name<~String> Name of the vault # * options<~Hash> # * account_id<~String> - The AWS account id. 
Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-notifications-get.html # def get_vault_notification_configuration(name,options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/notification-configuration" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/initiate_job.rb000066400000000000000000000030131437344660100235340ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # This operation initates a multipart upload of an archive to a vault # # ==== Parameters # * name<~String> The vault name # * job_specification<~Hash> A specification of the job # * Type<~String> The job type. Mandatory. Values: archive-retrieval, inventory-retrieval # * Description<~String> The job description # * ArchiveId<~String> The id of the archive to retrieve (only for Type==archive-retrieval) # * Format<~String> The format to return (only for inventory retrieval). Values: CSV, JSON # * SNSTopic ARN of a topic to publish to when the job is complete # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-initiate-job-post.html # def initiate_job(name, job_specification, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/jobs" request({ :expects => 202, :headers => {}, :method => 'POST', :path => path, :body => Fog::JSON.encode(job_specification) }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/initiate_multipart_upload.rb000066400000000000000000000025001437344660100263470ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # This operation initates a multipart upload of an archive to a vault # # ==== Parameters # * name<~String> The vault name # * part_size<~Integer> The part size to use. Must be a power of 2 multiple of 1MB (1,2,4,8,16,...) # * options<~Hash> # * description<~String> - The archive description # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-multipart-initiate-upload.html # def initiate_multipart_upload(name, part_size, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/multipart-uploads" headers = {'x-amz-part-size' => part_size.to_s} headers['x-amz-archive-description'] = Fog::AWS.escape(options['description']) if options['description'] request( :expects => 201, :headers => headers, :method => 'POST', :path => path ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/list_jobs.rb000066400000000000000000000025011437344660100230650ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # lists in-progress and recently jobs for the specified vault # # ==== Parameters # * name<~String> Name of the vault # * options<~Hash> # * completed<~Boolean>Specifies the state of the jobs to return. 
You can specify true or false # * statuscode<~String> Filter returned jobs by status (InProgress, Succeeded, or Failed) # * limit<~Integer> - The maximum number of items returned in the response. (default 1000) # * marker<~String> - marker used for pagination # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # ==== See Also #http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-jobs-get.html # def list_jobs(vault_name, options={}) account_id = options.delete('account_id') || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/jobs" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path, :query => options ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/list_multipart_uploads.rb000066400000000000000000000022341437344660100257030ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # lists in-progress multipart uploads for the specified vault # # ==== Parameters # * name<~String> Name of the vault # * options<~Hash> # * limit<~Integer> - The maximum number of items returned in the response. (default 1000) # * marker<~String> - marker used for pagination # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-multipart-list-uploads.html # def list_multipart_uploads(vault_name, options={}) account_id = options.delete('account_id') || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/multipart-uploads" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path, :query => options ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/list_parts.rb000066400000000000000000000024071437344660100232660ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # lists the parts of an archive that have been uploaded in a specific multipart upload # # ==== Parameters # * name<~String> Name of the vault # * upload_id<~String> The id of the upload # * options<~Hash> # * limit<~Integer> - The maximum number of items returned in the response. (default 1000) # * marker<~String> - marker used for pagination # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-multipart-list-parts.html # def list_parts(vault_name, upload_id, options={}) account_id = options.delete('account_id') || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/multipart-uploads/#{Fog::AWS.escape(upload_id)}" request( :expects => 200, :idempotent => true, :headers => {}, :method => :get, :path => path, :query => options ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/list_vaults.rb000066400000000000000000000021131437344660100234450ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # This operation lists all vaults owned by the calling user’s account. # # ==== Parameters # * options<~Hash> # * limit<~Integer> - The maximum number of items returned in the response. (default 1000) # * marker<~String> - marker used for pagination # * account_id<~String> - The AWS account id. 
Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vaults-get.html # def list_vaults(options={}) account_id = options.delete('account_id') || '-' path = "/#{account_id}/vaults" request( :expects => 200, :idempotent => true, :headers => {}, :method => 'GET', :path => path, :query => options ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/set_vault_notification_configuration.rb000066400000000000000000000024241437344660100306040ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Set a vault's notification configuration # # ==== Parameters # * name<~String> Name of the vault # * SnsTopic<~String> ARN of the topic to notify # * events<~Array> Events you wish to receive. Valid events are ArchiveRetrievalCompleted, InventoryRetrievalCompleted # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-vault-notifications-put.html # def set_vault_notification_configuration(name,sns_topic, events, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(name)}/notification-configuration" request( :expects => 204, :idempotent => true, :headers => {}, :method => :put, :path => path, :body => Fog::JSON.encode('SNSTopic' => sns_topic, 'Events' => events) ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/glacier/upload_part.rb000066400000000000000000000030071437344660100234110ustar00rootroot00000000000000module Fog module AWS class Glacier class Real # Upload an archive # # ==== Parameters # * name<~String> Name of the vault to upload to # * uploadId<~String> Id of the upload # * body<~String> The data to upload # * offset<~Integer> The offset of the data within the archive # * hash<~String> The tree hash for this part # * options<~Hash> # * account_id<~String> - The AWS account id. Defaults to the account owning the credentials making the request # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.amazonwebservices.com/amazonglacier/latest/dev/api-upload-part.html # def upload_part(vault_name, upload_id, body, offset, hash, options={}) account_id = options['account_id'] || '-' path = "/#{account_id}/vaults/#{Fog::AWS.escape(vault_name)}/multipart-uploads/#{Fog::AWS.escape(upload_id)}" headers = { 'Content-Length' => body.bytesize.to_s, 'Content-Range' => "bytes #{offset}-#{offset+body.bytesize-1}/*", 'x-amz-content-sha256' => OpenSSL::Digest::SHA256.hexdigest(body), 'x-amz-sha256-tree-hash' => hash } request( :expects => 204, :idempotent => true, :headers => headers, :method => :put, :path => path, :body => body ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/000077500000000000000000000000001437344660100177125ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/iam/add_role_to_instance_profile.rb000066400000000000000000000031301437344660100261130ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add a role to an instance profile # # ==== Parameters # * instance_profile_name<~String>: Name of the instance profile to update. # * role_name<~String>:Name of the role to add. 
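        #
        # A minimal usage sketch, assuming both the role and the instance profile
        # already exist (the names below are illustrative):
        #
        #   iam = Fog::AWS::IAM.new(
        #     :aws_access_key_id     => 'YOUR_KEY',
        #     :aws_secret_access_key => 'YOUR_SECRET'
        #   )
        #   iam.add_role_to_instance_profile('example-role', 'example-instance-profile')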
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_AddRoleToInstanceProfile.html # def add_role_to_instance_profile(role_name, instance_profile_name) request( 'Action' => 'AddRoleToInstanceProfile', 'InstanceProfileName' => instance_profile_name, 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def add_role_to_instance_profile(role_name, instance_profile_name) response = Excon::Response.new unless profile = self.data[:instance_profiles][instance_profile_name] raise Fog::AWS::IAM::NotFound.new("Instance Profile #{instance_profile_name} cannot be found.") end unless role = self.data[:roles][role_name] raise Fog::AWS::IAM::NotFound.new("Role #{role_name} cannot be found.") end profile["Roles"] << role_name response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/add_user_to_group.rb000066400000000000000000000031041437344660100237410ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add a user to a group # # ==== Parameters # * group_name<~String>: name of the group # * user_name<~String>: name of user to add # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_AddUserToGroup.html # def add_user_to_group(group_name, user_name) request( 'Action' => 'AddUserToGroup', 'GroupName' => group_name, 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def add_user_to_group(group_name, user_name) unless data[:groups].key?(group_name) raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end unless data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end unless data[:groups][group_name][:members].include?(user_name) data[:groups][group_name][:members] << user_name end Excon::Response.new.tap do |response| response.status = 200 response.body = { 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/attach_group_policy.rb000066400000000000000000000035271437344660100243050ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Attaches a managed policy to a group # # ==== Parameters # * group_name<~String>: name of the group # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachGroupPolicy.html # def attach_group_policy(group_name, policy_arn) request( 'Action' => 'AttachGroupPolicy', 'GroupName' => group_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def attach_group_policy(group_name, policy_arn) if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." 
end unless self.data[:groups].key?(group_name) raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end group = self.data[:groups][group_name] group[:attached_policies] << policy_arn managed_policy["AttachmentCount"] += 1 Excon::Response.new.tap { |response| response.status = 200 response.body = { "RequestId" => Fog::AWS::Mock.request_id } } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/attach_role_policy.rb000066400000000000000000000035161437344660100241100ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Attaches a managed policy to a role # # ==== Parameters # * role_name<~String>: name of the role # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachRolePolicy.html # def attach_role_policy(role_name, policy_arn) request( 'Action' => 'AttachRolePolicy', 'RoleName' => role_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def attach_role_policy(role_name, policy_arn) response = Excon::Response.new if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." end unless self.data[:roles][role_name] raise Fog::AWS::IAM::NotFound.new("The role with name #{role_name} cannot be found.") end role = self.data[:roles][role_name] role[:attached_policies] ||= [] role[:attached_policies] << managed_policy['Arn'] managed_policy['AttachmentCount'] += 1 response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/attach_user_policy.rb000066400000000000000000000035051437344660100241230ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Attaches a managed policy to a user # # ==== Parameters # * user_name<~String>: name of the user # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachUserPolicy.html # def attach_user_policy(user_name, policy_arn) request( 'Action' => 'AttachUserPolicy', 'UserName' => user_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def attach_user_policy(user_name, policy_arn) if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." 
end unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end user = self.data[:users][user_name] user[:attached_policies] << policy_arn managed_policy['AttachmentCount'] += 1 Excon::Response.new.tap { |response| response.status = 200 response.body = { "RequestId" => Fog::AWS::Mock.request_id } } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_access_key.rb000066400000000000000000000043111437344660100236720ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/create_access_key' # Create a access keys for user (by default detects user from access credentials) # # ==== Parameters # * options<~Hash>: # * 'UserName'<~String> - name of the user to create (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'AccessKey'<~Hash>: # * 'AccessKeyId'<~String> - # * 'UserName'<~String> - # * 'SecretAccessKey'<~String> - # * 'Status'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateAccessKey.html # def create_access_key(options = {}) request({ 'Action' => 'CreateAccessKey', :parser => Fog::Parsers::AWS::IAM::CreateAccessKey.new }.merge!(options)) end end class Mock def create_access_key(options) #FIXME: Not 100% correct as AWS will use the signing credentials when there is no 'UserName' in the options hash # Also doesn't raise an error when there are too many keys if user = options['UserName'] if data[:users].key? user access_keys_data = data[:users][user][:access_keys] else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end else access_keys_data = data[:access_keys] end key = { 'SecretAccessKey' => Fog::Mock.random_base64(40), 'Status' => 'Active', 'AccessKeyId' => Fog::AWS::Mock.key_id(20), } if user key["UserName"] = user end access_keys_data << key Excon::Response.new.tap do |response| response.status = 200 response.body = { 'AccessKey' => key, 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_account_alias.rb000066400000000000000000000005671437344660100243770ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' def create_account_alias(account_alias) request( 'Action' => 'CreateAccountAlias', 'AccountAlias' => account_alias, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_group.rb000066400000000000000000000036701437344660100227240ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/create_group' # Create a new group # # ==== Parameters # * group_name<~String>: name of the group to create (do not include path) # * path<~String>: optional path to group, defaults to '/' # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Group'<~Hash>: # * Arn<~String> - # * GroupId<~String> - # * GroupName<~String> - # * Path<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateGroup.html # def create_group(group_name, path = '/') request( 'Action' => 'CreateGroup', 'GroupName' => group_name, 'Path' => path, :parser => Fog::Parsers::AWS::IAM::CreateGroup.new ) end end class Mock def create_group(group_name, path = '/') if data[:groups].key? 
group_name raise Fog::AWS::IAM::EntityAlreadyExists.new("Group with name #{group_name} already exists.") else data[:groups][group_name][:path] = path Excon::Response.new.tap do |response| response.body = { 'Group' => { 'GroupId' => (data[:groups][group_name][:group_id]).strip, 'GroupName' => group_name, 'Path' => path, 'Arn' => (data[:groups][group_name][:arn]).strip }, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_instance_profile.rb000066400000000000000000000044561437344660100251170ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/instance_profile' # Create a new instance_profile # # ==== Parameters # * instance_profile_name<~String>: name of the instance profile to create (do not include path) # * path<~String>: optional path to group, defaults to '/' # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'InstanceProfile'<~Hash>: # * Arn<~String> - # * CreateDate<~Date> # * InstanceProfileId<~String> - # * InstanceProfileName<~String> - # * Path<~String> - # * Roles<~Array> - # role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateInstanceProfile.html # def create_instance_profile(instance_profile_name, path='/', options={}) request({ 'Action' => 'CreateInstanceProfile', 'InstanceProfileName' => instance_profile_name, 'Path' => path, :parser => Fog::Parsers::AWS::IAM::InstanceProfile.new }.merge!(options)) end end class Mock def create_instance_profile(instance_profile_name, path='/', options={}) response = Excon::Response.new profile = { "Arn" => "arn:aws:iam::#{Fog::AWS::Mock.owner_id}:instance-profile#{path}#{instance_profile_name}", "CreateDate" => Time.now.utc, "InstanceProfileId" => Fog::Mock.random_hex(21), "InstanceProfileName" => instance_profile_name, "Path" => path, "Roles" => [], } self.data[:instance_profiles][instance_profile_name] = profile response.body = {"InstanceProfile" => profile, "RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_login_profile.rb000066400000000000000000000036501437344660100244160ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/login_profile' # Creates a login profile for a user # # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateLoginProfile.html # ==== Parameters # * user_name<~String> - Name of user to create a login profile for # * password<~String> - The new password for this user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'LoginProfile'<~Hash> # * UserName<~String> # * CreateDate # * 'RequestId'<~String> - Id of the request # # def create_login_profile(user_name, password) request({ 'Action' => 'CreateLoginProfile', 'UserName' => user_name, 'Password' => password, :parser => Fog::Parsers::AWS::IAM::LoginProfile.new }) end end class Mock def create_login_profile(user_name, password) unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end user = self.data[:users][user_name] if user[:login_profile] raise Fog::AWS::IAM::EntityAlreadyExists, "Login Profile for user #{user_name} already exists." 
end created_at = Time.now user[:login_profile] = { :created_at => created_at, :password => password, } response = Excon::Response.new response.status = 200 response.body = { "LoginProfile" => { "UserName" => user_name, "CreateDate" => created_at }, "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_policy.rb000066400000000000000000000046371437344660100230730ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/single_policy' # Creates a managed policy # # ==== Parameters # * policy_name<~String>: name of policy document # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # * path <~String>: path of the policy # * description <~String>: description for the policy # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'Policy'<~Hash>: # * Arn # * AttachmentCount # * CreateDate # * DefaultVersionId # * Description # * IsAttachable # * Path # * PolicyId # * PolicyName # * UpdateDate # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_CreatePolicy.html # def create_policy(policy_name, policy_document, path=nil, description=nil) request({ 'Action' => 'CreatePolicy', 'PolicyName' => policy_name, 'PolicyDocument' => Fog::JSON.encode(policy_document), 'Path' => path, 'Description' => description, :parser => Fog::Parsers::AWS::IAM::SinglePolicy.new }.reject {|_, value| value.nil?}) end end class Mock def create_policy(policy_name, policy_document, path="/", description=nil) response = Excon::Response.new arn = "arn:aws:iam:#{Fog::AWS::Mock.owner_id}:policy/#{policy_name}" policy = { "Arn" => arn, "AttachmentCount" => 0, "CreateDate" => Time.now.utc, "DefaultVersionId" => "v1", "Description" => description, "IsAttachable" => true, "Path" => path, "PolicyId" => Fog::Mock.random_hex(21), "PolicyName" => policy_name, "UpdateDate" => Time.now.utc, } self.data[:managed_policies][arn] = policy response.body = {"RequestId" => Fog::AWS::Mock.request_id, "Policy" => policy} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_policy_version.rb000066400000000000000000000046601437344660100246340ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/policy_version' # Creates a managed policy # # ==== Parameters # * policy_arn<~String>: arn of the policy # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # * set_as_default<~Boolean>: sets policy to default version # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'PolicyVersion'<~Array>: # * CreateDate<~DateTime> The date and time, in ISO 8601 date-time format, when the policy version was created. # * Document<~String> The policy document. Pattern: [\u0009\u000A\u000D\u0020-\u00FF]+ # * IsDefaultVersion<~String> Specifies whether the policy version is set as the policy's default version. # * VersionId<~String> The identifier for the policy version. 
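        #
        # A minimal usage sketch; the policy ARN and the statement below are
        # illustrative assumptions, not values from a real account:
        #
        #   iam = Fog::AWS::IAM.new(
        #     :aws_access_key_id     => 'YOUR_KEY',
        #     :aws_secret_access_key => 'YOUR_SECRET'
        #   )
        #   document = {
        #     'Version'   => '2012-10-17',
        #     'Statement' => [{'Effect' => 'Allow', 'Action' => 's3:ListAllMyBuckets', 'Resource' => '*'}]
        #   }
        #   iam.create_policy_version('arn:aws:iam::123456789012:policy/example', document, true)
        #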
# ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_CreatePolicyVersion.html # def create_policy_version(policy_arn, policy_document, set_as_default=true) request({ 'Action' => 'CreatePolicyVersion', 'PolicyArn' => policy_arn, 'PolicyDocument' => Fog::JSON.encode(policy_document), 'SetAsDefault' => set_as_default, :parser => Fog::Parsers::AWS::IAM::PolicyVersion.new }.reject {|_, value| value.nil?}) end end class Mock def create_policy_version(policy_arn, policy_document, set_as_default=true) managed_policy_versions = self.data[:managed_policy_versions][policy_arn] unless managed_policy_versions raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} version #{version_id} does not exist." end version = managed_policy_versions[version_id] unless version raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} version #{version_id} does not exist." end Excon::Response.new.tap do |response| response.body = { 'PolicyVersion' => version, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_role.rb000066400000000000000000000054421437344660100225300ustar00rootroot00000000000000module Fog module AWS class IAM # At the moment this is the only policy you can use EC2_ASSUME_ROLE_POLICY = <<-JSON { "Version":"2008-10-17", "Statement":[ { "Effect":"Allow", "Principal":{ "Service":["ec2.amazonaws.com"] }, "Action":["sts:AssumeRole"] } ] } JSON class Real require 'fog/aws/parsers/iam/single_role' # Creates a new role for your AWS account # # ==== Parameters # * RoleName<~String>: name of the role to create # * AssumeRolePolicyDocument<~String>: The policy that grants an entity permission to assume the role. # * Path<~String>: This parameter is optional. If it is not included, it defaults to a slash (/). 
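        #
        # A minimal usage sketch, reusing the EC2_ASSUME_ROLE_POLICY constant
        # defined above; the role name is illustrative:
        #
        #   iam = Fog::AWS::IAM.new(
        #     :aws_access_key_id     => 'YOUR_KEY',
        #     :aws_secret_access_key => 'YOUR_SECRET'
        #   )
        #   iam.create_role('example-role', Fog::AWS::IAM::EC2_ASSUME_ROLE_POLICY)
        #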
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Role'<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateRole.html # def create_role(role_name, assume_role_policy_document, path = '/') request( 'Action' => 'CreateRole', 'RoleName' => role_name, 'AssumeRolePolicyDocument' => assume_role_policy_document, 'Path' => path, :parser => Fog::Parsers::AWS::IAM::SingleRole.new ) end end class Mock def create_role(role_name, assume_role_policy_document, path = '/') if data[:roles].key?(role_name) raise Fog::AWS::IAM::EntityAlreadyExists.new("Role with name #{role_name} already exists") else data[:roles][role_name][:path] = path Excon::Response.new.tap do |response| response.body = { 'Role' => { 'Arn' => data[:roles][role_name][:arn].strip, 'AssumeRolePolicyDocument' => Fog::JSON.encode(data[:roles][role_name][:assume_role_policy_document]), 'CreateDate' => data[:roles][role_name][:create_date], 'Path' => path || "/", 'RoleId' => data[:roles][role_name][:role_id].strip, 'RoleName' => role_name, }, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/create_user.rb000066400000000000000000000034201437344660100225370ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/create_user' # Create a new user # # ==== Parameters # * user_name<~String>: name of the user to create (do not include path) # * path<~String>: optional path to group, defaults to '/' # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'User'<~Hash>: # * 'Arn'<~String> - # * 'Path'<~String> - # * 'UserId'<~String> - # * 'UserName'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateUser.html # def create_user(user_name, path = '/') request( 'Action' => 'CreateUser', 'UserName' => user_name, 'Path' => path, :parser => Fog::Parsers::AWS::IAM::CreateUser.new ) end end class Mock def create_user(user_name, path='/') if data[:users].key?(user_name) raise Fog::AWS::IAM::EntityAlreadyExists.new "User with name #{user_name} already exists." 
end data[:users][user_name][:path] = path Excon::Response.new.tap do |response| response.status = 200 response.body = { 'User' => { "UserId" => data[:users][user_name][:user_id], "Path" => path, "UserName" => user_name, "Arn" => (data[:users][user_name][:arn]).strip, }, 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_access_key.rb000066400000000000000000000031601437344660100236720ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Delete an access key # # ==== Parameters # * access_key_id<~String> - Access key id to delete # * options<~Hash>: # * 'UserName'<~String> - name of the user to create (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteAccessKey.html # def delete_access_key(access_key_id, options = {}) request({ 'AccessKeyId' => access_key_id, 'Action' => 'DeleteAccessKey', :parser => Fog::Parsers::AWS::IAM::Basic.new }.merge!(options)) end end class Mock def delete_access_key(access_key_id, options = {}) user_name = options['UserName'] if user_name && data[:users].key?(user_name) && data[:users][user_name][:access_keys].any? { |akey| akey['AccessKeyId'] == access_key_id } data[:users][user_name][:access_keys].delete_if { |akey| akey['AccessKeyId'] == access_key_id } Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The Access Key with id #{access_key_id} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_account_alias.rb000066400000000000000000000005661437344660100243750ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' def delete_account_alias(account_alias) request( 'Action' => 'DeleteAccountAlias', 'AccountAlias' => account_alias, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_account_password_policy.rb000066400000000000000000000017311437344660100265200ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add or update the account password policy # # ==== Parameters # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_UpdateAccountPasswordPolicy.html # def delete_account_password_policy request({ 'Action' => 'DeleteAccountPasswordPolicy', :parser => Fog::Parsers::AWS::IAM::Basic.new }) end end class Mock def delete_account_password_policy Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_group.rb000066400000000000000000000026571437344660100227270ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Delete a group # # ==== Parameters # * group_name<~String>: name of the group to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteGroup.html # def delete_group(group_name) request( 
'Action' => 'DeleteGroup', 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_group(group_name) if data[:groups].key? group_name if data[:groups][group_name][:members].empty? data[:groups].delete group_name Excon::Response.new.tap do |response| response.status = 200 response.body = { 'RequestId' => Fog::AWS::Mock.request_id } end else raise Fog::AWS::IAM::Error.new("DeleteConflict => Cannot delete entity, must delete users in group first.") end else raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_group_policy.rb000066400000000000000000000031201437344660100242700ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Remove a policy from a group # # ==== Parameters # * group_name<~String>: name of the group # * policy_name<~String>: name of policy document # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteGroupPolicy.html # def delete_group_policy(group_name, policy_name) request( 'Action' => 'DeleteGroupPolicy', 'GroupName' => group_name, 'PolicyName' => policy_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_group_policy(group_name, policy_name) if !data[:groups].key? group_name raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") elsif !data[:groups][group_name][:policies].key? policy_name raise Fog::AWS::IAM::NotFound.new("The group policy with name #{policy_name} cannot be found.") else data[:groups][group_name][:policies].delete(policy_name) Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_instance_profile.rb000066400000000000000000000025221437344660100251060ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Delete a instance_profile # # ==== Parameters # * instance_profile_name<~String>: name of the instance_profile to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteInstanceProfile.html # def delete_instance_profile(instance_profile_name) request( 'Action' => 'DeleteInstanceProfile', 'InstanceProfileName' => instance_profile_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_instance_profile(instance_profile_name) response = Excon::Response.new unless profile = self.data[:instance_profiles][instance_profile_name] raise Fog::AWS::IAM::NotFound.new("Instance Profile #{instance_profile_name} cannot be found.") end self.data[:instance_profiles].delete(instance_profile_name) response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_login_profile.rb000066400000000000000000000026731437344660100244210ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Deletes a user's login profile # # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteLoginProfile.html # ==== Parameters # * user_name<~String> - Name of user whose login 
profile you want to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # def delete_login_profile(user_name) request({ 'Action' => 'DeleteLoginProfile', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new }) end end class Mock def delete_login_profile(user_name) unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end user = self.data[:users][user_name] unless user[:login_profile] raise Fog::AWS::IAM::NotFound, "Cannot find Login Profile for User #{user_name}" end user.delete(:login_profile) response = Excon::Response.new response.status = 200 response.body = { "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_policy.rb000066400000000000000000000023401437344660100230570ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Deletes a manged policy # # ==== Parameters # * policy_arn<~String>: arn of the policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_DeletePolicy.html # def delete_policy(policy_arn) request( 'Action' => 'DeletePolicy', 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_policy(policy_arn) response = Excon::Response.new policy = self.data[:managed_policies][policy_arn] if policy.nil? raise Fog::AWS::IAM::NotFound.new("Policy #{policy_arn} does not exist or is not attachable.") end self.data[:managed_policies].delete(policy_arn) response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_policy_version.rb000066400000000000000000000022661437344660100246330ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Deletes a manged policy # # ==== Parameters # * policy_arn<~String>: arn of the policy # * version_id<~String>: version of policy to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_DeletePolicyVersion.html # def delete_policy_version(policy_arn, version_id) request( 'Action' => 'DeletePolicyVersion', 'PolicyArn' => policy_arn, 'VersionId' => version_id, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end class Mock def delete_policy_version(policy_arn, version_id) Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_role.rb000066400000000000000000000023261437344660100225250ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Delete a role # # ==== Parameters # * role_name<~String>: name of the role to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteRole.html # def delete_role(role_name) request( 'Action' => 'DeleteRole', 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_role(role_name) role = data[:roles][role_name] if role 
data[:roles].delete(role_name) Excon::Response.new.tap do |response| response.status = 200 response.body = { 'RequestId' => Fog::AWS::Mock.request_id } end else raise Fog::AWS::IAM::NotFound.new("The role with name #{role_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_role_policy.rb000066400000000000000000000016201437344660100241000ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Remove a policy from a role # # ==== Parameters # * role_name<~String>: name of the role # * policy_name<~String>: name of policy document # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteRolePolicy.html # def delete_role_policy(role_name, policy_name) request( 'Action' => 'DeleteRolePolicy', 'PolicyName' => policy_name, 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_server_certificate.rb000066400000000000000000000026411437344660100254340ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Deletes the specified server certificate. # # ==== Parameters # * server_certificate_name<~String>: The name of the server certificate you want to delete. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteServerCertificate.html # def delete_server_certificate(server_certificate_name) request({ 'Action' => 'DeleteServerCertificate', 'ServerCertificateName' => server_certificate_name, :parser => Fog::Parsers::AWS::IAM::Basic.new }) end end class Mock def delete_server_certificate(server_certificate_name) response = Excon::Response.new response.status = 200 response.body = { 'RequestId' => Fog::AWS::Mock.request_id } unless self.data[:server_certificates].delete(server_certificate_name) raise Fog::AWS::IAM::NotFound.new("The Server Certificate with name #{server_certificate_name} cannot be found.") end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_signing_certificate.rb000066400000000000000000000017541437344660100255700ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Upload signing certificate for user (by default detects user from access credentials) # # ==== Parameters # * options<~Hash>: # * 'UserName'<~String> - name of the user to upload certificate for (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_DeleteSigningCertificate.html # def delete_signing_certificate(certificate_id, options = {}) request({ 'Action' => 'DeleteSigningCertificate', 'CertificateId' => certificate_id, :parser => Fog::Parsers::AWS::IAM::Basic.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_user.rb000066400000000000000000000023001437344660100225320ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Delete a user # # ==== Parameters # * user_name<~String>: name of the user to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: 
# * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteUser.html # def delete_user(user_name) request( 'Action' => 'DeleteUser', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_user(user_name) if data[:users].key? user_name data[:users].delete user_name Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/delete_user_policy.rb000066400000000000000000000027141437344660100241220ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Remove a policy from a user # # ==== Parameters # * user_name<~String>: name of the user # * policy_name<~String>: name of policy document # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_DeleteUserPolicy.html # def delete_user_policy(user_name, policy_name) request( 'Action' => 'DeleteUserPolicy', 'PolicyName' => policy_name, 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def delete_user_policy(user_name, policy_name) if data[:users].key?(user_name) && data[:users][user_name][:policies].key?(policy_name) data[:users][user_name][:policies].delete policy_name Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The user policy with name #{policy_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/detach_group_policy.rb000066400000000000000000000035651437344660100242730ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Detaches a managed policy from a group # # ==== Parameters # * group_name<~String>: name of the group # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_DetachGroupPolicy.html # def detach_group_policy(group_name, policy_arn) request( 'Action' => 'DetachGroupPolicy', 'GroupName' => group_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def detach_group_policy(group_name, policy_arn) if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." 
end unless self.data[:groups].key?(group_name) raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end group = self.data[:groups][group_name] group[:attached_policies].delete(policy_arn) managed_policy["AttachmentCount"] -= 1 Excon::Response.new.tap { |response| response.status = 200 response.body = { "RequestId" => Fog::AWS::Mock.request_id } } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/detach_role_policy.rb000066400000000000000000000034751437344660100241000ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Detaches a managed policy from a role # # ==== Parameters # * role_name<~String>: name of the role # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_DetachRolePolicy.html # def detach_role_policy(role_name, policy_arn) request( 'Action' => 'DetachRolePolicy', 'RoleName' => role_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def detach_role_policy(role_name, policy_arn) response = Excon::Response.new if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." end unless self.data[:roles].key?(role_name) raise Fog::AWS::IAM::NotFound.new("The role with name #{role_name} cannot be found.") end role = self.data[:roles][role_name] role[:attached_policies].delete(policy_arn) managed_policy["AttachmentCount"] -= 1 response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/detach_user_policy.rb000066400000000000000000000035401437344660100241060ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Detaches a managed policy to a user # # ==== Parameters # * user_name<~String>: name of the user # * policy_arn<~String>: arn of the managed policy # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_DetachUserPolicy.html # def detach_user_policy(user_name, policy_arn) request( 'Action' => 'DetachUserPolicy', 'UserName' => user_name, 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def detach_user_policy(user_name, policy_arn) if policy_arn.nil? raise Fog::AWS::IAM::ValidationError, "1 validation error detected: Value null at 'policyArn' failed to satisfy constraint: Member must not be null" end managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." 
end

          unless self.data[:users].key?(user_name)
            raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.")
          end

          user = self.data[:users][user_name]

          user[:attached_policies].delete(policy_arn)
          managed_policy["AttachmentCount"] -= 1

          Excon::Response.new.tap { |response|
            response.status = 200
            response.body   = { "RequestId" => Fog::AWS::Mock.request_id }
          }
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/iam/get_account_password_policy.rb000066400000000000000000000017271437344660100260420ustar00rootroot00000000000000module Fog
  module AWS
    class IAM
      class Real
        require 'fog/aws/parsers/iam/basic'

        # Get the account password policy
        #
        # ==== Parameters
        #
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'RequestId'<~String> - Id of the request
        #
        # ==== See Also
        # http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountPasswordPolicy.html
        #
        def get_account_password_policy()
          request({
            'Action' => 'GetAccountPasswordPolicy',
            :parser  => Fog::Parsers::AWS::IAM::Basic.new
          })
        end
      end

      class Mock
        def get_account_password_policy()
          Excon::Response.new.tap do |response|
            response.body   = { 'RequestId' => Fog::AWS::Mock.request_id }
            response.status = 200
          end
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/iam/get_account_summary.rb000066400000000000000000000072441437344660100243160ustar00rootroot00000000000000module Fog
  module AWS
    class IAM
      class Real
        require 'fog/aws/parsers/iam/get_account_summary'

        # Retrieve account level information about account entity usage and IAM quotas
        #
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'Summary'<~Hash>:
        #       * 'AccessKeysPerUserQuota'<~Integer> - Maximum number of access keys that can be created per user
        #       * 'AccountMFAEnabled'<~Integer> - 1 if the root account has an MFA device assigned to it, 0 otherwise
        #       * 'AssumeRolePolicySizeQuota'<~Integer> - Maximum allowed size for assume role policy documents (in kilobytes)
        #       * 'GroupPolicySizeQuota'<~Integer> - Maximum allowed size for Group policy documents (in kilobytes)
        #       * 'Groups'<~Integer> - Number of Groups for the AWS account
        #       * 'GroupsPerUserQuota'<~Integer> - Maximum number of groups a user can belong to
        #       * 'GroupsQuota'<~Integer> - Maximum groups allowed for the AWS account
        #       * 'InstanceProfiles'<~Integer> - Number of instance profiles for the AWS account
        #       * 'InstanceProfilesQuota'<~Integer> - Maximum instance profiles allowed for the AWS account
        #       * 'MFADevices'<~Integer> - Number of MFA devices, either assigned or unassigned
        #       * 'MFADevicesInUse'<~Integer> - Number of MFA devices that have been assigned to an IAM user or to the root account
        #       * 'Providers'<~Integer> -
        #       * 'RolePolicySizeQuota'<~Integer> - Maximum allowed size for role policy documents (in kilobytes)
        #       * 'Roles'<~Integer> - Number of roles for the AWS account
        #       * 'RolesQuota'<~Integer> - Maximum roles allowed for the AWS account
        #       * 'ServerCertificates'<~Integer> - Number of server certificates for the AWS account
        #       * 'ServerCertificatesQuota'<~Integer> - Maximum server certificates allowed for the AWS account
        #       * 'SigningCertificatesPerUserQuota'<~Integer> - Maximum number of X509 certificates allowed for a user
        #       * 'UserPolicySizeQuota'<~Integer> - Maximum allowed size for user policy documents (in kilobytes)
        #       * 'Users'<~Integer> - Number of users for the AWS account
        #       * 'UsersQuota'<~Integer> - Maximum users allowed for the AWS account
        #     * 'RequestId'<~String> - Id of the request
        #
        # ==== See Also
        #
http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateAccessKey.html # def get_account_summary request( 'Action' => 'GetAccountSummary', :parser => Fog::Parsers::AWS::IAM::GetAccountSummary.new ) end end class Mock def get_account_summary Excon::Response.new.tap do |response| response.status = 200 response.body = { 'Summary' => { 'AccessKeysPerUserQuota' => 2, 'AccountMFAEnabled' => 0, 'GroupPolicySizeQuota' => 10240, 'Groups' => 31, 'GroupsPerUserQuota' => 10, 'GroupsQuota' => 50, 'MFADevices' => 20, 'MFADevicesInUse' => 10, 'ServerCertificates' => 5, 'ServerCertificatesQuota' => 10, 'SigningCertificatesPerUserQuota' => 2, 'UserPolicySizeQuota' => 10240, 'Users' => 35, 'UsersQuota' => 150, }, 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_group.rb000066400000000000000000000051461437344660100222400ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/get_group' # Get Group # # ==== Parameters # * 'GroupName'<~String>: Name of the Group # * options<~Hash>: # * 'Marker'<~String>: Use this only when paginating results, and only in a subsequent request after you've received a response where the results are truncated. Set it to the value of the Marker element in the response you just received. # * 'MaxItems'<~String>: Use this only when paginating results to indicate the maximum number of User names you want in the response. If there are additional User names beyond the maximum you specify, the IsTruncated response element is true. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Group'<~Hash> - Group # * 'Path'<~String> # * 'GroupName'<~String> # * 'Arn'<~String> # * 'Users'<~Hash>? - List of users belonging to the group. # * 'User'<~Hash> - User # * Arn<~String> - # * UserId<~String> - # * UserName<~String> - # * Path<~String> - # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_GetGroup.html # def get_group(group_name, options = {}) request({ 'Action' => 'GetGroup', 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::GetGroup.new }.merge!(options)) end end class Mock def get_group(group_name, options = {}) raise Fog::AWS::IAM::NotFound.new( "The user with name #{group_name} cannot be found." ) unless self.data[:groups].key?(group_name) Excon::Response.new.tap do |response| response.body = { 'Group' => { 'GroupId' => data[:groups][group_name][:group_id], 'Path' => data[:groups][group_name][:path], 'GroupName' => group_name, 'Arn' => (data[:groups][group_name][:arn]).strip }, 'Users' => data[:groups][group_name][:members].map { |user| get_user(user).body['User'] }, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_group_policy.rb000066400000000000000000000037701437344660100236200ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/get_group_policy' # Get Group Policy # # ==== Parameters # * 'PolicyName'<~String>: Name of the policy to get # * 'GroupName'<~String>: Name of the Group who the policy is associated with. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * PolicyDocument<~String> The policy document. # * PolicyName<~String> The name of the policy. # * GroupName<~String> The Group the policy is associated with. 
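        #
        # ==== Example
        #   A hypothetical usage sketch, not part of the original documentation: it
        #   assumes an IAM connection (e.g. iam = Fog::AWS::IAM.new(:aws_access_key_id => '...',
        #   :aws_secret_access_key => '...')) and an inline policy named 'S3Access'
        #   attached to a group named 'Admins'. The Mock below nests the document under
        #   response.body['Policy']['PolicyDocument'].
        #
        #     response = iam.get_group_policy('S3Access', 'Admins')
        #     response.body['Policy']['PolicyDocument']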
# # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetGroupPolicy.html # def get_group_policy(policy_name, group_name) request({ 'Action' => 'GetGroupPolicy', 'PolicyName' => policy_name, 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::GetGroupPolicy.new }) end end class Mock def get_group_policy(policy_name, group_name) raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") unless self.data[:groups].key?(group_name) raise Fog::AWS::IAM::NotFound.new("The policy with name #{policy_name} cannot be found.") unless self.data[:groups][group_name][:policies].key?(policy_name) Excon::Response.new.tap do |response| response.body = { 'Policy' => { 'PolicyName' => policy_name, 'GroupName' => group_name, 'PolicyDocument' => data[:groups][group_name][:policies][policy_name] }, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_instance_profile.rb000066400000000000000000000037501437344660100244270ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/instance_profile' # Retrieves information about an instance profile # # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetInstanceProfile.html # ==== Parameters # * instance_profile_name<~String> - Name of instance_profile to retrieve the information for # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'InstanceProfile'<~Hash>: # * Arn<~String> - # * CreateDate<~Date> # * InstanceProfileId<~String> - # * InstanceProfileName<~String> - # * Path<~String> - # * Roles<~Array> - # role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'RequestId'<~String> - Id of the request def get_instance_profile(instance_profile_name) request({ 'Action' => 'GetInstanceProfile', 'InstanceProfileName' => instance_profile_name, :parser => Fog::Parsers::AWS::IAM::InstanceProfile.new }) end end class Mock def get_instance_profile(instance_profile_name) response = Excon::Response.new instance_profile = self.data[:instance_profiles][instance_profile_name] unless instance_profile raise Fog::AWS::IAM::NotFound.new("Instance Profile #{instance_profile_name} cannot be found.") end instance_profile = instance_profile.dup instance_profile["Roles"].map! 
{ |r| self.data[:roles][r] } response.body = {"InstanceProfile" => instance_profile, "RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_login_profile.rb000066400000000000000000000033261437344660100237320ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/login_profile' # Retrieves the login profile for a user # # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_CreateLoginProfile.html # ==== Parameters # * user_name<~String> - Name of user to retrieve the login profile for # * password<~String> - The new password for this user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'LoginProfile'<~Hash> # * UserName<~String> # * CreateDate # * 'RequestId'<~String> - Id of the request # # def get_login_profile(user_name) request({ 'Action' => 'GetLoginProfile', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::LoginProfile.new }) end end class Mock def get_login_profile(user_name) unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end profile = self.data[:users][user_name][:login_profile] unless profile raise Fog::AWS::IAM::NotFound, "Cannot find Login Profile for User #{user_name}" end response = Excon::Response.new response.status = 200 response.body = { "LoginProfile" => { "UserName" => user_name, "CreateDate" => profile[:created_at] }, "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_policy.rb000066400000000000000000000043521437344660100224010ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/single_policy' # Get Policy # # ==== Parameters # * 'PolicyArn'<~String>: The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * Arn<~String> The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # * AttachmentCount<~Integer> The number of entities (users, groups, and roles) that the policy is attached to. # * CreateDate<~DateTime> The date and time, in ISO 8601 date-time format, when the policy was created. # * DefaultVersionId<~String> The identifier for the version of the policy that is set as the default version. # * Description<~String> A friendly description of the policy. # * IsAttachable<~Boolean> Specifies whether the policy can be attached to an IAM user, group, or role. # * Path<~String> The path to the policy. # * PolicyId<~String> The stable and unique string identifying the policy. # * PolicyName<~String> The friendly name (not ARN) identifying the policy. # * UpdateDate<~DateTime> The date and time, in ISO 8601 date-time format, when the policy was last updated. # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetPolicy.html # def get_policy(policy_arn) request({ 'Action' => 'GetPolicy', 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::SinglePolicy.new }) end end class Mock def get_policy(policy_arn) managed_policy = self.data[:managed_policies][policy_arn] unless managed_policy raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} does not exist." 
end Excon::Response.new.tap do |response| response.body = { 'Policy' => managed_policy, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_policy_version.rb000066400000000000000000000042711437344660100241460ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/policy_version' # Contains information about a version of a managed policy. # # ==== Parameters # * PolicyArn<~String>: The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # * VersionId<~String>: Identifies the policy version to retrieve. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'PolicyVersion'<~Array>: # * CreateDate<~DateTime> The date and time, in ISO 8601 date-time format, when the policy version was created. # * Document<~String> The policy document. Pattern: [\u0009\u000A\u000D\u0020-\u00FF]+ # * IsDefaultVersion<~String> Specifies whether the policy version is set as the policy's default version. # * VersionId<~String> The identifier for the policy version. # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_PolicyVersion.html # def get_policy_version(policy_arn, version_id) request({ 'Action' => 'GetPolicyVersion', 'PolicyArn' => policy_arn, 'VersionId' => version_id, :parser => Fog::Parsers::AWS::IAM::PolicyVersion.new }) end end class Mock def get_policy_version(policy_arn, version_id) managed_policy_versions = self.data[:managed_policy_versions][policy_arn] unless managed_policy_versions raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} version #{version_id} does not exist." end version = managed_policy_versions[version_id] unless version raise Fog::AWS::IAM::NotFound, "Policy #{policy_arn} version #{version_id} does not exist." 
end Excon::Response.new.tap do |response| response.body = { 'PolicyVersion' => version, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_role.rb000066400000000000000000000035611437344660100220440ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/single_role' # Get the specified role # # ==== Parameters # role_name<~String> # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * Role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetRole.html # def get_role(role_name) request( 'Action' => 'GetRole', 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::SingleRole.new ) end end class Mock def get_role(role_name) unless self.data[:roles].key?(role_name) raise Fog::AWS::IAM::NotFound.new("The role with name #{role_name} cannot be found") end role = self.data[:roles][role_name] Excon::Response.new.tap do |response| response.body = { 'Role' => { 'Arn' => role[:arn].strip, 'AssumeRolePolicyDocument' => Fog::JSON.encode(role[:assume_role_policy_document]), 'CreateDate' => role[:create_date], 'Path' => role[:path], 'RoleId' => role[:role_id].strip, 'RoleName' => role_name, }, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_role_policy.rb000066400000000000000000000020661437344660100234220ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/get_role_policy' # Get Role Policy # # ==== Parameters # * 'PolicyName'<~String>: Name of the policy to get # * 'RoleName'<~String>: Name of the Role who the policy is associated with. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * PolicyDocument<~String> The policy document. # * PolicyName<~String> The name of the policy. # * RoleName<~String> The Role the policy is associated with. # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetRolePolicy.html # def get_role_policy(role_name, policy_name) request({ 'Action' => 'GetRolePolicy', 'PolicyName' => policy_name, 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::GetRolePolicy.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_server_certificate.rb000066400000000000000000000024541437344660100247530ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/upload_server_certificate' # Gets the specified server certificate. # # ==== Parameters # * server_certificate_name<~String>: The name of the server certificate you want to get. 
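        #
        # ==== Example
        #   A hypothetical usage sketch (not part of the original documentation),
        #   assuming a configured IAM connection `iam` and a previously uploaded
        #   certificate named 'my-cert'; the certificate details are returned under
        #   the 'Certificate' key of the response body:
        #
        #     iam.get_server_certificate('my-cert').body['Certificate']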
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetServerCertificate.html # def get_server_certificate(name) request({ 'Action' => 'GetServerCertificate', 'ServerCertificateName' => name, :parser => Fog::Parsers::AWS::IAM::UploadServerCertificate.new }) end end class Mock def get_server_certificate(name) raise Fog::AWS::IAM::NotFound unless certificate = self.data[:server_certificates][name] response = Excon::Response.new response.status = 200 response.body = { 'Certificate' => certificate, 'RequestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_user.rb000066400000000000000000000044621437344660100220620ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/get_user' # Get User # # ==== Parameters # * username # * options<~Hash>: # * 'UserName'<~String>: Name of the User. Defaults to current user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'User'<~Hash> - User # * Arn<~String> - # * UserId<~String> - # * UserName<~String> - # * Path<~String> - # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_Getuser.html # def get_user(username = nil, options = {}) params = { 'Action' => 'GetUser', :parser => Fog::Parsers::AWS::IAM::GetUser.new } if username params.merge!('UserName' => username) end request(params.merge(options)) end end class Mock def get_user(username = nil, options = {}) response = Excon::Response.new user_body = nil if username.nil? # show current user user = self.current_user user_body = { 'UserId' => user[:user_id], 'Arn' => user[:arn].strip, 'CreateDate' => user[:created_at] } unless @current_user_name == "root" user_body.merge!( 'Path' => user[:path], 'UserName' => @current_user_name ) end elsif !self.data[:users].key?(username) raise Fog::AWS::IAM::NotFound.new("The user with name #{username} cannot be found.") else user = self.data[:users][username] user_body = { 'UserId' => user[:user_id], 'Path' => user[:path], 'UserName' => username, 'Arn' => user[:arn].strip, 'CreateDate' => user[:created_at] } end response.status = 200 response.body = { 'User' => user_body, 'RequestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/get_user_policy.rb000066400000000000000000000037441437344660100234430ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/get_user_policy' # Get User Policy # # ==== Parameters # * 'PolicyName'<~String>: Name of the policy to get # * 'UserName'<~String>: Name of the User who the policy is associated with. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * PolicyDocument<~String> The policy document. # * PolicyName<~String> The name of the policy. # * UserName<~String> The User the policy is associated with. 
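        #
        # ==== Example
        #   A hypothetical usage sketch (not part of the original documentation),
        #   assuming a configured IAM connection `iam`, a user named 'alice' and an
        #   inline policy named 'ReadOnly'; the Mock below nests the document under
        #   response.body['Policy']['PolicyDocument']:
        #
        #     iam.get_user_policy('ReadOnly', 'alice')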
# # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_GetUserPolicy.html # def get_user_policy(policy_name, user_name) request({ 'Action' => 'GetUserPolicy', 'PolicyName' => policy_name, 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::GetUserPolicy.new }) end end class Mock def get_user_policy(policy_name, user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The policy with name #{policy_name} cannot be found.") unless self.data[:users][user_name][:policies].key?(policy_name) Excon::Response.new.tap do |response| response.body = { 'Policy' => { 'PolicyName' => policy_name, 'UserName' => user_name, 'PolicyDocument' => data[:users][user_name][:policies][policy_name] }, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_access_keys.rb000066400000000000000000000044161437344660100235730ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_access_keys' # List access_keys # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String> - used to paginate subsequent requests # * 'MaxItems'<~Integer> - limit results to this number per page # * 'UserName'<~String> - optional: username to lookup access keys for, defaults to current user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'AccessKeys'<~Array> - Matching access keys # * access_key<~Hash>: # * AccessKeyId<~String> - # * Status<~String> - # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListAccessKeys.html # def list_access_keys(options = {}) request({ 'Action' => 'ListAccessKeys', :parser => Fog::Parsers::AWS::IAM::ListAccessKeys.new }.merge!(options)) end end class Mock def list_access_keys(options = {}) #FIXME: Doesn't do anything with options, aside from UserName if user = options['UserName'] if data[:users].key? 
user access_keys_data = data[:users][user][:access_keys] else raise Fog::AWS::IAM::NotFound.new("The user with name #{user} cannot be found.") end else access_keys_data = data[:access_keys] end Excon::Response.new.tap do |response| response.body = { 'AccessKeys' => access_keys_data.map do |akey| {'Status' => akey['Status'], 'AccessKeyId' => akey['AccessKeyId']} end, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_account_aliases.rb000066400000000000000000000005621437344660100244320ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_account_aliases' def list_account_aliases(options = {}) request({ 'Action' => 'ListAccountAliases', :parser => Fog::Parsers::AWS::IAM::ListAccountAliases.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_attached_group_policies.rb000066400000000000000000000060351437344660100261560ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_managed_policies' # Attaches a managed policy to a group # # ==== Parameters # * group_name<~String>: name of the group # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * AttachedPolicies # * 'PolicyArn'<~String> - The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # * 'PolicName'<~String> - The friendly name of the attached policy. # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachGroupPolicy.html # def list_attached_group_policies(group_name, options={}) request({ 'Action' => 'ListAttachedGroupPolicies', 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::ListManagedPolicies.new }.merge(options)) end end class Mock def list_attached_group_policies(group_name, options={}) unless self.data[:groups].key?(group_name) raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end limit = options['MaxItems'] marker = options['Marker'] group = self.data[:groups][group_name] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else group[:attached_policies].map { |arn| self.data[:managed_policies].fetch(arn) }.map { |mp| { "PolicyName" => mp.fetch("PolicyName"), "PolicyArn" => mp.fetch("Arn") } } end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Policies' => data, 'IsTruncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id } if marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_attached_role_policies.rb000066400000000000000000000060131437344660100257570ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_managed_policies' # Lists managed role policies # # 
==== Parameters # * role_name<~String>: name of the role # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * AttachedPolicies # * 'PolicyArn'<~String> - The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # * 'PolicName'<~String> - The friendly name of the attached policy. # # ==== See Also # https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListAttachedRolePolicies.html # def list_attached_role_policies(role_name, options={}) request({ 'Action' => 'ListAttachedRolePolicies', 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::ListManagedPolicies.new }.merge(options)) end end class Mock def list_attached_role_policies(role_name, options={}) unless self.data[:roles].key?(role_name) raise Fog::AWS::IAM::NotFound.new("The role with name #{role_name} cannot be found.") end limit = options['MaxItems'] marker = options['Marker'] role = self.data[:roles][role_name] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else role[:attached_policies].map { |arn| self.data[:managed_policies].fetch(arn) }.map { |mp| { "PolicyName" => mp.fetch("PolicyName"), "PolicyArn" => mp.fetch("Arn") } } end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Policies' => data, 'IsTruncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id } if marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_attached_user_policies.rb000066400000000000000000000060541437344660100260010ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_managed_policies' # Attaches a managed policy to a user # # ==== Parameters # * user_name<~String>: name of the user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * AttachedPolicies # * 'PolicyArn'<~String> - The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. # * 'PolicName'<~String> - The friendly name of the attached policy. 
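        #
        # ==== Example
        #   A hypothetical usage sketch (not part of the original documentation),
        #   assuming a configured IAM connection `iam` and a user named 'alice';
        #   attached policies are returned under the 'Policies' key and can be paged
        #   with the 'Marker'/'MaxItems' options:
        #
        #     iam.list_attached_user_policies('alice', 'MaxItems' => 100).body['Policies']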
# # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachUserPolicy.html # def list_attached_user_policies(user_name, options={}) request({ 'Action' => 'ListAttachedUserPolicies', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::ListManagedPolicies.new }.merge(options)) end end class Mock def list_attached_user_policies(user_name, options={}) unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end limit = options['MaxItems'] marker = options['Marker'] user = self.data[:users][user_name] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else user[:attached_policies].map { |arn| self.data[:managed_policies].fetch(arn) }.map { |mp| { "PolicyName" => mp.fetch("PolicyName"), "PolicyArn" => mp.fetch("Arn") } } end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Policies' => data, 'IsTruncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id, 'Marker' => nil } if marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_group_policies.rb000066400000000000000000000036571437344660100243300ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_policies' # List policies for a group # # ==== Parameters # * group_name<~String> - Name of group to list policies for # * options<~Hash>: Optional # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'PathPrefix'<~String>: prefix for filtering results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'PolicyNames'<~Array> - Matching policy names # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListGroupPolicies.html # def list_group_policies(group_name, options = {}) request({ 'Action' => 'ListGroupPolicies', 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::ListPolicies.new }.merge!(options)) end end class Mock def list_group_policies(group_name, options = {}) #FIXME: doesn't use options atm if data[:groups].key? 
group_name
            Excon::Response.new.tap do |response|
              response.body = {
                'PolicyNames' => data[:groups][group_name][:policies].keys,
                'IsTruncated' => false,
                'RequestId'   => Fog::AWS::Mock.request_id
              }
              response.status = 200
            end
          else
            raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.")
          end
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/iam/list_groups.rb000066400000000000000000000040651437344660100226160ustar00rootroot00000000000000module Fog
  module AWS
    class IAM
      class Real
        require 'fog/aws/parsers/iam/list_groups'

        # List groups
        #
        # ==== Parameters
        # * options<~Hash>:
        #   * 'Marker'<~String>: used to paginate subsequent requests
        #   * 'MaxItems'<~Integer>: limit results to this number per page
        #   * 'PathPrefix'<~String>: prefix for filtering results
        #
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'Groups'<~Array> - Matching groups
        #       * group<~Hash>:
        #         * Arn<~String> -
        #         * GroupId<~String> -
        #         * GroupName<~String> -
        #         * Path<~String> -
        #     * 'IsTruncated<~Boolean> - Whether or not results were truncated
        #     * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use
        #     * 'RequestId'<~String> - Id of the request
        #
        # ==== See Also
        # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListGroups.html
        #
        def list_groups(options = {})
          request({
            'Action' => 'ListGroups',
            :parser  => Fog::Parsers::AWS::IAM::ListGroups.new
          }.merge!(options))
        end
      end

      class Mock
        def list_groups(options = {} )
          #FIXME: Doesn't observe options
          Excon::Response.new.tap do |response|
            response.status = 200
            response.body = {
              'Groups' => data[:groups].map do |name, group|
                {
                  'GroupId'   => group[:group_id],
                  'GroupName' => name,
                  'Path'      => group[:path],
                  'Arn'       => (group[:arn]).strip
                }
              end,
              'IsTruncated' => false,
              'RequestId'   => Fog::AWS::Mock.request_id
            }
          end
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/iam/list_groups_for_user.rb000066400000000000000000000051521437344660100245200ustar00rootroot00000000000000module Fog
  module AWS
    class IAM
      class Real
        require 'fog/aws/parsers/iam/list_groups_for_user'

        # List groups_for_user
        #
        # ==== Parameters
        # * user_name<~String> - the username you want to look up group membership for
        # * options<~Hash>:
        #   * 'Marker'<~String> - used to paginate subsequent requests
        #   * 'MaxItems'<~Integer> - limit results to this number per page
        #
        # ==== Returns
        # * response<~Excon::Response>:
        #   * body<~Hash>:
        #     * 'GroupsForUser'<~Array> - Groups for a user
        #       * group_for_user<~Hash>:
        #         * 'Arn' -
        #         * 'GroupId' -
        #         * 'GroupName' -
        #         * 'Path' -
        #     * 'IsTruncated'<~Boolean> - Whether or not results were truncated
        #     * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use
        #     * 'RequestId'<~String> - Id of the request
        #
        # ==== See Also
        # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListGroupsForUser.html
        #
        def list_groups_for_user(user_name, options = {})
          request({
            'Action'   => 'ListGroupsForUser',
            'UserName' => user_name,
            :parser    => Fog::Parsers::AWS::IAM::ListGroupsForUser.new
          }.merge!(options))
        end
      end

      class Mock
        def list_groups_for_user(user_name, options = {})
          #FIXME: Does not consider options
          if data[:users].key? user_name
            Excon::Response.new.tap do |response|
              response.status = 200
              response.body = {
                'GroupsForUser' => data[:groups].select do |name, group|
                  group[:members].include?
user_name end.map do |name, group| { 'GroupId' => group[:group_id], 'GroupName' => name, 'Path' => group[:path], 'Arn' => (group[:arn]).strip } end, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } end else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_instance_profiles.rb000066400000000000000000000040551437344660100250050ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_instance_profiles' # Lists instance profiles # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'PathPrefix'<~String>: prefix for filtering results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'InstanceProfiles'<~Array>: # * instance_profile <~Hash>: # * Arn<~String> - # * CreateDate<~Date> # * InstanceProfileId<~String> - # * InstanceProfileName<~String> - # * Path<~String> - # * Roles<~Array> - # role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListInstanceProfiles.html # def list_instance_profiles(options={}) request({ 'Action' => 'ListInstanceProfiles', :parser => Fog::Parsers::AWS::IAM::ListInstanceProfiles.new }.merge!(options)) end end class Mock def list_instance_profiles(options={}) response = Excon::Response.new profiles = self.data[:instance_profiles].values response.body = { "InstanceProfiles" => profiles, "IsTruncated" => false, "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_instance_profiles_for_role.rb000066400000000000000000000043661437344660100267010ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_instance_profiles' # Lists the instance profiles that have the specified associated role # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'RoleName'<~String>: The name of the role to list instance profiles for. 
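        #
        # ==== Example
        #   A hypothetical usage sketch (not part of the original documentation),
        #   assuming a configured IAM connection `iam` and an existing role named
        #   'my-role'; matching profiles are returned under the 'InstanceProfiles'
        #   key of the response body:
        #
        #     iam.list_instance_profiles_for_role('my-role').body['InstanceProfiles']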
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'InstanceProfiles'<~Array>: # * instance_profile <~Hash>: # * Arn<~String> - # * CreateDate<~Date> # * InstanceProfileId<~String> - # * InstanceProfileName<~String> - # * Path<~String> - # * Roles<~Array> - # role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListInstanceProfilesForRole.html # def list_instance_profiles_for_role(role_name,options={}) request({ 'Action' => 'ListInstanceProfilesForRole', 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::ListInstanceProfiles.new }.merge!(options)) end end class Mock def list_instance_profiles_for_role(role_name, options={}) response = Excon::Response.new profiles = self.data[:instance_profiles].values.select { |p| p["Roles"].include?(role_name) } response.body = { "InstanceProfiles" => profiles, "IsTruncated" => false, "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_mfa_devices.rb000066400000000000000000000043271437344660100235450ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_mfa_devices' # List MFA Devices # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String> - used to paginate subsequent requests # * 'MaxItems'<~Integer> - limit results to this number per page # * 'UserName'<~String> - optional: username to lookup mfa devices for, defaults to current user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'MFADevices'<~Array> - Matching MFA devices # * mfa_device<~Hash>: # * EnableDate - The date when the MFA device was enabled for the user # * SerialNumber<~String> - The serial number that uniquely identifies the MFA device # * UserName<~String> - The user with whom the MFA device is associated # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html # def list_mfa_devices(options = {}) request({ 'Action' => 'ListMFADevices', :parser => Fog::Parsers::AWS::IAM::ListMFADevices.new }.merge!(options)) end end class Mock def list_mfa_devices(options = {}) #FIXME: Doesn't observe options Excon::Response.new.tap do |response| response.status = 200 response.body = { 'MFADevices' => data[:devices].map do |device| { 'EnableDate' => device[:enable_date], 'SerialNumber' => device[:serial_number], 'UserName' => device[:user_name] } end, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_policies.rb000066400000000000000000000057011437344660100231040ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_managed_policies' # Lists managed policies # # ==== Parameters # * options <~Hash>: options that filter the result set # * Marker <~String> # * MaxItems <~Integer> # * OnlyAttached <~Boolean> # * PathPrefix <~String> # * Scope <~String> # ==== Returns # * response<~Excon::Response>: # * 
body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'IsTruncated'<~Boolean> # * 'Marker'<~String> # * 'Policies'<~Array>: # * Arn # * AttachmentCount # * CreateDate # * DefaultVersionId # * Description # * IsAttachable # * Path # * PolicyId # * PolicyName # * UpdateDate # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListPolicies.html # def list_policies(options={}) request({ 'Action' => 'ListPolicies', :parser => Fog::Parsers::AWS::IAM::ListManagedPolicies.new }.merge(options)) end end class Mock def list_policies(options={}) limit = options['MaxItems'] marker = options['Marker'] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else self.data[:managed_policies].values end if options["PathPrefix"] data_set = data_set.select { |p| p["Path"].match(/^#{options["PathPrefix"]}/) } end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Policies' => data, 'IsTruncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id } if marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_policy_versions.rb000066400000000000000000000052041437344660100245220ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_policy_versions' # Lists policy versions # # ==== Parameters # * options <~Hash>: options that filter the result set # * Marker <~String> # * MaxItems <~Integer> # * PolicyArn <~String> # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'IsTruncated'<~Boolean> # * 'Marker'<~String> # * 'Versions'<~Array>: # * CreateDate # * IsDefaultVersion # * VersionId # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListPolicyVersions.html # def list_policy_versions(policy_arn, options={}) request({ 'Action' => 'ListPolicyVersions', 'PolicyArn' => policy_arn, :parser => Fog::Parsers::AWS::IAM::ListPolicyVersions.new }.merge(options)) end end class Mock def list_policy_versions(policy_arn, options={}) limit = options['MaxItems'] marker = options['Marker'] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else self.data[:policy_versions].values end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Versions' => data, 'IsTruncated' => truncated, 'RequestId' => 
Fog::AWS::Mock.request_id } if marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_role_policies.rb000066400000000000000000000024021437344660100241200ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_policies' # Lists the names of policies associated with a role # # ==== Parameters # * role_name<~String>: the role to list policies for # * options<~Hash>: # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'PolicyNames'<~Array>: # * policy_name <~String> # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListRoleProfiles.html # def list_role_policies(role_name,options={}) request({ 'Action' => 'ListRolePolicies', 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::ListPolicies.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_roles.rb000066400000000000000000000065561437344660100224320ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_roles' # Lists roles # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'PathPrefix'<~String>: prefix for filtering results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * Roles<~Array> - # role<~Hash>: # * 'Arn'<~String> - # * 'AssumeRolePolicyDocument'<~String< # * 'Path'<~String> - # * 'RoleId'<~String> - # * 'RoleName'<~String> - # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListRoles.html # def list_roles(options={}) request({ 'Action' => 'ListRoles', :parser => Fog::Parsers::AWS::IAM::ListRoles.new }.merge!(options)) end end class Mock def list_roles(options={}) limit = options['MaxItems'] marker = options['Marker'] if limit if limit > 1_000 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::IAM::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end data_set = if marker self.data[:markers][marker] || [] else data[:roles].map { |role, data| { 'Arn' => data[:arn].strip, 'AssumeRolePolicyDocument' => Fog::JSON.encode(data[:assume_role_policy_document]), 'RoleId' => data[:role_id], 'Path' => data[:path], 'RoleName' => role, 'CreateDate' => data[:create_date], } } end data = data_set.slice!(0, limit || 100) truncated = data_set.size > 0 marker = truncated && Base64.encode64("metadata/l/#{account_id}/#{UUID.uuid}") response = Excon::Response.new body = { 'Roles' => data, 'IsTruncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id } if 
marker self.data[:markers][marker] = data_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_server_certificates.rb000066400000000000000000000037251437344660100253340ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_server_certificates' # List server certificates # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String> - The marker from the previous result (for pagination) # * 'MaxItems'<~String> - The maximum number of server certificates you want in the response # * 'PathPrefix'<~String> - The path prefix for filtering the results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Certificates'<~Array> - Matching server certificates # * server_certificate<~Hash>: # * Arn<~String> - # * Path<~String> - # * ServerCertificateId<~String> - # * ServerCertificateName<~String> - # * UploadDate<~Time> - # * 'IsTruncated'<~Boolean> - Whether or not the results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_ListServerCertificates.html # def list_server_certificates(options = {}) request({ 'Action' => 'ListServerCertificates', :parser => Fog::Parsers::AWS::IAM::ListServerCertificates.new }.merge!(options)) end end class Mock def list_server_certificates(options = {}) certificates = self.data[:server_certificates].values certificates = certificates.select { |certificate| certificate['Path'] =~ Regexp.new("^#{options['PathPrefix']}") } if options['PathPrefix'] response = Excon::Response.new response.status = 200 response.body = { 'Certificates' => certificates } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_signing_certificates.rb000066400000000000000000000025121437344660100254550ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_signing_certificates' # List signing certificates for user (by default detects user from access credentials) # # ==== Parameters # * options<~Hash>: # * 'UserName'<~String> - name of the user to list certificates for (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'SigningCertificates'<~Array> - Matching signing certificates # * signing_certificate<~Hash>: # * CertificateId<~String> - # * Status<~String> - # * 'IsTruncated'<~Boolean> - Whether or not the results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_ListSigningCertificates.html # def list_signing_certificates(options = {}) request({ 'Action' => 'ListSigningCertificates', :parser => Fog::Parsers::AWS::IAM::ListSigningCertificates.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_user_policies.rb000066400000000000000000000036411437344660100241430ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_policies' # List policies for a user # # ==== Parameters # * user_name<~String> - Name of user to list policies for # * options<~Hash>: Optional # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'PathPrefix'<~String>: prefix 
for filtering results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'PolicyNames'<~Array> - Matching policy names # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListUserPolicies.html # def list_user_policies(user_name, options = {}) request({ 'Action' => 'ListUserPolicies', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::ListPolicies.new }.merge!(options)) end end class Mock def list_user_policies(user_name, options = {}) #FIXME: doesn't use options atm if data[:users].key? user_name Excon::Response.new.tap do |response| response.body = { 'PolicyNames' => data[:users][user_name][:policies].keys, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/list_users.rb000066400000000000000000000041751437344660100224420ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/list_users' # List users # # ==== Parameters # * options<~Hash>: # * 'Marker'<~String>: used to paginate subsequent requests # * 'MaxItems'<~Integer>: limit results to this number per page # * 'PathPrefix'<~String>: prefix for filtering results # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Users'<~Array> - Matching groups # * user<~Hash>: # * Arn<~String> - # * Path<~String> - # * UserId<~String> - # * UserName<~String> - # * 'IsTruncated<~Boolean> - Whether or not results were truncated # * 'Marker'<~String> - appears when IsTruncated is true as the next marker to use # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_ListUsers.html # def list_users(options = {}) request({ 'Action' => 'ListUsers', :parser => Fog::Parsers::AWS::IAM::ListUsers.new }.merge!(options)) end end class Mock def list_users(options = {}) #FIXME: none of the options are currently supported Excon::Response.new.tap do |response| response.body = {'Users' => data[:users].map do |user, data| { 'UserId' => data[:user_id], 'Path' => data[:path], 'UserName' => user, 'Arn' => (data[:arn]).strip, 'CreateDate' => data[:created_at]} end, 'IsTruncated' => false, 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/put_group_policy.rb000066400000000000000000000034321437344660100236440ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add or update a policy for a group # # ==== Parameters # * group_name<~String>: name of the group # * policy_name<~String>: name of policy document # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_PutGroupPolicy.html # def put_group_policy(group_name, policy_name, policy_document) request( 'Action' => 'PutGroupPolicy', 'GroupName' => group_name, 'PolicyName' => policy_name, 'PolicyDocument' => Fog::JSON.encode(policy_document), :parser => 
Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock #FIXME: You can't actually use the credentials for anything elsewhere in Fog #FIXME: Doesn't do any validation on the policy def put_group_policy(group_name, policy_name, policy_document) if data[:groups].key? group_name data[:groups][group_name][:policies][policy_name] = policy_document Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/put_role_policy.rb000066400000000000000000000021501437344660100234450ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add or update a policy for a role # # ==== Parameters # * role_name<~String>: name of the role # * policy_name<~String>: name of policy document # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_PutRolePolicy.html # def put_role_policy(role_name, policy_name, policy_document) request( 'Action' => 'PutRolePolicy', 'RoleName' => role_name, 'PolicyName' => policy_name, 'PolicyDocument' => Fog::JSON.encode(policy_document), :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/put_user_policy.rb000066400000000000000000000034131437344660100234650ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add or update a policy for a user # # ==== Parameters # * user_name<~String>: name of the user # * policy_name<~String>: name of policy document # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_PutUserPolicy.html # def put_user_policy(user_name, policy_name, policy_document) request( 'Action' => 'PutUserPolicy', 'PolicyName' => policy_name, 'PolicyDocument' => Fog::JSON.encode(policy_document), 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock #FIXME: You can't actually use the credentials for anything elsewhere in Fog #FIXME: Doesn't do any validation on the policy def put_user_policy(user_name, policy_name, policy_document) if data[:users].key? user_name data[:users][user_name][:policies][policy_name] = policy_document Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/remove_role_from_instance_profile.rb000066400000000000000000000034001437344660100272010ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # removes a role from an instance profile # # Make sure you do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. 
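      #
      # As a minimal usage sketch, a call might look like the following, assuming
      # `iam` is a Fog::AWS::IAM connection (the role and profile names here are
      # hypothetical, chosen only for illustration):
      #   iam.remove_role_from_instance_profile('my-role', 'my-instance-profile')
      #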
# ==== Parameters # * instance_profile_name<~String>: Name of the instance profile to update. # * role_name<~String>:Name of the role to remove. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_RemoveRoleFromInstanceProfile.html # def remove_role_from_instance_profile(role_name, instance_profile_name) request( 'Action' => 'RemoveRoleFromInstanceProfile', 'InstanceProfileName' => instance_profile_name, 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def remove_role_from_instance_profile(role_name, instance_profile_name) response = Excon::Response.new unless profile = self.data[:instance_profiles][instance_profile_name] raise Fog::AWS::IAM::NotFound.new("Instance Profile #{instance_profile_name} cannot be found.") end unless role = self.data[:roles][role_name] raise Fog::AWS::IAM::NotFound.new("Role #{role_name} cannot be found.") end profile["Roles"].delete(role_name) response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/remove_user_from_group.rb000066400000000000000000000031161437344660100250320ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Remove a user from a group # # ==== Parameters # * group_name<~String>: name of the group # * user_name<~String>: name of user to remove # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_RemoveUserFromGroup.html # def remove_user_from_group(group_name, user_name) request( 'Action' => 'RemoveUserFromGroup', 'GroupName' => group_name, 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def remove_user_from_group(group_name, user_name) if data[:groups].key? group_name if data[:users].key? 
user_name data[:groups][group_name][:members].delete_if { |item| item == user_name } Excon::Response.new.tap do |response| response.status = 200 response.body = { 'RequestId' => Fog::AWS::Mock.request_id } end else raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end else raise Fog::AWS::IAM::NotFound.new("The group with name #{group_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/set_default_policy_version.rb000066400000000000000000000022511437344660100256620ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Remove a user from a group # # ==== Parameters # * policy_arn<~String>: arn of the policy # * version_id<~String>: version of policy to delete # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_SetDefaultPolicyVersion.html # def set_default_policy_version(policy_arn, version_id) request( 'Action' => 'SetDefaultPolicyVersion', 'PolicyArn' => policy_arn, 'VersionId' => version_id, :parser => Fog::Parsers::AWS::IAM::Basic.new ) end end class Mock def set_default_policy_version(policy_arn, version_id) Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_access_key.rb000066400000000000000000000035011437344660100237110ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Update an access key for a user # # ==== Parameters # * access_key_id<~String> - Access key id to delete # * status<~String> - status of keys in ['Active', 'Inactive'] # * options<~Hash>: # * 'UserName'<~String> - name of the user to create (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_UpdateAccessKey.html # def update_access_key(access_key_id, status, options = {}) request({ 'AccessKeyId' => access_key_id, 'Action' => 'UpdateAccessKey', 'Status' => status, :parser => Fog::Parsers::AWS::IAM::Basic.new }.merge!(options)) end end class Mock def update_access_key(access_key_id, status, options = {}) if user = options['UserName'] if data[:users].key? user access_keys_data = data[:users][user][:access_keys] else raise Fog::AWS::IAM::NotFound.new('The user with name #{user_name} cannot be found.') end else access_keys_data = data[:access_keys] end key = access_keys_data.find{|k| k["AccessKeyId"] == access_key_id} key["Status"] = status Excon::Response.new.tap do |response| response.status = 200 response.body = { 'AccessKey' => key, 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_account_password_policy.rb000066400000000000000000000063741437344660100265500ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Add or update the account password policy # # ==== Parameters # * MinimumPasswordLength<~integer> Minimum length to require for IAM user passwords. # * MaxPasswordAge<~integer> The number of days that an IAM user password is valid. # * PasswordReusePrevention<~integer> Specifies the number of previous passwords that IAM users are prevented from reusing. 
# * RequireSymbols<~boolean> Specifies whether to require symbols for IAM user passwords. # * RequireNumbers<~boolean> Specifies whether to require numbers for IAM user passwords. # * RequireUppercaseCharacters<~boolean> Specifies whether to require uppercase characters for IAM user passwords. # * RequireLowercaseCharacters<~boolean> Specifies whether to require lowercase characters for IAM user passwords. # * AllowUsersToChangePassword<~boolean> Specifies whether IAM users are allowed to change their own password. # * HardExpiry<~boolean> Specifies whether IAM users are prevented from setting a new password after their password has expired. # * ExpirePasswords<~boolean> Specifies whether IAM users are required to change their password after a specified number of days. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_UpdateAccountPasswordPolicy.html # def update_account_password_policy(minimum_password_length, max_password_age, password_reuse_prevention,require_symbols,require_numbers,require_uppercase_characters, require_lowercase_characters,allow_users_to_change_password, hard_expiry, expire_passwords) request({ 'Action' => 'UpdateAccountPasswordPolicy', 'MinimumPasswordLength' => minimum_password_length, 'MaxPasswordAge' => max_password_age, 'PasswordReusePrevention' => password_reuse_prevention, 'RequireSymbols' => require_symbols, 'RequireNumbers' => require_numbers, 'RequireUppercaseCharacters' => require_uppercase_characters, 'RequireLowercaseCharacters' => require_lowercase_characters, 'AllowUsersToChangePassword' => allow_users_to_change_password, 'HardExpiry' => hard_expiry, 'ExpirePasswords' => expire_passwords, :parser => Fog::Parsers::AWS::IAM::Basic.new }) end end class Mock def update_account_password_policy(minimum_password_length, max_password_age, password_reuse_prevention,require_symbols,require_numbers,require_uppercase_characters, require_lowercase_characters,allow_users_to_change_password, hard_expiry, expire_passwords) Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_assume_role_policy.rb000066400000000000000000000025221437344660100254770ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Creates a managed policy # # ==== Parameters # * policy_document<~Hash>: policy document, see: http://docs.amazonwebservices.com/IAM/latest/UserGuide/PoliciesOverview.html # * role_name<~String>: name of role to update # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/IAM/latest/APIReference/API_UpdateAssumeRolePolicy.html # def update_assume_role_policy(policy_document, role_name) request({ 'Action' => 'UpdateAssumeRolePolicy', 'PolicyDocument' => Fog::JSON.encode(policy_document), 'RoleName' => role_name, :parser => Fog::Parsers::AWS::IAM::Basic.new }.reject {|_, value| value.nil?}) end class Mock def update_assume_role_policy(policy_document, role_name) Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_group.rb000066400000000000000000000047021437344660100227400ustar00rootroot00000000000000module Fog 
module AWS class IAM class Real require 'fog/aws/parsers/iam/update_group' # Update a Group # # ==== Parameters # * group_name<~String> - Required. Name of the Group to update. If you're changing the name of the Group, this is the original Group name. # * options<~Hash>: # * new_path<~String> - New path for the Group. Include this parameter only if you're changing the Group's path. # * new_group_name<~String> - New name for the Group. Include this parameter only if you're changing the Group's name. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'Group'<~Hash> - Changed Group info # * 'Arn'<~String> - # * 'Path'<~String> - # * 'GroupId'<~String> - # * 'GroupName'<~String> - # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateGroup.html # def update_group(group_name, options = {}) request({ 'Action' => 'UpdateGroup', 'GroupName' => group_name, :parser => Fog::Parsers::AWS::IAM::UpdateGroup.new }.merge!(options)) end end class Mock def update_group(group_name, options = {}) raise Fog::AWS::IAM::NotFound.new( "The user with name #{group_name} cannot be found." ) unless self.data[:groups].key?(group_name) response = Excon::Response.new group = self.data[:groups][group_name] new_path = options['NewPath'] new_group_name = options['NewGroupName'] if new_path unless new_path.match(/\A\/[a-zA-Z0-9]+\/\Z/) raise Fog::AWS::IAM::ValidationError, "The specified value for path is invalid. It must begin and end with / and contain only alphanumeric characters and/or / characters." end group[:path] = new_path end if new_group_name self.data[:groups].delete(group_name) self.data[:groups][new_group_name] = group end response.status = 200 response.body = { 'Group' => {}, 'RequestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_login_profile.rb000066400000000000000000000030771437344660100244400ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/basic' # Updates a login profile for a user # # http://docs.amazonwebservices.com/IAM/latest/APIReference/API_UpdateLoginProfile.html # ==== Parameters # * user_name<~String> - Name of user to change the login profile for # * password<~String> - The new password for this user # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # def update_login_profile(user_name, password) request({ 'Action' => 'UpdateLoginProfile', 'UserName' => user_name, 'Password' => password, :parser => Fog::Parsers::AWS::IAM::Basic.new }) end end class Mock def update_login_profile(user_name, password) unless self.data[:users].key?(user_name) raise Fog::AWS::IAM::NotFound.new("The user with name #{user_name} cannot be found.") end user = self.data[:users][user_name] unless user[:login_profile] raise Fog::AWS::IAM::NotFound, "Cannot find Login Profile for User #{user_name}" end user[:login_profile][:password] = password response = Excon::Response.new response.status = 200 response.body = { "RequestId" => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_server_certificate.rb000066400000000000000000000047241437344660100254600ustar00rootroot00000000000000module Fog module AWS class IAM class Real # Updates the name and/or the path of the specified server certificate. 
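      #
      # A minimal usage sketch, assuming `iam` is a Fog::AWS::IAM connection
      # (the certificate names here are hypothetical):
      #   iam.update_server_certificate('my-cert', 'NewServerCertificateName' => 'my-renamed-cert')
      #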
# # ==== Parameters # * server_certificate_name<~String> - The name of the server # certificate that you want to update. # * options<~Hash>: # * 'NewPath'<~String> - The new path for the server certificate. # Include this only if you are updating the server certificate's # path. # * 'NewServerCertificateName'<~String> - The new name for the server # certificate. Include this only if you are updating the server # certificate's name. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateServerCertificate.html # def update_server_certificate(server_certificate_name, options = {}) request({ 'Action' => 'UpdateServerCertificate', 'ServerCertificateName' => server_certificate_name, :parser => Fog::Parsers::AWS::IAM::Basic.new }.merge!(options)) end end class Mock def update_server_certificate(server_certificate_name, options = {}) new_server_certificate_name = options['NewServerCertificateName'] if self.data[:server_certificates][new_server_certificate_name] raise Fog::AWS::IAM::EntityAlreadyExists.new("The Server Certificate with name #{server_certificate_name} already exists.") end unless certificate = self.data[:server_certificates].delete(server_certificate_name) raise Fog::AWS::IAM::NotFound.new("The Server Certificate with name #{server_certificate_name} cannot be found.") end if new_server_certificate_name certificate['ServerCertificateName'] = new_server_certificate_name end if new_path = options['NewPath'] certificate['Path'] = new_path end self.data[:server_certificates][certificate['ServerCertificateName']] = certificate Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_signing_certificate.rb000066400000000000000000000020471437344660100256040ustar00rootroot00000000000000module Fog module AWS class IAM class Real # Update a Signing Certificate # # ==== Parameters # * certificate_id<~String> - Required. ID of the Certificate to update. # * status<~String> - Required. Active/Inactive # * options<~Hash>: # * user_name<~String> - Name of the user the signing certificate belongs to. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateSigningCertificate.html # def update_signing_certificate(certificate_id, status, options = {}) request({ 'Action' => 'UpdateSigningCertificate', 'CertificateId' => certificate_id, 'Status' => status, :parser => Fog::Parsers::AWS::IAM::Basic.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/update_user.rb000066400000000000000000000025231437344660100225610ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/update_user' # Update a user # # ==== Parameters # * user_name<~String> - Required. Name of the User to update. If you're changing the name of the User, this is the original User name. # * options<~Hash>: # * new_path<~String> - New path for the User. Include this parameter only if you're changing the User's path. # * new_user_name<~String> - New name for the User. Include this parameter only if you're changing the User's name. 
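      #
      # A minimal usage sketch, assuming `iam` is a Fog::AWS::IAM connection; the
      # user names below are hypothetical, and the option key follows the AWS API
      # parameter name:
      #   iam.update_user('old-user-name', 'NewUserName' => 'new-user-name')
      #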
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'RequestId'<~String> - Id of the request # * 'User'<~Hash> - Changed user info # * 'Arn'<~String> - # * 'Path'<~String> - # * 'UserId'<~String> - # * 'UserName'<~String> - # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UpdateUser.html # def update_user(user_name, options = {}) request({ 'Action' => 'UpdateUser', 'UserName' => user_name, :parser => Fog::Parsers::AWS::IAM::UpdateUser.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/upload_server_certificate.rb000066400000000000000000000076351437344660100254660ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/upload_server_certificate' # Uploads a server certificate entity for the AWS Account. # Includes a public key certificate, a private key, and an optional certificate chain, which should all be PEM-encoded. # # ==== Parameters # * certificate<~Hash>: The contents of the public key certificate in PEM-encoded format. # * private_key<~Hash>: The contents of the private key in PEM-encoded format. # * name<~Hash>: The name for the server certificate. Do not include the path in this value. # * options<~Hash>: # * 'CertificateChain'<~String> - The contents of the certificate chain. Typically a concatenation of the PEM-encoded public key certificates of the chain. # * 'Path'<~String> - The path for the server certificate. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Certificate'<~Hash>: # * 'Arn'<~String> - # * 'Path'<~String> - # * 'ServerCertificateId'<~String> - # * 'ServerCertificateName'<~String> - # * 'UploadDate'<~Time> # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UploadServerCertificate.html # def upload_server_certificate(certificate, private_key, name, options = {}) request({ 'Action' => 'UploadServerCertificate', 'CertificateBody' => certificate, 'PrivateKey' => private_key, 'ServerCertificateName' => name, :parser => Fog::Parsers::AWS::IAM::UploadServerCertificate.new }.merge!(options)) end end class Mock def upload_server_certificate(certificate, private_key, name, options = {}) if certificate.nil? || certificate.empty? || private_key.nil? || private_key.empty? raise Fog::AWS::IAM::ValidationError.new end response = Excon::Response.new # Validate cert and key begin # must be an RSA private key raise OpenSSL::PKey::RSAError unless private_key =~ /BEGIN RSA PRIVATE KEY/ cert = OpenSSL::X509::Certificate.new(certificate) chain = OpenSSL::X509::Certificate.new(options['CertificateChain']) if options['CertificateChain'] key = OpenSSL::PKey::RSA.new(private_key) rescue OpenSSL::X509::CertificateError, OpenSSL::PKey::RSAError => e message = if e.is_a?(OpenSSL::X509::CertificateError) "Invalid Public Key Certificate." else "Invalid Private Key." 
end raise Fog::AWS::IAM::MalformedCertificate.new(message) end unless cert.check_private_key(key) raise Fog::AWS::IAM::KeyPairMismatch.new end if self.data[:server_certificates][name] raise Fog::AWS::IAM::EntityAlreadyExists.new("The Server Certificate with name #{name} already exists.") else response.status = 200 path = options['Path'] || "/" data = { 'Arn' => Fog::AWS::Mock.arn('iam', self.data[:owner_id], "server-certificate/#{name}"), 'Path' => path, 'ServerCertificateId' => Fog::AWS::IAM::Mock.server_certificate_id, 'ServerCertificateName' => name, 'UploadDate' => Time.now } self.data[:server_certificates][name] = data response.body = { 'Certificate' => data, 'RequestId' => Fog::AWS::Mock.request_id } end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/iam/upload_signing_certificate.rb000066400000000000000000000023441437344660100256060ustar00rootroot00000000000000module Fog module AWS class IAM class Real require 'fog/aws/parsers/iam/upload_signing_certificate' # Upload signing certificate for user (by default detects user from access credentials) # # ==== Parameters # * options<~Hash>: # * 'UserName'<~String> - name of the user to upload certificate for (do not include path) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Certificate'<~Hash>: # * 'CertificateId'<~String> - # * 'UserName'<~String> - # * 'CertificateBody'<~String> - # * 'Status'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.amazonwebservices.com/IAM/latest/APIReference/index.html?API_UploadSigningCertificate.html # def upload_signing_certificate(certificate, options = {}) request({ 'Action' => 'UploadSigningCertificate', 'CertificateBody' => certificate, :parser => Fog::Parsers::AWS::IAM::UploadSigningCertificate.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/000077500000000000000000000000001437344660100206115ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/kinesis/add_tags_to_stream.rb000066400000000000000000000027351437344660100247700ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Adds or updates tags for the specified Amazon Kinesis stream. # # ==== Options # * StreamName<~String>: The name of the stream. # * Tags<~Hash>: The set of key-value pairs to use to create the tags. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_AddTagsToStream.html # def add_tags_to_stream(options={}) body = { "StreamName" => options.delete("StreamName"), "Tags" => options.delete("Tags") }.reject{ |_,v| v.nil? } request({ 'X-Amz-Target' => "Kinesis_#{@version}.AddTagsToStream", :body => body, }.merge(options)) end end class Mock def add_tags_to_stream(options={}) stream_name = options.delete("StreamName") tags = options.delete("Tags") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end stream["Tags"] = stream["Tags"].merge(tags) response = Excon::Response.new response.status = 200 response.body = "" response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/create_stream.rb000066400000000000000000000043141437344660100237560ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Creates a Amazon Kinesis stream. # # ==== Options # * ShardCount<~Number>: The number of shards that the stream will use. 
# * StreamName<~String>: A name to identify the stream. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html # def create_stream(options={}) body = { "ShardCount" => options.delete("ShardCount") || 1, "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } request({ 'X-Amz-Target' => "Kinesis_#{@version}.CreateStream", :body => body, }.merge(options)) end end class Mock def create_stream(options={}) stream_name = options.delete("StreamName") shard_count = options.delete("ShardCount") || 1 stream_arn = "arn:aws:kinesis:#{@region}:#{@account_id}:stream/#{stream_name}" if data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceInUse.new("Stream #{stream_name} under account #{@account_id} already exists.") end shards = (0...shard_count).map do |shard| { "HashKeyRange"=>{ "EndingHashKey"=>"340282366920938463463374607431768211455", "StartingHashKey"=>"0" }, "SequenceNumberRange"=>{ "StartingSequenceNumber"=> next_sequence_number }, "ShardId"=>next_shard_id, "Records" => [] } end data[:kinesis_streams] = [{ "HasMoreShards" => false, "StreamARN" => stream_arn, "StreamName" => stream_name, "StreamStatus" => "ACTIVE", "Shards" => shards, "Tags" => {} }] response = Excon::Response.new response.status = 200 response.body = "" response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/delete_stream.rb000066400000000000000000000024311437344660100237530ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Deletes a stream and all its shards and data. # # ==== Options # * StreamName<~String>: A name to identify the stream. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DeleteStream.html # def delete_stream(options={}) body = { "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } request({ 'X-Amz-Target' => "Kinesis_#{@version}.DeleteStream", :body => body, }.merge(options)) end end class Mock def delete_stream(options={}) stream_name = options.delete("StreamName") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end data[:kinesis_streams].delete(stream) response = Excon::Response.new response.status = 200 response.body = "" response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/describe_stream.rb000066400000000000000000000036341437344660100242770ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Describes the specified stream. # # ==== Options # * ExclusiveStartShardId<~String>: The shard ID of the shard to start with. # * Limit<~Number>: The maximum number of shards to return. # * StreamName<~String>: The name of the stream to describe. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html # def describe_stream(options={}) body = { "ExclusiveStartShardId" => options.delete("ExclusiveStartShardId"), "Limit" => options.delete("Limit"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } response = request({ :idempotent => true, 'X-Amz-Target' => "Kinesis_#{@version}.DescribeStream", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? 
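          # (The raw HTTP body is a JSON string; decoding it here means callers get a
          # Hash, e.g. response.body["StreamDescription"]["Shards"], as in the Mock below.)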
response.body response end end class Mock def describe_stream(options={}) stream_name = options.delete("StreamName") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end # Strip Records key out of shards for response shards = stream["Shards"].reject{ |k,_| k == "Records" } response = Excon::Response.new response.status = 200 response.body = { "StreamDescription" => stream.dup.merge("Shards" => shards) } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/get_records.rb000066400000000000000000000051501437344660100234370ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Gets data records from a shard. # # ==== Options # * Limit<~Number>: The maximum number of records to return. # * ShardIterator<~String>: The position in the shard from which you want to start sequentially reading data records. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html # def get_records(options={}) body = { "Limit" => options.delete("Limit"), "ShardIterator" => options.delete("ShardIterator") }.reject{ |_,v| v.nil? } response = request({ 'X-Amz-Target' => "Kinesis_#{@version}.GetRecords", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? response end end class Mock def get_records(options={}) shard_iterator = Fog::JSON.decode(options.delete("ShardIterator")) limit = options.delete("Limit") || -1 stream_name = shard_iterator["StreamName"] shard_id = shard_iterator["ShardId"] starting_sequence_number = (shard_iterator["StartingSequenceNumber"] || 1).to_i unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end unless shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id } raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{shard_id} in stream #{stream_name} under account #{@account_id}.") end records = [] shard["Records"].each do |record| next if record["SequenceNumber"].to_i < starting_sequence_number records << record break if records.size == limit end shard_iterator["StartingSequenceNumber"] = if records.empty? starting_sequence_number.to_s else (records.last["SequenceNumber"].to_i + 1).to_s end response = Excon::Response.new response.status = 200 response.body = { "MillisBehindLatest"=> 0, "NextShardIterator"=> Fog::JSON.encode(shard_iterator), "Records"=> records } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/get_shard_iterator.rb000066400000000000000000000040361437344660100250120ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Gets a shard iterator. # # ==== Options # * ShardId<~String>: The shard ID of the shard to get the iterator for. # * ShardIteratorType<~String>: Determines how the shard iterator is used to start reading data records from the shard. # * StartingSequenceNumber<~String>: The sequence number of the data record in the shard from which to start reading from. # * StreamName<~String>: A name to identify the stream. 
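      #
      # A minimal usage sketch, assuming `kinesis` is a Fog::AWS::Kinesis connection
      # (the stream name and shard id below are hypothetical):
      #   kinesis.get_shard_iterator(
      #     'StreamName'        => 'my-stream',
      #     'ShardId'           => 'shardId-000000000000',
      #     'ShardIteratorType' => 'TRIM_HORIZON'
      #   )
      #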
# ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html # def get_shard_iterator(options={}) body = { "ShardId" => options.delete("ShardId"), "ShardIteratorType" => options.delete("ShardIteratorType"), "StartingSequenceNumber" => options.delete("StartingSequenceNumber"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } response = request({ 'X-Amz-Target' => "Kinesis_#{@version}.GetShardIterator", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? response end end class Mock def get_shard_iterator(options={}) stream_name = options["StreamName"] unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end response = Excon::Response.new response.status = 200 response.body = { "ShardIterator" => Fog::JSON.encode(options) # just encode the options that were given, we decode them in get_records } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/list_streams.rb000066400000000000000000000023741437344660100236550ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # List availabe streams # # ==== Options # * ExclusiveStartStreamName<~String>: The name of the stream to start the list with. # * Limit<~Number>: The maximum number of streams to list. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListStreams.html # def list_streams(options={}) response = request({ :idempotent => true, 'X-Amz-Target' => "Kinesis_#{@version}.ListStreams", :body => {}, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? response end end class Mock def list_streams(options={}) response = Excon::Response.new response.status = 200 response.body = { "HasMoreStreams" => false, "StreamNames" => data[:kinesis_streams].map{ |stream| stream["StreamName"] } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/list_tags_for_stream.rb000066400000000000000000000036321437344660100253540ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Lists the tags for the specified Amazon Kinesis stream. # # ==== Options # * ExclusiveStartTagKey<~String>: The key to use as the starting point for the list of tags. # * Limit<~Number>: The number of tags to return. # * StreamName<~String>: The name of the stream. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListTagsForStream.html # def list_tags_for_stream(options={}) body = { "ExclusiveStartTagKey" => options.delete("ExclusiveStartTagKey"), "Limit" => options.delete("Limit"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } response = request({ :idempotent => true, 'X-Amz-Target' => "Kinesis_#{@version}.ListTagsForStream", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? 
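          # (Once decoded, response.body is a Hash containing "HasMoreTags" and a
          # "Tags" array of {"Key" => ..., "Value" => ...} pairs.)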
response.body response end end class Mock def list_tags_for_stream(options={}) stream_name = options.delete("StreamName") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end response = Excon::Response.new response.status = 200 response.body = { "HasMoreTags" => false, "Tags" => stream["Tags"].map{ |k,v| {"Key" => k, "Value" => v} } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/merge_shards.rb000066400000000000000000000070621437344660100236060ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Merges two adjacent shards in a stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. # # ==== Options # * AdjacentShardToMerge<~String>: The shard ID of the adjacent shard for the merge. # * ShardToMerge<~String>: The shard ID of the shard to combine with the adjacent shard for the merge. # * StreamName<~String>: The name of the stream for the merge. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_MergeShards.html # def merge_shards(options={}) body = { "AdjacentShardToMerge" => options.delete("AdjacentShardToMerge"), "ShardToMerge" => options.delete("ShardToMerge"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } request({ 'X-Amz-Target' => "Kinesis_#{@version}.MergeShards", :body => body, }.merge(options)) end end class Mock def merge_shards(options={}) stream_name = options.delete("StreamName") shard_to_merge_id = options.delete("ShardToMerge") adjacent_shard_to_merge_id = options.delete("AdjacentShardToMerge") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end unless shard_to_merge = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_to_merge_id } raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{shard_to_merge_id} in stream #{stream_name} under account #{@account_id}.") end unless adjacent_shard_to_merge = stream["Shards"].detect{ |shard| shard["ShardId"] == adjacent_shard_to_merge_id } raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{adjacent_shard_to_merge_id} in stream #{stream_name} under account #{@account_id}.") end # Close shards (set an EndingSequenceNumber on them) shard_to_merge["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number adjacent_shard_to_merge["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number new_starting_hash_key = [ shard_to_merge["HashKeyRange"]["StartingHashKey"].to_i, adjacent_shard_to_merge["HashKeyRange"]["StartingHashKey"].to_i ].min.to_s new_ending_hash_key = [ shard_to_merge["HashKeyRange"]["EndingHashKey"].to_i, adjacent_shard_to_merge["HashKeyRange"]["EndingHashKey"].to_i ].max.to_s # create a new shard with ParentShardId and AdjacentParentShardID stream["Shards"] << { "HashKeyRange"=> { "EndingHashKey" => new_ending_hash_key, "StartingHashKey" => new_starting_hash_key }, "SequenceNumberRange" => { "StartingSequenceNumber" => next_sequence_number }, "ShardId" => next_shard_id, "ParentShardId" => shard_to_merge_id, "AdjacentParentShardId" => adjacent_shard_to_merge_id } response = Excon::Response.new response.status = 200 response.body = "" response end end end end end 
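# A minimal usage sketch for merge_shards, assuming `kinesis` is a Fog::AWS::Kinesis
# connection; the stream name and shard ids below are hypothetical:
#   kinesis.merge_shards(
#     'StreamName'           => 'my-stream',
#     'ShardToMerge'         => 'shardId-000000000000',
#     'AdjacentShardToMerge' => 'shardId-000000000001'
#   )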
fog-aws-3.18.0/lib/fog/aws/requests/kinesis/put_record.rb000066400000000000000000000054211437344660100233060ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Writes a single data record from a producer into an Amazon Kinesis stream. # # ==== Options # * Data<~Blob>: The data blob to put into the record, which is base64-encoded when the blob is serialized. # * ExplicitHashKey<~String>: The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash. # * PartitionKey<~String>: Determines which shard in the stream the data record is assigned to. # * SequenceNumberForOrdering<~String>: Guarantees strictly increasing sequence numbers, for puts from the same client and to the same partition key. # * StreamName<~String>: The stream name associated with the request. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html # def put_record(options={}) body = { "Data" => options.delete("Data"), "ExplicitHashKey" => options.delete("ExplicitHashKey"), "PartitionKey" => options.delete("PartitionKey"), "SequenceNumberForOrdering" => options.delete("SequenceNumberForOrdering"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? } response = request({ 'X-Amz-Target' => "Kinesis_#{@version}.PutRecord", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? response end end class Mock def put_record(options={}) stream_name = options.delete("StreamName") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end sequence_number = next_sequence_number data = options.delete("Data") partition_key = options.delete("PartitionKey") shard_id = stream["Shards"].sample["ShardId"] shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id } # store the records on the shard(s) shard["Records"] << { "SequenceNumber" => sequence_number, "Data" => data, "PartitionKey" => partition_key } response = Excon::Response.new response.status = 200 response.body = { "SequenceNumber" => sequence_number, "ShardId" => shard_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/put_records.rb000066400000000000000000000051741437344660100234760ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Writes multiple data records from a producer into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). # # ==== Options # * Records<~Array>: The records associated with the request. # * Record<~Hash>: A record. # * Data<~Blob>: The data blob to put into the record, which is base64-encoded when the blob is serialized. # * ExplicitHashKey<~String>: The hash value used to determine explicitly the shard that the data record is assigned to by overriding the partition key hash. # * PartitionKey<~String>: Determines which shard in the stream the data record is assigned to. # * StreamName<~String>: The stream name associated with the request. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html # def put_records(options={}) body = { "Records" => options.delete("Records"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? 
} response = request({ 'X-Amz-Target' => "Kinesis_#{@version}.PutRecords", :body => body, }.merge(options)) response.body = Fog::JSON.decode(response.body) unless response.body.nil? response end end class Mock def put_records(options={}) stream_name = options.delete("StreamName") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end records = options.delete("Records") record_results = records.map { |r| sequence_number = next_sequence_number shard_id = stream["Shards"].sample["ShardId"] shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id } # store the records on the shard(s) shard["Records"] << r.merge("SequenceNumber" => sequence_number) { "SequenceNumber" => sequence_number, "ShardId" => shard_id } } response = Excon::Response.new response.status = 200 response.body = { "FailedRecordCount" => 0, "Records" => record_results } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/remove_tags_from_stream.rb000066400000000000000000000027571437344660100260620ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Deletes tags from the specified Amazon Kinesis stream. # # ==== Options # * StreamName<~String>: The name of the stream. # * TagKeys<~Array>: A list of tag keys. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_RemoveTagsFromStream.html # def remove_tags_from_stream(options={}) body = { "StreamName" => options.delete("StreamName"), "TagKeys" => options.delete("TagKeys") }.reject{ |_,v| v.nil? } request({ 'X-Amz-Target' => "Kinesis_#{@version}.RemoveTagsFromStream", :body => body, }.merge(options)) end end class Mock def remove_tags_from_stream(options={}) stream_name = options.delete("StreamName") tags = options.delete("TagKeys") unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end stream["Tags"] = stream["Tags"].delete_if { |k,_| tags.include?(k) } response = Excon::Response.new response.status = 200 response.body = "" response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kinesis/split_shard.rb000066400000000000000000000064771437344660100234700ustar00rootroot00000000000000module Fog module AWS class Kinesis class Real # Splits a shard into two new shards in the stream, to increase the stream's capacity to ingest and transport data. # # ==== Options # * NewStartingHashKey<~String>: A hash key value for the starting hash key of one of the child shards created by the split. # * ShardToSplit<~String>: The shard ID of the shard to split. # * StreamName<~String>: The name of the stream for the shard split. # ==== Returns # * response<~Excon::Response>: # # ==== See Also # https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SplitShard.html # def split_shard(options={}) body = { "NewStartingHashKey" => options.delete("NewStartingHashKey"), "ShardToSplit" => options.delete("ShardToSplit"), "StreamName" => options.delete("StreamName") }.reject{ |_,v| v.nil? 
} request({ 'X-Amz-Target' => "Kinesis_#{@version}.SplitShard", :body => body, }.merge(options)) end end class Mock def split_shard(options={}) stream_name = options.delete("StreamName") shard_id = options.delete("ShardToSplit") stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } unless stream = data[:kinesis_streams].detect{ |s| s["StreamName"] == stream_name } raise Fog::AWS::Kinesis::ResourceNotFound.new("Stream #{stream_name} under account #{@account_id} not found.") end unless shard = stream["Shards"].detect{ |shard| shard["ShardId"] == shard_id } raise Fog::AWS::Kinesis::ResourceNotFound.new("Could not find shard #{shard_id} in stream #{stream_name} under account #{@account_id}.") end # Close original shard (set an EndingSequenceNumber on it) shard["SequenceNumberRange"]["EndingSequenceNumber"] = next_sequence_number # Calculate new shard ranges parent_starting_hash_key = shard["HashKeyRange"]["StartingHashKey"] parent_ending_hash_key = shard["HashKeyRange"]["EndingHashKey"] new_starting_hash_key = options.delete("NewStartingHashKey") # Create two new shards using contiguous hash space based on the original shard stream["Shards"] << { "HashKeyRange"=> { "EndingHashKey" => (new_starting_hash_key.to_i - 1).to_s, "StartingHashKey" => parent_starting_hash_key }, "SequenceNumberRange" => { "StartingSequenceNumber" => next_sequence_number }, "ShardId" => next_shard_id, "ParentShardId" => shard_id } stream["Shards"] << { "HashKeyRange" => { "EndingHashKey" => parent_ending_hash_key, "StartingHashKey" => new_starting_hash_key }, "SequenceNumberRange" =>{ "StartingSequenceNumber" => next_sequence_number }, "ShardId" => next_shard_id, "ParentShardId" => shard_id } response = Excon::Response.new response.status = 200 response.body = "" response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kms/000077500000000000000000000000001437344660100177365ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/kms/create_key.rb000066400000000000000000000030401437344660100223730ustar00rootroot00000000000000module Fog module AWS class KMS class Real DEFAULT_KEY_POLICY = <<-JSON { "Version": "2012-10-17", "Id": "key-default-1", "Statement": [ { "Sid": "Enable IAM User Permissions", "Effect": "Allow", "Principal": { "AWS": "arn:aws:iam::915445820265:root" }, "Action": "kms:*", "Resource": "*" } ] } JSON require 'fog/aws/parsers/kms/describe_key' def create_key(policy = nil, description = nil, usage = "ENCRYPT_DECRYPT") request( 'Action' => 'CreateKey', 'Description' => description, 'KeyUsage' => usage, 'Policy' => policy, :parser => Fog::Parsers::AWS::KMS::DescribeKey.new ) end end class Mock def create_key(policy = nil, description = nil, usage = "ENCRYPT_DECRYPT") response = Excon::Response.new key_id = UUID.uuid key_arn = Fog::AWS::Mock.arn("kms", self.account_id, "key/#{key_id}", @region) key = { "KeyUsage" => usage, "AWSAccountId" => self.account_id, "KeyId" => key_id, "Description" => description, "CreationDate" => Time.now, "Arn" => key_arn, "Enabled" => true, } # @todo use default policy self.data[:keys][key_id] = key response.body = { "KeyMetadata" => key } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kms/describe_key.rb000066400000000000000000000011141437344660100227100ustar00rootroot00000000000000module Fog module AWS class KMS class Real require 'fog/aws/parsers/kms/describe_key' def describe_key(identifier) request( 'Action' => 'DescribeKey', 'KeyId' => identifier, :parser => Fog::Parsers::AWS::KMS::DescribeKey.new ) end end class 
Mock def describe_key(identifier) response = Excon::Response.new key = self.data[:keys][identifier] response.body = { "KeyMetadata" => key } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/kms/list_keys.rb000066400000000000000000000042511437344660100222730ustar00rootroot00000000000000module Fog module AWS class KMS class Real require 'fog/aws/parsers/kms/list_keys' def list_keys(options={}) params = {} if options[:marker] params['Marker'] = options[:marker] end if options[:limit] params['Limit'] = options[:limit] end request({ 'Action' => 'ListKeys', :parser => Fog::Parsers::AWS::KMS::ListKeys.new }.merge(params)) end end class Mock def list_keys(options={}) limit = options[:limit] marker = options[:marker] if limit if limit > 1_000 raise Fog::AWS::KMS::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value less than or equal to 1000" ) elsif limit < 1 raise Fog::AWS::KMS::Error.new( "ValidationError => 1 validation error detected: Value '#{limit}' at 'limit' failed to satisfy constraint: Member must have value greater than or equal to 1" ) end end key_set = if marker self.data[:markers][marker] || [] else self.data[:keys].inject([]) { |r,(k,v)| r << { "KeyId" => k, "KeyArn" => v["Arn"] } } end keys = if limit key_set.slice!(0, limit) else key_set end truncated = keys.size < key_set.size marker = truncated && "metadata/l/#{account_id}/#{UUID.uuid}" response = Excon::Response.new body = { 'Keys' => keys, 'Truncated' => truncated, 'RequestId' => Fog::AWS::Mock.request_id } if marker self.data[:markers][marker] = key_set body.merge!('Marker' => marker) end response.body = body response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/000077500000000000000000000000001437344660100203645ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/lambda/add_permission.rb000066400000000000000000000072751437344660100237240ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Adds a permission to the access policy associated with the specified AWS Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html # ==== Parameters # * FunctionName <~String> - Name of the Lambda function whose access policy you are updating by adding a new permission. # * Action <~String> - AWS Lambda action you want to allow in this statement. # * Principal <~String> - principal who is getting this permission. # * SourceAccount <~String> - AWS account ID (without a hyphen) of the source owner. # * SourceArn <~String> - Amazon Resource Name (ARN) of the source resource to assign permissions. # * StatemendId. <~String> - unique statement identifier. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Statement' <~Hash> - permission statement you specified in the request. 
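        # ==== Example
        # A minimal usage sketch; the function name, statement id and ARN below are hypothetical,
        # and `client` is assumed to be a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   client.add_permission(
        #     'FunctionName' => 'my-function',
        #     'StatementId'  => 'allow-s3-invoke',
        #     'Action'       => 'lambda:InvokeFunction',
        #     'Principal'    => 's3.amazonaws.com',
        #     'SourceArn'    => 'arn:aws:s3:::my-bucket'
        #   )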
def add_permission(params={}) function_name = params.delete('FunctionName') action = params.delete('Action') principal = params.delete('Principal') source_account = params.delete('SourceAccount') source_arn = params.delete('SourceArn') sid = params.delete('StatementId') permission = { 'Action' => action, 'Principal' => principal, 'StatementId' => sid } permission['SourceAccount'] = source_account if source_account permission['SourceArn'] = source_arn if source_arn request({ :method => 'POST', :path => "/functions/#{function_name}/versions/HEAD/policy", :expects => 201, :body => Fog::JSON.encode(permission), :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def add_permission(params={}) function_id = params.delete('FunctionName') function = self.get_function_configuration( 'FunctionName' => function_id ).body function_arn = function['FunctionArn'] action = params.delete('Action') principal = params.delete('Principal') source_account = params.delete('SourceAccount') source_arn = params.delete('SourceArn') sid = params.delete('StatementId') if action.nil? || action.empty? message = 'Action cannot be blank' raise Fog::AWS::Lambda::Error, message end if principal.nil? || principal.empty? message = 'Principal cannot be blank' raise Fog::AWS::Lambda::Error, message end if sid.nil? || sid.empty? message = 'Sid cannot be blank' raise Fog::AWS::Lambda::Error, message end statement = { 'Action' => [action], 'Principal' => { 'Service' => principal }, 'Sid' => sid, 'Resource' => function_arn, 'Effect' => 'Allow' } if source_arn statement['Condition'] = {} statement['Condition']['ArnLike'] = { 'AWS:SourceArn' => source_arn } end self.data[:permissions][function_arn] ||= [] self.data[:permissions][function_arn] << statement response = Excon::Response.new response.status = 201 response.body = { 'Statement' => statement } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/create_event_source_mapping.rb000066400000000000000000000105041437344660100264500ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Identifies a stream as an event source for a Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html # ==== Parameters # * BatchSize <~Integer> - largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. # * Enabled <~Boolean> - indicates whether AWS Lambda should begin polling the event source. # * EventSourceArn <~String> - Amazon Resource Name (ARN) of the stream that is the event source # * FunctionName <~String> - Lambda function to invoke when AWS Lambda detects an event on the stream. # * StartingPosition <~String> - position in the stream where AWS Lambda should start reading. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BatchSize' <~Integer> - largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. # * 'EventSourceArn' <~String> - Amazon Resource Name (ARN) of the stream that is the source of events. # * 'FunctionArn' <~String> - Lambda function to invoke when AWS Lambda detects an event on the stream. # * 'LastModified' <~Time> - UTC time string indicating the last time the event mapping was updated. # * 'LastProcessingResult' <~String> - result of the last AWS Lambda invocation of your Lambda function. # * 'State' <~String> - state of the event source mapping. 
# * 'StateTransitionReason' <~String> - reason the event source mapping is in its current state. # * 'UUID' <~String> - AWS Lambda assigned opaque identifier for the mapping. def create_event_source_mapping(params={}) enabled = params.delete('Enabled') batch_size = params.delete('BatchSize') event_source_arn = params.delete('EventSourceArn') function_name = params.delete('FunctionName') starting_pos = params.delete('StartingPosition') data = { 'EventSourceArn' => event_source_arn, 'FunctionName' => function_name, 'StartingPosition' => starting_pos } data.merge!('BatchSize' => batch_size) if batch_size data.merge!('Enabled' => enabled) if !enabled.nil? request({ :method => 'POST', :path => '/event-source-mappings/', :expects => 202, :body => Fog::JSON.encode(data) }.merge(params)) end end class Mock def create_event_source_mapping(params={}) enabled = params.delete('Enabled') || false batch_size = params.delete('BatchSize') || 100 event_source_arn = params.delete('EventSourceArn') function_name = params.delete('FunctionName') starting_pos = params.delete('StartingPosition') function = self.get_function_configuration('FunctionName' => function_name).body unless event_source_arn message = "ValidationException => " message << "'eventSourceArn' cannot be blank" raise Fog::AWS::Lambda::Error, message end unless starting_pos message = "ValidationException => " message << "'startingPosition' cannot be blank" raise Fog::AWS::Lambda::Error, message end event_source_mapping_id = UUID.uuid event_source_mapping = { 'BatchSize' => batch_size, 'EventSourceArn' => event_source_arn, 'FunctionArn' => function['FunctionArn'], 'LastModified' => Time.now.to_f, 'LastProcessingResult' => 'No records processed', 'State' => 'Creating', 'StateTransitionReason' => 'User action', 'UUID' => event_source_mapping_id } self.data[:event_source_mappings].merge!( event_source_mapping_id => event_source_mapping ) response = Excon::Response.new response.body = event_source_mapping response.status = 202 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/create_function.rb000066400000000000000000000134611437344660100240660ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Creates a new Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html # ==== Parameters # * Code <~Hash> - code for the Lambda function. # * Description <~String> - short, user-defined function description. # * FunctionName <~String> - name you want to assign to the function you are uploading. # * Handler <~String> - function within your code that Lambda calls to begin execution. # * MemorySize <~Integer> - amount of memory, in MB, your Lambda function is given. # * Role <~String> - ARN of the IAM role that Lambda assumes when it executes your function to access any other AWS resources. # * Runtime <~String> - runtime environment for the Lambda function you are uploading. # * Timeout <~Integer> - function execution time at which Lambda should terminate the function. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'CodeSize' <~Integer> - size, in bytes, of the function .zip file you uploaded. # * 'Description' <~String> - user-provided description. # * 'FunctionArn' <~String> - Amazon Resource Name (ARN) assigned to the function. # * 'FunctionName' <~String> - name of the function. # * 'Handler' <~String> - function Lambda calls to begin executing your function. 
# * 'LastModified' <~Time> - timestamp of the last time you updated the function. # * 'MemorySize' <~Integer> - memory size, in MB, you configured for the function. # * 'Role' <~String> - ARN of the IAM role that Lambda assumes when it executes your function to access any other AWS resources. # * 'Runtime' <~String> - runtime environment for the Lambda function. # * 'Timeout' <~Integer> - function execution time at which Lambda should terminate the function. def create_function(params={}) runtime = params.delete('Runtime') || 'nodejs' code = params.delete('Code') function_name = params.delete('FunctionName') handler = params.delete('Handler') role = params.delete('Role') data = { 'Runtime' => runtime, 'Code' => code, 'FunctionName' => function_name, 'Handler' => handler, 'Role' => role } description = params.delete('Description') data.merge!('Description' => description) if description memory_size = params.delete('MemorySize') data.merge!('MemorySize' => memory_size) if memory_size timeout = params.delete('Timeout') data.merge!('Timeout' => timeout) if timeout request({ :method => 'POST', :path => '/functions', :expects => 201, :body => Fog::JSON.encode(data), :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def create_function(params={}) response = Excon::Response.new runtime = params.delete('Runtime') || 'nodejs' if !%w(nodejs java8).include?(runtime) message = 'ValidationException: Runtime must be nodejs or java8.' raise Fog::AWS::Lambda::Error, message end unless code = params.delete('Code') message = 'ValidationException: Code cannot be blank.' raise Fog::AWS::Lambda::Error, message end unless function_name = params.delete('FunctionName') message = 'ValidationException: Function name cannot be blank.' raise Fog::AWS::Lambda::Error, message end unless handler = params.delete('Handler') message = 'ValidationException: Handler cannot be blank.' raise Fog::AWS::Lambda::Error, message end unless role = params.delete('Role') message = 'ValidationException: Role cannot be blank.' raise Fog::AWS::Lambda::Error, message end code_size = if code.has_key?('ZipFile') Base64.decode64(code['ZipFile']).length else Fog::Mock.random_numbers(5).to_i end description = params.delete('Description') function = {} begin opts = { 'FunctionName' => function_name } function = self.get_function_configuration(opts).body rescue Fog::AWS::Lambda::Error => e # ignore: if the function doesn't exist we are OK. end if !function.empty? message = "ResourceConflictException => " message << "Function already exist: #{function_name}" raise Fog::AWS::Lambda::Error, message end function_path = "function:#{function_name}" function_arn = Fog::AWS::Mock.arn( 'lambda', self.account_id, function_path, self.region ) function = { 'CodeSize' => code_size, 'FunctionArn' => function_arn, 'FunctionName' => function_name, 'Handler' => handler, 'LastModified' => Time.now.utc, 'MemorySize' => params.delete('MemorySize') || 128, 'Timeout' => params.delete('Timeout') || 3, 'Role' => role, 'Runtime' => runtime } function['Description'] = description if description self.data[:functions][function_arn] = function response.body = function response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/delete_event_source_mapping.rb000066400000000000000000000024361437344660100264540ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Removes an event source mapping. 
# http://docs.aws.amazon.com/lambda/latest/dg/API_DeleteEventSourceMapping.html # ==== Parameters # * UUID <~String> - event source mapping ID. # ==== Returns # * response<~Excon::Response>: # * body<~String>: def delete_event_source_mapping(params={}) mapping_id = params.delete('UUID') request({ :method => 'DELETE', :path => "/event-source-mappings/#{mapping_id}", :expects => 202 }.merge(params)) end end class Mock def delete_event_source_mapping(params={}) mapping = self.get_event_source_mapping(params).body unless mapping message = "ResourceNotFoundException => " message << "The resource you requested does not exist." raise Fog::AWS::Lambda::Error, message end mapping_id = mapping['UUID'] self.data[:event_source_mappings].delete(mapping_id) mapping['State'] = 'Deleting' response = Excon::Response.new response.status = 202 response.body = mapping response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/delete_function.rb000066400000000000000000000023231437344660100240600ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Deletes the specified Lambda function code and configuration. # http://docs.aws.amazon.com/lambda/latest/dg/API_DeleteFunction.html # ==== Parameters # * FunctionName <~String> - Lambda function to delete. # ==== Returns # * response<~Excon::Response>: # * body<~String>: def delete_function(params={}) function_name = params.delete('FunctionName') request({ :method => 'DELETE', :path => "/functions/#{function_name}", :expects => 204 }.merge(params)) end end class Mock def delete_function(params={}) response = Excon::Response.new response.status = 204 response.body = '' function = self.get_function_configuration(params).body function_id = function['FunctionArn'] self.data[:functions].delete function_id self.data[:permissions].delete function_id self.data[:event_source_mappings].delete_if do |m,f| f['FunctionArn'].eql?(function_id) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/get_event_source_mapping.rb000066400000000000000000000045541437344660100257740ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Returns configuration information for the specified event source mapping. # http://docs.aws.amazon.com/lambda/latest/dg/API_GetEventSourceMapping.html # ==== Parameters # * UUID <~String> - AWS Lambda assigned ID of the event source mapping. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BatchSize' <~Integer> - largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. # * 'EventSourceArn' <~String> - Amazon Resource Name (ARN) of the stream that is the source of events. # * 'FunctionArn' <~String> - Lambda function to invoke when AWS Lambda detects an event on the stream. # * 'LastModified' <~Time> - UTC time string indicating the last time the event mapping was updated. # * 'LastProcessingResult' <~String> - result of the last AWS Lambda invocation of your Lambda function. # * 'State' <~String> - state of the event source mapping. # * 'StateTransitionReason' <~String> - reason the event source mapping is in its current state. # * 'UUID' <~String> - AWS Lambda assigned opaque identifier for the mapping. # * 'Code' <~Hash> - object for the Lambda function location. # * 'Configuration' <~Hash> - function metadata description. 
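        # ==== Example
        # A minimal usage sketch; the mapping UUID below is hypothetical, and `client` is assumed
        # to be a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   mapping = client.get_event_source_mapping(
        #     'UUID' => 'e3b0c442-98fc-1c14-9afb-f4c8996fb924'
        #   ).body
        #   mapping['State'] # => e.g. "Enabled"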
def get_event_source_mapping(params={}) mapping_id = params.delete('UUID') request({ :method => 'GET', :path => "/event-source-mappings/#{mapping_id}" }.merge(params)) end end class Mock def get_event_source_mapping(params={}) mapping_id = params.delete('UUID') unless mapping = self.data[:event_source_mappings][mapping_id] message = 'ResourceNotFoundException => ' message << 'The resource you requested does not exist.' raise Fog::AWS::Lambda::Error, message end if mapping['State'].eql?('Creating') mapping['LastProcessingResult'] = 'OK' mapping['State'] = 'Enabled' end response = Excon::Response.new response.status = 200 response.body = mapping response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/get_function.rb000066400000000000000000000047441437344660100234060ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Returns the configuration information of the Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_GetFunction.html # ==== Parameters # * FunctionName <~String> - Lambda function name. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Code' <~Hash> - object for the Lambda function location. # * 'Configuration' <~Hash> - function metadata description. def get_function(params={}) function_name = params.delete('FunctionName') request({ :method => 'GET', :path => "/functions/#{function_name}/versions/HEAD", :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def get_function(params={}) response = Excon::Response.new response.status = 200 response.body = '' unless function_id = params.delete('FunctionName') raise Fog::AWS::Lambda::Error, 'Function name cannot be blank.' end if function_id.match(/^arn:aws:lambda:.+:function:.+/) function = self.data[:functions][function_id] else search_function = Hash[ self.data[:functions].select do |f,v| v['FunctionName'].eql?(function_id) end ] function = search_function.values.first end msg = 'The resource you requested does not exist.' raise Fog::AWS::Lambda::Error, msg if (function.nil? || function.empty?) location = "https://awslambda-#{self.region}-tasks.s3-#{self.region}" location << ".amazonaws.com/snapshot/#{self.account_id}/" location << "#{function['FunctionName']}-#{UUID.uuid}" location << '?x-amz-security-token=' location << Fog::Mock.random_base64(718) location << "&AWSAccessKeyId=#{self.aws_access_key_id}" location << "&Expires=#{Time.now.to_i + 60*10}" location << '&Signature=' location << Fog::Mock.random_base64(28) body = { 'Code' => { 'Location' => location, 'RepositoryType' => 'S3' }, 'Configuration' => function } response.body = body response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/get_function_configuration.rb000066400000000000000000000037261437344660100263340ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Returns the configuration information of the Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_GetFunction.html # ==== Parameters # * FunctionName <~String> - Lambda function name. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'CodeSize' <~Integer> - size, in bytes, of the function .zip file you uploaded. # * 'Description' <~String> - user-provided description. # * 'FunctionArn' <~String> - Amazon Resource Name (ARN) assigned to the function. # * 'FunctionName' <~String> - name of the function. 
# * 'Handler' <~String> - function Lambda calls to begin executing your function. # * 'LastModified' <~Time> - timestamp of the last time you updated the function. # * 'Memorysize' <~String> - memory size, in MB, you configured for the function. # * 'Role' <~String> - ARN of the IAM role that Lambda assumes when it executes your function to access any other AWS resources. # * 'Runtime' <~String> - runtime environment for the Lambda function. # * 'Timeout' <~Integer> - function execution time at which Lambda should terminate the function. def get_function_configuration(params={}) function_name = params.delete('FunctionName') request({ :method => 'GET', :path => "/functions/#{function_name}/versions/HEAD/configuration", :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def get_function_configuration(params={}) response = self.get_function(params) function_configuration = response.body['Configuration'] response.body = function_configuration response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/get_policy.rb000066400000000000000000000033151437344660100230510ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Returns the access policy, containing a list of permissions granted via the AddPermission API, associated with the specified bucket. # http://docs.aws.amazon.com/lambda/latest/dg/API_GetPolicy.html # ==== Parameters # * FunctionName <~String> - Function name whose access policy you want to retrieve. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Policy' <~Hash> - The access policy associated with the specified function. def get_policy(params={}) function_name = params.delete('FunctionName') request({ :method => 'GET', :path => "/functions/#{function_name}/versions/HEAD/policy", :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def get_policy(params={}) response = Excon::Response.new function = self.get_function_configuration(params).body function_arn = function['FunctionArn'] statements = self.data[:permissions][function_arn] || [] if statements.empty? message = "ResourceNotFoundException => " message << "The resource you requested does not exist." raise Fog::AWS::Lambda::Error, message end policy = { 'Version' => '2012-10-17', 'Statement' => statements, 'Id' => 'default' } response.status = 200 response.body = { 'Policy' => policy } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/invoke.rb000066400000000000000000000062361437344660100222130ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Invokes a specified Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html # ==== Parameters # * ClientContext <~Hash> - client-specific information to the Lambda function you are invoking. # * FunctionName <~String> - Lambda function name. # * InvocationType <~String> - function invocation type. # * LogType <~String> - logs format for function calls of "RequestResponse" invocation type. # * Payload <~Integer> - Lambda function input. # ==== Returns # * response<~Excon::Response>: # * body<~Hash> - JSON representation of the object returned by the Lambda function. 
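        # ==== Example
        # A minimal usage sketch; the function name and payload are hypothetical, and `client` is
        # assumed to be a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   client.invoke(
        #     'FunctionName'   => 'my-function',
        #     'InvocationType' => 'RequestResponse',
        #     'LogType'        => 'Tail',
        #     'Payload'        => { 'key' => 'value' }
        #   )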
def invoke(params={}) headers = {} if client_context = params.delete('ClientContext') headers['X-Amz-Client-Context'] = Base64::encode64(Fog::JSON.encode(client_context)) end if invocation_type = params.delete('InvocationType') headers['X-Amz-Invocation-Type'] = invocation_type end if log_type = params.delete('LogType') headers['X-Amz-Log-Type'] = log_type end payload = Fog::JSON.encode(params.delete('Payload')) function_name = params.delete('FunctionName') request({ :method => 'POST', :path => "/functions/#{function_name}/invocations", :headers => headers, :body => payload, :expects => [200, 202, 204] }.merge(params)) end end class Mock def invoke(params={}) response = Excon::Response.new response.status = 200 response.body = '' unless function_id = params.delete('FunctionName') message = 'AccessDeniedException => ' message << 'Unable to determine service/operation name to be authorized' raise Fog::AWS::Lambda::Error, message end client_context = params.delete('ClientContext') invocation_type = params.delete('InvocationType') log_type = params.delete('LogType') payload = params.delete('Payload') if (client_context || invocation_type || log_type) message = "invoke parameters handling are not yet mocked [light_black](#{caller.first})[/]" Fog::Logger.warning message Fog::Mock.not_implemented end if payload message = "payload parameter is ignored since we are not really " message << "invoking a function [light_black](#{caller.first})[/]" Fog::Logger.warning message end function = self.get_function_configuration('FunctionName' => function_id).body if function.is_a?(Hash) && function.has_key?('FunctionArn') response.body = "\"Imagine #{function['FunctionArn']} was invoked\"" else message = "ResourceNotFoundException => Function not found: #{function_id}" raise Fog::AWS::Lambda::Error, message end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/list_event_source_mappings.rb000066400000000000000000000052651437344660100263530ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Returns a list of event source mappings where you can identify a stream as an event source. # http://docs.aws.amazon.com/lambda/latest/dg/API_ListEventSourceMappings.html # ==== Parameters # * EventSourceArn <~String> - Amazon Resource Name (ARN) of the stream. # * FunctionName <~String> - name of the Lambda function. # * Marker <~String> - opaque pagination token returned from a previous ListEventSourceMappings operation. # * MaxItems <~Integer> - maximum number of event sources to return in response. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'EventSourceMappings' <~Array> - array of EventSourceMappingConfiguration objects. # * 'NextMarker' <~String> - present if there are more event source mappings. 
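        # ==== Example
        # A minimal usage sketch; the function name is hypothetical, and `client` is assumed to be
        # a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   client.list_event_source_mappings(
        #     'FunctionName' => 'my-function',
        #     'MaxItems'     => 50
        #   ).body['EventSourceMappings']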
def list_event_source_mappings(params={}) event_source_arn = params.delete('EventSourceArn') function_name = params.delete('FunctionName') marker = params.delete('Marker') max_items = params.delete('MaxItems') query = {} query.merge!('EventSourceArn' => event_source_arn) if event_source_arn query.merge!('FunctionName' => function_name) if function_name query.merge!('Marker' => marker) if marker query.merge!('MaxItems' => max_items) if max_items request({ :method => 'GET', :path => '/event-source-mappings/', :query => query }.merge(params)) end end class Mock def list_event_source_mappings(params={}) response = Excon::Response.new response.status = 200 function_name = params.delete('FunctionName') begin function = self.get_function_configuration('FunctionName' => function_name).body function_arn = function['FunctionArn'] rescue Fog::AWS::Lambda::Error => e # interestingly enough, if you try to do a list_event_source_mappings # on a nonexisting function, Lambda API endpoint doesn't return # error, just an empty array. end event_source_mappings = [] if function_arn event_source_mappings = self.data[:event_source_mappings].values.select do |m| m['FunctionArn'].eql?(function_arn) end end response.body = { 'EventSourceMappings' => event_source_mappings, 'NextMarker' => nil } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/list_functions.rb000066400000000000000000000024401437344660100237540ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Returns a list of your Lambda functions. # http://docs.aws.amazon.com/lambda/latest/dg/API_ListFunctions.html # ==== Parameters # * Marker <~String> - opaque pagination token returned from a previous ListFunctions operation. If present, indicates where to continue the listing. # * MaxItems <~Integer> - Specifies the maximum number of AWS Lambda functions to return in response. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Functions' <~Array> - list of Lambda functions. # * 'NextMarker' <~String> - present if there are more functions. def list_functions(params={}) request({ :method => 'GET', :path => '/functions/', :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def list_functions(params={}) response = Excon::Response.new response.status = 200 response.body = { 'Functions' => self.data[:functions].values, 'NextMarker' => nil } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/remove_permission.rb000066400000000000000000000037721437344660100244670ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Remove individual permissions from an access policy associated with a Lambda function by providing a Statement ID. # http://docs.aws.amazon.com/lambda/latest/dg/API_RemovePermission.html # ==== Parameters # * FunctionName <~String> - Lambda function whose access policy you want to remove a permission from. # * StatementId <~String> - Statement ID of the permission to remove. 
# ==== Returns # * response<~Excon::Response>: # * body<~String>: def remove_permission(params={}) function_name = params.delete('FunctionName') statement_id = params.delete('StatementId') request({ :method => 'DELETE', :path => "/functions/#{function_name}/versions/HEAD/policy/#{statement_id}", :expects => 204 }.merge(params)) end end class Mock def remove_permission(params={}) function_name = params.delete('FunctionName') opts = { 'FunctionName' => function_name } function = self.get_function_configuration(opts).body function_arn = function['FunctionArn'] statement_id = params.delete('StatementId') message = 'Statement ID cannot be blank' raise Fog::AWS::Lambda::Error, message unless statement_id permissions_qty = self.data[:permissions][function_arn].size self.data[:permissions][function_arn].delete_if do |s| s['Sid'].eql?(statement_id) end if self.data[:permissions][function_arn].size.eql?(permissions_qty) message = "ResourceNotFoundException => " message << "The resource you requested does not exist." raise Fog::AWS::Lambda::Error, message end response = Excon::Response.new response.status = 204 response.body = '' response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/update_event_source_mapping.rb000066400000000000000000000073501437344660100264740ustar00rootroot00000000000000module Fog module AWS class Lambda class Real # Change the parameters of the existing mapping without losing your position in the stream. # http://docs.aws.amazon.com/lambda/latest/dg/API_UpdateEventSourceMapping.html # ==== Parameters # * UUID <~String> - event source mapping identifier. # * BatchSize <~Integer> - maximum number of stream records that can be sent to your Lambda function for a single invocation. # * Enabled <~Boolean> - specifies whether AWS Lambda should actively poll the stream or not. # * FunctionName <~String> - Lambda function to which you want the stream records sent. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BatchSize' <~Integer> - largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function. # * 'EventSourceArn' <~String> - Amazon Resource Name (ARN) of the stream that is the source of events. # * 'FunctionArn' <~String> - Lambda function to invoke when AWS Lambda detects an event on the stream. # * 'LastModified' <~Time> - UTC time string indicating the last time the event mapping was updated. # * 'LastProcessingResult' <~String> - result of the last AWS Lambda invocation of your Lambda function. # * 'State' <~String> - state of the event source mapping. # * 'StateTransitionReason' <~String> - reason the event source mapping is in its current state. # * 'UUID' <~String> - AWS Lambda assigned opaque identifier for the mapping. def update_event_source_mapping(params={}) function_name = params.delete('FunctionName') mapping_id = params.delete('UUID') batch_size = params.delete('BatchSize') enabled = params.delete('Enabled') update = {} update.merge!('BatchSize' => batch_size) if batch_size update.merge!('Enabled' => enabled) if !enabled.nil? 
update.merge!('FunctionName' => function_name) if function_name request({ :method => 'PUT', :path => "/event-source-mappings/#{mapping_id}", :expects => 202, :body => Fog::JSON.encode(update) }.merge(params)) end end class Mock def update_event_source_mapping(params={}) mapping_id = params.delete('UUID') mapping = self.data[:event_source_mappings][mapping_id] unless mapping message = 'ResourceNotFoundException => ' message << 'The resource you requested does not exist.' raise Fog::AWS::Lambda::Error, message end function_name = params.delete('FunctionName') function = {} if function_name function_opts = { 'FunctionName' => function_name } function = self.get_function_configuration(function_opts).body function_arn = function['FunctionArn'] end batch_size = params.delete('BatchSize') enabled = params.delete('Enabled') update = {} if function_name && !function.empty? && function_arn update.merge!('FunctionArn' => function_arn) end update.merge!('BatchSize' => batch_size) if batch_size update.merge!('Enabled' => enabled) if !enabled.nil? mapping.merge!(update) mapping['State'] = 'Disabling' if enabled.eql?(false) mapping['State'] = 'Enabling' if enabled.eql?(true) response = Excon::Response.new response.status = 202 response.body = mapping response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/update_function_code.rb000066400000000000000000000066611437344660100251030ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Updates the code for the specified Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_UpdateFunctionCode.html # ==== Parameters # * FunctionName <~String> - existing Lambda function name whose code you want to replace. # * S3Bucket <~String> - Amazon S3 bucket name where the .zip file containing your deployment package is stored. # * S3Key <~String> - Amazon S3 object (the deployment package) key name you want to upload. # * S3ObjectVersion <~String> - Amazon S3 object (the deployment package) version you want to upload. # * ZipFile <~String> - Based64-encoded .zip file containing your packaged source code. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'CodeSize' <~Integer> - size, in bytes, of the function .zip file you uploaded. # * 'Description' <~String> - user-provided description. # * 'FunctionArn' <~String> - Amazon Resource Name (ARN) assigned to the function. # * 'FunctionName' <~String> - name of the function. # * 'Handler' <~String> - function Lambda calls to begin executing your function. # * 'LastModified' <~Time> - timestamp of the last time you updated the function. # * 'Memorysize' <~String> - memory size, in MB, you configured for the function. # * 'Role' <~String> - ARN of the IAM role that Lambda assumes when it executes your function to access any other AWS resources. # * 'Runtime' <~String> - runtime environment for the Lambda function. # * 'Timeout' <~Integer> - function execution time at which Lambda should terminate the function. 
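        # ==== Example
        # A minimal usage sketch; the bucket, key and function name are hypothetical, and `client`
        # is assumed to be a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   client.update_function_code(
        #     'FunctionName' => 'my-function',
        #     'S3Bucket'     => 'my-deploy-bucket',
        #     'S3Key'        => 'builds/my-function.zip'
        #   )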
def update_function_code(params={}) function_name = params.delete('FunctionName') s3_bucket = params.delete('S3Bucket') s3_key = params.delete('S3Key') s3_object_ver = params.delete('S3ObjectVersion') zip_file = params.delete('ZipFile') update = {} update.merge!('S3Bucket' => s3_bucket) if s3_bucket update.merge!('S3Key' => s3_key) if s3_key update.merge!('S3ObjectVersion' => s3_object_ver) if s3_object_ver update.merge!('ZipFile' => zip_file) if zip_file request({ :method => 'PUT', :path => "/functions/#{function_name}/versions/HEAD/code", :body => Fog::JSON.encode(update), :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def update_function_code(params={}) response = self.get_function_configuration(params) request_data = [] %w(S3Bucket S3Key S3ObjectVersion ZipFile).each do |p| request_data << params.delete(p) if params.has_key?(p) end message = 'Please provide a source for function code.' raise Fog::AWS::Lambda::Error, message if request_data.empty? # we ignore any parameters since we are not uploading any code function_arn = response.body['FunctionArn'] response = Excon::Response.new response.status = 200 response.body = self.data[:functions][function_arn] response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/lambda/update_function_configuration.rb000066400000000000000000000075321437344660100270360ustar00rootroot00000000000000module Fog module AWS class Lambda class Real require 'fog/aws/parsers/lambda/base' # Updates the configuration parameters for the specified Lambda function. # http://docs.aws.amazon.com/lambda/latest/dg/API_UpdateFunctionConfiguration.html # ==== Parameters # * FunctionName <~String> - name of the Lambda function. # * Description <~String> - short user-defined function description. # * Handler <~String> - function that Lambda calls to begin executing your function. # * MemorySize <~Integer> - amount of memory, in MB, your Lambda function is given. # * Role <~String> - ARN of the IAM role that Lambda will assume when it executes your function. # * Timeout <~Integer> - function execution time at which AWS Lambda should terminate the function. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'CodeSize' <~Integer> - size, in bytes, of the function .zip file you uploaded. # * 'Description' <~String> - user-provided description. # * 'FunctionArn' <~String> - Amazon Resource Name (ARN) assigned to the function. # * 'FunctionName' <~String> - name of the function. # * 'Handler' <~String> - function Lambda calls to begin executing your function. # * 'LastModified' <~Time> - timestamp of the last time you updated the function. # * 'Memorysize' <~String> - memory size, in MB, you configured for the function. # * 'Role' <~String> - ARN of the IAM role that Lambda assumes when it executes your function to access any other AWS resources. # * 'Runtime' <~String> - runtime environment for the Lambda function. # * 'Timeout' <~Integer> - function execution time at which Lambda should terminate the function. 
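        # ==== Example
        # A minimal usage sketch; the function name and values are hypothetical, and `client` is
        # assumed to be a Lambda connection (e.g. built with Fog::AWS::Lambda.new(...)).
        #   client.update_function_configuration(
        #     'FunctionName' => 'my-function',
        #     'MemorySize'   => 256,
        #     'Timeout'      => 30
        #   )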
def update_function_configuration(params={}) function_name = params.delete('FunctionName') description = params.delete('Description') handler = params.delete('Handler') memory_size = params.delete('MemorySize') role = params.delete('Role') timeout = params.delete('Timeout') update = {} update.merge!('Description' => description) if description update.merge!('Handler' => handler) if handler update.merge!('MemorySize' => memory_size) if memory_size update.merge!('Role' => role) if role update.merge!('Timeout' => timeout) if timeout request({ :method => 'PUT', :path => "/functions/#{function_name}/versions/HEAD/configuration", :body => Fog::JSON.encode(update), :parser => Fog::AWS::Parsers::Lambda::Base.new }.merge(params)) end end class Mock def update_function_configuration(params={}) response = self.get_function_configuration(params) function_arn = response.body['FunctionArn'] description = params.delete('Description') handler = params.delete('Handler') memory_size = params.delete('MemorySize') role = params.delete('Role') timeout = params.delete('Timeout') update = {} update.merge!('Description' => description) if description update.merge!('Handler' => handler) if handler update.merge!('MemorySize' => memory_size) if memory_size update.merge!('Role' => role) if role update.merge!('Timeout' => timeout) if timeout self.data[:functions][function_arn].merge!(update) response = Excon::Response.new response.status = 200 response.body = self.data[:functions][function_arn] response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/000077500000000000000000000000001437344660100177345ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/rds/add_tags_to_resource.rb000066400000000000000000000031461437344660100244440ustar00rootroot00000000000000module Fog module AWS class RDS class Real # adds tags to a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_AddTagsToResource.html # ==== Parameters # * rds_id <~String> - name of the RDS instance whose tags are to be retrieved # * tags <~Hash> A Hash of (String) key-value pairs # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def add_tags_to_resource(rds_id, tags) keys = tags.keys.sort values = keys.map { |key| tags[key] } resource_name = "arn:aws:rds:#{@region}:#{owner_id}:db:#{rds_id}" %w[us-gov-west-1 us-gov-east-1].include?(@region) ? resource_name.insert(7, '-us-gov') : resource_name request({ 'Action' => 'AddTagsToResource', 'ResourceName' => resource_name, :parser => Fog::Parsers::AWS::RDS::Base.new }.merge(Fog::AWS.indexed_param('Tags.member.%d.Key', keys)) .merge(Fog::AWS.indexed_param('Tags.member.%d.Value', values))) end end class Mock def add_tags_to_resource(rds_id, tags) response = Excon::Response.new if server = data[:servers][rds_id] data[:tags][rds_id].merge! 
tags response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::RDS::NotFound, "DBInstance #{rds_id} not found" end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/authorize_db_security_group_ingress.rb000066400000000000000000000070011437344660100276330ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/authorize_db_security_group_ingress' # authorizes a db security group ingress # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_AuthorizeDBSecurityGroupIngress.html # ==== Parameters # * CIDRIP <~String> - The IP range to authorize # * DBSecurityGroupName <~String> - The name for the DB Security Group. # * EC2SecurityGroupName <~String> - Name of the EC2 Security Group to authorize. # * EC2SecurityGroupOwnerId <~String> - AWS Account Number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def authorize_db_security_group_ingress(name, opts={}) unless opts.key?('CIDRIP') || ((opts.key?('EC2SecurityGroupName') || opts.key?('EC2SecurityGroupId')) && opts.key?('EC2SecurityGroupOwnerId')) raise ArgumentError, 'Must specify CIDRIP, or one of EC2SecurityGroupName or EC2SecurityGroupId, and EC2SecurityGroupOwnerId' end request({ 'Action' => 'AuthorizeDBSecurityGroupIngress', :parser => Fog::Parsers::AWS::RDS::AuthorizeDBSecurityGroupIngress.new, 'DBSecurityGroupName' => name }.merge(opts)) end end class Mock def authorize_db_security_group_ingress(name, opts = {}) unless opts.key?('CIDRIP') || ((opts.key?('EC2SecurityGroupName') || opts.key?('EC2SecurityGroupId')) && opts.key?('EC2SecurityGroupOwnerId')) raise ArgumentError, 'Must specify CIDRIP, or one of EC2SecurityGroupName or EC2SecurityGroupId, and EC2SecurityGroupOwnerId' end if ec2_security_group_id = opts.delete("EC2SecurityGroupId") ec2_security_group = (Fog::AWS::Compute::Mock.data[self.region][self.aws_access_key_id][:security_groups] || {}).values.detect { |sg| sg['groupId'] == ec2_security_group_id } opts['EC2SecurityGroupName'] = ec2_security_group['groupName'] end response = Excon::Response.new if sec_group = self.data[:security_groups][name] if opts.key?('CIDRIP') if sec_group['IPRanges'].find{|h| h['CIDRIP'] == opts['CIDRIP']} raise Fog::AWS::RDS::AuthorizationAlreadyExists.new("AuthorizationAlreadyExists => #{opts['CIDRIP']} is alreay defined") end sec_group['IPRanges'] << opts.merge({"Status" => 'authorizing'}) else if sec_group['EC2SecurityGroups'].find{|h| h['EC2SecurityGroupName'] == opts['EC2SecurityGroupName'] || h['EC2SecurityGroupId'] == opts['EC2SecurityGroupId']} raise Fog::AWS::RDS::AuthorizationAlreadyExists.new("AuthorizationAlreadyExists => #{opts['EC2SecurityGroupName']} is alreay defined") end sec_group['EC2SecurityGroups'] << opts.merge({"Status" => 'authorizing'}) end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, 'AuthorizeDBSecurityGroupIngressResult' => { 'DBSecurityGroup' => sec_group } } response else raise Fog::AWS::RDS::NotFound.new("DBSecurityGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/copy_db_snapshot.rb000066400000000000000000000037071437344660100236260ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/copy_db_snapshot' # Copy 
a db snapshot # # ==== Parameters # * source_db_snapshot_identifier<~String> - Id of db snapshot # * target_db_snapshot_identifier<~String> - Desired Id of the db snapshot copy # * 'copy_tags'<~Boolean> - true to copy all tags from the source DB snapshot to the target DB snapshot; otherwise false. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # # {Amazon API Reference}[http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CopyDBSnapshot.html] def copy_db_snapshot(source_db_snapshot_identifier, target_db_snapshot_identifier, copy_tags = false) request( 'Action' => 'CopyDBSnapshot', 'SourceDBSnapshotIdentifier' => source_db_snapshot_identifier, 'TargetDBSnapshotIdentifier' => target_db_snapshot_identifier, 'CopyTags' => copy_tags, :parser => Fog::Parsers::AWS::RDS::CopyDBSnapshot.new ) end end class Mock # # Usage # # Fog::AWS[:rds].copy_db_snapshot("snap-original-id", "snap-backup-id", true) # def copy_db_snapshot(source_db_snapshot_identifier, target_db_snapshot_identifier, copy_tags = false) response = Excon::Response.new response.status = 200 snapshot_id = Fog::AWS::Mock.snapshot_id data = self.data[:snapshots]["#{source_db_snapshot_identifier}"] data['DBSnapshotIdentifier'] = snapshot_id self.data[:snapshots][snapshot_id] = data response.body = { 'requestId' => Fog::AWS::Mock.request_id, 'CopyDBSnapshotResult' => {'DBSnapshot' => data.dup} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_cluster.rb000066400000000000000000000126211437344660100237340ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_cluster' # Create a db cluster # # @see http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html # # ==== Parameters ==== # * AvailabilityZones<~Array> - A list of EC2 Availability Zones that instances in the DB cluster can be created in # * BackupRetentionPeriod<~String> - The number of days for which automated backups are retained # * CharacterSetName<~String> - A value that indicates that the DB cluster should be associated with the specified CharacterSet # * DatabaseName<~String> - The name for your database of up to 8 alpha-numeric characters. If you do not provide a name, Amazon RDS will not create a database in the DB cluster you are creating # * DBClusterIdentifier<~String> - The DB cluster identifier. 
This parameter is stored as a lowercase string # * DBClusterParameterGroupName<~String> - The name of the DB cluster parameter group to associate with this DB cluster # * DBSubnetGroupName<~String> - A DB subnet group to associate with this DB cluster # * Engine<~String> - The name of the database engine to be used for this DB cluster # * EngineVersion<~String> - The version number of the database engine to use # * KmsKeyId<~String> - The KMS key identifier for an encrypted DB cluster # * MasterUsername<~String> - The name of the master user for the client DB cluster # * MasterUserPassword<~String> - The password for the master database user # * OptionGroupName<~String> - A value that indicates that the DB cluster should be associated with the specified option group # * Port<~Integer> - The port number on which the instances in the DB cluster accept connections # * PreferredBackupWindow<~String> - The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter # * PreferredMaintenanceWindow<~String> - The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC) # * StorageEncrypted<~Boolean> - Specifies whether the DB cluster is encrypted # * Tags<~Array> - A list of tags # * VpcSecurityGroups<~Array> - A list of EC2 VPC security groups to associate with this DB cluster # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def create_db_cluster(cluster_name, options={}) if security_groups = options.delete('VpcSecurityGroups') options.merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*security_groups])) end request({ 'Action' => 'CreateDBCluster', 'DBClusterIdentifier' => cluster_name, :parser => Fog::Parsers::AWS::RDS::CreateDBCluster.new, }.merge(options)) end end class Mock def create_db_cluster(cluster_name, options={}) response = Excon::Response.new if self.data[:clusters][cluster_name] raise Fog::AWS::RDS::IdentifierTaken.new("DBClusterAlreadyExists") end required_params = %w(Engine MasterUsername MasterUserPassword) required_params.each do |key| unless options.key?(key) && options[key] && !options[key].to_s.empty? 
raise Fog::AWS::RDS::NotFound.new("The request must contain the parameter #{key}") end end vpc_security_groups = Array(options.delete("VpcSecurityGroups")).map do |group_id| {"VpcSecurityGroupId" => group_id } end data = { 'AllocatedStorage' => "1", 'BackupRetentionPeriod' => (options["BackupRetentionPeriod"] || 35).to_s, 'ClusterCreateTime' => Time.now, 'DBClusterIdentifier' => cluster_name, 'DBClusterMembers' => [], 'DBClusterParameterGroup' => options['DBClusterParameterGroup'] || "default.aurora5.6", 'DBSubnetGroup' => options["DBSubnetGroup"] || "default", 'Endpoint' => "#{cluster_name}.cluster-#{Fog::Mock.random_hex(8)}.#{@region}.rds.amazonaws.com", 'Engine' => options["Engine"] || "aurora5.6", 'EngineVersion' => options["EngineVersion"] || "5.6.10a", 'MasterUsername' => options["MasterUsername"], 'Port' => options["Port"] || "3306", 'PreferredBackupWindow' => options["PreferredBackupWindow"] || "04:45-05:15", 'PreferredMaintenanceWindow' => options["PreferredMaintenanceWindow"] || "sat:05:56-sat:06:26", 'Status' => "available", 'StorageEncrypted' => options["StorageEncrypted"] || false, 'VpcSecurityGroups' => vpc_security_groups, } self.data[:clusters][cluster_name] = data response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "CreateDBClusterResult" => { "DBCluster" => data.dup.reject { |k,v| k == 'ClusterCreateTime' } } } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_cluster_snapshot.rb000066400000000000000000000051521437344660100256540ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_cluster_snapshot' # create a snapshot of a db cluster # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBClusterSnapshot.html # # ==== Parameters ==== # * DBClusterIdentifier<~String> - The identifier of the DB cluster to create a snapshot for # * DBClusterSnapshotIdentifier<~String> - The identifier of the DB cluster snapshot # * Tags<~Array> - The tags to be assigned to the DB cluster snapshot # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def create_db_cluster_snapshot(identifier, name) request( 'Action' => 'CreateDBClusterSnapshot', 'DBClusterIdentifier' => identifier, 'DBClusterSnapshotIdentifier' => name, :parser => Fog::Parsers::AWS::RDS::CreateDBClusterSnapshot.new ) end end class Mock def create_db_cluster_snapshot(identifier, name) response = Excon::Response.new if data[:cluster_snapshots][name] raise Fog::AWS::RDS::IdentifierTaken.new end cluster = self.data[:clusters][identifier] raise Fog::AWS::RDS::NotFound.new("DBCluster #{identifier} not found") unless cluster data = { 'AllocatedStorage' => cluster['AllocatedStorage'].to_i, 'ClusterCreateTime' => cluster['ClusterCreateTime'], 'DBClusterIdentifier' => identifier, 'DBClusterSnapshotIdentifier' => name, 'Engine' => cluster['Engine'], 'EngineVersion' => cluster['EngineVersion'], 'LicenseModel' => cluster['Engine'], 'MasterUsername' => cluster['MasterUsername'], 'SnapshotCreateTime' => Time.now, 'SnapshotType' => 'manual', 'StorageEncrypted' => cluster["StorageEncrypted"], 'Status' => 'creating', } self.data[:cluster_snapshots][name] = data response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "CreateDBClusterSnapshotResult" => { "DBClusterSnapshot" => data.dup }, } self.data[:cluster_snapshots][name]['SnapshotCreateTime'] = Time.now response end end end end end 
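# ==== Usage sketch for create_db_cluster_snapshot
# The cluster and snapshot identifiers below are hypothetical; the call follows the
# Fog::AWS[:rds] convention used elsewhere in this gem.
#
#   Fog::AWS[:rds].create_db_cluster_snapshot('my-aurora-cluster', 'my-aurora-cluster-nightly')
#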
fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_instance.rb000066400000000000000000000261221437344660100240600ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_instance' # Create a db instance # # @param DBInstanceIdentifier [String] name of the db instance to modify # @param AllocatedStorage [Integer] Storage space, in GB # @param AutoMinorVersionUpgrade [Boolean] Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window # @param AvailabilityZone [String] The availability zone to create the instance in # @param BackupRetentionPeriod [Integer] 0-8 The number of days to retain automated backups. # @param DBInstanceClass [String] The new compute and memory capacity of the DB Instance # @param DBName [String] The name of the database to create when the DB Instance is created # @param DBParameterGroupName [String] The name of the DB Parameter Group to apply to this DB Instance # @param DBSecurityGroups [Array] A list of DB Security Groups to authorize on this DB Instance # @param Engine [String] The name of the database engine to be used for this instance. # @param EngineVersion [String] The version number of the database engine to use. # @param Iops [Integer] IOPS rate # @param MasterUsername [String] The db master user # @param MasterUserPassword [String] The new password for the DB Instance master user # @param MultiAZ [Boolean] Specifies if the DB Instance is a Multi-AZ deployment # @param Port [Integer] The port number on which the database accepts connections. # @param PreferredBackupWindow [String] The daily time range during which automated backups are created if automated backups are enabled # @param PreferredMaintenanceWindow [String] The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage # @param DBSubnetGroupName [String] The name, if any, of the VPC subnet for this RDS instance # @param PubliclyAccessible [Boolean] Whether an RDS instance inside of the VPC subnet should have a public-facing endpoint # @param VpcSecurityGroups [Array] A list of VPC Security Groups to authorize on this DB instance # @param StorageType [string] Specifies storage type to be associated with the DB Instance. 
Valid values: standard | gp2 | io1 # # @return [Excon::Response]: # * body [Hash]: # # @see http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html def create_db_instance(db_name, options={}) if security_groups = options.delete('DBSecurityGroups') options.merge!(Fog::AWS.indexed_param('DBSecurityGroups.member.%d', [*security_groups])) end if vpc_security_groups = options.delete('VpcSecurityGroups') options.merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*vpc_security_groups])) end request({ 'Action' => 'CreateDBInstance', 'DBInstanceIdentifier' => db_name, :parser => Fog::Parsers::AWS::RDS::CreateDBInstance.new, }.merge(options)) end end class Mock def create_db_instance(db_name, options={}) response = Excon::Response.new if self.data[:servers] and self.data[:servers][db_name] # I don't know how to raise an exception that contains the excon data #response.status = 400 #response.body = { # 'Code' => 'DBInstanceAlreadyExists', # 'Message' => "DB Instance already exists" #} #return response raise Fog::AWS::RDS::IdentifierTaken.new("DBInstanceAlreadyExists #{response.body.to_s}") end # These are the required parameters according to the API required_params = %w(DBInstanceClass Engine) required_params += %w{AllocatedStorage DBInstanceClass Engine MasterUserPassword MasterUsername } unless options["DBClusterIdentifier"] required_params.each do |key| unless options.key?(key) and options[key] and !options[key].to_s.empty? #response.status = 400 #response.body = { # 'Code' => 'MissingParameter', # 'Message' => "The request must contain the parameter #{key}" #} #return response raise Fog::AWS::RDS::NotFound.new("The request must contain the parameter #{key}") end end if !!options["MultiAZ"] && !!options["AvailabilityZone"] raise Fog::AWS::RDS::InvalidParameterCombination.new('Requesting a specific availability zone is not valid for Multi-AZ instances.') end ec2 = Fog::AWS::Compute::Mock.data[@region][@aws_access_key_id] db_parameter_groups = if pg_name = options.delete("DBParameterGroupName") group = self.data[:parameter_groups][pg_name] if group [{"DBParameterGroupName" => pg_name, "ParameterApplyStatus" => "in-sync" }] else raise Fog::AWS::RDS::NotFound.new("Parameter group does not exist") end else [{ "DBParameterGroupName" => "default.mysql5.5", "ParameterApplyStatus" => "in-sync" }] end db_security_group_names = Array(options.delete("DBSecurityGroups")) rds_security_groups = self.data[:security_groups].values ec2_security_groups = ec2[:security_groups].values vpc = !ec2[:account_attributes].find { |h| "supported-platforms" == h["attributeName"] }["values"].include?("EC2") db_security_groups = db_security_group_names.map do |group_name| unless rds_security_groups.find { |sg| sg["DBSecurityGroupName"] == group_name } raise Fog::AWS::RDS::Error.new("InvalidParameterValue => Invalid security group , groupId= , groupName=#{group_name}") end {"Status" => "active", "DBSecurityGroupName" => group_name } end if !vpc && db_security_groups.empty? db_security_groups << { "Status" => "active", "DBSecurityGroupName" => "default" } end vpc_security_groups = Array(options.delete("VpcSecurityGroups")).map do |group_id| unless ec2_security_groups.find { |sg| sg["groupId"] == group_id } raise Fog::AWS::RDS::Error.new("InvalidParameterValue => Invalid security group , groupId=#{group_id} , groupName=") end {"Status" => "active", "VpcSecurityGroupId" => group_id } end if options["Engine"] == "aurora" && ! 
options["DBClusterIdentifier"] raise Fog::AWS::RDS::Error.new("InvalidParameterStateValue => Standalone instances for this engine are not supported") end if cluster_id = options["DBClusterIdentifier"] if vpc_security_groups.any? raise Fog::AWS::RDS::Error.new("InvalidParameterCombination => The requested DB Instance will be a member of a DB Cluster and its vpc security group should not be set directly.") end if options["MultiAZ"] raise Fog::AWS::RDS::Error.new("InvalidParameterCombination => VPC Multi-AZ DB Instances are not available for engine: aurora") end %w(AllocatedStorage BackupRetentionPeriod MasterUsername MasterUserPassword).each do |forbidden| raise Fog::AWS::RDS::Error.new("InvalidParameterCombination => The requested DB Instance will be a member of a DB Cluster and its #{forbidden} should not be set directly.") if options[forbidden] end options["StorageType"] = "aurora" cluster = self.data[:clusters][cluster_id] member = {"DBInstanceIdentifier" => db_name, "master" => cluster['DBClusterMembers'].empty?} cluster['DBClusterMembers'] << member self.data[:clusters][cluster_id] = cluster end data = { "AllocatedStorage" => options["AllocatedStorage"], "AutoMinorVersionUpgrade" => options["AutoMinorVersionUpgrade"].nil? ? true : options["AutoMinorVersionUpgrade"], "AvailabilityZone" => options["AvailabilityZone"], "BackupRetentionPeriod" => options["BackupRetentionPeriod"] || 1, "CACertificateIdentifier" => "rds-ca-2015", "DBClusterIdentifier" => options["DBClusterIdentifier"], "DBInstanceClass" => options["DBInstanceClass"], "DBInstanceIdentifier" => db_name, "DBInstanceStatus" =>"creating", "DBName" => options["DBName"], "DBParameterGroups" => db_parameter_groups, "DBSecurityGroups" => db_security_groups, "DBSubnetGroupName" => options["DBSubnetGroupName"], "Endpoint" =>{}, "Engine" => options["Engine"], "EngineVersion" => options["EngineVersion"] || "5.5.12", "InstanceCreateTime" => nil, "Iops" => options["Iops"], "LicenseModel" => "general-public-license", "MasterUsername" => cluster_id ? cluster["MasterUsername"] : options["MasterUsername"], "MultiAZ" => !!options["MultiAZ"], "PendingModifiedValues" => { "MasterUserPassword" => "****" }, # This clears when is available "PreferredBackupWindow" => options["PreferredBackupWindow"] || "08:00-08:30", "PreferredMaintenanceWindow" => options["PreferredMaintenanceWindow"] || "mon:04:30-mon:05:00", "PubliclyAccessible" => !!options["PubliclyAccessible"], "ReadReplicaDBInstanceIdentifiers" => [], "StorageEncrypted" => cluster_id ? 
cluster["StorageEncrypted"] : (options["StorageEncrypted"] || false), "StorageType" => options["StorageType"] || "standard", "VpcSecurityGroups" => vpc_security_groups, } self.data[:servers][db_name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CreateDBInstanceResult"=> {"DBInstance"=> data} } response.status = 200 # This values aren't showed at creating time but at available time self.data[:servers][db_name]["InstanceCreateTime"] = Time.now self.data[:tags] ||= {} self.data[:tags][db_name] = {} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_instance_read_replica.rb000066400000000000000000000111501437344660100265450ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_instance_read_replica' # create a read replica db instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_CreateDBInstanceReadReplica.html # ==== Parameters # * DBInstanceIdentifier <~String> - name of the db instance to create # * SourceDBInstanceIdentifier <~String> - name of the db instance that will be the source. Must have backup retention on # * AutoMinorVersionUpgrade <~Boolean> Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window # * AvailabilityZone <~String> The availability zone to create the instance in # * DBInstanceClass <~String> The new compute and memory capacity of the DB Instance # * Port <~Integer> The port number on which the database accepts connections. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_db_instance_read_replica(instance_identifier, source_identifier, options={}) request({ 'Action' => 'CreateDBInstanceReadReplica', 'DBInstanceIdentifier' => instance_identifier, 'SourceDBInstanceIdentifier' => source_identifier, :parser => Fog::Parsers::AWS::RDS::CreateDBInstanceReadReplica.new, }.merge(options)) end end class Mock def create_db_instance_read_replica(instance_identifier, source_identifier, options={}) # TODO: throw error when instance_identifier already exists, # or source_identifier doesn't exist source = self.data[:servers][source_identifier] data = { 'AllocatedStorage' => source['AllocatedStorage'], 'AutoMinorVersionUpgrade' => options.key?('AutoMinorVersionUpgrade') ? 
options['AutoMinorVersionUpgrade'] : source['AutoMinorVersionUpgrade'], 'AvailabilityZone' => options['AvailabilityZone'], 'BackupRetentionPeriod' => options['BackupRetentionPeriod'] || 0, 'CACertificateIdentifier' => "rds-ca-2015", 'DBInstanceClass' => options['DBInstanceClass'] || 'db.m1.small', 'DBInstanceIdentifier' => instance_identifier, 'DBInstanceStatus' => 'creating', 'DBName' => source['DBName'], 'DBParameterGroups' => source['DBParameterGroups'], 'DBSecurityGroups' => source['DBSecurityGroups'], 'Endpoint' => {}, 'Engine' => source['Engine'], 'EngineVersion' => source['EngineVersion'], 'InstanceCreateTime' => nil, 'Iops' => source['Iops'], 'LatestRestorableTime' => nil, 'LicenseModel' => 'general-public-license', 'MasterUsername' => source['MasterUsername'], 'MultiAZ' => false, 'PendingModifiedValues' => {}, 'PreferredBackupWindow' => '08:00-08:30', 'PreferredMaintenanceWindow' => "mon:04:30-mon:05:00", 'PubliclyAccessible' => !!options["PubliclyAccessible"], 'ReadReplicaDBInstanceIdentifiers' => [], 'ReadReplicaSourceDBInstanceIdentifier' => source_identifier, 'StorageType' => options['StorageType'] || 'standard', 'StorageEncrypted' => false, 'VpcSecurityGroups' => source['VpcSecurityGroups'], } self.data[:servers][instance_identifier] = data self.data[:servers][source_identifier]['ReadReplicaDBInstanceIdentifiers'] << instance_identifier response = Excon::Response.new response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CreateDBInstanceReadReplicaResult"=> {"DBInstance"=> data} } response.status = 200 # This values aren't showed at creating time but at available time self.data[:servers][instance_identifier]["InstanceCreateTime"] = Time.now response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_parameter_group.rb000066400000000000000000000036261437344660100254540ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_parameter_group' # create a database parameter group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html # ==== Parameters # * DBParameterGroupName <~String> - name of the parameter group # * DBParameterGroupFamily <~String> - The DB parameter group family name. 
Current valid values: MySQL5.1 | MySQL5.5 # * Description <~String> - The description for the DB Parameter Grou # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_db_parameter_group(group_name, group_family, description) request({ 'Action' => 'CreateDBParameterGroup', 'DBParameterGroupName' => group_name, 'DBParameterGroupFamily' => group_family, 'Description' => description, :parser => Fog::Parsers::AWS::RDS::CreateDbParameterGroup.new }) end end class Mock def create_db_parameter_group(group_name, group_family, description) response = Excon::Response.new if self.data[:parameter_groups] and self.data[:parameter_groups][group_name] raise Fog::AWS::RDS::IdentifierTaken.new("Parameter group #{group_name} already exists") end data = { 'DBParameterGroupName' => group_name, 'DBParameterGroupFamily' => group_family.downcase, 'Description' => description } self.data[:parameter_groups][group_name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CreateDBParameterGroupResult"=> {"DBParameterGroup"=> data} } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_security_group.rb000066400000000000000000000036011437344660100253340ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_security_group' # creates a db security group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_CreateDBSecurityGroup.html # ==== Parameters # * DBSecurityGroupDescription <~String> - The description for the DB Security Group # * DBSecurityGroupName <~String> - The name for the DB Security Group. This value is stored as a lowercase string. Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_db_security_group(name, description = name) request({ 'Action' => 'CreateDBSecurityGroup', 'DBSecurityGroupName' => name, 'DBSecurityGroupDescription' => description, :parser => Fog::Parsers::AWS::RDS::CreateDBSecurityGroup.new }) end end class Mock def create_db_security_group(name, description = name) response = Excon::Response.new if self.data[:security_groups] and self.data[:security_groups][name] raise Fog::AWS::RDS::IdentifierTaken.new("DBInstanceAlreadyExists => The security group '#{name}' already exists") end data = { 'DBSecurityGroupName' => name, 'DBSecurityGroupDescription' => description, 'EC2SecurityGroups' => [], 'IPRanges' => [], 'OwnerId' => '0123456789' } self.data[:security_groups][name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, 'CreateDBSecurityGroupResult' => { 'DBSecurityGroup' => data } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_snapshot.rb000066400000000000000000000046561437344660100241230ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_snapshot' # creates a db snapshot # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_CreateDBSnapshot.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to create snapshot for # * DBSnapshotIdentifier <~String> - The identifier for the DB Snapshot. 1-255 alphanumeric or hyphen characters. 
Must start with a letter # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_db_snapshot(identifier, name) request({ 'Action' => 'CreateDBSnapshot', 'DBInstanceIdentifier' => identifier, 'DBSnapshotIdentifier' => name, :parser => Fog::Parsers::AWS::RDS::CreateDBSnapshot.new }) end end class Mock def create_db_snapshot(identifier, name) response = Excon::Response.new if data[:snapshots][name] raise Fog::AWS::RDS::IdentifierTaken.new end server_data = data[:servers][identifier] unless server_data raise Fog::AWS::RDS::NotFound.new("DBInstance #{identifier} not found") end # TODO: raise an error if the server isn't in 'available' state snapshot_data = { 'Status' => 'creating', 'SnapshotType' => 'manual', 'DBInstanceIdentifier' => identifier, 'DBSnapshotIdentifier' => name, 'InstanceCreateTime' => Time.now } # Copy attributes from server %w(Engine EngineVersion AvailabilityZone AllocatedStorage Iops MasterUsername InstanceCreateTime StorageType).each do |key| snapshot_data[key] = server_data[key] end snapshot_data['Port'] = server_data['Endpoint']['Port'] self.data[:snapshots][name] = snapshot_data # TODO: put the server in 'modifying' state response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "CreateDBSnapshotResult"=> {"DBSnapshot"=> snapshot_data.dup} } response.status = 200 # SnapshotCreateTime is not part of the response. self.data[:snapshots][name]['SnapshotCreateTime'] = Time.now response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_db_subnet_group.rb000066400000000000000000000050551437344660100247720ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_db_subnet_group' # Creates a db subnet group # http://docs.amazonwebservices.com/AmazonRDS/2012-01-15/APIReference/API_CreateDBSubnetGroup.html # ==== Parameters # * DBSubnetGroupName <~String> - The name for the DB Subnet Group. This value is stored as a lowercase string. Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". # * SubnetIds <~Array> - The EC2 Subnet IDs for the DB Subnet Group. # * DBSubnetGroupDescription <~String> - The description for the DB Subnet Group # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def create_db_subnet_group(name, subnet_ids, description = name) params = { 'Action' => 'CreateDBSubnetGroup', 'DBSubnetGroupName' => name, 'DBSubnetGroupDescription' => description, :parser => Fog::Parsers::AWS::RDS::CreateDBSubnetGroup.new } params.merge!(Fog::AWS.indexed_param("SubnetIds.member", Array(subnet_ids))) request(params) end end class Mock def create_db_subnet_group(name, subnet_ids, description = name) response = Excon::Response.new if self.data[:subnet_groups] && self.data[:subnet_groups][name] raise Fog::AWS::RDS::IdentifierTaken.new("DBSubnetGroupAlreadyExists => The subnet group '#{name}' already exists") end # collection = Fog::AWS::Compute.new(:aws_access_key_id => 'mock key', :aws_secret_access_key => 'mock secret') compute_data = Fog::AWS::Compute::Mock.data[self.region][self.aws_access_key_id] subnets = subnet_ids.map do |snid| subnet = compute_data[:subnets].detect { |s| s['subnetId'] == snid } raise Fog::AWS::RDS::NotFound.new("InvalidSubnet => The subnet '#{snid}' was not found") if subnet.nil? 
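# return the full mock subnet record; its 'vpcId' is read just below to associate the new group with a VPC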
subnet end vpc_id = subnets.first['vpcId'] data = { 'DBSubnetGroupName' => name, 'DBSubnetGroupDescription' => description, 'SubnetGroupStatus' => 'Complete', 'Subnets' => subnet_ids, 'VpcId' => vpc_id } self.data[:subnet_groups][name] = data response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, 'CreateDBSubnetGroupResult' => { 'DBSubnetGroup' => data } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/create_event_subscription.rb000066400000000000000000000057141437344660100255400ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/create_event_subscription' # Subscribes a db instance to an SNS queue # # @see http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateEventSubscription.html # === Parameters # * Enabled <~Boolean> - set to true to activate the subscription, set to false to create the subscription but not active it # * EventCategories <~Array> - A list of event categories for a SourceType that you want to subscribe to # * SnsTopicArn <~String> - The Amazon Resource Name of the SNS topic created for event notification # * SourceIds <~Array> - The list of identifiers of the event sources for which events will be returned # * SourceType <~String> - The type of source that will be generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned # * SubscriptionName <~String> - The name of the subscription # * Tags <~Array> - A list of tags def create_event_subscription(options={}) if event_categories = options.delete("EventCategories") options.merge!(Fog::AWS.indexed_param('EventCategories.member.%d', [*event_categories])) end if source_ids = options.delete("SourceIds") options.merge!(Fog::AWS.indexed_param('SourceIds.member.%d', [*source_ids])) end if tags = options.delete("tags") options.merge!(Fog::AWS.indexed_param('Tags.member.%d', [*tags])) end request({ "Action" => "CreateEventSubscription", :parser => Fog::Parsers::AWS::RDS::CreateEventSubscription.new, }.merge(options)) end end class Mock def create_event_subscription(options={}) response = Excon::Response.new name = options.delete('SubscriptionName') arn = options.delete('SnsTopicArn') if self.data[:event_subscriptions][name] raise Fog::AWS::RDS::IdentifierTaken.new("SubscriptionAlreadyExist => Subscription already exists") end subscription = { 'CustSubscriptionId' => name, 'EventCategories' => options['EventCategories'] || [], 'SourceType' => options['SourceType'], 'Enabled' => options.fetch(:enabled, "true"), 'Status' => 'creating', 'CreationTime' => Time.now, 'SnsTopicArn' => arn, } self.data[:event_subscriptions][name] = subscription response.body = { "ResponseMetaData" => {"RequestId" => Fog::AWS::Mock.request_id}, "CreateEventSubscriptionResult" => { "EventSubscription" => subscription } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_cluster.rb000066400000000000000000000041401437344660100237300ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_cluster' # delete a database cluster # # @see http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DeleteDBCluster.html # # ==== Parameters ==== # * DBClusterIdentifier <~String> - The DB cluster identifier for the DB cluster to be deleted # * FinalDBSnapshotIdentifier <~String> - The DB cluster snapshot identifier of the new DB cluster 
snapshot created when SkipFinalSnapshot is set to false # * SkipFinalSnapshot <~Boolean> - Determines whether a final DB cluster snapshot is created before the DB cluster is deleted # # ==== Returns ==== # * response<~Excon::Response> # * body<~Hash> def delete_db_cluster(identifier, snapshot_identifier, skip_snapshot = false) params = {} params["FinalDBSnapshotIdentifier"] = snapshot_identifier if snapshot_identifier request({ 'Action' => 'DeleteDBCluster', 'DBClusterIdentifier' => identifier, 'SkipFinalSnapshot' => skip_snapshot, }.merge(params)) end end class Mock def delete_db_cluster(identifier, snapshot_identifier, skip_snapshot = false) response = Excon::Response.new cluster = self.data[:clusters][identifier] || raise(Fog::AWS::RDS::NotFound.new("DBCluster #{identifier} not found")) raise Fog::AWS::RDS::Error.new("InvalidDBClusterStateFault => Cluster cannot be deleted, it still contains DB instances in non-deleting state.") if cluster["DBClusterMembers"].any? unless skip_snapshot create_db_cluster_snapshot(identifier, snapshot_identifier) end self.data[:clusters].delete(identifier) response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DeleteDBClusterResult" => { "DBCluster" => cluster} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_cluster_snapshot.rb000066400000000000000000000026131437344660100256520ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_cluster_snapshot' # delete a db cluster snapshot # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DeleteDBClusterSnapshot.html # # ==== Parameters ==== # * DBClusterSnapshotIdentifier<~String> - The identifier of the DB cluster snapshot to delete # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def delete_db_cluster_snapshot(name) request( 'Action' => 'DeleteDBClusterSnapshot', 'DBClusterSnapshotIdentifier' => name, :parser => Fog::Parsers::AWS::RDS::DeleteDBClusterSnapshot.new ) end end class Mock def delete_db_cluster_snapshot(name) response = Excon::Response.new snapshot = self.data[:cluster_snapshots].delete(name) raise Fog::AWS::RDS::NotFound.new("DBClusterSnapshotNotFound => #{name} not found") unless snapshot response.status = 200 response.body = { "ResponseMetadata" => {"RequestId" => Fog::AWS::Mock.request_id}, "DeleteDBClusterSnapshotResult" => {"DBClusterSnapshot" => snapshot} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_instance.rb000066400000000000000000000057751437344660100240720ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_instance' # delete a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBInstance.html # ==== Parameters # * DBInstanceIdentifier <~String> - The DB Instance identifier for the DB Instance to be deleted. 
# * FinalDBSnapshotIdentifier <~String> - The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to false # * SkipFinalSnapshot <~Boolean> - Determines whether a final DB Snapshot is created before the DB Instance is deleted # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def delete_db_instance(identifier, snapshot_identifier, skip_snapshot = false) params = {} params['FinalDBSnapshotIdentifier'] = snapshot_identifier if snapshot_identifier request({ 'Action' => 'DeleteDBInstance', 'DBInstanceIdentifier' => identifier, 'SkipFinalSnapshot' => skip_snapshot, :parser => Fog::Parsers::AWS::RDS::DeleteDBInstance.new }.merge(params)) end end class Mock def delete_db_instance(identifier, snapshot_identifier, skip_snapshot = false) response = Excon::Response.new server_set = self.data[:servers][identifier] || raise(Fog::AWS::RDS::NotFound.new("DBInstance #{identifier} not found")) unless skip_snapshot if server_set["ReadReplicaSourceDBInstanceIdentifier"] raise Fog::AWS::RDS::Error.new("InvalidParameterCombination => FinalDBSnapshotIdentifier can not be specified when deleting a replica instance") elsif server_set["DBClusterIdentifier"] && snapshot_identifier # for cluster instances, you must pass in skip_snapshot = false, but not specify a snapshot identifier raise Fog::AWS::RDS::Error.new("InvalidParameterCombination => FinalDBSnapshotIdentifier can not be specified when deleting a cluster instance") elsif server_set["DBClusterIdentifier"] && !snapshot_identifier && !skip_snapshot #no-op else create_db_snapshot(identifier, snapshot_identifier) end end cluster = self.data[:clusters].values.detect { |c| c["DBClusterMembers"].any? { |m| m["DBInstanceIdentifier"] == identifier } } if cluster cluster["DBClusterMembers"].delete_if { |v| v["DBInstanceIdentifier"] == identifier } self.data[:clusters][cluster["DBClusterIdentifier"]] = cluster end self.data[:servers].delete(identifier) response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DeleteDBInstanceResult" => { "DBInstance" => server_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_parameter_group.rb000066400000000000000000000024161437344660100254470ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_parameter_group' # delete a database parameter group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBParameterGroup.html # ==== Parameters # * DBParameterGroupName <~String> - name of the parameter group. 
Must not be associated with any instances # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def delete_db_parameter_group(group_name) request({ 'Action' => 'DeleteDBParameterGroup', 'DBParameterGroupName' => group_name, :parser => Fog::Parsers::AWS::RDS::DeleteDbParameterGroup.new }) end end class Mock def delete_db_parameter_group(group_name) response = Excon::Response.new if self.data[:parameter_groups].delete(group_name) response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, } response else raise Fog::AWS::RDS::NotFound.new("DBParameterGroup not found: #{group_name}") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_security_group.rb000066400000000000000000000023561437344660100253410ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_security_group' # deletes a db security group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_DeleteDBSecurityGroup.html # ==== Parameters # * DBSecurityGroupName <~String> - The name for the DB Security Group to delete # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def delete_db_security_group(name) request({ 'Action' => 'DeleteDBSecurityGroup', 'DBSecurityGroupName' => name, :parser => Fog::Parsers::AWS::RDS::DeleteDBSecurityGroup.new }) end end class Mock def delete_db_security_group(name, description = name) response = Excon::Response.new if self.data[:security_groups].delete(name) response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, } response else raise Fog::AWS::RDS::NotFound.new("DBSecurityGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_snapshot.rb000066400000000000000000000025441437344660100241140ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_snapshot' # delete a database snapshot # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSnapshot.html # ==== Parameters # * DBSnapshotIdentifier <~String> - name of the snapshot # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def delete_db_snapshot(name) request({ 'Action' => 'DeleteDBSnapshot', 'DBSnapshotIdentifier' => name, :parser => Fog::Parsers::AWS::RDS::DeleteDBSnapshot.new }) end end class Mock def delete_db_snapshot(name) # TODO: raise error if snapshot isn't 'available' response = Excon::Response.new snapshot_data = self.data[:snapshots].delete(name) snapshot_data = self.data[:cluster_snapshots].delete(name) unless snapshot_data raise Fog::AWS::RDS::NotFound.new("DBSnapshotNotFound => #{name} not found") unless snapshot_data response.status = 200 response.body = { "ResponseMetadata"=> { "RequestId"=> Fog::AWS::Mock.request_id }, "DeleteDBSnapshotResult"=> {"DBSnapshot"=> snapshot_data} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_db_subnet_group.rb000066400000000000000000000025751437344660100247750ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_db_subnet_group' # Deletes a db subnet group # http://docs.aws.amazon.com/AmazonRDS/2013-09-09/APIReference/API_DeleteDBSubnetGroup.html # ==== Parameters # * DBSubnetGroupName <~String> - The name for the DB Subnet Group. This value is stored as a lowercase string. Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". 
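      # ==== Example
      # (Illustrative sketch only, not part of the upstream docs; the credentials and
      # group name below are hypothetical placeholders.)
      #   rds = Fog::AWS::RDS.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')
      #   rds.delete_db_subnet_group('my-app-subnet-group')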
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def delete_db_subnet_group(name) params = { 'Action' => 'DeleteDBSubnetGroup', 'DBSubnetGroupName' => name, :parser => Fog::Parsers::AWS::RDS::DeleteDBSubnetGroup.new } request(params) end end class Mock def delete_db_subnet_group(name) response = Excon::Response.new unless self.data[:subnet_groups] && self.data[:subnet_groups][name] raise Fog::AWS::RDS::NotFound.new("DBSubnetGroupNotFound => The subnet group '#{name}' doesn't exists") end self.data[:subnet_groups].delete(name) response.body = { 'ResponseMetadata'=>{ 'RequestId'=> Fog::AWS::Mock.request_id }, 'return' => true, } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/delete_event_subscription.rb000066400000000000000000000024771437344660100255420ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/delete_event_subscription' # deletes an event subscription # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DeleteEventSubscription.html # === Parameters # * SubscriptionName <~String> - The name of the subscription to delete # === Returns # * response<~Excon::Response>: # * body<~Hash> def delete_event_subscription(name) request({ 'Action' => 'DeleteEventSubscription', 'SubscriptionName' => name, :parser => Fog::Parsers::AWS::RDS::DeleteEventSubscription.new }) end end class Mock def delete_event_subscription(name) response = Excon::Response.new if data = self.data[:event_subscriptions][name] data['Status'] = 'deleting' self.data[:event_subscriptions][name] = data response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, } response else raise Fog::AWS::RDS::NotFound.new("EventSubscriptionNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_cluster_snapshots.rb000066400000000000000000000052611437344660100263550ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_cluster_snapshots' # Describe all or specified db cluster snapshots # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusterSnapshots.html # # ==== Parameters ==== # * DBClusterIdentifier<~String> - A DB cluster identifier to retrieve the list of DB cluster snapshots for # * DBClusterSnapshotIdentifier<~String> - A specific DB cluster snapshot identifier to describe # * SnapshotType<~String> - The type of DB cluster snapshots that will be returned. 
Values can be automated or manual # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def describe_db_cluster_snapshots(opts={}) params = {} params['SnapshotType'] = opts[:type] if opts[:type] params['DBClusterIdentifier'] = opts[:identifier] if opts[:identifier] params['DBClusterSnapshotIdentifier'] = opts[:snapshot_id] if opts[:snapshot_id] params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] request({ 'Action' => 'DescribeDBClusterSnapshots', :parser => Fog::Parsers::AWS::RDS::DescribeDBClusterSnapshots.new }.merge(params)) end end class Mock def describe_db_cluster_snapshots(opts={}) response = Excon::Response.new snapshots = self.data[:cluster_snapshots].values if opts[:identifier] snapshots = snapshots.select { |snapshot| snapshot['DBClusterIdentifier'] == opts[:identifier] } end if opts[:snapshot_id] snapshots = snapshots.select { |snapshot| snapshot['DBClusterSnapshotIdentifier'] == opts[:snapshot_id] } raise Fog::AWS::RDS::NotFound.new("DBClusterSnapshot #{opts[:snapshot_id]} not found") if snapshots.empty? end snapshots.each do |snapshot| case snapshot['Status'] when 'creating' if Time.now - snapshot['SnapshotCreateTime'] > Fog::Mock.delay snapshot['Status'] = 'available' end end end response.status = 200 response.body = { 'ResponseMetadata' => { "RequestId" => Fog::AWS::Mock.request_id }, 'DescribeDBClusterSnapshotsResult' => { 'DBClusterSnapshots' => snapshots } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_clusters.rb000066400000000000000000000033311437344660100244320ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_clusters' # Describe all or specified db clusters # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html # # ==== Parameters ==== # * DBClusterIdentifier<~String> - The user-supplied DB cluster identifier # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def describe_db_clusters(identifier=nil, opts={}) params = {} params['DBClusterIdentifier'] = identifier if identifier params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] request({ 'Action' => 'DescribeDBClusters', :parser => Fog::Parsers::AWS::RDS::DescribeDBClusters.new }.merge(params)) end end class Mock def describe_db_clusters(identifier=nil, opts={}) response = Excon::Response.new cluster_set = [] if identifier if cluster = self.data[:clusters][identifier] cluster_set << cluster else raise Fog::AWS::RDS::NotFound.new("DBCluster #{identifier} not found") end else cluster_set = self.data[:clusters].values end response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeDBClustersResult" => { "DBClusters" => cluster_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_engine_versions.rb000066400000000000000000000024341437344660100257660ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_engine_versions' def describe_db_engine_versions(opts={}) params = {} params['DBParameterGroupFamily'] = opts[:db_parameter_group_family] if opts[:db_parameter_group_family] params['DefaultOnly'] = opts[:default_only] if opts[:default_only] params['Engine'] = opts[:engine] if opts[:engine] params['EngineVersion'] = opts[:engine_version] if opts[:engine_version] params['Marker'] = 
opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] request({ 'Action' => 'DescribeDBEngineVersions', :parser => Fog::Parsers::AWS::RDS::DescribeDBEngineVersions.new }.merge(params)) end end class Mock def describe_db_engine_versions(opts={}) response = Excon::Response.new response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeDBEngineVersionsResult" => { "DBEngineVersions" => self.data[:db_engine_versions] } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_instances.rb000066400000000000000000000071231437344660100245600ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_instances' # Describe all or specified load db instances # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to retrieve information for. if absent information for all instances is returned # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_instances(identifier=nil, opts={}) params = {} params['DBInstanceIdentifier'] = identifier if identifier if opts[:marker] params['Marker'] = opts[:marker] end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeDBInstances', :parser => Fog::Parsers::AWS::RDS::DescribeDBInstances.new }.merge(params)) end end class Mock def describe_db_instances(identifier=nil, opts={}) response = Excon::Response.new server_set = [] if identifier if specified_server = self.data[:servers][identifier] server_set << specified_server else raise Fog::AWS::RDS::NotFound.new("DBInstance #{identifier} not found") end else server_set = self.data[:servers].values end server_set.each do |server| case server["DBInstanceStatus"] when "creating" if Time.now - server['InstanceCreateTime'] >= Fog::Mock.delay * 2 server["DBInstanceStatus"] = "available" server["AvailabilityZone"] ||= region + 'a' server["Endpoint"] = {"Port"=>3306, "Address"=> Fog::AWS::Mock.rds_address(server["DBInstanceIdentifier"],region) } server["PendingModifiedValues"] = {} end when "rebooting" if Time.now - self.data[:reboot_time] >= Fog::Mock.delay # apply pending modified values server.merge!(server["PendingModifiedValues"]) server["PendingModifiedValues"] = {} server["DBInstanceStatus"] = 'available' self.data.delete(:reboot_time) end when "modifying" # TODO there are some fields that only applied after rebooting if Time.now - self.data[:modify_time] >= Fog::Mock.delay if new_id = server["PendingModifiedValues"] && server["PendingModifiedValues"]["DBInstanceIdentifier"] self.data[:servers][new_id] = self.data[:servers].delete(server["DBInstanceIdentifier"]) end server.merge!(server["PendingModifiedValues"]) server["PendingModifiedValues"] = {} server["DBInstanceStatus"] = 'available' end when "available" # I'm not sure if amazon does this unless server["PendingModifiedValues"].empty? 
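# any leftover PendingModifiedValues put the mock server back into the 'modifying' state so they get applied on a later describe call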
server["DBInstanceStatus"] = 'modifying' end end end response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeDBInstancesResult" => { "DBInstances" => server_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_log_files.rb000066400000000000000000000065061437344660100245400ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_log_files' # Describe log files for a DB instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBLogFiles.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to retrieve information for. Required. # * Options <~Hash> - Hash of options. Optional. The following keys are used: # * :file_last_written <~Long> - Filter available log files for those written after this time. Optional. # * :file_size <~Long> - Filters the available log files for files larger than the specified size. Optional. # * :filename_contains <~String> - Filters the available log files for log file names that contain the specified string. Optional. # * :marker <~String> - The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords. Optional. # * :max_records <~Integer> - The maximum number of records to include in the response. If more records exist, a pagination token is included in the response. Optional. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_log_files(rds_id=nil, opts={}) params = {} params['DBInstanceIdentifier'] = rds_id if rds_id params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] params['FilenameContains'] = opts[:filename_contains] if opts[:filename_contains] params['FileSize'] = opts[:file_size] if opts[:file_size] params['FileLastWritten'] = opts[:file_last_written] if opts[:file_last_written] request({ 'Action' => 'DescribeDBLogFiles', :parser => Fog::Parsers::AWS::RDS::DescribeDBLogFiles.new(rds_id) }.merge(params)) end end class Mock def describe_db_log_files(rds_id=nil, opts={}) response = Excon::Response.new log_file_set = [] if rds_id if server = self.data[:servers][rds_id] log_file_set << {"LastWritten" => Time.parse('2013-07-05 17:00:00 -0700'), "LogFileName" => "error/mysql-error.log", "Size" => 0} log_file_set << {"LastWritten" => Time.parse('2013-07-05 17:10:00 -0700'), "LogFileName" => "error/mysql-error-running.log", "Size" => 0} log_file_set << {"LastWritten" => Time.parse('2013-07-05 17:20:00 -0700'), "LogFileName" => "error/mysql-error-running.log.0", "Size" => 8220} log_file_set << {"LastWritten" => Time.parse('2013-07-05 17:30:00 -0700'), "LogFileName" => "error/mysql-error-running.log.1", "Size" => 0} else raise Fog::AWS::RDS::NotFound.new("DBInstance #{rds_id} not found") end else raise Fog::AWS::RDS::NotFound.new('An identifier for an RDS instance must be provided') end response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeDBLogFilesResult" => { "DBLogFiles" => log_file_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_parameter_groups.rb000066400000000000000000000040161437344660100261460ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_parameter_groups' # This API returns a list of DBParameterGroup 
descriptions. If a DBParameterGroupName is specified, the list will contain only the descriptions of the specified DBParameterGroup # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBParameterGroups.html # ==== Parameters # * DBParameterGroupName <~String> - The name of a specific database parameter group to return details for. # * Source <~String> - The parameter types to return. user | system | engine-default # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_parameter_groups(name=nil, opts={}) params={} if opts[:marker] params['Marker'] = opts[:marker] end if name params['DBParameterGroupName'] = name end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeDBParameterGroups', :parser => Fog::Parsers::AWS::RDS::DescribeDBParameterGroups.new }.merge(params)) end end class Mock def describe_db_parameter_groups(name=nil, opts={}) response = Excon::Response.new parameter_set = [] if name if server = self.data[:parameter_groups][name] parameter_set << server else raise Fog::AWS::RDS::NotFound.new("DBInstance #{name} not found") end else parameter_set = self.data[:parameter_groups].values end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeDBParameterGroupsResult" => { "DBParameterGroups" => parameter_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_parameters.rb000066400000000000000000000033641437344660100247370ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_parameters' # Describe parameters from a parameter group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBParameters.html # ==== Parameters # * DBParameterGroupName <~String> - name of parameter group to retrieve parameters for # * Source <~String> - The parameter types to return. user | system | engine-default # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_parameters(name, opts={}) params={} if opts[:marker] params['Marker'] = opts[:marker] end if opts[:source] params['Source'] = opts[:source] end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeDBParameters', 'DBParameterGroupName' => name, :parser => Fog::Parsers::AWS::RDS::DescribeDBParameters.new }.merge(params)) end end class Mock def describe_db_parameters(name, opts={}) group = self.data[:parameter_groups][name] unless group raise Fog::AWS::RDS::NotFound.new("parameter group does not exist") end parameters = group[:parameters] response = Excon::Response.new response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeDBParametersResult" => { "Parameters" => parameters } } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_reserved_instances.rb000066400000000000000000000023441437344660100264570ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_reserved_instances' # Describe all or specified load db instances # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to retrieve information for. 
if absent information for all instances is returned # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_reserved_instances(identifier=nil, opts={}) params = {} params['ReservedDBInstanceId'] = identifier if identifier if opts[:marker] params['Marker'] = opts[:marker] end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeReservedDBInstances', :parser => Fog::Parsers::AWS::RDS::DescribeDBReservedInstances.new }.merge(params)) end end class Mock def describe_db_reserved_instances(identifier=nil, opts={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_security_groups.rb000066400000000000000000000064451437344660100260450ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_security_groups' # Describe all or specified db security groups # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_DescribeDBSecurityGroups.html # ==== Parameters # * DBSecurityGroupName <~String> - The name of the DB Security Group to return details for. # * Marker <~String> - An optional marker provided in the previous DescribeDBInstances request # * MaxRecords <~Integer> - Max number of records to return (between 20 and 100) # Only one of DBInstanceIdentifier or DBSnapshotIdentifier can be specified # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_security_groups(opts={}) opts = {'DBSecurityGroupName' => opts} if opts.is_a?(String) request({ 'Action' => 'DescribeDBSecurityGroups', :parser => Fog::Parsers::AWS::RDS::DescribeDBSecurityGroups.new }.merge(opts)) end end class Mock def describe_db_security_groups(opts={}) response = Excon::Response.new sec_group_set = [] if opts.is_a?(String) sec_group_name = opts if sec_group = self.data[:security_groups][sec_group_name] sec_group_set << sec_group else raise Fog::AWS::RDS::NotFound.new("Security Group #{sec_group_name} not found") end else sec_group_set = self.data[:security_groups].values end # TODO: refactor to not delete items that we're iterating over. Causes # model tests to fail (currently pending) sec_group_set.each do |sec_group| sec_group["IPRanges"].each do |iprange| if iprange["Status"] == "authorizing" || iprange["Status"] == "revoking" iprange[:tmp] ||= Time.now + Fog::Mock.delay * 2 if iprange[:tmp] <= Time.now iprange["Status"] = "authorized" if iprange["Status"] == "authorizing" iprange.delete(:tmp) sec_group["IPRanges"].delete(iprange) if iprange["Status"] == "revoking" end end end # TODO: refactor to not delete items that we're iterating over. 
Causes # model tests to fail (currently pending) sec_group["EC2SecurityGroups"].each do |ec2_secg| if ec2_secg["Status"] == "authorizing" || ec2_secg["Status"] == "revoking" ec2_secg[:tmp] ||= Time.now + Fog::Mock.delay * 2 if ec2_secg[:tmp] <= Time.now ec2_secg["Status"] = "authorized" if ec2_secg["Status"] == "authorizing" ec2_secg.delete(:tmp) sec_group["EC2SecurityGroups"].delete(ec2_secg) if ec2_secg["Status"] == "revoking" end end end end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeDBSecurityGroupsResult" => { "DBSecurityGroups" => sec_group_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_snapshots.rb000066400000000000000000000054301437344660100246120ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_snapshots' # Describe all or specified db snapshots # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeDBSnapshots.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to retrieve information for. if absent information for all instances is returned # * DBSnapshotIdentifier <~String> - ID of snapshot to retrieve information for. if absent information for all snapshots is returned # * SnapshotType <~String> - type of snapshot to retrive (automated|manual) # * Marker <~String> - An optional marker provided in the previous DescribeDBInstances request # * MaxRecords <~Integer> - Max number of records to return (between 20 and 100) # Only one of DBInstanceIdentifier or DBSnapshotIdentifier can be specified # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_snapshots(opts={}) params = {} params['SnapshotType'] = opts[:type] if opts[:type] params['DBInstanceIdentifier'] = opts[:identifier] if opts[:identifier] params['DBSnapshotIdentifier'] = opts[:snapshot_id] if opts[:snapshot_id] params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] request({ 'Action' => 'DescribeDBSnapshots', :parser => Fog::Parsers::AWS::RDS::DescribeDBSnapshots.new }.merge(params)) end end class Mock def describe_db_snapshots(opts={}) response = Excon::Response.new snapshots = self.data[:snapshots].values if opts[:identifier] snapshots = snapshots.select { |snapshot| snapshot['DBInstanceIdentifier'] == opts[:identifier] } end if opts[:snapshot_id] snapshots = snapshots.select { |snapshot| snapshot['DBSnapshotIdentifier'] == opts[:snapshot_id] } raise Fog::AWS::RDS::NotFound.new("DBSnapshot #{opts[:snapshot_id]} not found") if snapshots.empty? end snapshots.each do |snapshot| case snapshot['Status'] when 'creating' if Time.now - snapshot['SnapshotCreateTime'] > Fog::Mock.delay snapshot['Status'] = 'available' end end end # Build response response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeDBSnapshotsResult" => { "DBSnapshots" => snapshots } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_db_subnet_groups.rb000066400000000000000000000036641437344660100254760ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_db_subnet_groups' # This API returns a list of DBSubnetGroup descriptions. 
If a DBSubnetGroupName is specified, the list will contain only # the descriptions of the specified DBSubnetGroup # http://docs.amazonwebservices.com/AmazonRDS/2012-01-15/APIReference/API_DescribeDBSubnetGroups.html # ==== Parameters # * DBSubnetGroupName <~String> - The name of a specific database subnet group to return details for. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_db_subnet_groups(name = nil, opts = {}) params = {} if opts[:marker] params['Marker'] = opts[:marker] end if name params['DBSubnetGroupName'] = name end if opts[:max_records] params['MaxRecords'] = opts[:max_records] end request({ 'Action' => 'DescribeDBSubnetGroups', :parser => Fog::Parsers::AWS::RDS::DescribeDBSubnetGroups.new }.merge(params)) end end class Mock def describe_db_subnet_groups(name = nil, opts = {}) response = Excon::Response.new subnet_group_set = [] if name if subnet_group = self.data[:subnet_groups][name] subnet_group_set << subnet_group else raise Fog::AWS::RDS::NotFound.new("Subnet Group #{name} not found") end else subnet_group_set = self.data[:subnet_groups].values end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeDBSubnetGroupsResult" => { "DBSubnetGroups" => subnet_group_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_engine_default_parameters.rb000066400000000000000000000025631437344660100273230ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_engine_default_parameters' # Returns the default engine and system parameter information for the specified database engine # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEngineDefaultParameters.html # # ==== Parameters ==== # * DBParameterGroupFamily<~String> - The name of the DB parameter group family # # ==== Returns ==== # * response<~Excon::Response>: # * body<~Hash>: def describe_engine_default_parameters(family, opts={}) request({ 'Action' => 'DescribeEngineDefaultParameters', 'DBParameterGroupFamily' => family, :parser => Fog::Parsers::AWS::RDS::DescribeEngineDefaultParameters.new, }.merge(opts)) end end class Mock def describe_engine_default_parameters(family, opts={}) response = Excon::Response.new response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "DescribeEngineDefaultParametersResult" => { "Parameters" => self.data[:default_parameters]} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_event_subscriptions.rb000066400000000000000000000044311437344660100262330ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_event_subscriptions' # Describe all or specified event notifications # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeEventSubscriptions.html # === Parameters # * Marker <~String> - An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request # * MaxRecords <~String> - The maximum number of records to include in the response (20-100) # * SubscriptionName <~String> - The name of the RDS event notification subscription you want to describe # === Returns # * response<~Excon::Response>: # * body<~Hash> def describe_event_subscriptions(options={}) if options[:max_records] params['MaxRecords'] = options[:max_records] end request({ 'Action' => 'DescribeEventSubscriptions', :parser => 
Fog::Parsers::AWS::RDS::DescribeEventSubscriptions.new }.merge(options)) end end class Mock def describe_event_subscriptions(options={}) response = Excon::Response.new name = options['SubscriptionName'] subscriptions = self.data[:event_subscriptions].values subscriptions = subscriptions.select { |s| s['CustSubscriptionId'] == name } if name non_active = self.data[:event_subscriptions].values.select { |s| s['Status'] != 'active' } non_active.each do |s| name = s['CustSubscriptionId'] if s['Status'] == 'creating' s['Status'] = 'active' self.data[:event_subscriptions][name] = s elsif s['Status'] == 'deleting' self.data[:event_subscriptions].delete(name) end end if options['SubscriptionName'] && subscriptions.empty? raise Fog::AWS::RDS::NotFound.new("Event Subscription #{options['SubscriptionName']} not found.") end response.body = { "ResponseMetadata" => {"RequestId" => Fog::AWS::Mock.request_id}, "DescribeEventSubscriptionsResult" => {"EventSubscriptionsList" => subscriptions} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_events.rb000066400000000000000000000035071437344660100234320ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/event_list' # Returns a list of service events # # For more information see: # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DescribeEvents.html # # === Parameters (optional) # * options <~Hash> (optional): # * :start_time <~DateTime> - starting time for event records # * :end_time <~DateTime> - ending time for event records # * :duration <~Integer> - The number of minutes to retrieve events for # Default = 60 Mins # * :marker <~String> - marker provided in the previous request # * :max_records <~Integer> - the maximum number of records to include # Default = 100 # Constraints: min = 20, maximum 100 # * :source_identifier <~String> - identifier of the event source # * :source_type <~DateTime> - event type, one of: # (db-instance | db-parameter-group | db-security-group | db-snapshot) # === Returns # * response <~Excon::Response>: # * body <~Hash> def describe_events(options = {}) request( 'Action' => 'DescribeEvents', 'StartTime' => options[:start_time], 'EndTime' => options[:end_time], 'Duration' => options[:duration], 'Marker' => options[:marker], 'MaxRecords' => options[:max_records], 'SourceIdentifier' => options[:source_identifier], 'SourceType' => options[:source_type], :parser => Fog::Parsers::AWS::RDS::EventListParser.new ) end end class Mock def describe_events Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/describe_orderable_db_instance_options.rb000066400000000000000000000102551437344660100301670ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/describe_orderable_db_instance_options' # Describe all or specified orderable db instances options # https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeOrderableDBInstanceOptions.html # ==== Parameters # * Engine <~String> - The name of the engine to retrieve DB Instance options for. Required. # * Options <~Hash> - Hash of options. Optional. The following keys are used: # * :db_instance_class <~String> - Filter available offerings matching the specified DB Instance class. Optional. # * :engine_version <~String> - Filters available offerings matching the specified engine version. Optional. # * :license_model <~String> - Filters available offerings matching the specified license model. Optional. 
# * :marker <~String> - The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords. Optional. # * :max_records <~Integer> - The maximum number of records to include in the response. If more records exist, a pagination token is included in the response. Optional. # * :vpc <~Boolean> - Filter to show only the available VPC or non-VPC offerings. Optional. # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def describe_orderable_db_instance_options(engine=nil, opts={}) params = {} params['Engine'] = engine if engine params['DBInstanceClass'] = opts[:db_instance_class] if opts[:db_instance_class] params['EngineVersion'] = opts[:engine_version] if opts[:engine_version] params['LicenseModel'] = opts[:license_model] if opts[:license_model] params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] params['Vpc'] = opts[:vpc] if opts[:vpc] request({ 'Action' => 'DescribeOrderableDBInstanceOptions', :parser => Fog::Parsers::AWS::RDS::DescribeOrderableDBInstanceOptions.new }.merge(params)) end end class Mock def describe_orderable_db_instance_options(engine=nil, opts={}) instance_options = [] response = Excon::Response.new if engine (opts[:db_instance_class] || %w(db.m2.xlarge db.m1.large)).each do |size| instance_options << {'MultiAZCapable' => true, 'Engine' => engine, 'LicenseModel' => opts[:license_model] || 'general-public-license', 'ReadReplicaCapable' => true, 'EngineVersion' => opts[:engine_version] || '5.6.12', 'AvailabilityZones' => [ {'Name' => 'us-east-1b'}, {'Name' => 'us-east-1c'}, {'Name' => 'us-east-1d'}, {'Name' => 'us-east-1e'}], 'DBInstanceClass' => size, 'SupportsStorageEncryption' => true, 'SupportsPerformanceInsights' => false, 'StorageType' => 'gp2', 'SupportsIops' => false, 'SupportsIAMDatabaseAuthentication' => false, 'SupportsEnhancedMonitoring' => true, 'Vpc' => opts[:vpc].nil? ? true : opts[:vpc]} end else raise Fog::AWS::RDS::NotFound.new('An engine must be specified to retrieve orderable instance options') end response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'DescribeOrderableDBInstanceOptionsResult' => { 'OrderableDBInstanceOptions' => instance_options } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/download_db_logfile_portion.rb000066400000000000000000000050011437344660100260040ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/download_db_logfile_portion' # Retrieve a portion of a log file of a db instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DownloadDBLogFilePortion.html # ==== Parameters # * DBInstanceIdentifier <~String> - ID of instance to retrieve information for. Required. # * LogFileName <~String> - The name of the log file to be downloaded. Required. # * Options <~Hash> - Hash of options. Optional. The following keys are used: # * :marker <~String> - The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords. Optional. # * :max_records <~Integer> - The maximum number of records to include in the response. If more records exist, a pagination token is included in the response. Optional. # * :number_of_lines <~Integer> - The number of lines to download. Optional. 
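      # ==== Example
      # (Illustrative sketch only, not part of the upstream docs; "rds" stands for an
      # already-constructed Fog::AWS::RDS connection, and the instance identifier and
      # log file name are hypothetical, loosely mirroring the mock data in describe_db_log_files.)
      #   rds.download_db_logfile_portion('mydbinstance', 'error/mysql-error-running.log', :number_of_lines => 100)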
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def download_db_logfile_portion(identifier=nil, filename=nil, opts={}) params = {} params['DBInstanceIdentifier'] = identifier if identifier params['LogFileName'] = filename if filename params['Marker'] = opts[:marker] if opts[:marker] params['MaxRecords'] = opts[:max_records] if opts[:max_records] params['NumberOfLines'] = opts[:number_of_lines] if opts[:number_of_lines] request({ 'Action' => 'DownloadDBLogFilePortion', :parser => Fog::Parsers::AWS::RDS::DownloadDBLogFilePortion.new }.merge(params)) end end class Mock def download_db_logfile_portion(identifier=nil, filename=nil, opts={}) response = Excon::Response.new server_set = [] if identifier if server = self.data[:servers][identifier] server_set << server else raise Fog::AWS::RDS::NotFound.new("DBInstance #{identifier} not found") end else server_set = self.data[:servers].values end response.status = 200 response.body = { "ResponseMetadata" => { "RequestId"=> Fog::AWS::Mock.request_id }, "DescribeDBInstancesResult" => { "DBInstances" => server_set } } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/list_tags_for_resource.rb000066400000000000000000000025751437344660100250400ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/tag_list_parser' # returns a Hash of tags for a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_ListTagsForResource.html # ==== Parameters # * rds_id <~String> - name of the RDS instance whose tags are to be retrieved # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def list_tags_for_resource(rds_id) resource_name = "arn:aws:rds:#{@region}:#{owner_id}:db:#{rds_id}" %w[us-gov-west-1 us-gov-east-1].include?(@region) ? resource_name.insert(7, '-us-gov') : resource_name request( 'Action' => 'ListTagsForResource', 'ResourceName' => resource_name, :parser => Fog::Parsers::AWS::RDS::TagListParser.new ) end end class Mock def list_tags_for_resource(rds_id) response = Excon::Response.new if server = data[:servers][rds_id] response.status = 200 response.body = { 'ListTagsForResourceResult' => { 'TagList' => data[:tags][rds_id] } } response else raise Fog::AWS::RDS::NotFound, "DBInstance #{rds_id} not found" end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/modify_db_instance.rb000066400000000000000000000135361437344660100241110ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/modify_db_instance' # modifies a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_ModifyDBInstance.html # ==== Parameters # * DBInstanceIdentifier <~String> - name of the db instance to modify # * ApplyImmediately <~Boolean> - whether to apply the changes immediately or wait for the next maintenance window # # * AllocatedStorage <~Integer> Storage space, in GB # * AllowMajorVersionUpgrade <~Boolean> Must be set to true if EngineVersion specifies a different major version # * AutoMinorVersionUpgrade <~Boolean> Indicates that minor version upgrades will be applied automatically to the DB Instance during the maintenance window # * BackupRetentionPeriod <~Integer> 0-8 The number of days to retain automated backups. 
# * DBInstanceClass <~String> The new compute and memory capacity of the DB Instanc # * DBParameterGroupName <~String> The name of the DB Parameter Group to apply to this DB Instance # * DBSecurityGroups <~Array> A list of DB Security Groups to authorize on this DB Instance # * EngineVersion <~String> The version number of the database engine to upgrade to. # * Iops <~Integer> IOPS rate # * MasterUserPassword <~String> The new password for the DB Instance master user # * MultiAZ <~Boolean> Specifies if the DB Instance is a Multi-AZ deployment # * PreferredBackupWindow <~String> The daily time range during which automated backups are created if automated backups are enabled # * PreferredMaintenanceWindow <~String> The weekly time range (in UTC) during which system maintenance can occur, which may result in an outage # * VpcSecurityGroups <~Array> A list of VPC Security Group IDs to authorize on this DB instance # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def modify_db_instance(db_name, apply_immediately, options={}) if security_groups = options.delete('DBSecurityGroups') options.merge!(Fog::AWS.indexed_param('DBSecurityGroups.member.%d', [*security_groups])) end if vpc_security_groups = options.delete('VpcSecurityGroups') options.merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*vpc_security_groups])) end request({ 'Action' => 'ModifyDBInstance', 'DBInstanceIdentifier' => db_name, 'ApplyImmediately' => apply_immediately, :parser => Fog::Parsers::AWS::RDS::ModifyDBInstance.new }.merge(options)) end end class Mock def modify_db_instance(db_name, apply_immediately, _options={}) options = _options.dup response = Excon::Response.new if server = self.data[:servers][db_name] if server["DBInstanceStatus"] != "available" raise Fog::AWS::RDS::NotFound.new("DBInstance #{db_name} not available for modification") else self.data[:modify_time] = Time.now # TODO verify the params options # if apply_immediately is false, all the options go to pending_modified_values and then apply and clear after either # a reboot or the maintainance window #if apply_immediately # modified_server = server.merge(options) #else # modified_server = server["PendingModifiedValues"].merge!(options) # it appends #end if options["NewDBInstanceIdentifier"] options["DBInstanceIdentifier"] = options.delete("NewDBInstanceIdentifier") options["Endpoint"] = {"Port" => server["Endpoint"]["Port"], "Address"=> Fog::AWS::Mock.rds_address(options["DBInstanceIdentifier"],region)} end rds_security_groups = self.data[:security_groups].values ec2_security_groups = Fog::AWS::Compute::Mock.data[@region][@aws_access_key_id][:security_groups].values db_security_group_names = Array(options.delete("DBSecurityGroups")) db_security_groups = db_security_group_names.inject([]) do |r, group_name| unless rds_security_groups.find { |sg| sg["DBSecurityGroupName"] == group_name } raise Fog::AWS::RDS::Error.new("InvalidParameterValue => Invalid security group , groupId= , groupName=#{group_name}") end r << {"Status" => "active", "DBSecurityGroupName" => group_name } end vpc_security_groups = Array(options.delete("VpcSecurityGroups")).inject([]) do |r, group_id| unless ec2_security_groups.find { |sg| sg["groupId"] == group_id } raise Fog::AWS::RDS::Error.new("InvalidParameterValue => Invalid security group , groupId=#{group_id} , groupName=") end r << {"Status" => "active", "VpcSecurityGroupId" => group_id } end options.merge!( "DBSecurityGroups" => db_security_groups, "VpcSecurityGroups" => vpc_security_groups ) 
self.data[:servers][db_name]["PendingModifiedValues"].merge!(options) # it appends self.data[:servers][db_name]["DBInstanceStatus"] = "modifying" response.status = 200 response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "ModifyDBInstanceResult" => { "DBInstance" => self.data[:servers][db_name] } } response end else raise Fog::AWS::RDS::NotFound.new("DBInstance #{db_name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/modify_db_parameter_group.rb000066400000000000000000000055111437344660100254730ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/modify_db_parameter_group' # modifies a database parameter group # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_ModifyDBParameterGroup.html # ==== Parameters # * DBParameterGroupName <~String> - name of the parameter group # * Parameters<~Array> - Array of up to 20 Hashes describing parameters to set # * 'ParameterName'<~String> - parameter name. # * 'ParameterValue'<~String> - new paremeter value # * 'ApplyMethod'<~String> - immediate | pending-reboot whether to set the parameter immediately or not (may require an instance restart) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def modify_db_parameter_group(group_name, parameters) parameter_names = [] parameter_values = [] parameter_apply_methods = [] parameters.each do |parameter| parameter_names.push(parameter['ParameterName']) parameter_values.push(parameter['ParameterValue']) parameter_apply_methods.push(parameter['ApplyMethod']) end params = {} params.merge!(Fog::AWS.indexed_param('Parameters.member.%d.ParameterName', parameter_names)) params.merge!(Fog::AWS.indexed_param('Parameters.member.%d.ParameterValue', parameter_values)) params.merge!(Fog::AWS.indexed_param('Parameters.member.%d.ApplyMethod', parameter_apply_methods)) request({ 'Action' => 'ModifyDBParameterGroup', 'DBParameterGroupName' => group_name, :parser => Fog::Parsers::AWS::RDS::ModifyDbParameterGroup.new }.merge(params)) end end class Mock def modify_db_parameter_group(group_name, parameters) group = self.data[:parameter_groups][group_name] unless group raise Fog::AWS::RDS::NotFound.new("Parameter group not found") end parameters.each do |p| p.merge!( "Source" => "user", "IsModifiable" => true, "Description" => "some string", "DataType" => "string", "AllowedValues" => p["ParameterValue"], "ApplyType" => "dynamic" ) end group[:parameters] = parameters self.data[:parameter_groups][group_name] = group response = Excon::Response.new response.body = { "ResponseMetadata" => {"RequestId" => Fog::AWS::Mock.request_id}, "ModifyDBParameterGroupResult" => {"DBParameterGroupName" => group_name} } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/modify_db_snapshot_attribute.rb000066400000000000000000000034231437344660100262210ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/modify_db_snapshot_attribute' # Modify db snapshot attributes # # ==== Parameters # * db_snapshot_identifier<~String> - Id of snapshot to modify # * attributes<~Hash>: # * 'Add.MemberId'<~Array> - One or more account ids to grant rds create permission to # * 'Remove.MemberId'<~Array> - One or more account ids to revoke rds create permission from # # {Amazon API Reference}[http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ModifyDBSnapshotAttribute.html] # def modify_db_snapshot_attribute(db_snapshot_identifier, 
attributes) params = {} params.merge!(Fog::AWS.indexed_param('ValuesToAdd.member.%d', attributes['Add.MemberId'] || [])) params.merge!(Fog::AWS.indexed_param('ValuesToRemove.member.%d', attributes['Remove.MemberId'] || [])) request({ 'Action' => 'ModifyDBSnapshotAttribute', 'DBSnapshotIdentifier' => db_snapshot_identifier, :idempotent => true, 'AttributeName' => "restore", :parser => Fog::Parsers::AWS::RDS::ModifyDbSnapshotAttribute.new }.merge!(params)) end end class Mock # # Usage # # Fog::AWS[:rds].modify_db_snapshot_attribute("snap-identifier", {"Add.MemberId"=>"389480430104"}) # def modify_db_snapshot_attribute(db_snapshot_identifier, attributes) response = Excon::Response.new response.status = 200 response.body = { 'requestId' => Fog::AWS::Mock.request_id }.merge!(data) response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/modify_db_subnet_group.rb000066400000000000000000000022601437344660100250110ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/modify_db_subnet_group.rb' # Creates a db subnet group # http://docs.aws.amazon.com/AmazonRDS/2012-01-15/APIReference/API_ModifyDBSubnetGroup.html # ==== Parameters # * DBSubnetGroupName <~String> - The name for the DB Subnet Group. This value is stored as a lowercase string. Must contain no more than 255 alphanumeric characters or hyphens. Must not be "Default". # * SubnetIds <~Array> - The EC2 Subnet IDs for the DB Subnet Group. # * DBSubnetGroupDescription <~String> - The description for the DB Subnet Group # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def modify_db_subnet_group(name, subnet_ids, description = nil) params = { 'Action' => 'ModifyDBSubnetGroup', 'DBSubnetGroupName' => name, 'DBSubnetGroupDescription' => description, :parser => Fog::Parsers::AWS::RDS::ModifyDBSubnetGroup.new } params.merge!(Fog::AWS.indexed_param("SubnetIds.member", Array(subnet_ids))) request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/promote_read_replica.rb000066400000000000000000000060561437344660100244470ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/promote_read_replica' # promote a read replica to a writable RDS instance # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_PromoteReadReplica.html # ==== Parameters # * DBInstanceIdentifier <~String> - The DB Instance identifier for the DB Instance to be deleted. # * BackupRetentionPeriod <~Integer> - The number of days to retain automated backups. Range: 0-8. # Setting this parameter to a positive number enables backups. # Setting this parameter to 0 disables automated backups. # * PreferredBackupWindow <~String> - The daily time range during which automated backups are created if # automated backups are enabled, using the BackupRetentionPeriod parameter. # Default: A 30-minute window selected at random from an 8-hour block of time per region. # See the Amazon RDS User Guide for the time blocks for each region from which the default backup windows are assigned. 
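        # ==== Example
        # A minimal, illustrative call, assuming an existing read replica and
        # configured Fog credentials; the identifier, retention period, and
        # backup window below are placeholder values:
        #
        #   rds = Fog::AWS[:rds]
        #   rds.promote_read_replica('myreadreplica', 7, '08:00-08:30')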
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def promote_read_replica(identifier, backup_retention_period = nil, preferred_backup_window = nil) params = {} params['BackupRetentionPeriod'] = backup_retention_period if backup_retention_period params['PreferredBackupWindow'] = preferred_backup_window if preferred_backup_window request({ 'Action' => 'PromoteReadReplica', 'DBInstanceIdentifier' => identifier, :parser => Fog::Parsers::AWS::RDS::PromoteReadReplica.new }.merge(params)) end end class Mock def promote_read_replica(identifier, backup_retention_period = nil, preferred_backup_window = nil) server = self.data[:servers][identifier] server || raise(Fog::AWS::RDS::NotFound.new("DBInstance #{identifier} not found")) if server["ReadReplicaSourceDBInstanceIdentifier"].nil? raise(Fog::AWS::RDS::Error.new("InvalidDBInstanceState => DB Instance is not a read replica.")) end self.data[:modify_time] = Time.now data = { 'BackupRetentionPeriod' => backup_retention_period || 1, 'PreferredBackupWindow' => preferred_backup_window || '08:00-08:30', 'DBInstanceIdentifier' => identifier, 'DBInstanceStatus' => "modifying", 'PendingModifiedValues' => { 'ReadReplicaSourceDBInstanceIdentifier' => nil, } } server.merge!(data) response = Excon::Response.new response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "PromoteReadReplicaResult" => { "DBInstance" => server } } response.status = 200 response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/reboot_db_instance.rb000066400000000000000000000031371437344660100241100ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/reboot_db_instance' # reboots a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_RebootDBInstance.html # ==== Parameters # * DBInstanceIdentifier <~String> - name of the db instance to reboot # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def reboot_db_instance(instance_identifier) request({ 'Action' => 'RebootDBInstance', 'DBInstanceIdentifier' => instance_identifier, :parser => Fog::Parsers::AWS::RDS::RebootDBInstance.new, }) end end class Mock def reboot_db_instance(instance_identifier) response = Excon::Response.new if server = self.data[:servers][instance_identifier] if server["DBInstanceStatus"] != "available" raise Fog::AWS::RDS::NotFound.new("DBInstance #{instance_identifier} not available for rebooting") else server["DBInstanceStatus"] = 'rebooting' self.data[:reboot_time] = Time.now response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, "RebootDBInstanceResult" => { "DBInstance" => server } } response end else raise Fog::AWS::RDS::NotFound.new("DBInstance #{instance_identifier} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/remove_tags_from_resource.rb000066400000000000000000000030011437344660100255200ustar00rootroot00000000000000module Fog module AWS class RDS class Real # removes tags from a database instance # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_RemoveTagsFromResource.html # ==== Parameters # * rds_id <~String> - name of the RDS instance whose tags are to be retrieved # * keys <~Array> A list of String keys for the tags to remove # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def remove_tags_from_resource(rds_id, keys) resource_name = "arn:aws:rds:#{@region}:#{owner_id}:db:#{rds_id}" %w[us-gov-west-1 us-gov-east-1].include?(@region) ? 
resource_name.insert(7, '-us-gov') : resource_name request( { 'Action' => 'RemoveTagsFromResource', 'ResourceName' => resource_name, :parser => Fog::Parsers::AWS::RDS::Base.new }.merge(Fog::AWS.indexed_param('TagKeys.member.%d', keys)) ) end end class Mock def remove_tags_from_resource(rds_id, keys) response = Excon::Response.new if server = data[:servers][rds_id] keys.each { |key| data[:tags][rds_id].delete key } response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response else raise Fog::AWS::RDS::NotFound, "DBInstance #{rds_id} not found" end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/restore_db_instance_from_db_snapshot.rb000066400000000000000000000111511437344660100277030ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/restore_db_instance_from_db_snapshot' # Restores a DB Instance from a DB Snapshot # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_RestoreDBInstanceFromDBSnapshot.html # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def restore_db_instance_from_db_snapshot(snapshot_id, db_name, opts={}) request({ 'Action' => 'RestoreDBInstanceFromDBSnapshot', 'DBSnapshotIdentifier' => snapshot_id, 'DBInstanceIdentifier' => db_name, :parser => Fog::Parsers::AWS::RDS::RestoreDBInstanceFromDBSnapshot.new, }.merge(opts)) end end class Mock def restore_db_instance_from_db_snapshot(snapshot_id, db_name, options={}) if self.data[:servers] and self.data[:servers][db_name] raise Fog::AWS::RDS::IdentifierTaken.new("DBInstanceAlreadyExists #{response.body.to_s}") end unless self.data[:snapshots] and snapshot = self.data[:snapshots][snapshot_id] raise Fog::AWS::RDS::NotFound.new("DBSnapshotNotFound #{response.body.to_s}") end if !!options["MultiAZ"] && !!options["AvailabilityZone"] raise Fog::AWS::RDS::InvalidParameterCombination.new('Requesting a specific availability zone is not valid for Multi-AZ instances.') end option_group_membership = if option_group_name = options['OptionGroupName'] [{ 'OptionGroupMembership' => [{ 'OptionGroupName' => option_group_name, 'Status' => "pending-apply"}] }] else [{ 'OptionGroupMembership' => [{ 'OptionGroupName' => 'default: mysql-5.6', 'Status' => "pending-apply"}] }] end data = { "AllocatedStorage" => snapshot['AllocatedStorage'], "AutoMinorVersionUpgrade" => options['AutoMinorVersionUpgrade'].nil? ? 
true : options['AutoMinorVersionUpgrade'], "AvailabilityZone" => options['AvailabilityZone'], "BackupRetentionPeriod" => options['BackupRetentionPeriod'] || 1, "CACertificateIdentifier" => 'rds-ca-2015', "DBInstanceClass" => options['DBInstanceClass'] || 'db.m3.medium', "DBInstanceIdentifier" => db_name, "DBInstanceStatus" => 'creating', "DBName" => options['DBName'], "DBParameterGroups" => [{'DBParameterGroupName'=>'default.mysql5.5', 'ParameterApplyStatus'=>'in-sync'}], "DBSecurityGroups" => [{'Status'=>'active', 'DBSecurityGroupName'=>'default'}], "Endpoint" => {}, "Engine" => options['Engine'] || snapshot['Engine'], "EngineVersion" => options['EngineVersion'] || snapshot['EngineVersion'], "InstanceCreateTime" => nil, "Iops" => options['Iops'], "LicenseModel" => options['LicenseModel'] || snapshot['LicenseModel'] || 'general-public-license', "MasterUsername" => options['MasterUsername'] || snapshot['MasterUsername'], "MultiAZ" => !!options['MultiAZ'], "OptiongroupMemberships" => option_group_membership, "PendingModifiedValues" => { 'MasterUserPassword' => '****' }, # This clears when is available "PreferredBackupWindow" => '08:00-08:30', "PreferredMaintenanceWindow" => 'mon:04:30-mon:05:00', "PubliclyAccessible" => true, "ReadReplicaDBInstanceIdentifiers" => [], "StorageType" => options['StorageType'] || (options['Iops'] ? 'io1' : 'standard'), "VpcSecurityGroups" => nil, "StorageEncrypted" => false, } self.data[:servers][db_name] = data response = Excon::Response.new response.body = { "ResponseMetadata" => { "RequestId" => Fog::AWS::Mock.request_id }, "RestoreDBInstanceFromDBSnapshotResult" => { "DBInstance" => data } } response.status = 200 self.data[:servers][db_name]["InstanceCreateTime"] = Time.now response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/restore_db_instance_to_point_in_time.rb000066400000000000000000000017531437344660100277220ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/restore_db_instance_to_point_in_time' # Restores a DB Instance to a point in time # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_RestoreDBInstanceToPointInTime.html # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def restore_db_instance_to_point_in_time(source_db_name, target_db_name, opts={}) request({ 'Action' => 'RestoreDBInstanceToPointInTime', 'SourceDBInstanceIdentifier' => source_db_name, 'TargetDBInstanceIdentifier' => target_db_name, :parser => Fog::Parsers::AWS::RDS::RestoreDBInstanceToPointInTime.new, }.merge(opts)) end end class Mock def restore_db_instance_to_point_in_time(source_db_name, target_db_name, opts={}) Fog::Mock.not_implemented end end end end end fog-aws-3.18.0/lib/fog/aws/requests/rds/revoke_db_security_group_ingress.rb000066400000000000000000000061361437344660100271240ustar00rootroot00000000000000module Fog module AWS class RDS class Real require 'fog/aws/parsers/rds/revoke_db_security_group_ingress' # revokes a db security group ingress # http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/index.html?API_RevokeDBSecurityGroupIngress.html # ==== Parameters # * CIDRIP <~String> - The IP range to revoke # * DBSecurityGroupName <~String> - The name for the DB Security Group. # * EC2SecurityGroupName <~String> - Name of the EC2 Security Group to revoke. # * EC2SecurityGroupOwnerId <~String> - AWS Account Number of the owner of the security group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. 
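        # ==== Example
        # A minimal, illustrative call, assuming configured Fog credentials; the
        # group name and CIDR range are placeholder values. Either CIDRIP, or an
        # EC2 security group (name or id) together with its owner id, must be
        # supplied:
        #
        #   rds = Fog::AWS[:rds]
        #   rds.revoke_db_security_group_ingress('mysecuritygroup', 'CIDRIP' => '10.0.0.0/24')
        #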
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: def revoke_db_security_group_ingress(name, opts={}) unless opts.key?('CIDRIP') || ((opts.key?('EC2SecurityGroupName') || opts.key?('EC2SecurityGroupId')) && opts.key?('EC2SecurityGroupOwnerId')) raise ArgumentError, 'Must specify CIDRIP, or one of EC2SecurityGroupName or EC2SecurityGroupId, and EC2SecurityGroupOwnerId' end request({ 'Action' => 'RevokeDBSecurityGroupIngress', :parser => Fog::Parsers::AWS::RDS::RevokeDBSecurityGroupIngress.new, 'DBSecurityGroupName' => name }.merge(opts)) end end class Mock def revoke_db_security_group_ingress(name, opts = {}) unless opts.key?('CIDRIP') || ((opts.key?('EC2SecurityGroupName') || opts.key?('EC2SecurityGroupId')) && opts.key?('EC2SecurityGroupOwnerId')) raise ArgumentError, 'Must specify CIDRIP, or one of EC2SecurityGroupName or EC2SecurityGroupId, and EC2SecurityGroupOwnerId' end if ec2_security_group_id = opts.delete("EC2SecurityGroupId") ec2_security_group = (Fog::AWS::Compute::Mock.data[self.region][self.aws_access_key_id][:security_groups] || {}).values.detect { |sg| sg['groupId'] == ec2_security_group_id } opts['EC2SecurityGroupName'] = ec2_security_group['groupName'] end response = Excon::Response.new if sec_group = self.data[:security_groups][name] if opts.key?('CIDRIP') sec_group['IPRanges'].each do |iprange| iprange['Status']= 'revoking' if iprange['CIDRIP'] == opts['CIDRIP'] end else sec_group['EC2SecurityGroups'].each do |ec2_secg| ec2_secg['Status']= 'revoking' if ec2_secg['EC2SecurityGroupName'] == opts['EC2SecurityGroupName'] end end response.status = 200 response.body = { "ResponseMetadata"=>{ "RequestId"=> Fog::AWS::Mock.request_id }, 'RevokeDBSecurityGroupIngressResult' => { 'DBSecurityGroup' => sec_group } } response else raise Fog::AWS::RDS::NotFound.new("DBSecurityGroupNotFound => #{name} not found") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/000077500000000000000000000000001437344660100207545ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/redshift/authorize_cluster_security_group_ingress.rb000066400000000000000000000050761437344660100317610ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/revoke_cluster_security_group_ingress' # ==== Parameters # # @param [Hash] options # * :cluster_security_group_name - required - (String) # The name of the security Group from which to revoke the ingress rule. # * :cidrip - (String) # The IP range for which to revoke access. This range must be a valid Classless # Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, # EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided. # * :ec2_security_group_name - (String) # The name of the EC2 Security Group whose access is to be revoked. If # EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be # provided and CIDRIP cannot be provided. # * :ec2_security_group_owner_id - (String) # The AWS account number of the owner of the security group specified in the # EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable # value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must # also be provided. and CIDRIP cannot be provided. 
Example: 111122223333 # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_AuthorizeClusterSecurityGroupIngress.html def authorize_cluster_security_group_ingress(options = {}) cluster_security_group_name = options[:cluster_security_group_name] cidrip = options[:cidrip] ec2_security_group_name = options[:ec2_security_group_name] ec2_security_group_owner_id = options[:ec2_security_group_owner_id] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::RevokeClusterSecurityGroupIngress.new } params[:query]['Action'] = 'AuthorizeClusterSecurityGroupIngress' params[:query]['ClusterSecurityGroupName'] = cluster_security_group_name if cluster_security_group_name params[:query]['CIDRIP'] = cidrip if cidrip params[:query]['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_name params[:query]['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id if ec2_security_group_owner_id request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/authorize_snapshot_access.rb000066400000000000000000000032401437344660100265520ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_snapshot' # ==== Parameters # # @param [Hash] options # * :snapshot_identifier - required - (String) # The identifier of the snapshot the account is authorized to restore. # * :snapshot_cluster_identifier - (String) # * :account_with_restore_access - required - (String) # The identifier of the AWS customer account authorized to restore the specified snapshot. # # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CopyClusterSnapshot.html def authorize_snapshot_access(options = {}) snapshot_identifier = options[:snapshot_identifier] snapshot_cluster_identifier = options[:snapshot_cluster_identifier] account_with_restore_access = options[:account_with_restore_access] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSnapshot.new } params[:query]['Action'] = 'AuthorizeSnapshotAccess' params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['SnapshotClusterIdentifier'] = snapshot_cluster_identifier if snapshot_cluster_identifier params[:query]['AccountWithRestoreAccess'] = account_with_restore_access if account_with_restore_access request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/copy_cluster_snapshot.rb000066400000000000000000000041121437344660100257310ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_snapshot' # ==== Parameters # # @param [Hash] options # * :source_snapshot_identifier - required - (String) # The identifier for the source snapshot. Constraints: Must be the identifier for # a valid automated snapshot whose state is "available". # * :source_snapshot_cluster_identifier - (String) # * :target_snapshot_identifier - required - (String) # The identifier given to the new manual snapshot. Constraints: Cannot be null, # empty, or blank. Must contain from 1 to 255 alphanumeric characters or hyphens. # First character must be a letter. Cannot end with a hyphen or contain two # consecutive hyphens. Must be unique for the AWS account that is making the request. 
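        # ==== Example
        # A minimal, illustrative call, assuming the Redshift service is reached
        # through the usual Fog lookup and credentials are configured; the
        # snapshot identifiers are placeholder values:
        #
        #   redshift = Fog::AWS[:redshift]
        #   redshift.copy_cluster_snapshot(:source_snapshot_identifier => 'my-snapshot-id',
        #                                  :target_snapshot_identifier => 'my-snapshot-copy')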
# # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CopyClusterSnapshot.html def copy_cluster_snapshot(options = {}) source_snapshot_identifier = options[:source_snapshot_identifier] source_snapshot_cluster_identifier = options[:source_snapshot_cluster_identifier] target_snapshot_identifier = options[:target_snapshot_identifier] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSnapshot.new } params[:query]['Action'] = 'CopyClusterSnapshot' params[:query]['SourceSnapshotIdentifier'] = source_snapshot_identifier if source_snapshot_identifier params[:query]['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier if source_snapshot_cluster_identifier params[:query]['TargetSnapshotIdentifier'] = target_snapshot_identifier if target_snapshot_identifier request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/create_cluster.rb000066400000000000000000000232071437344660100243110ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster' # ==== Parameters # # @param [Hash] options # * :db_name - (String) # The name of the first database to be created when the cluster is created. To create # additional databases after the cluster is created, connect to the cluster with a SQL # client and use SQL commands to create a database. Default: dev Constraints: Must # contain 1 to 64 alphanumeric characters. Must contain only lowercase letters. # * :cluster_identifier - required - (String) # A unique identifier for the cluster. You use this identifier to refer to the cluster # for any subsequent cluster operations such as deleting or modifying. Must be unique # for all clusters within an AWS account. Example: myexamplecluster # * :cluster_type - (String) # Type of the cluster. When cluster type is specified as single-node, the NumberOfNodes # parameter is not required. multi-node, the NumberOfNodes parameter is required. Valid # Values: multi-node | single-node Default: multi-node # * :node_type - required - (String) # The node type to be provisioned. Valid Values: dw.hs1.xlarge | dw.hs1.8xlarge. # * :master_username - required - (String) # The user name associated with the master user account for the cluster that is being # created. Constraints: Must be 1 - 128 alphanumeric characters. First character must # be a letter. Cannot be a reserved word. # * :master_user_password - required - (String) # The password associated with the master user account for the cluster that is being # created. Constraints: Must be between 8 and 64 characters in length. Must contain at # least one uppercase letter. Must contain at least one lowercase letter. Must contain # one number. # * :cluster_security_groups - (Array) # A list of security groups to be associated with this cluster. Default: The default # cluster security group for Amazon Redshift. # * :vpc_security_group_ids - (Array) # A list of Virtual Private Cloud (VPC) security groups to be associated with the # cluster. Default: The default VPC security group is associated with the cluster. # * :cluster_subnet_group_name - (String) # The name of a cluster subnet group to be associated with this cluster. If this # parameter is not provided the resulting cluster will be deployed outside virtual # private cloud (VPC). # * :availability_zone - (String) # The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the # cluster. 
Default: A random, system-chosen Availability Zone in the region that is # specified by the endpoint. Example: us-east-1d Constraint: The specified # Availability Zone must be in the same region as the current endpoint. # * :preferred_maintenance_window - (String) # The weekly time range (in UTC) during which automated cluster maintenance can occur. # Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from # an 8-hour block of time per region, occurring on a random day of the week. # Constraints: Minimum 30-minute window. # * :cluster_parameter_group_name - (String) # The name of the parameter group to be associated with this cluster. Default: The # default Amazon Redshift cluster parameter group. Constraints: Must be 1 to 255 # alphanumeric characters or hyphens. First character must be a letter. Cannot end # with a hyphen or contain two consecutive hyphens. # * :automated_snapshot_retention_period - (Integer) # Number of days that automated snapshots are retained. If the value is 0, automated # snapshots are disabled. Default: 1 Constraints: Must be a value from 0 to 35. # * :port - (Integer) # The port number on which the cluster accepts incoming connections. Default: 5439 # Valid Values: 1150-65535 # * :cluster_version - (String) # The version of the Amazon Redshift engine software that you want to deploy on the # cluster. The version selected runs on all the nodes in the cluster. Constraints: # Only version 1.0 is currently available. Example: 1.0 # * :allow_version_upgrade - (Boolean) # If `true` , upgrades can be applied during the maintenance window to the Amazon # Redshift engine that is running on the cluster. Default: `true` # * :number_of_nodes - (Integer) # The number of compute nodes in the cluster. This parameter is required when the # ClusterType parameter is specified as multi-node. If you don't specify this parameter, # you get a single-node cluster. When requesting a multi-node cluster, you must specify # the number of nodes that you want in the cluster. Default: 1 Constraints: Value must # be at least 1 and no more than 100. # * :publicly_accessible - (Boolean) # If `true` , the cluster can be accessed from a public network. # * :encrypted - (Boolean) # If `true` , the data in cluster is encrypted at rest. 
Default: `false` # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateCluster.html def create_cluster(options = {}) db_name = options[:db_name] cluster_identifier = options[:cluster_identifier] cluster_type = options[:cluster_type] node_type = options[:node_type] master_username = options[:master_username] master_user_password = options[:master_user_password] cluster_subnet_group_name = options[:cluster_subnet_group_name] availability_zone = options[:availability_zone] preferred_maintenance_window = options[:preferred_maintenance_window] cluster_parameter_group_name = options[:cluster_parameter_group_name] automated_snapshot_retention_period = options[:automated_snapshot_retention_period] port = options[:port] cluster_version = options[:cluster_version] allow_version_upgrade = options[:allow_version_upgrade] number_of_nodes = options[:number_of_nodes] publicly_accessible = options[:publicly_accessible] encrypted = options[:encrypted] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::Cluster.new } if cluster_security_groups = options.delete(:ClusterSecurityGroups) params[:query].merge!(Fog::AWS.indexed_param('ClusterSecurityGroups.member.%d', [*cluster_security_groups])) end if vpc_security_group_ids = options.delete(:VpcSecurityGroupIds) params[:query].merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*vpc_security_group_ids])) end params[:query]['Action'] = 'CreateCluster' params[:query]['DBName'] = db_name if db_name params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['ClusterType'] = cluster_type if cluster_type params[:query]['NodeType'] = node_type if node_type params[:query]['MasterUsername'] = master_username if master_username params[:query]['MasterUserPassword'] = master_user_password if master_user_password params[:query]['ClusterSecurityGroups'] = cluster_security_groups if cluster_security_groups params[:query]['VpcSecurityGroupIds'] = vpc_security_group_ids if vpc_security_group_ids params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name params[:query]['AvailabilityZone'] = availability_zone if availability_zone params[:query]['PreferredMaintenanceWindow'] = preferred_maintenance_window if preferred_maintenance_window params[:query]['ClusterParameterGroupName'] = cluster_parameter_group_name if cluster_parameter_group_name params[:query]['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period if automated_snapshot_retention_period params[:query]['Port'] = port if port params[:query]['ClusterVersion'] = cluster_version if cluster_version params[:query]['AllowVersionUpgrade'] = allow_version_upgrade if allow_version_upgrade params[:query]['NumberOfNodes'] = number_of_nodes if number_of_nodes params[:query]['PubliclyAccessible'] = publicly_accessible if publicly_accessible params[:query]['Encrypted'] = encrypted if encrypted request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/create_cluster_parameter_group.rb000066400000000000000000000047571437344660100275760ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/create_cluster_parameter_group' # ==== Parameters # # @param [Hash] options # * :parameter_group_name - required - (String) # The name of the cluster parameter group. 
Constraints: Must be 1 to 255 alphanumeric # characters or hyphens First character must be a letter. Cannot end with a hyphen or # contain two consecutive hyphens. Must be unique within your AWS account. This value # is stored as a lower-case string. # * :parameter_group_family - required - (String) # The Amazon Redshift engine version to which the cluster parameter group applies. The # cluster engine version determines the set of parameters. To get a list of valid parameter # group family names, you can call DescribeClusterParameterGroups. By default, Amazon # Redshift returns a list of all the parameter groups that are owned by your AWS account, # including the default parameter groups for each Amazon Redshift engine version. The # parameter group family names associated with the default parameter groups provide you # the valid values. For example, a valid family name is "redshift-1.0". # * :description - required - (String) # A description of the parameter group. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterParameterGroup.html def create_cluster_parameter_group(options = {}) parameter_group_name = options[:parameter_group_name] parameter_group_family = options[:parameter_group_family] description = options[:description] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::CreateClusterParameterGroup.new } params[:query]['Action'] = 'CreateClusterParameterGroup' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name params[:query]['ParameterGroupFamily'] = parameter_group_family if parameter_group_family params[:query]['Description'] = description if description request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/create_cluster_security_group.rb000066400000000000000000000030051437344660100274460ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/create_cluster_security_group' # ==== Parameters # # @param [Hash] options # * :cluster_security_group_name - (String) # The name of a cluster security group for which you are requesting details. You # can specify either the Marker parameter or a ClusterSecurityGroupName parameter, # but not both. Example: securitygroup1 # * :description - required - (String) # A description for the security group. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterSecurityGroup.html def create_cluster_security_group(options = {}) cluster_security_group_name = options[:cluster_security_group_name] description = options[:description] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::CreateClusterSecurityGroup.new } params[:query]['Action'] = 'CreateClusterSecurityGroup' params[:query]['ClusterSecurityGroupName'] = cluster_security_group_name if cluster_security_group_name params[:query]['Description'] = description if description request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/create_cluster_snapshot.rb000066400000000000000000000032151437344660100262250ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_snapshot' # ==== Parameters # # @param [Hash] options # * :snapshot_identifier - required - (String) # A unique identifier for the snapshot that you are requesting. 
This identifier # must be unique for all snapshots within the AWS account. Constraints: Cannot be # null, empty, or blank Must contain from 1 to 255 alphanumeric characters or # hyphens First character must be a letter Cannot end with a hyphen or contain two # consecutive hyphens Example: my-snapshot-id # * :cluster_identifier - required - (String) # The cluster identifier for which you want a snapshot. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterSnapshot.html def create_cluster_snapshot(options = {}) snapshot_identifier = options[:snapshot_identifier] cluster_identifier = options[:cluster_identifier] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSnapshot.new } params[:query]['Action'] = 'CreateClusterSnapshot' params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/create_cluster_subnet_group.rb000066400000000000000000000036671437344660100271150ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_subnet_group_parser' # ==== Parameters # # @param [Hash] options # * :cluster_subnet_group_name - required - (String) # The name for the subnet group. Amazon Redshift stores the value as a lowercase string. # Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not # be "Default". Must be unique for all subnet groups that are created by your AWS account. # Example: examplesubnetgroup # * :description - required - (String) # A description of the parameter group. # * :subnet_ids - required - (Array<) # An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterSubnetGroup.html def create_cluster_subnet_group(options = {}) cluster_subnet_group_name = options[:cluster_subnet_group_name] description = options[:description] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSubnetGroupParser.new } if subnet_ids = options.delete(:subnet_ids) params[:query].merge!(Fog::AWS.indexed_param('SubnetIds.member.%d', [*subnet_ids])) end params[:query]['Action'] = 'CreateClusterSubnetGroup' params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name params[:query]['Description'] = description if description request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/delete_cluster.rb000066400000000000000000000050241437344660100243050ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - required - (String) # A unique identifier for the cluster. You use this identifier to refer to the cluster # for any subsequent cluster operations such as deleting or modifying. Must be unique # for all clusters within an AWS account. Example: myexamplecluster # * :skip_final_cluster_snapshot - (Boolean) # Determines whether a final snapshot of the cluster is created before Amazon Redshift # deletes the cluster. If `true` , a final cluster snapshot is not created. 
If `false`, # a final cluster snapshot is created before the cluster is deleted. The # FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot # is `false` . Default: `false` # * :final_cluster_snapshot_identifier - (String) # The identifier of the final snapshot that is to be created immediately before deleting # the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be `false`. # Constraints: Must be 1 to 255 alphanumeric characters. First character must be a letter # Cannot end with a hyphen or contain two consecutive hyphens. # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DeleteCluster.html def delete_cluster(options = {}) cluster_identifier = options[:cluster_identifier] final_cluster_snapshot_identifier = options[:final_cluster_snapshot_identifier] skip_final_cluster_snapshot = options[:skip_final_cluster_snapshot] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::Cluster.new } params[:query]['Action'] = 'DeleteCluster' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier if final_cluster_snapshot_identifier params[:query]['SkipFinalClusterSnapshot'] = skip_final_cluster_snapshot if skip_final_cluster_snapshot request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/delete_cluster_parameter_group.rb000066400000000000000000000020411437344660100275550ustar00rootroot00000000000000module Fog module AWS class Redshift class Real # ==== Parameters # # @param [Hash] options # * :parameter_group_name - required - (String) # The name of the parameter group to be deleted. Constraints: Must be the name of an # existing cluster parameter group. Cannot delete a default cluster parameter group. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DeleteClusterParameterGroup.html def delete_cluster_parameter_group(options = {}) parameter_group_name = options[:parameter_group_name] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {} } params[:query]['Action'] = 'DeleteClusterParameterGroup' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/delete_cluster_security_group.rb000066400000000000000000000017241437344660100274530ustar00rootroot00000000000000module Fog module AWS class Redshift class Real # ==== Parameters # # @param [Hash] options # * :cluster_security_group_name - required - (String) # The name of the cluster security group to be deleted. 
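        # ==== Example
        # A minimal, illustrative call, assuming configured Fog credentials; the
        # group name is a placeholder value:
        #
        #   redshift = Fog::AWS[:redshift]
        #   redshift.delete_cluster_security_group(:cluster_security_group_name => 'securitygroup1')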
# # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DeleteClusterSecurityGroup.html def delete_cluster_security_group(options = {}) cluster_security_group_name = options[:cluster_security_group_name] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {} } params[:query]['Action'] = 'DeleteClusterSecurityGroup' params[:query]['ClusterSecurityGroupName'] = cluster_security_group_name if cluster_security_group_name request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/delete_cluster_snapshot.rb000066400000000000000000000033271437344660100262300ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_snapshot' # ==== Parameters # # @param [Hash] options # * :snapshot_identifier - required - (String) # A unique identifier for the snapshot that you are requesting. This identifier # must be unique for all snapshots within the AWS account. Constraints: Cannot be # null, empty, or blank Must contain from 1 to 255 alphanumeric characters or # hyphens First character must be a letter Cannot end with a hyphen or contain two # consecutive hyphens Example: my-snapshot-id # * :snapshot_cluster_identifier - required - (String) # The cluster identifier for which you want a snapshot. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateClusterSnapshot.html def delete_cluster_snapshot(options = {}) snapshot_identifier = options[:snapshot_identifier] snapshot_cluster_identifier = options[:snapshot_cluster_identifier] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSnapshot.new } params[:query]['Action'] = 'DeleteClusterSnapshot' params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['SnapshotClusterIdentifier'] = snapshot_cluster_identifier if snapshot_cluster_identifier request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/delete_cluster_subnet_group.rb000066400000000000000000000023651437344660100271060ustar00rootroot00000000000000module Fog module AWS class Redshift class Real # ==== Parameters # # @param [Hash] options # * :cluster_subnet_group_name - required - (String) # The name for the subnet group. Amazon Redshift stores the value as a lowercase string. # Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not # be "Default". Must be unique for all subnet groups that are created by your AWS account. 
# Example: examplesubnetgroup # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DeleteClusterSubnetGroup.html def delete_cluster_subnet_group(options = {}) cluster_subnet_group_name = options[:cluster_subnet_group_name] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :delete, :query => {} } params[:query]['Action'] = 'DeleteClusterSubnetGroup' params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_parameter_groups.rb000066400000000000000000000035661437344660100302730ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_parameter_groups' # ==== Parameters # # @param [Hash] options # * :parameter_group_name (String) # The name of a cluster parameter group for which to return details. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterParameterGroups.html def describe_cluster_parameter_groups(options = {}) parameter_group_name = options[:parameter_group_name] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterParameterGroups.new } params[:query]['Action'] = 'DescribeClusterParameterGroups' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_parameters.rb000066400000000000000000000045051437344660100270510ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_parameters' # ==== Parameters # # @param [Hash] options # * :parameter_group_name - required - (String) # The name of a cluster parameter group for which to return details. # * :source - (String) # The parameter types to return. Specify user to show parameters that are # different form the default. Similarly, specify engine-default to show parameters # that are the same as the default parameter group. Default: All parameter types # returned. Valid Values: user | engine-default # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. 
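        # ==== Example
        # A minimal, illustrative call, assuming configured Fog credentials; the
        # parameter group name is a placeholder value, and :source restricts the
        # listing to user-modified parameters:
        #
        #   redshift = Fog::AWS[:redshift]
        #   redshift.describe_cluster_parameters(:parameter_group_name => 'parametergroup1',
        #                                        :source => 'user')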
# # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterParameters.html def describe_cluster_parameters(options = {}) parameter_group_name = options[:parameter_group_name] source = options[:source] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterParameters.new } params[:query]['Action'] = 'DescribeClusterParameters' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name params[:query]['Source'] = source if source params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_security_groups.rb000066400000000000000000000041341437344660100301520ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_security_groups' # ==== Parameters # # @param [Hash] options # * :cluster_security_group_name - (String) # The name of a cluster security group for which you are requesting details. You # can specify either the Marker parameter or a ClusterSecurityGroupName parameter, # but not both. Example: securitygroup1 # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterSecurityGroups.html def describe_cluster_security_groups(options = {}) cluster_security_group_name = options[:cluster_security_group_name] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterSecurityGroups.new } params[:query]['Action'] = 'DescribeClusterSecurityGroups' params[:query]['ClusterSecurityGroupName'] = cluster_security_group_name if cluster_security_group_name params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_snapshots.rb000066400000000000000000000071571437344660100267360ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_snapshots' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - (String) # The identifier of the cluster for which information about snapshots is requested. # * :snapshot_identifier - (String) # The snapshot identifier of the snapshot about which to return information. # * :snapshot_type - (String) # The type of snapshots for which you are requesting information. By default, # snapshots of all types are returned. Valid Values: automated | manual # * :start_time - (String) # A value that requests only snapshots created at or after the specified time. # The time value is specified in ISO 8601 format. For more information about # ISO 8601, go to the ISO8601 Wikipedia page. 
Example: 2012-07-16T18:00:00Z # * :end_time - (String) # A time value that requests only snapshots created at or before the specified # time. The time value is specified in ISO 8601 format. For more information # about ISO 8601, go to the ISO8601 Wikipedia page. Example: 2012-07-16T18:00:00Z # * :owner_account - (String) # The AWS customer account used to create or copy the snapshot. Use this field to # filter the results to snapshots owned by a particular account. To describe snapshots # you own, either specify your AWS customer account, or do not specify the parameter. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterSnapshots.html def describe_cluster_snapshots(options = {}) cluster_identifier = options[:cluster_identifier] snapshot_identifier = options[:snapshot_identifier] start_time = options[:start_time] end_time = options[:end_time] owner_account = options[:owner_account] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterSnapshots.new } params[:query]['Action'] = 'DescribeClusterSnapshots' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['start_time'] = start_time if start_time params[:query]['end_time'] = end_time if end_time params[:query]['OwnerAccount'] = owner_account if owner_account params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_subnet_groups.rb000066400000000000000000000035321437344660100276040ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_subnet_groups' # ==== Parameters # # @param [Hash] options # * :cluster_subnet_group_name - (String) # The name of the cluster subnet group for which information is requested. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. 
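          # ==== Example
          #
          # Illustrative sketch (not part of the original documentation): note that,
          # unlike most requests in this module, this method takes positional
          # arguments rather than an options hash; `redshift` is assumed to be an
          # existing Fog::AWS::Redshift connection and the group name is a placeholder.
          #
          #   redshift.describe_cluster_subnet_groups('examplesubnetgroup')
          #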
# # ==== Returns # * response<~Excon::Response>: # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterSubnetGroups.html def describe_cluster_subnet_groups(cluster_subnet_group_name=nil, marker=nil,max_records=nil) path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterSubnetGroups.new } params[:query]['Action'] = 'DescribeClusterSubnetGroups' params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_cluster_versions.rb000066400000000000000000000046071437344660100265610ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_cluster_versions' # ==== Parameters # # @param [Hash] options # * :cluster_parameter_group_family - (String) # The name of a specific cluster parameter group family to return details for. # Constraints: Must be 1 to 255 alphanumeric characters. First character must be # a letter, and cannot end with a hyphen or contain two consecutive hyphens. # * :cluster_version - (String) # The specific cluster version to return. Example: 1.0 # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusterVersions.html def describe_cluster_versions(options = {}) cluster_version = options[:cluster_version] cluster_parameter_group_family = options[:cluster_parameter_group_family] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusterVersions.new } params[:query]['Action'] = 'DescribeClusterVersions' params[:query]['ClusterVersion'] = cluster_version if cluster_version params[:query]['ClusterParameterGroupFamily'] = cluster_parameter_group_family if cluster_parameter_group_family params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_clusters.rb000066400000000000000000000036461437344660100250160ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_clusters' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - (String) # The unique identifier of a cluster whose properties you are requesting. # This parameter isn't case sensitive. The default is that all clusters # defined for an account are returned. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. 
# * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeClusters.html def describe_clusters(options = {}) cluster_identifier = options[:cluster_identifier] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeClusters.new } params[:query]['Action'] = 'DescribeClusters' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['MaxRecords'] = max_records if max_records params[:query]['Marker'] = marker if marker request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_default_cluster_parameters.rb000066400000000000000000000037361437344660100305620ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_default_cluster_parameters' # ==== Parameters # # @param [Hash] options # * :parameter_group_family - required - (String) # The name of a cluster parameter group family for which to return details. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeDefaultClusterParameters.html def describe_default_cluster_parameters(options = {}) parameter_group_family = options[:parameter_group_family] source = options[:source] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeDefaultClusterParameters.new } params[:query]['Action'] = 'DescribeDefaultClusterParameters' params[:query]['ParameterGroupFamily'] = parameter_group_family if parameter_group_family params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_events.rb000066400000000000000000000101601437344660100244430ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_events' # ==== Parameters # # @param [Hash] options # * :source_identifier - (String) # The identifier of the event source for which events will be returned. If this # parameter is not specified, then all sources are included in the response. # Constraints: If SourceIdentifier is supplied, SourceType must also be provided. # Specify a cluster identifier when SourceType is cluster. Specify a cluster security # group name when SourceType is cluster-security-group. Specify a cluster parameter # group name when SourceType is cluster-parameter-group. Specify a cluster snapshot # identifier when SourceType is cluster-snapshot. # * :source_type - (String) # The event source to retrieve events for. If no value is specified, all events are # returned. 
Constraints: If SourceType is supplied, SourceIdentifier must also be # provided. Specify cluster when SourceIdentifier is a cluster identifier. Specify # cluster-security-group when SourceIdentifier is a cluster security group name. Specify # cluster-parameter-group when SourceIdentifier is a cluster parameter group name. Specify # cluster-snapshot when SourceIdentifier is a cluster snapshot identifier. Valid values # include: cluster, cluster-parameter-group, cluster-security-group, cluster-snapshot # * :start_time - (String<) # The beginning of the time interval to retrieve events for, specified in ISO 8601 # format. Example: 2009-07-08T18:00Z # * :end_time - (String<) # The end of the time interval for which to retrieve events, specified in ISO 8601 # format. Example: 2009-07-08T18:00Z # * :duration - (Integer) # The number of minutes prior to the time of the request for which to retrieve events. # For example, if the request is sent at 18:00 and you specify a duration of 60, then # only events which have occurred after 17:00 will be returned. Default: 60 # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeEvents.html def describe_events(options = {}) source_identifier = options[:source_identifier] source_type = options[:source_type] start_time = options[:start_time] end_time = options[:end_time] duration = options[:duration] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeEvents.new } params[:query]['Action'] = 'DescribeEvents' params[:query]['SourceIdentifier'] = source_identifier if source_identifier params[:query]['SourceType'] = source_type if source_type params[:query]['StartTime'] = start_time if start_time params[:query]['EndTime'] = end_time if end_time params[:query]['Duration'] = duration if duration params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_orderable_cluster_options.rb000066400000000000000000000044471437344660100304250ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_orderable_cluster_options' # ==== Parameters # # @param [Hash] options # * :cluster_version - (String) # The version filter value. Specify this parameter to show only the available # offerings matching the specified version. Default: All versions. Constraints: # Must be one of the version returned from DescribeClusterVersions. # * :node_type - (String) # The node type filter value. Specify this parameter to show only the available # offerings matching the specified node type. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. 
# * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeOrderableClusterOptions.html def describe_orderable_cluster_options(options = {}) cluster_version = options[:cluster_version] node_type = options[:node_type] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeOrderableClusterOptions.new } params[:query]['Action'] = 'DescribeOrderableClusterOptions' params[:query]['ClusterVersion'] = cluster_version if cluster_version params[:query]['NodeType'] = node_type if node_type params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_reserved_node_offerings.rb000066400000000000000000000035631437344660100300360ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_reserved_node_offerings' # ==== Parameters # # @param [Hash] options # * :reserved_node_offering_id - (String) # The unique identifier for the offering. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeReservedNodeOfferings.html def describe_reserved_node_offerings(options = {}) reserved_node_offering_id = options[:reserved_node_offering_id] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeReservedNodeOfferings.new } params[:query]['Action'] = 'DescribeReservedNodeOfferings' params[:query]['ReservedNodeOfferingId'] = reserved_node_offering_id if reserved_node_offering_id params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_reserved_nodes.rb000066400000000000000000000034121437344660100261500ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_reserved_nodes' # ==== Parameters # # @param [Hash] options # * :reserved_node_id - (String) # The unique identifier for the node reservation. # * :max_records - (Integer) # The maximum number of records to include in the response. If more than the # MaxRecords value is available, a marker is included in the response so that the # following results can be retrieved. Constrained between [20,100]. Default is 100. # * :marker - (String) # The marker returned from a previous request. If this parameter is specified, the # response includes records beyond the marker only, up to MaxRecords. 
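          # ==== Example
          #
          # Illustrative sketch (not part of the original documentation): assumes
          # `redshift` is an existing Fog::AWS::Redshift connection and the
          # reserved node id is a placeholder.
          #
          #   redshift.describe_reserved_nodes(:reserved_node_id => 'my-reserved-node-id')
          #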
# # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeReservedNodes.html def describe_reserved_nodes(options = {}) reserved_node_id = options[:reserved_node_id] marker = options[:marker] max_records = options[:max_records] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeReservedNodes.new } params[:query]['Action'] = 'DescribeReservedNodes' params[:query]['ReservedNodeId'] = reserved_node_id if reserved_node_id params[:query]['Marker'] = marker if marker params[:query]['MaxRecords'] = max_records if max_records request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/describe_resize.rb000066400000000000000000000023031437344660100244400ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/describe_resize' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - required - (String) # The unique identifier of a cluster whose resize progress you are requesting. # This parameter isn't case-sensitive. By default, resize operations for all # clusters defined for an AWS account are returned. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DescribeResize.html def describe_resize(options = {}) cluster_identifier = options[:cluster_identifier] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :get, :query => {}, :parser => Fog::Parsers::Redshift::AWS::DescribeResize.new } params[:query]['Action'] = 'DescribeResize' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/modify_cluster.rb000066400000000000000000000153771437344660100243460ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - required - (String) # A unique identifier for the cluster. You use this identifier to refer to the cluster # for any subsequent cluster operations such as deleting or modifying. Must be unique # for all clusters within an AWS account. Example: myexamplecluster # * :allow_version_upgrade - (Boolean) # If `true` , upgrades can be applied during the maintenance window to the Amazon # Redshift engine that is running on the cluster. Default: `true` # * :automated_snapshot_retention_period - (Integer) # Number of days that automated snapshots are retained. If the value is 0, automated # snapshots are disabled. Default: 1 Constraints: Must be a value from 0 to 35. # * :cluster_parameter_group_name - (String) # The name of the parameter group to be associated with this cluster. Default: The # default Amazon Redshift cluster parameter group. Constraints: Must be 1 to 255 # alphanumeric characters or hyphens. First character must be a letter. Cannot end # with a hyphen or contain two consecutive hyphens. # * :cluster_security_groups - (Array) # A list of security groups to be associated with this cluster. Default: The default # cluster security group for Amazon Redshift. # * :cluster_type - (String) # Type of the cluster. When cluster type is specified as single-node, the NumberOfNodes # parameter is not required. multi-node, the NumberOfNodes parameter is required. 
Valid # Values: multi-node | single-node Default: multi-node # * :cluster_version - (String) # The version of the Amazon Redshift engine software that you want to deploy on the # cluster. The version selected runs on all the nodes in the cluster. Constraints: # Only version 1.0 is currently available. Example: 1.0 # * :master_user_password - required - (String) # The password associated with the master user account for the cluster that is being # created. Constraints: Must be between 8 and 64 characters in length. Must contain at # least one uppercase letter. Must contain at least one lowercase letter. Must contain # one number. # * :node_type - required - (String) # The node type to be provisioned. Valid Values: dw.hs1.xlarge | dw.hs1.8xlarge. # * :number_of_nodes - (Integer) # The number of compute nodes in the cluster. This parameter is required when the # ClusterType parameter is specified as multi-node. If you don't specify this parameter, # you get a single-node cluster. When requesting a multi-node cluster, you must specify # the number of nodes that you want in the cluster. Default: 1 Constraints: Value must # be at least 1 and no more than 100. # * :preferred_maintenance_window - (String) # The weekly time range (in UTC) during which automated cluster maintenance can occur. # Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from # an 8-hour block of time per region, occurring on a random day of the week. # Constraints: Minimum 30-minute window. # * :vpc_security_group_ids - (Array) # A list of Virtual Private Cloud (VPC) security groups to be associated with the # cluster. Default: The default VPC security group is associated with the cluster. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_CreateCluster.html def modify_cluster(options = {}) cluster_identifier = options[:cluster_identifier] cluster_type = options[:cluster_type] node_type = options[:node_type] master_user_password = options[:master_user_password] preferred_maintenance_window = options[:preferred_maintenance_window] cluster_parameter_group_name = options[:cluster_parameter_group_name] automated_snapshot_retention_period = options[:automated_snapshot_retention_period] cluster_version = options[:cluster_version] allow_version_upgrade = options[:allow_version_upgrade] number_of_nodes = options[:number_of_nodes] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::Cluster.new } if cluster_security_groups = options.delete(:ClusterSecurityGroups) params[:query].merge!(Fog::AWS.indexed_param('ClusterSecurityGroups.member.%d', [*cluster_security_groups])) end if vpc_security_group_ids = options.delete(:VpcSecurityGroupIds) params[:query].merge!(Fog::AWS.indexed_param('VpcSecurityGroupIds.member.%d', [*vpc_security_group_ids])) end params[:query]['Action'] = 'ModifyCluster' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['ClusterParameterGroupName'] = cluster_parameter_group_name if cluster_parameter_group_name params[:query]['ClusterType'] = cluster_type if cluster_type params[:query]['NodeType'] = node_type if node_type params[:query]['MasterUserPassword'] = master_user_password if master_user_password params[:query]['PreferredMaintenanceWindow'] = preferred_maintenance_window if preferred_maintenance_window params[:query]['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period if 
automated_snapshot_retention_period params[:query]['ClusterVersion'] = cluster_version if cluster_version params[:query]['AllowVersionUpgrade'] = allow_version_upgrade if allow_version_upgrade params[:query]['NumberOfNodes'] = number_of_nodes if number_of_nodes params[:query]['ClusterSecurityGroups'] = cluster_security_groups if cluster_security_groups params[:query]['VpcSecurityGroupIds'] = vpc_security_group_ids if vpc_security_group_ids request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/modify_cluster_parameter_group.rb000066400000000000000000000027521437344660100276130ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/update_cluster_parameter_group_parser' # ==== Parameters # # @param [Hash] options # * :parameter_group_name - required - (String) # The name of the parameter group to be deleted. Constraints: Must be the name of an # existing cluster parameter group. Cannot delete a default cluster parameter group. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_ModifyClusterParameterGroup.html def modify_cluster_parameter_group(options = {}) parameter_group_name = options[:parameter_group_name] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::UpdateClusterParameterGroupParser.new } params[:query]['Action'] = 'ModifyClusterParameterGroup' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name if options['Parameters'] options['Parameters'].keys.each_with_index do |name, index| params[:query].merge!({ "Parameters.member.#{index+1}.#{name}" => options['Parameters'][name] }) end end request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/modify_cluster_subnet_group.rb000066400000000000000000000036671437344660100271410ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_subnet_group_parser' # ==== Parameters # # @param [Hash] options # * :cluster_subnet_group_name - required - (String) # The name for the subnet group. Amazon Redshift stores the value as a lowercase string. # Constraints: Must contain no more than 255 alphanumeric characters or hyphens. Must not # be "Default". Must be unique for all subnet groups that are created by your AWS account. # Example: examplesubnetgroup # * :description - required - (String) # A description of the parameter group. # * :subnet_ids - required - (Array<) # An array of VPC subnet IDs. A maximum of 20 subnets can be modified in a single request. 
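          # ==== Example
          #
          # Illustrative sketch (not part of the original documentation): assumes
          # `redshift` is an existing Fog::AWS::Redshift connection; the group name,
          # description and subnet id are placeholders.
          #
          #   redshift.modify_cluster_subnet_group(
          #     :cluster_subnet_group_name => 'examplesubnetgroup',
          #     :description               => 'updated subnet group',
          #     :subnet_ids                => ['subnet-12345678']
          #   )
          #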
# # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_ModifyClusterSubnetGroup.html def modify_cluster_subnet_group(options = {}) cluster_subnet_group_name = options[:cluster_subnet_group_name] description = options[:description] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSubnetGroupParser.new } if subnet_ids = options.delete(:subnet_ids) params[:query].merge!(Fog::AWS.indexed_param('SubnetIds.member.%d', [*subnet_ids])) end params[:query]['Action'] = 'ModifyClusterSubnetGroup' params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name params[:query]['Description'] = description if description request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/purchase_reserved_node_offering.rb000066400000000000000000000026001437344660100276740ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/purchase_reserved_node_offering' # ==== Parameters # # @param [Hash] options # * :reserved_node_offering_id - required - (String) # The unique identifier of the reserved node offering you want to purchase. # * :node_count - (Integer) # The number of reserved nodes you want to purchase. Default: 1 # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_PurchaseReservedNodeOffering.html def purchase_reserved_node_offering(options = {}) reserved_node_offering_id = options[:reserved_node_offering_id] node_count = options[:node_count] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::PurchaseReservedNodeOffering.new } params[:query]['Action'] = 'PurchaseReservedNodeOffering' params[:query]['ReservedNodeOfferingId'] = reserved_node_offering_id if reserved_node_offering_id params[:query]['NodeCount'] = node_count if node_count request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/reboot_cluster.rb000066400000000000000000000023211437344660100243320ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - required - (String) # A unique identifier for the cluster. You use this identifier to refer to the cluster # for any subsequent cluster operations such as deleting or modifying. Must be unique # for all clusters within an AWS account. Example: myexamplecluster # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_DeleteCluster.html def reboot_cluster(options = {}) cluster_identifier = options[:cluster_identifier] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::Cluster.new } params[:query]['Action'] = 'RebootCluster' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/reset_cluster_parameter_group.rb000066400000000000000000000052371437344660100274470ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/update_cluster_parameter_group_parser' # ==== Parameters # # @param [Hash] options # * :parameter_group_name - required - (String) The name of the cluster parameter group to be reset. 
# * :reset_all_parameters - (Boolean) If true , all parameters in the specified parameter group will be reset to their default values. Default: true # * :parameters - (Array<) An array of names of parameters to be reset. If ResetAllParameters option is not used, then at least one parameter name must be supplied. Constraints: A maximum of 20 parameters can be reset in a single request. # * :parameter_name - (String) The name of the parameter. # * :parameter_value - (String) The value of the parameter. # * :description - (String) A description of the parameter. # * :source - (String) The source of the parameter value, such as "engine-default" or "user". # * :data_type - (String) The data type of the parameter. # * :allowed_values - (String) The valid range of values for the parameter. # * :is_modifiable - (Boolean) If true , the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed. # * :minimum_engine_version - (String) The earliest engine version to which the parameter can apply. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_ResetClusterParameterGroup.html def reset_cluster_parameter_group(options = {}) parameter_group_name = options[:parameter_group_name] reset_all_parameters = options[:reset_all_parameters] path = "/" params = { :idempotent => true, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::UpdateClusterParameterGroupParser.new } if options['Parameters'] options['Parameters'].keys.each_with_index do |name, index| params[:query].merge!({ "Parameters.member.#{index+1}.#{name}" => options['Parameters'][name] }) end end params[:query]['Action'] = 'ResetClusterSubnetGroup' params[:query]['ParameterGroupName'] = parameter_group_name if parameter_group_name params[:query]['ResetAllParameters'] = reset_all_parameters if reset_all_parameters request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/restore_from_cluster_snapshot.rb000066400000000000000000000102451437344660100274710ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster' # ==== Parameters # # @param [Hash] options # * :cluster_identifier - required - (String) # The identifier of the cluster that will be created from restoring the snapshot. # Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. # Alphabetic characters must be lowercase. First character must be a letter. Cannot # end with a hyphen or contain two consecutive hyphens. Must be unique for all # clusters within an AWS account. # * :snapshot_identifier - required - (String) # The name of the snapshot from which to create the new cluster. This parameter # isn't case sensitive. Example: my-snapshot-id # * :snapshot_cluster_identifier - (String) # * :port - (Integer) # The port number on which the cluster accepts connections. Default: The same port # as the original cluster. Constraints: Must be between 1115 and 65535. # * :availability_zone - (String) # The Amazon EC2 Availability Zone in which to restore the cluster. Default: A # random, system-chosen Availability Zone. Example: us-east-1a # * :allow_version_upgrade - (Boolean) # If true , upgrades can be applied during the maintenance window to the Amazon # Redshift engine that is running on the cluster. Default: true # * :cluster_subnet_group_name - (String) # The name of the subnet group where you want to cluster restored. 
A snapshot of # cluster in VPC can be restored only in VPC. Therefore, you must provide subnet # group name where you want the cluster restored. # * :publicly_accessible - (Boolean) # If true , the cluster can be accessed from a public network. # * :owner_account - (String) # The AWS customer account used to create or copy the snapshot. Required if you are # restoring a snapshot you do not own, optional if you own the snapshot. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_RestoreFromClusterSnapshot.html def restore_from_cluster_snapshot(options = {}) cluster_identifier = options[:cluster_identifier] snapshot_identifier = options[:snapshot_identifier] snapshot_cluster_identifier = options[:snapshot_cluster_identifier] port = options[:port] availability_zone = options[:availability_zone] allow_version_upgrade = options[:allow_version_upgrade] cluster_subnet_group_name = options[:cluster_subnet_group_name] publicly_accessible = options[:publicly_accessible] owner_account = options[:owner_account] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::Cluster.new } params[:query]['Action'] = 'RestoreFromClusterSnapshot' params[:query]['ClusterIdentifier'] = cluster_identifier if cluster_identifier params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['SnapshotClusterIdentifier'] = snapshot_cluster_identifier if snapshot_cluster_identifier params[:query]['Port'] = port if port params[:query]['AvailabilityZone'] = availability_zone if availability_zone params[:query]['AllowVersionUpgrade'] = allow_version_upgrade if allow_version_upgrade params[:query]['ClusterSubnetGroupName'] = cluster_subnet_group_name if cluster_subnet_group_name params[:query]['PubliclyAccessible'] = publicly_accessible if publicly_accessible params[:query]['OwnerAccount'] = owner_account if owner_account request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/revoke_cluster_security_group_ingress.rb000066400000000000000000000047621437344660100312430ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/revoke_cluster_security_group_ingress' # ==== Parameters # # @param [Hash] options # * :cluster_security_group_name - required - (String) # The name of the security Group from which to revoke the ingress rule. # * :cidrip - (String) # The IP range for which to revoke access. This range must be a valid Classless # Inter-Domain Routing (CIDR) block of IP addresses. If CIDRIP is specified, # EC2SecurityGroupName and EC2SecurityGroupOwnerId cannot be provided. # * :ec2_security_group_name - (String) # The name of the EC2 Security Group whose access is to be revoked. If # EC2SecurityGroupName is specified, EC2SecurityGroupOwnerId must also be # provided and CIDRIP cannot be provided. # * :ec2_security_group_owner_id - (String) # The AWS account number of the owner of the security group specified in the # EC2SecurityGroupName parameter. The AWS access key ID is not an acceptable # value. If EC2SecurityGroupOwnerId is specified, EC2SecurityGroupName must # also be provided. and CIDRIP cannot be provided. 
Example: 111122223333 # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_RevokeClusterSecurityGroupIngress.html def revoke_cluster_security_group_ingress(options = {}) cluster_security_group_name = options[:cluster_security_group_name] cidrip = options[:cidrip] ec2_security_group_name = options[:ec2_security_group_name] ec2_security_group_owner_id = options[:ec2_security_group_owner_id] path = "/" params = { :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::RevokeClusterSecurityGroupIngress.new } params[:query]['Action'] = 'RevokeClusterSecurityGroupIngress' params[:query]['ClusterSecurityGroupName'] = cluster_security_group_name if cluster_security_group_name params[:query]['CIDRIP'] = cidrip if cidrip params[:query]['EC2SecurityGroupName'] = ec2_security_group_name if ec2_security_group_name params[:query]['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id if ec2_security_group_owner_id request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/redshift/revoke_snapshot_access.rb000066400000000000000000000032661437344660100260430ustar00rootroot00000000000000module Fog module AWS class Redshift class Real require 'fog/aws/parsers/redshift/cluster_snapshot' # ==== Parameters # # @param [Hash] options # * :snapshot_identifier - required - (String) # The identifier of the snapshot that the account can no longer access. # * :snapshot_cluster_identifier - (String) # * :account_with_restore_access - required - (String) # The identifier of the AWS customer account that can no longer restore the specified snapshot. # # ==== See Also # http://docs.aws.amazon.com/redshift/latest/APIReference/API_RevokeSnapshotAccess.html def revoke_snapshot_access(options = {}) snapshot_identifier = options[:snapshot_identifier] snapshot_cluster_identifier = options[:snapshot_cluster_identifier] account_with_restore_access = options[:account_with_restore_access] path = "/" params = { :expects => 200, :headers => {}, :path => path, :method => :put, :query => {}, :parser => Fog::Parsers::Redshift::AWS::ClusterSnapshot.new } params[:query]['Action'] = 'RevokeSnapshotAccess' params[:query]['SnapshotIdentifier'] = snapshot_identifier if snapshot_identifier params[:query]['SnapshotClusterIdentifier'] = snapshot_cluster_identifier if snapshot_cluster_identifier params[:query]['AccountWithRestoreAccess'] = account_with_restore_access if account_with_restore_access request(params) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/000077500000000000000000000000001437344660100177365ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/ses/delete_verified_email_address.rb000066400000000000000000000014351437344660100262610ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/delete_verified_email_address' # Delete an existing verified email address # # ==== Parameters # * email_address<~String> - Email Address to be removed # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def delete_verified_email_address(email_address) request({ 'Action' => 'DeleteVerifiedEmailAddress', 'EmailAddress' => email_address, :parser => Fog::Parsers::AWS::SES::DeleteVerifiedEmailAddress.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/get_send_quota.rb000066400000000000000000000014121437344660100232620ustar00rootroot00000000000000module Fog module AWS class SES class Real require 
'fog/aws/parsers/ses/get_send_quota' # Returns the user's current activity limits. # # ==== Parameters # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'GetSendQuotaResult'<~Hash> # * 'Max24HourSend' <~String> # * 'MaxSendRate' <~String> # * 'SentLast24Hours' <~String> # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def get_send_quota request({ 'Action' => 'GetSendQuota', :parser => Fog::Parsers::AWS::SES::GetSendQuota.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/get_send_statistics.rb000066400000000000000000000016411437344660100243270ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/get_send_statistics' # Returns the user's current activity limits. # # ==== Parameters # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'GetSendStatisticsResult'<~Hash> # * 'SendDataPoints' <~Array> # * 'Bounces' <~String> # * 'Complaints' <~String> # * 'DeliveryAttempts' <~String> # * 'Rejects' <~String> # * 'Timestamp' <~String> # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def get_send_statistics request({ 'Action' => 'GetSendStatistics', :parser => Fog::Parsers::AWS::SES::GetSendStatistics.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/list_verified_email_addresses.rb000066400000000000000000000013471437344660100263240ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/list_verified_email_addresses' # Returns a list containing all of the email addresses that have been verified # # ==== Parameters # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'VerifiedEmailAddresses' <~Array> # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def list_verified_email_addresses request({ 'Action' => 'ListVerifiedEmailAddresses', :parser => Fog::Parsers::AWS::SES::ListVerifiedEmailAddresses.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/send_email.rb000066400000000000000000000052651437344660100223730ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/send_email' # Send an email # # ==== Parameters # * Source <~String> - The sender's email address # * Destination <~Hash> - The destination for this email, composed of To:, From:, and CC: fields. # * BccAddresses <~Array> - The BCC: field(s) of the message. # * CcAddresses <~Array> - The CC: field(s) of the message. # * ToAddresses <~Array> - The To: field(s) of the message. # * Message <~Hash> - The message to be sent. # * Body <~Hash> # * Html <~Hash> # * Charset <~String> # * Data <~String> # * Text <~Hash> # * Charset <~String> # * Data <~String> # * Subject <~Hash> # * Charset <~String> # * Data <~String> # * options <~Hash>: # * ReplyToAddresses <~Array> - The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply. # * ReturnPath <~String> - The email address to which bounce notifications are to be forwarded. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. 
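        # ==== Example
        #
        # Illustrative sketch (not part of the original documentation): assumes
        # `ses` is an existing Fog::AWS::SES connection and that both addresses
        # have been verified; the addresses and message text are placeholders.
        #
        #   ses.send_email(
        #     'sender@example.com',
        #     { 'ToAddresses' => ['recipient@example.com'] },
        #     { 'Subject' => { 'Data' => 'Hello' },
        #       'Body'    => { 'Text' => { 'Data' => 'Hello from fog' } } }
        #   )
        #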
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'MessageId'<~String> - Id of message # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def send_email(source, destination, message, options = {}) params = { 'Source' => source } for key, values in destination params.merge!(Fog::AWS.indexed_param("Destination.#{key}.member", [*values])) end for key, value in message['Subject'] params["Message.Subject.#{key}"] = value end for type, data in message['Body'] for key, value in data params["Message.Body.#{type}.#{key}"] = value end end if options.key?('ReplyToAddresses') params.merge!(Fog::AWS.indexed_param("ReplyToAddresses.member", [*options['ReplyToAddresses']])) end if options.key?('ReturnPath') params['ReturnPath'] = options['ReturnPath'] end request({ 'Action' => 'SendEmail', :parser => Fog::Parsers::AWS::SES::SendEmail.new }.merge(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/send_raw_email.rb000066400000000000000000000024511437344660100232360ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/send_raw_email' # Send a raw email # # ==== Parameters # * RawMessage <~String> - The message to be sent. # * Options <~Hash> # * Source <~String> - The sender's email address. Takes precenence over Return-Path if specified in RawMessage # * Destinations <~Array> - All destinations for this email. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'MessageId'<~String> - Id of message # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def send_raw_email(raw_message, options = {}) params = {} if options.key?('Destinations') params.merge!(Fog::AWS.indexed_param('Destinations.member', [*options['Destinations']])) end if options.key?('Source') params['Source'] = options['Source'] end request({ 'Action' => 'SendRawEmail', 'RawMessage.Data' => Base64.encode64(raw_message.to_s).chomp!, :parser => Fog::Parsers::AWS::SES::SendRawEmail.new }.merge(params)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/verify_domain_identity.rb000066400000000000000000000016211437344660100250270ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/verify_domain_identity' # Verifies a domain. This action returns a verification authorization # token which must be added as a DNS TXT record to the domain. # # ==== Parameters # * domain<~String> - The domain to be verified # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'VerificationToken'<~String> - Verification token # * 'RequestId'<~String> - Id of request def verify_domain_identity(domain) request({ 'Action' => 'VerifyDomainIdentity', 'Domain' => domain, :parser => Fog::Parsers::AWS::SES::VerifyDomainIdentity.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/ses/verify_email_address.rb000066400000000000000000000015061437344660100244450ustar00rootroot00000000000000module Fog module AWS class SES class Real require 'fog/aws/parsers/ses/verify_email_address' # Verifies an email address. This action causes a confirmation email message to be sent to the specified address. 
# # ==== Parameters # * email_address<~String> - The email address to be verified # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'ResponseMetadata'<~Hash>: # * 'RequestId'<~String> - Id of request def verify_email_address(email_address) request({ 'Action' => 'VerifyEmailAddress', 'EmailAddress' => email_address, :parser => Fog::Parsers::AWS::SES::VerifyEmailAddress.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/000077500000000000000000000000001437344660100207435ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/simpledb/batch_put_attributes.rb000066400000000000000000000046331437344660100255150ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real # Put items attributes into a SimpleDB domain # # ==== Parameters # * domain_name<~String> - Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. # * items<~Hash> - Keys are the items names and may use any UTF-8 # characters valid in xml. Control characters and sequences not allowed # in xml are not valid. Can be up to 1024 bytes long. Values are the # attributes to add to the given item and may use any UTF-8 characters # valid in xml. Control characters and sequences not allowed in xml are # not valid. Each name and value can be up to 1024 bytes long. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'RequestId' def batch_put_attributes(domain_name, items, replace_attributes = Hash.new([])) request({ 'Action' => 'BatchPutAttributes', 'DomainName' => domain_name, :parser => Fog::Parsers::AWS::SimpleDB::Basic.new(@nil_string) }.merge!(encode_batch_attributes(items, replace_attributes))) end end class Mock def batch_put_attributes(domain_name, items, replace_attributes = Hash.new([])) response = Excon::Response.new if self.data[:domains][domain_name] for item_name, attributes in items do for key, value in attributes do self.data[:domains][domain_name][item_name] ||= {} if replace_attributes[item_name] && replace_attributes[item_name].include?(key) self.data[:domains][domain_name][item_name][key.to_s] = [] else self.data[:domains][domain_name][item_name][key.to_s] ||= [] end self.data[:domains][domain_name][item_name][key.to_s] << value.to_s end end response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/create_domain.rb000066400000000000000000000021311437344660100240570ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real # Create a SimpleDB domain # # ==== Parameters # * domain_name<~String>:: Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. 
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'RequestId' def create_domain(domain_name) request( 'Action' => 'CreateDomain', 'DomainName' => domain_name, :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::Basic.new(@nil_string) ) end end class Mock def create_domain(domain_name) response = Excon::Response.new self.data[:domains][domain_name] = {} response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/delete_attributes.rb000066400000000000000000000051341437344660100250030ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real # List metadata for SimpleDB domain # # ==== Parameters # * domain_name<~String> - Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. # * item_name<~String> - Name of the item. May use any UTF-8 characters valid # in xml. Control characters and sequences not allowed in xml are not # valid. Can be up to 1024 bytes long. # * attributes<~Hash> - Name/value pairs to remove from the item. Defaults to # nil, which will delete the entire item. Attribute names and values may # use any UTF-8 characters valid in xml. Control characters and sequences # not allowed in xml are not valid. Each name and value can be up to 1024 # bytes long. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'RequestId' def delete_attributes(domain_name, item_name, attributes = nil) request({ 'Action' => 'DeleteAttributes', 'DomainName' => domain_name, 'ItemName' => item_name, :parser => Fog::Parsers::AWS::SimpleDB::Basic.new(@nil_string) }.merge!(encode_attributes(attributes))) end end class Mock def delete_attributes(domain_name, item_name, attributes = nil) response = Excon::Response.new if self.data[:domains][domain_name] if self.data[:domains][domain_name][item_name] if attributes for key, value in attributes if self.data[:domains][domain_name][item_name][key] if value.nil? || value.empty? self.data[:domains][domain_name][item_name].delete(key) else for v in value self.data[:domains][domain_name][item_name][key].delete(v) end end end end else self.data[:domains][domain_name][item_name].clear end end response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/delete_domain.rb000066400000000000000000000021661437344660100240660ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real # Delete a SimpleDB domain # # ==== Parameters # * domain_name<~String>:: Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. 
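        # ==== Example
        #
        # Illustrative sketch (not part of the original documentation): assumes
        # `sdb` is an existing Fog::AWS::SimpleDB connection; the domain name is
        # a placeholder.
        #
        #   sdb.delete_domain('my_domain')
        #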
# # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'RequestId' def delete_domain(domain_name) request( 'Action' => 'DeleteDomain', 'DomainName' => domain_name, :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::Basic.new(@nil_string) ) end end class Mock def delete_domain(domain_name) response = Excon::Response.new if self.data[:domains].delete(domain_name) response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/domain_metadata.rb000066400000000000000000000052461437344660100244060ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real require 'fog/aws/parsers/simpledb/domain_metadata' # List metadata for SimpleDB domain # # ==== Parameters # * domain_name<~String> - Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'AttributeNameCount' - number of unique attribute names in domain # * 'AttributeNamesSizeBytes' - total size of unique attribute names, in bytes # * 'AttributeValueCount' - number of all name/value pairs in domain # * 'AttributeValuesSizeBytes' - total size of attributes, in bytes # * 'BoxUsage' # * 'ItemCount' - number of items in domain # * 'ItemNameSizeBytes' - total size of item names in domain, in bytes # * 'RequestId' # * 'Timestamp' - last update time for metadata. def domain_metadata(domain_name) request( 'Action' => 'DomainMetadata', 'DomainName' => domain_name, :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::DomainMetadata.new(@nil_string) ) end end class Mock def domain_metadata(domain_name) response = Excon::Response.new if domain = self.data[:domains][domain_name] response.status = 200 attribute_names = [] attribute_values = [] for item in domain.values for key, values in item attribute_names << key for value in values attribute_values << value end end end response.body = { 'AttributeNameCount' => attribute_names.length, 'AttributeNamesSizeBytes' => attribute_names.join('').length, 'AttributeValueCount' => attribute_values.length, 'AttributeValuesSizeBytes' => attribute_values.join('').length, 'BoxUsage' => Fog::AWS::Mock.box_usage, 'ItemCount' => domain.keys.length, 'ItemNamesSizeBytes' => domain.keys.join('').length, 'RequestId' => Fog::AWS::Mock.request_id, 'Timestamp' => Time.now } else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/get_attributes.rb000066400000000000000000000070271437344660100243230ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real require 'fog/aws/parsers/simpledb/get_attributes' # List metadata for SimpleDB domain # # ==== Parameters # * domain_name<~String> - Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. # * item_name<~String> - Name of the item. May use any UTF-8 characters valid # in xml. Control characters and sequences not allowed in xml are not # valid. Can be up to 1024 bytes long. # * options<~Hash>: # * AttributeName<~Array> - Attributes to return from the item. Defaults to # {}, which will return all attributes. Attribute names and values may use # any UTF-8 characters valid in xml. Control characters and sequences not # allowed in xml are not valid. 
Each name and value can be up to 1024 # bytes long. # * ConsistentRead<~Boolean> - When set to true, ensures most recent data is returned. Defaults to false. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Attributes' - list of attribute name/values for the item # * 'BoxUsage' # * 'RequestId' def get_attributes(domain_name, item_name, options = {}) if options.is_a?(Array) Fog::Logger.deprecation("get_attributes with array attributes param is deprecated, use 'AttributeName' => attributes) instead [light_black](#{caller.first})[/]") options = {'AttributeName' => options} end options['AttributeName'] ||= [] request({ 'Action' => 'GetAttributes', 'ConsistentRead' => !!options['ConsistentRead'], 'DomainName' => domain_name, 'ItemName' => item_name, :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::GetAttributes.new(@nil_string) }.merge!(encode_attribute_names(options['AttributeName']))) end end class Mock def get_attributes(domain_name, item_name, options = {}) if options.is_a?(Array) Fog::Logger.deprecation("get_attributes with array attributes param is deprecated, use 'AttributeName' => attributes) instead [light_black](#{caller.first})[/]") options['AttributeName'] ||= options if options.is_a?(Array) end options['AttributeName'] ||= [] response = Excon::Response.new if self.data[:domains][domain_name] object = {} if !options['AttributeName'].empty? for attribute in options['AttributeName'] if self.data[:domains][domain_name].key?(item_name) && self.data[:domains][domain_name][item_name].key?(attribute) object[attribute] = self.data[:domains][domain_name][item_name][attribute] end end elsif self.data[:domains][domain_name][item_name] object = self.data[:domains][domain_name][item_name] end response.status = 200 response.body = { 'Attributes' => object, 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/list_domains.rb000066400000000000000000000033271437344660100237620ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real require 'fog/aws/parsers/simpledb/list_domains' # List SimpleDB domains # # ==== Parameters # * options<~Hash> - options, defaults to {} # * 'MaxNumberOfDomains'<~Integer> - number of domains to return # between 1 and 100, defaults to 100 # * 'NextToken'<~String> - Offset token to start listing, defaults to nil # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'Domains' - array of domain names. 
# * 'NextToken' - offset to start with if there are are more domains to list # * 'RequestId' def list_domains(options = {}) request({ 'Action' => 'ListDomains', :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::ListDomains.new(@nil_string) }.merge!(options)) end end class Mock def list_domains(options = {}) response = Excon::Response.new keys = self.data[:domains].keys max = options['MaxNumberOfDomains'] || keys.size offset = options['NextToken'] || 0 domains = [] for key, value in self.data[:domains].keys[offset...max] domains << key end response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'Domains' => domains, 'RequestId' => Fog::AWS::Mock.request_id } if max < keys.size response.body['NextToken'] = max + 1 end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/put_attributes.rb000066400000000000000000000061471437344660100243560ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real # Put item attributes into a SimpleDB domain # # ==== Parameters # * domain_name<~String> - Name of domain. Must be between 3 and 255 of the # following characters: a-z, A-Z, 0-9, '_', '-' and '.'. # * item_name<~String> - Name of the item. May use any UTF-8 characters valid # in xml. Control characters and sequences not allowed in xml are not # valid. Can be up to 1024 bytes long. # * attributes<~Hash> - Name/value pairs to add to the item. Attribute names # and values may use any UTF-8 characters valid in xml. Control characters # and sequences not allowed in xml are not valid. Each name and value can # be up to 1024 bytes long. # * options<~Hash> - Accepts the following keys. # :replace => [Array of keys to replace] # :expect => {name/value pairs for performing conditional put} # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage' # * 'RequestId' def put_attributes(domain_name, item_name, attributes, options = {}) options[:expect] = {} unless options[:expect] options[:replace] = [] unless options[:replace] request({ 'Action' => 'PutAttributes', 'DomainName' => domain_name, 'ItemName' => item_name, :parser => Fog::Parsers::AWS::SimpleDB::Basic.new(@nil_string) }.merge!(encode_attributes(attributes, options[:replace], options[:expect]))) end end class Mock def put_attributes(domain_name, item_name, attributes, options = {}) options[:expect] = {} unless options[:expect] options[:replace] = [] unless options[:replace] response = Excon::Response.new if self.data[:domains][domain_name] options[:expect].each do |ck, cv| if self.data[:domains][domain_name][item_name][ck] != [cv] response.status = 409 raise(Excon::Errors.status_error({:expects => 200}, response)) end end attributes.each do |key, value| self.data[:domains][domain_name][item_name] ||= {} self.data[:domains][domain_name][item_name][key.to_s] = [] unless self.data[:domains][domain_name][item_name][key.to_s] if options[:replace].include?(key.to_s) self.data[:domains][domain_name][item_name][key.to_s] = [*value].map {|x| x.to_s} else self.data[:domains][domain_name][item_name][key.to_s] += [*value].map {|x| x.to_s} end end response.status = 200 response.body = { 'BoxUsage' => Fog::AWS::Mock.box_usage, 'RequestId' => Fog::AWS::Mock.request_id } else response.status = 400 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/simpledb/select.rb000066400000000000000000000032521437344660100225510ustar00rootroot00000000000000module Fog module AWS class SimpleDB class Real 
require 'fog/aws/parsers/simpledb/select' # Select item data from SimpleDB # # ==== Parameters # * select_expression<~String> - Expression to query domain with. # * options<~Hash>: # * ConsistentRead<~Boolean> - When set to true, ensures most recent data is returned. Defaults to false. # * NextToken<~String> - Offset token to start list, defaults to nil. # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'BoxUsage'<~Float> # * 'RequestId'<~String> # * 'Items'<~Hash> - list of attribute name/values for the items formatted as # { 'item_name' => { 'attribute_name' => ['attribute_value'] }} # * 'NextToken'<~String> - offset to start with if there are are more domains to list def select(select_expression, options = {}) if options.is_a?(String) Fog::Logger.deprecation("get_attributes with string next_token param is deprecated, use 'AttributeName' => attributes) instead [light_black](#{caller.first})[/]") options = {'NextToken' => options} end options['NextToken'] ||= nil request( 'Action' => 'Select', 'ConsistentRead' => !!options['ConsistentRead'], 'NextToken' => options['NextToken'], 'SelectExpression' => select_expression, :idempotent => true, :parser => Fog::Parsers::AWS::SimpleDB::Select.new(@nil_string) ) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/000077500000000000000000000000001437344660100177475ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/sns/add_permission.rb000066400000000000000000000016751437344660100233050ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/add_permission' def add_permission(options = {}) request({ 'Action' => 'AddPermission', :parser => Fog::Parsers::AWS::SNS::AddPermission.new }.merge!(options)) end end class Mock def add_permission(options = {}) topic_arn = options.delete('TopicArn') label = options.delete('Label') actions = Hash[options.select { |k,v| k.match(/^ActionName/) }].values members = Hash[options.select { |k,v| k.match(/^AWSAccountId/) }].values self.data[:permissions][topic_arn][label] = { :members => members, :actions => actions, } response = Excon::Response.new response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/confirm_subscription.rb000066400000000000000000000016761437344660100245470ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/confirm_subscription' # Confirm a subscription # # ==== Parameters # * arn<~String> - Arn of topic to confirm subscription to # * token<~String> - Token sent to endpoint during subscribe action # * options<~Hash>: # * AuthenticateOnUnsubscribe<~Boolean> - whether or not unsubscription should be authenticated, defaults to false # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_ConfirmSubscription.html # def confirm_subscription(arn, token, options = {}) request({ 'Action' => 'ConfirmSubscription', 'Token' => token, 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::ConfirmSubscription.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/create_topic.rb000066400000000000000000000040101437344660100227300ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/create_topic' # Create a topic # # ==== Parameters # * name<~String> - Name of topic to create # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_CreateTopic.html # def create_topic(name) request({ 'Action' => 
'CreateTopic', 'Name' => name, :parser => Fog::Parsers::AWS::SNS::CreateTopic.new }) end end class Mock def create_topic(name) response = Excon::Response.new topic_arn = Fog::AWS::Mock.arn(@module, @account_id, name, @region) self.data[:topics][topic_arn] = { "Owner" => @account_id, "SubscriptionsPending" => 0, "SubscriptionsConfirmed" => 0, "SubscriptionsDeleted" => 0, "DisplayName" => name, "TopicArn" => topic_arn, "EffectiveDeliveryPolicy" => %Q|{"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,"numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}|, "Policy" => %Q|{"Version":"2008-10-17","Id":"__default_policy_ID","Statement":[{"Sid":"__default_statement_ID","Effect":"Allow","Principal":{"AWS":"*"},"Action":["SNS:Publish","SNS:RemovePermission","SNS:SetTopicAttributes","SNS:DeleteTopic","SNS:ListSubscriptionsByTopic","SNS:GetTopicAttributes","SNS:Receive","SNS:AddPermission","SNS:Subscribe"],"Resource":"arn:aws:sns:us-east-1:990279267269:Smithy","Condition":{"StringEquals":{"AWS:SourceOwner":"990279267269"}}}]}| } self.data[:permissions][topic_arn] = {} response.body = {"TopicArn" => topic_arn, "RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/delete_topic.rb000066400000000000000000000015121437344660100227330ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/delete_topic' # Delete a topic # # ==== Parameters # * arn<~String> - The Arn of the topic to delete # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_DeleteTopic.html # def delete_topic(arn) request({ 'Action' => 'DeleteTopic', 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::DeleteTopic.new }) end end class Mock def delete_topic(arn) self.data[:topics].delete(arn) response = Excon::Response.new response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/get_topic_attributes.rb000066400000000000000000000016571437344660100245300ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/get_topic_attributes' # Get attributes of a topic # # ==== Parameters # * arn<~Hash>: The Arn of the topic to get attributes for # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_GetTopicAttributes.html # def get_topic_attributes(arn) request({ 'Action' => 'GetTopicAttributes', 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::GetTopicAttributes.new }) end end class Mock def get_topic_attributes(arn) response = Excon::Response.new attributes = self.data[:topics][arn] response.body = {"Attributes" => attributes, "RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/list_subscriptions.rb000066400000000000000000000016631437344660100242440ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/list_subscriptions' # List subscriptions # # ==== Parameters # * options<~Hash>: # * 'NextToken'<~String> - Token returned from previous request, used for pagination # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_ListSubscriptions.html # def list_subscriptions(options = {}) request({ 'Action' => 'ListSubscriptions', :parser => Fog::Parsers::AWS::SNS::ListSubscriptions.new }.merge!(options)) end end class Mock def 
list_subscriptions(options={}) response = Excon::Response.new response.body = {'Subscriptions' => self.data[:subscriptions].values, 'RequestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/list_subscriptions_by_topic.rb000066400000000000000000000022441437344660100261300ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/list_subscriptions' # List subscriptions for a topic # # ==== Parameters # * arn<~String> - Arn of topic to list subscriptions for # * options<~Hash>: # * 'NextToken'<~String> - Token returned from previous request, used for pagination # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_ListSubscriptionsByTopic.html # def list_subscriptions_by_topic(arn, options = {}) request({ 'Action' => 'ListSubscriptionsByTopic', 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::ListSubscriptions.new }.merge!(options)) end end class Mock def list_subscriptions_by_topic(arn, options={}) response = Excon::Response.new subscriptions = self.data[:subscriptions].values.select { |s| s["TopicArn"] == arn } response.body = {'Subscriptions' => subscriptions, 'RequestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/list_topics.rb000066400000000000000000000015641437344660100226360ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/list_topics' # List topics # # ==== Parameters # * options<~Hash>: # * 'NextToken'<~String> - Token returned from previous request, used for pagination # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_ListTopics.html # def list_topics(options = {}) request({ 'Action' => 'ListTopics', :parser => Fog::Parsers::AWS::SNS::ListTopics.new }.merge!(options)) end end class Mock def list_topics(options={}) response = Excon::Response.new response.body = {'Topics' => self.data[:topics].keys, 'RequestId' => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/publish.rb000066400000000000000000000016121437344660100217420ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/publish' # Send a message to a topic # # ==== Parameters # * arn<~String> - Arn of topic to send message to # * message<~String> - Message to send to topic # * options<~Hash>: # * MessageStructure<~String> - message structure, in ['json'] # * Subject<~String> - value to use for subject when delivering by email # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_Publish.html # def publish(arn, message, options = {}) request({ 'Action' => 'Publish', 'Message' => message, 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::Publish.new }.merge!(options)) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/remove_permission.rb000066400000000000000000000013131437344660100240370ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/remove_permission' def remove_permission(options = {}) request({ 'Action' => 'RemovePermission', :parser => Fog::Parsers::AWS::SNS::RemovePermission.new }.merge!(options)) end end class Mock def remove_permission(options = {}) topic_arn = options['TopicArn'] label = options['Label'] self.data[:permissions][topic_arn].delete(label) response = Excon::Response.new response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end 
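# --- Illustrative usage sketch (not part of the fog-aws source tree) ---
# A minimal example of driving the SNS topic requests defined in this directory
# (create_topic, list_topics, get_topic_attributes, delete_topic) through the
# mocked service. The credentials, region, and topic name below are placeholder
# assumptions for the example only; the return values mirror the Mock
# implementations shown above.

require 'fog/aws'

Fog.mock!

sns = Fog::AWS::SNS.new(
  :aws_access_key_id     => 'fake_access_key',
  :aws_secret_access_key => 'fake_secret_key',
  :region                => 'us-east-1'
)

# Create a topic and read back its ARN from the response body
topic_arn = sns.create_topic('example-topic').body['TopicArn']

sns.list_topics.body['Topics']                    # => includes topic_arn
sns.get_topic_attributes(topic_arn).body['Attributes']

# Clean up
sns.delete_topic(topic_arn)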
fog-aws-3.18.0/lib/fog/aws/requests/sns/set_topic_attributes.rb000066400000000000000000000026461437344660100245430ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/set_topic_attributes' # Set attributes of a topic # # ==== Parameters # * arn<~Hash> - The Arn of the topic to get attributes for # * attribute_name<~String> - Name of attribute to set, in ['DisplayName', 'Policy'] # * attribute_value<~String> - Value to set attribute to # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_SetTopicAttributes.html # def set_topic_attributes(arn, attribute_name, attribute_value) request({ 'Action' => 'SetTopicAttributes', 'AttributeName' => attribute_name, 'AttributeValue' => attribute_value, 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::SetTopicAttributes.new }) end end class Mock def set_topic_attributes(arn, attribute_name, attribute_value) attributes = self.data[:topics][arn] if %w(Policy DisplayName DeliveryPolicy).include?(attribute_name) attributes[attribute_name] = attribute_value self.data[:topics][arn] = attributes end response = Excon::Response.new response.body = {"RequestId" => Fog::AWS::Mock.request_id} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/subscribe.rb000066400000000000000000000063601437344660100222620ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/subscribe' # Create a subscription # # ==== Parameters # * arn<~String> - Arn of topic to subscribe to # * endpoint<~String> - Endpoint to notify # * protocol<~String> - Protocol to notify endpoint with, in ['email', 'email-json', 'http', 'https', 'sqs'] # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_Subscribe.html # def subscribe(arn, endpoint, protocol) request({ 'Action' => 'Subscribe', 'Endpoint' => endpoint, 'Protocol' => protocol, 'TopicArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::Subscribe.new }) end end class Mock def subscribe(arn, endpoint, protocol) response = Excon::Response.new unless topic = self.data[:topics][arn] response.status = 400 response.body = { 'Code' => 'InvalidParameterValue', 'Message' => 'Invalid parameter: TopicArn', 'Type' => 'Sender', } return response end subscription_arn = Fog::AWS::Mock.arn(@module, @account_id, "#{topic["DisplayName"]}:#{Fog::AWS::Mock.request_id}", @region) self.data[:subscriptions][subscription_arn] = { "Protocol" => protocol, "Owner" => @account_id.to_s, "TopicArn" => arn, "SubscriptionArn" => subscription_arn, "Endpoint" => endpoint, } mock_data = Fog::AWS::SQS::Mock.data.values.find { |a| a.values.find { |d| d[:queues][endpoint] } } access_key = mock_data && mock_data.keys.first if protocol == "sqs" && access_key token = SecureRandom.hex(128) message = "You have chosen to subscribe to the topic #{arn}.\nTo confirm the subscription, visit the SubscribeURL included in this message." 
signature = Fog::HMAC.new("sha256", token).sign(message) Fog::AWS::SQS.new( :region => self.region, :aws_access_key_id => access_key, :aws_secret_access_key => SecureRandom.hex(3) ).send_message(endpoint, Fog::JSON.encode( "Type" => "SubscriptionConfirmation", "MessageId" => UUID.uuid, "Token" => token, "TopicArn" => arn, "Message" => message, "SubscribeURL" => "https://sns.#{self.region}.amazonaws.com/?Action=ConfirmSubscription&TopicArn=#{arn}&Token=#{token}", "Timestamp" => Time.now.iso8601, "SignatureVersion" => "1", "Signature" => signature, "SigningCertURL" => "https://sns.#{self.region}.amazonaws.com/SimpleNotificationService-#{SecureRandom.hex(16)}.pem" )) end response.body = { 'SubscriptionArn' => 'pending confirmation', 'RequestId' => Fog::AWS::Mock.request_id } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sns/unsubscribe.rb000066400000000000000000000011571437344660100226240ustar00rootroot00000000000000module Fog module AWS class SNS class Real require 'fog/aws/parsers/sns/unsubscribe' # Delete a subscription # # ==== Parameters # * arn<~String> - Arn of subscription to delete # # ==== See Also # http://docs.amazonwebservices.com/sns/latest/api/API_Unsubscribe.html # def unsubscribe(arn) request({ 'Action' => 'Unsubscribe', 'SubscriptionArn' => arn.strip, :parser => Fog::Parsers::AWS::SNS::Unsubscribe.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/000077500000000000000000000000001437344660100177525ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/sqs/change_message_visibility.rb000066400000000000000000000040211437344660100254740ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/basic' # Change visibility timeout for a message # # ==== Parameters # * queue_url<~String> - Url of queue for message to update # * receipt_handle<~String> - Token from previous recieve message # * visibility_timeout<~Integer> - New visibility timeout in 0..43200 # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryChangeMessageVisibility.html # def change_message_visibility(queue_url, receipt_handle, visibility_timeout) request({ 'Action' => 'ChangeMessageVisibility', 'ReceiptHandle' => receipt_handle, 'VisibilityTimeout' => visibility_timeout, :parser => Fog::Parsers::AWS::SQS::Basic.new, :path => path_from_queue_url(queue_url) }) end end class Mock def change_message_visibility(queue_url, receipt_handle, visibility_timeout) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) message_id, _ = queue[:receipt_handles].find { |message_id, receipts| receipts.key?(receipt_handle) } if message_id queue[:messages][message_id]['Attributes']['VisibilityTimeout'] = visibility_timeout response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response.status = 200 else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/create_queue.rb000066400000000000000000000041571437344660100227550ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/create_queue' # Create a queue # # ==== Parameters # * name<~String> - Name of queue to create # * options<~Hash>: # * DefaultVisibilityTimeout<~String> - Time, in seconds, to hide a message after it has been received, in 0..43200, 
defaults to 30 # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryCreateQueue.html # def create_queue(name, options = {}) request({ 'Action' => 'CreateQueue', 'QueueName' => name, :parser => Fog::Parsers::AWS::SQS::CreateQueue.new }.merge!(options)) end end class Mock def create_queue(name, options = {}) Excon::Response.new.tap do |response| response.status = 200 now = Time.now queue_url = "https://queue.amazonaws.com/#{data[:owner_id]}/#{name}" queue = { 'QueueName' => name, 'Attributes' => { 'VisibilityTimeout' => 30, 'ApproximateNumberOfMessages' => 0, 'ApproximateNumberOfMessagesNotVisible' => 0, 'CreatedTimestamp' => now, 'LastModifiedTimestamp' => now, 'QueueArn' => Fog::AWS::Mock.arn('sqs', 'us-east-1', data[:owner_id], name), 'MaximumMessageSize' => 8192, 'MessageRetentionPeriod' => 345600 }, :messages => {}, :receipt_handles => {} } data[:queues][queue_url] = queue unless data[:queues][queue_url] response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'QueueUrl' => queue_url } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/delete_message.rb000066400000000000000000000033351437344660100232510ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/basic' # Delete a message from a queue # # ==== Parameters # * queue_url<~String> - Url of queue to delete message from # * receipt_handle<~String> - Token from previous recieve message # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryDeleteMessage.html # def delete_message(queue_url, receipt_handle) request({ 'Action' => 'DeleteMessage', 'ReceiptHandle' => receipt_handle, :parser => Fog::Parsers::AWS::SQS::Basic.new, :path => path_from_queue_url(queue_url), }) end end class Mock def delete_message(queue_url, receipt_handle) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) message_id, _ = queue[:receipt_handles].find { |msg_id, receipts| receipts.key?(receipt_handle) } if message_id queue[:receipt_handles].delete(message_id) queue[:messages].delete(message_id) queue['Attributes']['LastModifiedTimestamp'] = Time.now end response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } response.status = 200 else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/delete_queue.rb000066400000000000000000000023061437344660100227460ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/basic' # Delete a queue # # ==== Parameters # * queue_url<~String> - Url of queue to delete # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryDeleteQueue.html # def delete_queue(queue_url) request({ 'Action' => 'DeleteQueue', :parser => Fog::Parsers::AWS::SQS::Basic.new, :path => path_from_queue_url(queue_url), }) end end class Mock def delete_queue(queue_url) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) response.status = 200 data[:queues].delete(queue_url) response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end 
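# --- Illustrative usage sketch (not part of the fog-aws source tree) ---
# A minimal example of exercising the SQS queue and message requests defined in
# this directory (create_queue, send_message, receive_message, delete_message,
# delete_queue) against the mocked service. The credentials, region, and queue
# name are placeholder assumptions; the body keys match the Mock
# implementations shown in these files.

require 'fog/aws'

Fog.mock!

sqs = Fog::AWS::SQS.new(
  :aws_access_key_id     => 'fake_access_key',
  :aws_secret_access_key => 'fake_secret_key',
  :region                => 'us-east-1'
)

# Create a queue and note its URL
queue_url = sqs.create_queue('example-queue').body['QueueUrl']

# Send and then receive a message; the receipt handle is needed to delete it
sqs.send_message(queue_url, 'hello world')
message = sqs.receive_message(queue_url).body['Message'].first
sqs.delete_message(queue_url, message['ReceiptHandle'])

# Clean up
sqs.delete_queue(queue_url)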
fog-aws-3.18.0/lib/fog/aws/requests/sqs/get_queue_attributes.rb000066400000000000000000000032351437344660100245330ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/get_queue_attributes' # Get attributes of a queue # # ==== Parameters # * queue_url<~String> - Url of queue to get attributes for # * attribute_name<~Array> - Name of attribute to return, in ['All', 'ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesNotVisible', 'CreatedTimestamp', 'LastModifiedTimestamp', 'MaximumMessageSize', 'MessageRetentionPeriod', 'Policy', 'QueueArn', 'VisibilityTimeout'] # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryGetQueueAttributes.html # def get_queue_attributes(queue_url, attribute_name) request({ 'Action' => 'GetQueueAttributes', 'AttributeName' => attribute_name, :path => path_from_queue_url(queue_url), :parser => Fog::Parsers::AWS::SQS::GetQueueAttributes.new }) end end class Mock def get_queue_attributes(queue_url, attribute_name) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'Attributes' => queue['Attributes'] } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/list_queues.rb000066400000000000000000000020511437344660100226370ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/list_queues' # List queues # # ==== Parameters # * options<~Hash>: # * QueueNamePrefix<~String> - String used to filter results to only those with matching prefixes # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryListQueues.html # def list_queues(options = {}) request({ 'Action' => 'ListQueues', :parser => Fog::Parsers::AWS::SQS::ListQueues.new }.merge!(options)) end end class Mock def list_queues(options = {}) Excon::Response.new.tap do |response| response.status = 200 response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'QueueUrls' => data[:queues].keys } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/receive_message.rb000066400000000000000000000062241437344660100234310ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/receive_message' # Get a message from a queue (marks it as unavailable temporarily, but does not remove from queue, see delete_message) # # ==== Parameters # * queue_url<~String> - Url of queue to get message from # * options<~Hash>: # * Attributes<~Array> - List of attributes to return, in ['All', 'ApproximateFirstReceiveTimestamp', 'ApproximateReceiveCount', 'SenderId', 'SentTimestamp'], defaults to 'All' # * MaxNumberOfMessages<~Integer> - Maximum number of messages to return, defaults to 1 # * VisibilityTimeout<~Integer> - Duration, in seconds, to hide message from other receives. 
In 0..43200, defaults to visibility timeout for queue # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QueryReceiveMessage.html # def receive_message(queue_url, options = {}) request({ 'Action' => 'ReceiveMessage', 'AttributeName' => 'All', :path => path_from_queue_url(queue_url), :parser => Fog::Parsers::AWS::SQS::ReceiveMessage.new }.merge!(options)) end end class Mock def receive_message(queue_url, options = {}) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) max_number_of_messages = options['MaxNumberOfMessages'] || 1 now = Time.now messages = [] queue[:messages].values.each do |m| message_id = m['MessageId'] invisible = if (received_handles = queue[:receipt_handles][message_id]) visibility_timeout = m['Attributes']['VisibilityTimeout'] || queue['Attributes']['VisibilityTimeout'] received_handles.any? { |handle, time| now < time + visibility_timeout } else false end unless invisible receipt_handle = Fog::Mock.random_base64(300) queue[:receipt_handles][message_id] ||= {} queue[:receipt_handles][message_id][receipt_handle] = now m['Attributes'].tap do |attrs| attrs['ApproximateFirstReceiveTimestamp'] ||= now attrs['ApproximateReceiveCount'] = (attrs['ApproximateReceiveCount'] || 0) + 1 end messages << m.merge({ 'ReceiptHandle' => receipt_handle }) break if messages.size >= max_number_of_messages end end response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'Message' => messages } response.status = 200 else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/send_message.rb000066400000000000000000000037161437344660100227430ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/send_message' # Add a message to a queue # # ==== Parameters # * queue_url<~String> - Url of queue to add message to # * message<~String> - Message to add to queue # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QuerySendMessage.html # def send_message(queue_url, message) request({ 'Action' => 'SendMessage', 'MessageBody' => message, :path => path_from_queue_url(queue_url), :parser => Fog::Parsers::AWS::SQS::SendMessage.new }) end end class Mock def send_message(queue_url, message) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) response.status = 200 now = Time.now message_id = Fog::AWS::Mock.sqs_message_id md5 = OpenSSL::Digest::MD5.hexdigest(message) queue[:messages][message_id] = { 'MessageId' => message_id, 'Body' => message, 'MD5OfBody' => md5, 'Attributes' => { 'SenderId' => Fog::AWS::Mock.sqs_message_id, 'SentTimestamp' => now } } queue['Attributes']['LastModifiedTimestamp'] = now response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id }, 'MessageId' => message_id, 'MD5OfMessageBody' => md5 } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sqs/set_queue_attributes.rb000066400000000000000000000032501437344660100245440ustar00rootroot00000000000000module Fog module AWS class SQS class Real require 'fog/aws/parsers/sqs/basic' # Get attributes of a queue # # ==== Parameters # * queue_url<~String> - Url of queue to get attributes for # * attribute_name<~String> - Name of attribute to set, keys in ['MaximumMessageSize', 'MessageRetentionPeriod', 
'Policy', 'VisibilityTimeout'] # * attribute_value<~String> - Value to set for attribute # # ==== See Also # http://docs.amazonwebservices.com/AWSSimpleQueueService/latest/APIReference/Query_QuerySetQueueAttributes.html # def set_queue_attributes(queue_url, attribute_name, attribute_value) request({ 'Action' => 'SetQueueAttributes', 'Attribute.Name' => attribute_name, 'Attribute.Value' => attribute_value, :path => path_from_queue_url(queue_url), :parser => Fog::Parsers::AWS::SQS::Basic.new }) end end class Mock def set_queue_attributes(queue_url, attribute_name, attribute_value) Excon::Response.new.tap do |response| if (queue = data[:queues][queue_url]) response.status = 200 queue['Attributes'][attribute_name] = attribute_value response.body = { 'ResponseMetadata' => { 'RequestId' => Fog::AWS::Mock.request_id } } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/000077500000000000000000000000001437344660100206105ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/storage/abort_multipart_upload.rb000066400000000000000000000027161437344660100257170ustar00rootroot00000000000000module Fog module AWS class Storage class Real # # Abort a multipart upload # # @param [String] bucket_name Name of bucket to abort multipart upload on # @param [String] object_name Name of object to abort multipart upload on # @param [String] upload_id Id of upload to add part to # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html # def abort_multipart_upload(bucket_name, object_name, upload_id) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :object_name => object_name, :method => 'DELETE', :query => {'uploadId' => upload_id} }) end end # Real class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def abort_multipart_upload(bucket_name, object_name, upload_id) verify_mock_bucket_exists(bucket_name) upload_info = get_upload_info(bucket_name, upload_id, true) response = Excon::Response.new if upload_info response.status = 204 response else response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) end end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/storage/acl_utils.rb000066400000000000000000000042431437344660100231170ustar00rootroot00000000000000module Fog module AWS class Storage require 'fog/aws/parsers/storage/access_control_list' private def self.hash_to_acl(acl) data = "\n" if acl['Owner'] && (acl['Owner']['ID'] || acl['Owner']['DisplayName']) data << " \n" data << " #{acl['Owner']['ID']}\n" if acl['Owner']['ID'] data << " #{acl['Owner']['DisplayName']}\n" if acl['Owner']['DisplayName'] data << " \n" end grants = [acl['AccessControlList']].flatten.compact data << " \n" if grants.any? grants.each do |grant| data << " \n" grantee = grant['Grantee'] type = case when grantee.key?('ID') 'CanonicalUser' when grantee.key?('EmailAddress') 'AmazonCustomerByEmail' when grantee.key?('URI') 'Group' end data << " \n" case type when 'CanonicalUser' data << " #{grantee['ID']}\n" if grantee['ID'] data << " #{grantee['DisplayName']}\n" if grantee['DisplayName'] when 'AmazonCustomerByEmail' data << " #{grantee['EmailAddress']}\n" if grantee['EmailAddress'] when 'Group' data << " #{grantee['URI']}\n" if grantee['URI'] end data << " \n" data << " #{grant['Permission']}\n" data << " \n" end data << " \n" if grants.any? 
data << "" data end def self.acl_to_hash(acl_xml) parser = Fog::Parsers::AWS::Storage::AccessControlList.new Nokogiri::XML::SAX::Parser.new(parser).parse(acl_xml) parser.response end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/complete_multipart_upload.rb000066400000000000000000000057111437344660100264160ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/complete_multipart_upload' # Complete a multipart upload # # @param [String] bucket_name Name of bucket to complete multipart upload for # @param [String] object_name Name of object to complete multipart upload for # @param [String] upload_id Id of upload to add part to # @param [Array] parts Array of etags as Strings for parts # # @return [Excon::Response] # * body [Hash]: (success) # * Bucket [String] - bucket of new object # * ETag [String] - etag of new object # * Key [String] - key of new object # * Location [String] - location of new object # * body [Hash]: (failure) # * Code [String] - Error status code # * Message [String] - Error description # # @note This request could fail and still return +200 OK+, so it's important that you check the response. # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html # def complete_multipart_upload(bucket_name, object_name, upload_id, parts) data = "" parts.each_with_index do |part, index| data << "" data << "#{index + 1}" data << "#{part}" data << "" end data << "" request({ :body => data, :expects => 200, :headers => { 'Content-Length' => data.length }, :bucket_name => bucket_name, :object_name => object_name, :method => 'POST', :parser => Fog::Parsers::AWS::Storage::CompleteMultipartUpload.new, :query => {'uploadId' => upload_id} }) end end # Real class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def complete_multipart_upload(bucket_name, object_name, upload_id, parts) bucket = verify_mock_bucket_exists(bucket_name) upload_info = get_upload_info(bucket_name, upload_id, true) body = parts.map { |pid| upload_info[:parts][pid.to_i] }.join object = store_mock_object(bucket, object_name, body, upload_info[:options]) response = Excon::Response.new response.status = 200 response.body = { 'Location' => "http://#{bucket_name}.s3.amazonaws.com/#{object_name}", 'Bucket' => bucket_name, 'Key' => object_name, 'ETag' => object['ETag'], } response.headers['x-amz-version-id'] = object['VersionId'] if object['VersionId'] != 'null' response end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/storage/copy_object.rb000066400000000000000000000071411437344660100234400ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/copy_object' # Copy an object from one S3 bucket to another # # @param source_bucket_name [String] Name of source bucket # @param source_object_name [String] Name of source object # @param target_bucket_name [String] Name of bucket to create copy in # @param target_object_name [String] Name for new copy of object # # @param options [Hash]: # @option options [String] x-amz-metadata-directive Specifies whether to copy metadata from source or replace with data in request. 
        #   Must be in ['COPY', 'REPLACE']
        # @option options [String] x-amz-copy_source-if-match Copies object if its etag matches this value
        # @option options [Time] x-amz-copy_source-if-modified_since Copies object if it has been modified since this time
        # @option options [String] x-amz-copy_source-if-none-match Copies object if its etag does not match this value
        # @option options [Time] x-amz-copy_source-if-unmodified-since Copies object if it has not been modified since this time
        # @option options [String] x-amz-storage-class Default is 'STANDARD', set to 'REDUCED_REDUNDANCY' for non-critical, reproducible data
        #
        #
        # @return [Excon::Response]
        #   * body [Hash]:
        #     * ETag [String] - etag of new object
        #     * LastModified [Time] - date object was last modified
        #
        # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html
        #
        def copy_object(source_bucket_name, source_object_name, target_bucket_name, target_object_name, options = {})
          headers = { 'x-amz-copy-source' => "/#{source_bucket_name}#{object_to_path(source_object_name)}" }.merge!(options)
          request({
            :expects     => 200,
            :headers     => headers,
            :bucket_name => target_bucket_name,
            :object_name => target_object_name,
            :method      => 'PUT',
            :parser      => Fog::Parsers::AWS::Storage::CopyObject.new,
          })
        end
      end

      class Mock # :nodoc:all
        def copy_object(source_bucket_name, source_object_name, target_bucket_name, target_object_name, options = {})
          response = Excon::Response.new
          source_bucket = self.data[:buckets][source_bucket_name]
          source_object = source_bucket && source_bucket[:objects][source_object_name] && source_bucket[:objects][source_object_name].first
          target_bucket = self.data[:buckets][target_bucket_name]

          acl = options['x-amz-acl'] || 'private'
          if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl)
            raise Excon::Errors::BadRequest.new('invalid x-amz-acl')
          else
            self.data[:acls][:object][target_bucket_name] ||= {}
            self.data[:acls][:object][target_bucket_name][target_object_name] = self.class.acls(acl)
          end

          if source_object && target_bucket
            response.status = 200
            target_object = source_object.dup
            target_object.merge!({'Key' => target_object_name})
            target_bucket[:objects][target_object_name] = [target_object]
            response.body = {
              'ETag'         => target_object['ETag'],
              'LastModified' => Time.parse(target_object['Last-Modified'])
            }
          else
            response.status = 404
            raise(Excon::Errors.status_error({:expects => 200}, response))
          end

          response
        end
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/storage/cors_utils.rb000066400000000000000000000020211437344660100233160ustar00rootroot00000000000000module Fog
  module AWS
    class Storage
      require 'fog/aws/parsers/storage/cors_configuration'

      private

      def self.hash_to_cors(cors)
        data = "<CORSConfiguration>\n"

        [cors['CORSConfiguration']].flatten.compact.each do |rule|
          data << "  <CORSRule>\n"

          ['ID', 'MaxAgeSeconds'].each do |key|
            data << "    <#{key}>#{rule[key]}</#{key}>\n" if rule[key]
          end

          ['AllowedOrigin', 'AllowedMethod', 'AllowedHeader', 'ExposeHeader'].each do |key|
            [rule[key]].flatten.compact.each do |value|
              data << "    <#{key}>#{value}</#{key}>\n"
            end
          end

          data << "  </CORSRule>\n"
        end

        data << "</CORSConfiguration>"

        data
      end

      def self.cors_to_hash(cors_xml)
        parser = Fog::Parsers::AWS::Storage::CorsConfiguration.new

        Nokogiri::XML::SAX::Parser.new(parser).parse(cors_xml)

        parser.response
      end
    end
  end
end
fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket.rb000066400000000000000000000023321437344660100237340ustar00rootroot00000000000000module Fog
  module AWS
    class Storage
      class Real
        # Delete an S3 bucket
        #
        # @param bucket_name [String] name of bucket to delete
        #
        # @return
[Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html def delete_bucket(bucket_name) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE' }) end end class Mock # :nodoc:all def delete_bucket(bucket_name) response = Excon::Response.new if self.data[:buckets][bucket_name].nil? response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) elsif self.data[:buckets][bucket_name] && !self.data[:buckets][bucket_name][:objects].empty? response.status = 409 raise(Excon::Errors.status_error({:expects => 204}, response)) else self.data[:buckets].delete(bucket_name) response.status = 204 end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket_cors.rb000066400000000000000000000013111437344660100247560ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Deletes the cors configuration information set for the bucket. # # @param bucket_name [String] name of bucket to delete cors rules from # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html def delete_bucket_cors(bucket_name) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE', :query => {'cors' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket_lifecycle.rb000066400000000000000000000014021437344660100257500ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Delete lifecycle configuration for a bucket # # @param bucket_name [String] name of bucket to delete lifecycle configuration from # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html def delete_bucket_lifecycle(bucket_name) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE', :query => {'lifecycle' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket_policy.rb000066400000000000000000000021641437344660100253160ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Delete policy for a bucket # # @param bucket_name [String] name of bucket to delete policy from # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html def delete_bucket_policy(bucket_name) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE', :query => {'policy' => nil} }) end end class Mock def delete_bucket_policy(bucket_name) if bucket = data[:buckets][bucket_name] bucket[:policy] = nil Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise(Excon::Errors.status_error({:expects => 200}, response)) end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket_tagging.rb000066400000000000000000000021331437344660100254330ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Delete tagging for a bucket # # @param bucket_name [String] name of bucket to delete tagging from # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html def delete_bucket_tagging(bucket_name) request({ 
:expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE', :query => {'tagging' => nil} }) end end class Mock # :nodoc:all def delete_bucket_tagging(bucket_name) response = Excon::Response.new if self.data[:buckets][bucket_name] self.data[:bucket_tagging].delete(bucket_name) response.status = 204 else response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_bucket_website.rb000066400000000000000000000013101437344660100254510ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Delete website configuration for a bucket # # @param bucket_name [String] name of bucket to delete website configuration from # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html def delete_bucket_website(bucket_name) request({ :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'DELETE', :query => {'website' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_multiple_objects.rb000066400000000000000000000167361437344660100260400ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/delete_multiple_objects' # Delete multiple objects from S3 # @note For versioned deletes, options should include a version_ids hash, which # maps from filename to an array of versions. # The semantics are that for each (object_name, version) tuple, the # caller must insert the object_name and an associated version (if # desired), so for n versions, the object must be inserted n times. # # @param bucket_name [String] Name of bucket containing object to delete # @param object_names [Array] Array of object names to delete # # @return [Excon::Response] response: # * body [Hash]: # * DeleteResult [Array]: # * Deleted [Hash]: # * Key [String] - Name of the object that was deleted # * VersionId [String] - ID for the versioned onject in case of a versioned delete # * DeleteMarker [Boolean] - Indicates if the request accessed a delete marker # * DeleteMarkerVersionId [String] - Version ID of the delete marker accessed # * Error [Hash]: # * Key [String] - Name of the object that failed to be deleted # * VersionId [String] - ID of the versioned object that was attempted to be deleted # * Code [String] - Status code for the result of the failed delete # * Message [String] - Error description # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html def delete_multiple_objects(bucket_name, object_names, options = {}) headers = options.dup data = "" data << "true" if headers.delete(:quiet) version_ids = headers.delete('versionId') object_names.each do |object_name| object_version = version_ids.nil? ? nil : version_ids[object_name] if object_version object_version = object_version.is_a?(String) ? [object_version] : object_version object_version.each do |version_id| data << "" data << "#{CGI.escapeHTML(object_name)}" data << "#{CGI.escapeHTML(version_id)}" data << "" end else data << "" data << "#{CGI.escapeHTML(object_name)}" data << "" end end data << "" headers['Content-Length'] = data.bytesize headers['Content-MD5'] = Base64.encode64(OpenSSL::Digest::MD5.digest(data)). 
gsub("\n", '') request({ :body => data, :expects => 200, :headers => headers, :bucket_name => bucket_name, :method => 'POST', :parser => Fog::Parsers::AWS::Storage::DeleteMultipleObjects.new, :query => {'delete' => nil} }) end end class Mock # :nodoc:all def delete_multiple_objects(bucket_name, object_names, options = {}) headers = options.dup headers.delete(:quiet) response = Excon::Response.new if bucket = self.data[:buckets][bucket_name] response.status = 200 response.body = { 'DeleteResult' => [] } version_ids = headers.delete('versionId') object_names.each do |object_name| object_version = version_ids.nil? ? [nil] : version_ids[object_name] object_version = object_version.is_a?(String) ? [object_version] : object_version object_version.each do |version_id| response.body['DeleteResult'] << delete_object_helper(bucket, object_name, version_id) end end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end private def delete_object_helper(bucket, object_name, version_id) response = { 'Deleted' => {} } if bucket[:versioning] bucket[:objects][object_name] ||= [] if version_id version = bucket[:objects][object_name].find { |object| object['VersionId'] == version_id} # S3 special cases the 'null' value to not error out if no such version exists. if version || (version_id == 'null') bucket[:objects][object_name].delete(version) bucket[:objects].delete(object_name) if bucket[:objects][object_name].empty? response['Deleted'] = { 'Key' => object_name, 'VersionId' => version_id, 'DeleteMarker' => 'true', 'DeleteMarkerVersionId' => version_id } else response = delete_error_body(object_name, version_id, 'InvalidVersion', 'Invalid version ID specified') end else delete_marker = { :delete_marker => true, 'Key' => object_name, 'VersionId' => bucket[:versioning] == 'Enabled' ? Fog::Mock.random_base64(32) : 'null', 'Last-Modified' => Fog::Time.now.to_date_header } # When versioning is suspended, a delete marker is placed if the last object ID is not the value 'null', # otherwise the last object is replaced. 
if bucket[:versioning] == 'Suspended' && bucket[:objects][object_name].first['VersionId'] == 'null' bucket[:objects][object_name].shift end bucket[:objects][object_name].unshift(delete_marker) response['Deleted'] = { 'Key' => object_name, 'VersionId' => delete_marker['VersionId'], 'DeleteMarkerVersionId' => delete_marker['VersionId'], 'DeleteMarker' => 'true', } end else if version_id && version_id != 'null' response = delete_error_body(object_name, version_id, 'InvalidVersion', 'Invalid version ID specified') response = invalid_version_id_payload(version_id) else bucket[:objects].delete(object_name) response['Deleted'] = { 'Key' => object_name } end end response end def delete_error_body(key, version_id, message, code) { 'Error' => { 'Code' => code, 'Message' => message, 'VersionId' => version_id, 'Key' => key, } } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_object.rb000066400000000000000000000102101437344660100237170ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Delete an object from S3 # # @param bucket_name [String] Name of bucket containing object to delete # @param object_name [String] Name of object to delete # # @return [Excon::Response] response: # * status [Integer] - 204 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html def delete_object(bucket_name, object_name, options = {}) if version_id = options.delete('versionId') query = {'versionId' => version_id} else query = {} end headers = options request({ :expects => 204, :headers => headers, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'DELETE', :query => query }) end end class Mock # :nodoc:all def delete_object(bucket_name, object_name, options = {}) response = Excon::Response.new if bucket = self.data[:buckets][bucket_name] response.status = 204 version_id = options.delete('versionId') if bucket[:versioning] bucket[:objects][object_name] ||= [] if version_id version = bucket[:objects][object_name].find { |object| object['VersionId'] == version_id} # S3 special cases the 'null' value to not error out if no such version exists. if version || (version_id == 'null') bucket[:objects][object_name].delete(version) bucket[:objects].delete(object_name) if bucket[:objects][object_name].empty? response.headers['x-amz-delete-marker'] = 'true' if version[:delete_marker] response.headers['x-amz-version-id'] = version_id else response.status = 400 response.body = invalid_version_id_payload(version_id) raise(Excon::Errors.status_error({:expects => 200}, response)) end else delete_marker = { :delete_marker => true, 'Key' => object_name, 'VersionId' => bucket[:versioning] == 'Enabled' ? Fog::Mock.random_base64(32) : 'null', 'Last-Modified' => Fog::Time.now.to_date_header } # When versioning is suspended, a delete marker is placed if the last object ID is not the value 'null', # otherwise the last object is replaced. 
if bucket[:versioning] == 'Suspended' && bucket[:objects][object_name].first['VersionId'] == 'null' bucket[:objects][object_name].shift end bucket[:objects][object_name].unshift(delete_marker) response.headers['x-amz-delete-marker'] = 'true' response.headers['x-amz-version-id'] = delete_marker['VersionId'] end else if version_id && version_id != 'null' response.status = 400 response.body = invalid_version_id_payload(version_id) raise(Excon::Errors.status_error({:expects => 200}, response)) else bucket[:objects].delete(object_name) response.headers['x-amz-version-id'] = 'null' end end else response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) end response end private def invalid_version_id_payload(version_id) { 'Error' => { 'Code' => 'InvalidArgument', 'Message' => 'Invalid version id specified', 'ArgumentValue' => version_id, 'ArgumentName' => 'versionId', 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/delete_object_url.rb000066400000000000000000000022471437344660100246140ustar00rootroot00000000000000module Fog module AWS class Storage module DeleteObjectUrl def delete_object_url(bucket_name, object_name, expires, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end signed_url(options.merge({ :bucket_name => bucket_name, :object_name => object_name, :method => 'DELETE' }), expires) end end class Real # Get an expiring object url from S3 for deleting an object # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] - url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include DeleteObjectUrl end class Mock # :nodoc:all include DeleteObjectUrl end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket.rb000066400000000000000000000113151437344660100232520ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket' # List information about objects in an S3 bucket # # @param bucket_name [String] name of bucket to list object keys from # @param options [Hash] config arguments for list. Defaults to {}. # @option options delimiter [String] causes keys with the same string between the prefix # value and the first occurence of delimiter to be rolled up # @option options marker [String] limits object keys to only those that appear # lexicographically after its value. # @option options max-keys [Integer] limits number of object keys returned # @option options prefix [String] limits object keys to those beginning with its value. 
        #
        # @return [Excon::Response] response:
        #   * body [Hash]:
        #     * Delimiter [String] - Delimiter specified for query
        #     * IsTruncated [Boolean] - Whether or not the listing is truncated
        #     * Marker [String] - Marker specified for query
        #     * MaxKeys [Integer] - Maximum number of keys specified for query
        #     * Name [String] - Name of the bucket
        #     * Prefix [String] - Prefix specified for query
        #     * CommonPrefixes [Array] - Array of strings for common prefixes
        #     * Contents [Array]:
        #       * ETag [String] - Etag of object
        #       * Key [String] - Name of object
        #       * LastModified [String] - Timestamp of last modification of object
        #       * Owner [Hash]:
        #         * DisplayName [String] - Display name of object owner
        #         * ID [String] - Id of object owner
        #       * Size [Integer] - Size of object
        #       * StorageClass [String] - Storage class of object
        #
        # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
        def get_bucket(bucket_name, options = {})
          unless bucket_name
            raise ArgumentError.new('bucket_name is required')
          end
          request({
            :expects     => 200,
            :headers     => {},
            :bucket_name => bucket_name,
            :idempotent  => true,
            :method      => 'GET',
            :parser      => Fog::Parsers::AWS::Storage::GetBucket.new,
            :query       => options
          })
        end
      end

      class Mock # :nodoc:all
        def get_bucket(bucket_name, options = {})
          prefix, marker, delimiter, max_keys = \
            options['prefix'], options['marker'], options['delimiter'], options['max-keys']

          common_prefixes = []

          unless bucket_name
            raise ArgumentError.new('bucket_name is required')
          end
          response = Excon::Response.new
          if bucket = self.data[:buckets][bucket_name]
            contents = bucket[:objects].values.map(&:first).sort {|x,y| x['Key'] <=> y['Key']}.reject do |object|
                (prefix && object['Key'][0...prefix.length] != prefix) ||
                (marker && object['Key'] <= marker) ||
                (delimiter && object['Key'][(prefix ?
prefix.length : 0)..-1].include?(delimiter) \ && common_prefixes << object['Key'].sub(/^(#{prefix}[^#{delimiter}]+.).*/, '\1')) || object.key?(:delete_marker) end.map do |object| data = object.reject {|key, value| !['ETag', 'Key', 'StorageClass'].include?(key)} data.merge!({ 'LastModified' => Time.parse(object['Last-Modified']), 'Owner' => bucket['Owner'], 'Size' => object['Content-Length'].to_i }) data end max_keys = max_keys || 1000 size = [max_keys, 1000].min truncated_contents = contents[0...size] response.status = 200 response.body = { 'CommonPrefixes' => common_prefixes.uniq, 'Contents' => truncated_contents, 'IsTruncated' => truncated_contents.size != contents.size, 'Marker' => marker, 'MaxKeys' => max_keys, 'Name' => bucket['Name'], 'Prefix' => prefix } if max_keys && max_keys < response.body['Contents'].length response.body['IsTruncated'] = true response.body['Contents'] = response.body['Contents'][0...max_keys] end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_acl.rb000066400000000000000000000042421437344660100240720ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/access_control_list' # Get access control list for an S3 bucket # # @param bucket_name [String] name of bucket to get access control list for # # @return [Excon::Response] response: # * body [Hash]: # * AccessControlPolicy [Hash]: # * Owner [Hash]: # * DisplayName [String] - Display name of object owner # * ID [String] - Id of object owner # * AccessControlList [Array]: # * Grant [Hash]: # * Grantee [Hash]: # * DisplayName [String] - Display name of grantee # * ID [String] - Id of grantee # or # * URI [String] - URI of group to grant access for # * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html def get_bucket_acl(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::AccessControlList.new, :query => {'acl' => nil} }) end end class Mock # :nodoc:all require 'fog/aws/requests/storage/acl_utils' def get_bucket_acl(bucket_name) response = Excon::Response.new if acl = self.data[:acls][:bucket][bucket_name] response.status = 200 if acl.is_a?(String) response.body = Fog::AWS::Storage.acl_to_hash(acl) else response.body = acl end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_cors.rb000066400000000000000000000051631437344660100243040ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/cors_configuration' # Gets the CORS configuration for an S3 bucket # # @param bucket_name [String] name of bucket to get access control list for # # @return [Excon::Response] response: # * body [Hash]: # * CORSConfiguration [Array]: # * CORSRule [Hash]: # * AllowedHeader [String] - Which headers are allowed in a pre-flight OPTIONS request through the Access-Control-Request-Headers header. # * AllowedMethod [String] - Identifies an HTTP method that the domain/origin specified in the rule is allowed to execute. 
# * AllowedOrigin [String] - One or more origins you want to allow cross-domain requests from (for example, http://www.example.com). # * ExposeHeader [String] - One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object). # * ID [String] - An optional unique identifier for the rule. The ID value can be up to 255 characters long. The IDs help you find a rule in the configuration. # * MaxAgeSeconds [Integer] - The time in seconds that your browser is to cache the preflight response for the specified resource. # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html def get_bucket_cors(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::CorsConfiguration.new, :query => {'cors' => nil} }) end end class Mock # :nodoc:all require 'fog/aws/requests/storage/cors_utils' def get_bucket_cors(bucket_name) response = Excon::Response.new if cors = self.data[:cors][:bucket][bucket_name] response.status = 200 if cors.is_a?(String) response.body = Fog::AWS::Storage.cors_to_hash(cors) else response.body = cors end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_lifecycle.rb000066400000000000000000000024441437344660100252740ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_lifecycle' # Get bucket lifecycle configuration # # @param bucket_name [String] name of bucket to get lifecycle configuration for # # @return [Excon::Response] response: # * body [Hash]: # * Rules - object expire rules [Array]: # * ID [String] - Unique identifier for the rule # * Prefix [String] - Prefix identifying one or more objects to which the rule applies # * Enabled [Boolean] - if rule is currently being applied # * Days [Integer] - lifetime, in days, of the objects that are subject to the rule # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html def get_bucket_lifecycle(bucket_name) request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketLifecycle.new, :query => {'lifecycle' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_location.rb000066400000000000000000000031431437344660100251420ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_location' # Get location constraint for an S3 bucket # # @param bucket_name [String] name of bucket to get location constraint for # # @return [Excon::Response] response: # * body [Hash]: # * LocationConstraint [String] - Location constraint of the bucket # # @see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html def get_bucket_location(bucket_name) request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketLocation.new, :query => {'location' => nil} }) end end class Mock # :nodoc:all def get_bucket_location(bucket_name) response = Excon::Response.new if bucket =
self.data[:buckets][bucket_name] location_constraint = case bucket['LocationConstraint'] when 'us-east-1' nil when 'eu-east-1' 'EU' else bucket['LocationConstraint'] end response.status = 200 response.body = {'LocationConstraint' => location_constraint } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_logging.rb000066400000000000000000000032271437344660100247630ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_logging' # Get logging status for an S3 bucket # # @param bucket_name [String] name of bucket to get logging status for # # @return [Excon::Response] response: # * body [Hash]: # * BucketLoggingStatus (will be empty if logging is disabled) [Hash]: # * LoggingEnabled [Hash]: # * TargetBucket [String] - bucket where logs are stored # * TargetPrefix [String] - prefix logs are stored with # * TargetGrants [Array]: # * Grant [Hash]: # * Grantee [Hash]: # * DisplayName [String] - Display name of grantee # * ID [String] - Id of grantee # or # * URI [String] - URI of group to grant access for # * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html def get_bucket_logging(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketLogging.new, :query => {'logging' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_notification.rb000066400000000000000000000052231437344660100260210ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_notification' # Get bucket notification configuration # # @param bucket_name [String] name of bucket to get notification configuration for # # @return [Excon::Response] response: # * body [Hash]: # * Topics [Array] SNS topic configurations for the notification # * ID [String] Unique identifier for the configuration # * Topic [String] Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type # * Event [String] Bucket event for which to send notifications # * Queues [Array] SQS queue configurations for the notification # * ID [String] Unique identifier for the configuration # * Queue [String] Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type # * Event [String] Bucket event for which to send notifications # * CloudFunctions [Array] AWS Lambda notification configurations # * ID [String] Unique identifier for the configuration # * CloudFunction [String] Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type # * InvocationRole [String] IAM role ARN that Amazon S3 can assume to invoke the specified cloud function on your behalf # * Event [String] Bucket event for which to send notifications # # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETnotification.html def get_bucket_notification(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => 
Fog::Parsers::AWS::Storage::GetBucketNotification.new, :query => {'notification' => nil} }) end end class Mock def get_bucket_notification(bucket_name) response = Excon::Response.new if self.data[:buckets][bucket_name] && self.data[:bucket_notifications][bucket_name] response.status = 200 response.body = self.data[:bucket_notifications][bucket_name] else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_object_versions.rb000066400000000000000000000166451437344660100265430ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_object_versions' # List information about object versions in an S3 bucket # # @param bucket_name [String] name of bucket to list object keys from # @param options [Hash] config arguments for list # @option options delimiter [String] causes keys with the same string between the prefix value and the first occurence of delimiter to be rolled up # @option options key-marker [String] limits object keys to only those that appear lexicographically after its value. # @option options max-keys [Integer] limits number of object keys returned # @option options prefix [String] limits object keys to those beginning with its value. # @option options version-id-marker [String] limits object versions to only those that appear lexicographically after its value # # @return [Excon::Response] response: # * body [Hash]: # * Delimeter [String] - Delimiter specified for query # * KeyMarker [String] - Key marker specified for query # * MaxKeys [Integer] - Maximum number of keys specified for query # * Name [String] - Name of the bucket # * Prefix [String] - Prefix specified for query # * VersionIdMarker [String] - Version id marker specified for query # * IsTruncated [Boolean] - Whether or not this is the totality of the bucket # * Versions [Array]: # * DeleteMarker [Hash]: # * IsLatest [Boolean] - Whether or not this is the latest version # * Key [String] - Name of object # * LastModified [String]: Timestamp of last modification of object # * Owner [Hash]: # * DisplayName [String] - Display name of object owner # * ID [String] - Id of object owner # * VersionId [String] - The id of this version # or # * Version [Hash]: # * ETag [String]: Etag of object # * IsLatest [Boolean] - Whether or not this is the latest version # * Key [String] - Name of object # * LastModified [String]: Timestamp of last modification of object # * Owner [Hash]: # * DisplayName [String] - Display name of object owner # * ID [String] - Id of object owner # * Size [Integer] - Size of object # * StorageClass [String] - Storage class of object # * VersionId [String] - The id of this version # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html def get_bucket_object_versions(bucket_name, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketObjectVersions.new, :query => {'versions' => nil}.merge!(options) }) end end class Mock def get_bucket_object_versions(bucket_name, options = {}) delimiter, key_marker, max_keys, prefix, version_id_marker = \ options['delimiter'], options['key-marker'], options['max-keys'],options['prefix'],options['version-id-marker'] unless bucket_name raise ArgumentError.new('bucket_name is 
required') end response = Excon::Response.new # Invalid arguments. if version_id_marker && !key_marker response.status = 400 response.body = { 'Error' => { 'Code' => 'InvalidArgument', 'Message' => 'A version-id marker cannot be specified without a key marker.', 'ArgumentValue' => version_id_marker, 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) # Valid case. # TODO: (nirvdrum 12/15/11) It's not clear to me how to actually use version-id-marker, so I didn't implement it below. elsif bucket = self.data[:buckets][bucket_name] # We need to order results by S3 key, but since our data store is key => [versions], we want to ensure the integrity # of the versions as well. So, sort the keys, then fetch the versions, and then combine them all as a sorted list by # flattening the results. contents = bucket[:objects].keys.sort.map { |key| bucket[:objects][key] }.flatten.reject do |object| (prefix && object['Key'][0...prefix.length] != prefix) || (key_marker && object['Key'] <= key_marker) || (delimiter && object['Key'][(prefix ? prefix.length : 0)..-1].include?(delimiter) \ && common_prefixes << object['Key'].sub(/^(#{prefix}[^#{delimiter}]+.).*/, '\1')) end.map do |object| if object.key?(:delete_marker) tag_name = 'DeleteMarker' extracted_attrs = ['Key', 'VersionId'] else tag_name = 'Version' extracted_attrs = ['ETag', 'Key', 'StorageClass', 'VersionId'] end data = {} data[tag_name] = object.reject { |key, value| !extracted_attrs.include?(key) } data[tag_name].merge!({ 'LastModified' => Time.parse(object['Last-Modified']), 'Owner' => bucket['Owner'], 'IsLatest' => object == bucket[:objects][object['Key']].first }) data[tag_name]['Size'] = object['Content-Length'].to_i if tag_name == 'Version' data end max_keys = max_keys || 1000 size = [max_keys, 1000].min truncated_contents = contents[0...size] response.status = 200 response.body = { 'Versions' => truncated_contents, 'IsTruncated' => truncated_contents.size != contents.size, 'KeyMarker' => key_marker, 'VersionIdMarker' => version_id_marker, 'MaxKeys' => max_keys, 'Name' => bucket['Name'], 'Prefix' => prefix } if max_keys && max_keys < response.body['Versions'].length response.body['IsTruncated'] = true response.body['Versions'] = response.body['Versions'][0...max_keys] end # Missing bucket case. else response.status = 404 response.body = { 'Error' => { 'Code' => 'NoSuchBucket', 'Message' => 'The specified bucket does not exist', 'BucketName' => bucket_name, 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_policy.rb000066400000000000000000000016331437344660100246330ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Get bucket policy for an S3 bucket # # @param bucket_name [String] name of bucket to get policy for # # @return [Excon::Response] response: # * body [Hash] - policy document # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html def get_bucket_policy(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end response = request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :query => {'policy' => nil} }) response.body = Fog::JSON.decode(response.body) unless response.body.nil? 
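          # The policy document comes back from S3 as a JSON string, so it is decoded into a Hash here to match the documented body.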
end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_tagging.rb000066400000000000000000000030241437344660100247500ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_tagging' # Get tags for an S3 bucket # # @param bucket_name [String] name of bucket to get tags for # # @return [Excon::Response] response: # * body [Hash]: # * BucketTagging [Hash]: # * Key [String] - tag key # * Value [String] - tag value # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETtagging.html def get_bucket_tagging(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketTagging.new, :query => {'tagging' => nil} }) end end class Mock # :nodoc:all def get_bucket_tagging(bucket_name) response = Excon::Response.new if self.data[:buckets][bucket_name] && self.data[:bucket_tagging][bucket_name] response.status = 200 response.body = {'BucketTagging' => self.data[:bucket_tagging][bucket_name]} else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_versioning.rb000066400000000000000000000040741437344660100255210ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_versioning' # Get versioning status for an S3 bucket # # @param bucket_name [String] name of bucket to get versioning status for # # @return [Excon::Response] response: # * body [Hash]: # * VersioningConfiguration [Hash]: # * Status [String] - Versioning status in ['Enabled', 'Suspended', nil] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html def get_bucket_versioning(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketVersioning.new, :query => {'versioning' => nil} }) end end class Mock def get_bucket_versioning(bucket_name) response = Excon::Response.new bucket = self.data[:buckets][bucket_name] if bucket response.status = 200 if bucket[:versioning] response.body = { 'VersioningConfiguration' => { 'Status' => bucket[:versioning] } } else response.body = { 'VersioningConfiguration' => {} } end else response.status = 404 response.body = { 'Error' => { 'Code' => 'NoSuchBucket', 'Message' => 'The specified bucket does not exist', 'BucketName' => bucket_name, 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_bucket_website.rb000066400000000000000000000023011437344660100247670ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_bucket_website' # Get website configuration for an S3 bucket # # # @param bucket_name [String] name of bucket to get website configuration for # # @return [Excon::Response] response: # * body [Hash]: # * IndexDocument [Hash]: # * Suffix [String] - Suffix appended when directory is requested # * ErrorDocument [Hash]: # * Key [String] - Object key to return for 4XX class errors # # @see 
http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html def get_bucket_website(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetBucketWebsite.new, :query => {'website' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object.rb000066400000000000000000000201131437344660100232370ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Get an object from S3 # # @param bucket_name [String] Name of bucket to read from # @param object_name [String] Name of object to read # @param options [Hash] # @option options If-Match [String] Returns object only if its etag matches this value, otherwise returns 412 (Precondition Failed). # @option options If-Modified-Since [Time] Returns object only if it has been modified since this time, otherwise returns 304 (Not Modified). # @option options If-None-Match [String] Returns object only if its etag differs from this value, otherwise returns 304 (Not Modified) # @option options If-Unmodified-Since [Time] Returns object only if it has not been modified since this time, otherwise returns 412 (Precodition Failed). # @option options Range [String] Range of object to download # @option options versionId [String] specify a particular version to retrieve # @option options query[Hash] specify additional query string # # @return [Excon::Response] response: # * body [String]- Contents of object # * headers [Hash]: # * Content-Length [String] - Size of object contents # * Content-Type [String] - MIME type of object # * ETag [String] - Etag of object # * Last-Modified [String] - Last modified timestamp for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html def get_object(bucket_name, object_name, options = {}, &block) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end params = { :headers => {} } params[:query] = options.delete('query') || {} if version_id = options.delete('versionId') params[:query] = params[:query].merge({'versionId' => version_id}) end params[:headers].merge!(options) if options['If-Modified-Since'] params[:headers]['If-Modified-Since'] = Fog::Time.at(options['If-Modified-Since'].to_i).to_date_header end if options['If-Unmodified-Since'] params[:headers]['If-Unmodified-Since'] = Fog::Time.at(options['If-Unmodified-Since'].to_i).to_date_header end idempotent = true if block_given? params[:response_block] = Proc.new(&block) idempotent = false end request(params.merge!({ :expects => [ 200, 206 ], :bucket_name => bucket_name, :object_name => object_name, :idempotent => idempotent, :method => 'GET', })) end end class Mock # :nodoc:all def get_object(bucket_name, object_name, options = {}, &block) version_id = options.delete('versionId') unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end response = Excon::Response.new if (bucket = self.data[:buckets][bucket_name]) object = nil if bucket[:objects].key?(object_name) object = version_id ? 
bucket[:objects][object_name].find { |object| object['VersionId'] == version_id} : bucket[:objects][object_name].first end if (object && !object[:delete_marker]) if options['If-Match'] && options['If-Match'] != object['ETag'] response.status = 412 raise(Excon::Errors.status_error({:expects => 200}, response)) elsif options['If-Modified-Since'] && options['If-Modified-Since'] >= Time.parse(object['Last-Modified']) response.status = 304 raise(Excon::Errors.status_error({:expects => 200}, response)) elsif options['If-None-Match'] && options['If-None-Match'] == object['ETag'] response.status = 304 raise(Excon::Errors.status_error({:expects => 200}, response)) elsif options['If-Unmodified-Since'] && options['If-Unmodified-Since'] < Time.parse(object['Last-Modified']) response.status = 412 raise(Excon::Errors.status_error({:expects => 200}, response)) else response.status = 200 for key, value in object case key when 'Cache-Control', 'Content-Disposition', 'Content-Encoding', 'Content-Length', 'Content-MD5', 'Content-Type', 'ETag', 'Expires', 'Last-Modified', /^x-amz-meta-/ response.headers[key] = value end end response.headers['x-amz-version-id'] = object['VersionId'] if bucket[:versioning] body = object[:body] if options['Range'] # since AWS S3 itself does not support multiple range headers, we will use only the first ranges = byte_ranges(options['Range'], body.size) unless ranges.nil? || ranges.empty? response.status = 206 body = body[ranges.first] end end unless block_given? response.body = body else data = StringIO.new(body) remaining = total_bytes = data.length while remaining > 0 chunk = data.read([remaining, Excon::CHUNK_SIZE].min) block.call(chunk, remaining, total_bytes) remaining -= Excon::CHUNK_SIZE end end end elsif version_id && !object response.status = 400 response.body = { 'Error' => { 'Code' => 'InvalidArgument', 'Message' => 'Invalid version id specified', 'ArgumentValue' => version_id, 'ArgumentName' => 'versionId', 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) else response.status = 404 response.body = "...NoSuchKey<\/Code>..." raise(Excon::Errors.status_error({:expects => 200}, response)) end else response.status = 404 response.body = "...NoSuchBucket..." raise(Excon::Errors.status_error({:expects => 200}, response)) end response end private # === Borrowed from rack # Parses the "Range:" header, if present, into an array of Range objects. # Returns nil if the header is missing or syntactically invalid. # Returns an empty array if none of the ranges are satisfiable. def byte_ranges(http_range, size) # See return nil unless http_range ranges = [] http_range.split(/,\s*/).each do |range_spec| matches = range_spec.match(/bytes=(\d*)-(\d*)/) return nil unless matches r0,r1 = matches[1], matches[2] if r0.empty? return nil if r1.empty? # suffix-byte-range-spec, represents trailing suffix of file r0 = [size - r1.to_i, 0].max r1 = size - 1 else r0 = r0.to_i if r1.empty? 
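              # open-ended range ("bytes=N-"): no end byte was given, so the range extends to the last byte of the object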
r1 = size - 1 else r1 = r1.to_i return nil if r1 < r0 # backwards range is syntactically invalid r1 = size-1 if r1 >= size end end ranges << (r0..r1) if r0 <= r1 end ranges end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_acl.rb000066400000000000000000000053501437344660100240640ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/access_control_list' # Get access control list for an S3 object # # @param bucket_name [String] name of bucket containing object # @param object_name [String] name of object to get access control list for # @param options [Hash] # @option options versionId [String] specify a particular version to retrieve # # @return [Excon::Response] response: # * body [Hash]: # * [AccessControlPolicy [Hash]: # * Owner [Hash]: # * DisplayName [String] - Display name of object owner # * ID [String] - Id of object owner # * AccessControlList [Array]: # * Grant [Hash]: # * Grantee [Hash]: # * DisplayName [String] - Display name of grantee # * ID [String] - Id of grantee # or # * URI [String] - URI of group to grant access for # * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html def get_object_acl(bucket_name, object_name, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end query = {'acl' => nil} if version_id = options.delete('versionId') query['versionId'] = version_id end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::AccessControlList.new, :query => query }) end end class Mock # :nodoc:all require 'fog/aws/requests/storage/acl_utils' def get_object_acl(bucket_name, object_name, options = {}) response = Excon::Response.new if acl = self.data[:acls][:object][bucket_name] && self.data[:acls][:object][bucket_name][object_name] response.status = 200 if acl.is_a?(String) response.body = Fog::AWS::Storage.acl_to_hash(acl) else response.body = acl end else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_http_url.rb000066400000000000000000000015661437344660100251730ustar00rootroot00000000000000module Fog module AWS class Storage module GetObjectHttpUrl def get_object_http_url(bucket_name, object_name, expires, options = {}) get_object_url(bucket_name, object_name, expires, options.merge(:scheme => 'http')) end end class Real # Get an expiring object http url from S3 # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] - url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include GetObjectHttpUrl end class Mock # :nodoc:all include GetObjectHttpUrl end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_https_url.rb000066400000000000000000000015741437344660100253550ustar00rootroot00000000000000module Fog module AWS class Storage module GetObjectHttpsUrl def get_object_https_url(bucket_name, object_name, expires, options = {}) get_object_url(bucket_name, object_name, expires, 
options.merge(:scheme => 'https')) end end class Real # Get an expiring object https url from S3 # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] - url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include GetObjectHttpsUrl end class Mock # :nodoc:all include GetObjectHttpsUrl end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_tagging.rb000066400000000000000000000023521437344660100247440ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_object_tagging' # Get tags for an S3 object # # @param bucket_name [String] Name of bucket to read from # @param object_name [String] Name of object to get tags for # # @return [Excon::Response] response: # * body [Hash]: # * ObjectTagging [Hash]: # * Key [String] - tag key # * Value [String] - tag value # @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html def get_object_tagging(bucket_name, object_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetObjectTagging.new, :query => {'tagging' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_torrent.rb000066400000000000000000000030641437344660100250220ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Get torrent for an S3 object # # @param bucket_name [String] name of bucket containing object # @param object_name [String] name of object to get torrent for # # @return [Excon::Response] response: # * body [Hash]: # * AccessControlPolicy [Hash: # * Owner [Hash]: # * DisplayName [String] - Display name of object owner # * ID [String] - Id of object owner # * AccessControlList [Array]: # * Grant [Hash]: # * Grantee [Hash]: # * DisplayName [String] - Display name of grantee # * ID [String] - Id of grantee # * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html def get_object_torrent(bucket_name, object_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'GET', :query => {'torrent' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_object_url.rb000066400000000000000000000022011437344660100241170ustar00rootroot00000000000000module Fog module AWS class Storage module GetObjectUrl def get_object_url(bucket_name, object_name, expires, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end signed_url(options.merge({ :bucket_name => bucket_name, :object_name => object_name, :method => 'GET' }), expires) end end class Real # Get an expiring object url from S3 # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name 
of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] - url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include GetObjectUrl end class Mock # :nodoc:all include GetObjectUrl end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_request_payment.rb000066400000000000000000000025341437344660100252250ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_request_payment' # Get configured payer for an S3 bucket # # @param bucket_name [String] name of bucket to get payer for # # @return [Excon::Response] response: # * body [Hash]: # * Payer [String] - Specifies who pays for download and requests # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html def get_request_payment(bucket_name) request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetRequestPayment.new, :query => {'requestPayment' => nil} }) end end class Mock # :nodoc:all def get_request_payment(bucket_name) response = Excon::Response.new if bucket = self.data[:buckets][bucket_name] response.status = 200 response.body = { 'Payer' => bucket['Payer'] } else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/get_service.rb000066400000000000000000000027521437344660100234420ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/get_service' # List information about S3 buckets for authorized user # # @return [Excon::Response] response: # * body [Hash]: # * Buckets [Hash]: # * Name [String] - Name of bucket # * CreationTime [Time] - Timestamp of bucket creation # * Owner [Hash]: # * DisplayName [String] - Display name of bucket owner # * ID [String] - Id of bucket owner # # @see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html # def get_service request({ :expects => 200, :headers => {}, :host => 's3.amazonaws.com', :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::GetService.new }) end end class Mock # :nodoc:all def get_service response = Excon::Response.new response.headers['Status'] = 200 buckets = self.data[:buckets].values.map do |bucket| bucket.reject do |key, value| !['CreationDate', 'Name'].include?(key) end end response.body = { 'Buckets' => buckets, 'Owner' => { 'DisplayName' => 'owner', 'ID' => 'some_id'} } response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/head_bucket.rb000066400000000000000000000021471437344660100233770ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Get headers for an S3 bucket, used to verify if it exists and if you have permission to access it # # @param bucket_name [String] Name of bucket to read from # # @return [Excon::Response] 200 response implies it exists, 404 does not exist, 403 no permissions # * body [String] Empty # * headers [Hash]: # * Content-Type [String] - MIME type of object # # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html # def head_bucket(bucket_name) unless bucket_name raise ArgumentError.new('bucket_name is required') end request({ :expects => 200, :bucket_name => bucket_name, :idempotent => true, :method => 'HEAD', }) end end class Mock # :nodoc:all def head_bucket(bucket_name) 
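          # The mock reuses get_bucket so a missing bucket raises the same 404, then drops the body to mimic a HEAD response.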
response = get_bucket(bucket_name) response.body = nil response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/head_object.rb000066400000000000000000000053571437344660100233740ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Get headers for an object from S3 # # @param bucket_name [String] Name of bucket to read from # @param object_name [String] Name of object to read # @param options [Hash]: # @option options [String] If-Match Returns object only if its etag matches this value, otherwise returns 412 (Precondition Failed). # @option options [Time] If-Modified-Since Returns object only if it has been modified since this time, otherwise returns 304 (Not Modified). # @option options [String] If-None-Match Returns object only if its etag differs from this value, otherwise returns 304 (Not Modified) # @option options [Time] If-Unmodified-Since Returns object only if it has not been modified since this time, otherwise returns 412 (Precondition Failed). # @option options [String] Range Range of object to download # @option options [String] versionId specify a particular version to retrieve # # @return [Excon::Response] response: # * body [String] Empty # * headers [Hash]: # * Content-Length [String] - Size of object contents # * Content-Type [String] - MIME type of object # * ETag [String] - Etag of object # * Last-Modified [String] - Last modified timestamp for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html # def head_object(bucket_name, object_name, options={}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end if version_id = options.delete('versionId') query = {'versionId' => version_id} end headers = {} headers['If-Modified-Since'] = Fog::Time.at(options['If-Modified-Since'].to_i).to_date_header if options['If-Modified-Since'] headers['If-Unmodified-Since'] = Fog::Time.at(options['If-Unmodified-Since'].to_i).to_date_header if options['If-Unmodified-Since'] headers.merge!(options) request({ :expects => 200, :headers => headers, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'HEAD', :query => query }) end end class Mock # :nodoc:all def head_object(bucket_name, object_name, options = {}) response = get_object(bucket_name, object_name, options) response.body = nil response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/head_object_url.rb000066400000000000000000000022101437344660100242410ustar00rootroot00000000000000module Fog module AWS class Storage module HeadObjectUrl def head_object_url(bucket_name, object_name, expires, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end signed_url(options.merge({ :bucket_name => bucket_name, :object_name => object_name, :method => 'HEAD' }), expires) end end class Real # Get an expiring head request url from S3 # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] - url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include HeadObjectUrl end class Mock # :nodoc:all include HeadObjectUrl end end end end
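# Illustrative use of the expiring-URL helpers above (credentials, bucket, key and expiry below are placeholders):
#   storage = Fog::AWS::Storage.new(:aws_access_key_id => 'KEY', :aws_secret_access_key => 'SECRET')
#   storage.head_object_url('my-bucket', 'path/to/key', Time.now + 300)
#   storage.get_object_url('my-bucket', 'path/to/key', Time.now + 300)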
fog-aws-3.18.0/lib/fog/aws/requests/storage/initiate_multipart_upload.rb000066400000000000000000000054201437344660100264110ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/initiate_multipart_upload' # Initiate a multipart upload to an S3 bucket # # @param bucket_name [String] Name of bucket to create object in # @param object_name [String] Name of object to create # @param options [Hash]: # @option options [String] Cache-Control Caching behaviour # @option options [String] Content-Disposition Presentational information for the object # @option options [String] Content-Encoding Encoding of object data # @option options [String] Content-MD5 Base64 encoded 128-bit MD5 digest of message (defaults to Base64 encoded MD5 of object.read) # @option options [String] Content-Type Standard MIME type describing contents (defaults to MIME::Types.of.first) # @option options [String] x-amz-acl Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] # @option options [String] x-amz-meta-#{name} Headers to be returned with object, note total size of request without body must be less than 8 KB. # # @return [Excon::Response] response: # * body [Hash]: # * Bucket [String] - Bucket where upload was initiated # * Key [String] - Object key where the upload was initiated # * UploadId [String] - Id for initiated multipart upload # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html # def initiate_multipart_upload(bucket_name, object_name, options = {}) request({ :expects => 200, :headers => options, :bucket_name => bucket_name, :object_name => object_name, :method => 'POST', :parser => Fog::Parsers::AWS::Storage::InitiateMultipartUpload.new, :query => {'uploads' => nil} }) end end # Real class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def initiate_multipart_upload(bucket_name, object_name, options = {}) verify_mock_bucket_exists(bucket_name) upload_id = UUID.uuid self.data[:multipart_uploads][bucket_name] ||= {} self.data[:multipart_uploads][bucket_name][upload_id] = { :parts => {}, :options => options, } response = Excon::Response.new response.status = 200 response.body = { "Bucket" => bucket_name, "Key" => object_name, "UploadId" => upload_id, } response end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/storage/list_multipart_uploads.rb000066400000000000000000000047571437344660100257550ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/list_multipart_uploads' # List multipart uploads for a bucket # # @param [String] bucket_name Name of bucket to list multipart uploads for # @param [Hash] options config arguments for list. Defaults to {}. # @option options [String] key-marker limits parts to only those that appear lexicographically after this key. # @option options [Integer] max-uploads limits number of uploads returned # @option options [String] upload-id-marker limits uploads to only those that appear lexicographically after this upload id. 
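      #
      # @example Sketch of listing in-progress uploads (assumes `storage` is a Fog::AWS::Storage connection; the bucket name is a placeholder)
      #   storage.list_multipart_uploads('my-bucket', 'max-uploads' => 100).body['Upload']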
# # @return [Excon::Response] response: # * body [Hash]: # * Bucket [String] Bucket where the multipart upload was initiated # * IsTruncated [Boolean] Whether or not the listing is truncated # * KeyMarker [String] first key in list, only upload ids after this lexographically will appear # * MaxUploads [Integer] Maximum results to return # * NextKeyMarker [String] last key in list, for further pagination # * NextUploadIdMarker [String] last key in list, for further pagination # * Upload [Hash]: # * Initiated [Time] Time when upload was initiated # * Initiator [Hash]: # * DisplayName [String] Display name of upload initiator # * ID [String] Id of upload initiator # * Key [String] Key where multipart upload was initiated # * Owner [Hash]: # * DisplayName [String] Display name of upload owner # * ID [String] Id of upload owner # * StorageClass [String] Storage class of object # * UploadId [String] upload id of upload containing part # * UploadIdMarker [String] first key in list, only upload ids after this lexographically will appear # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html # def list_multipart_uploads(bucket_name, options = {}) request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::ListMultipartUploads.new, :query => options.merge!({'uploads' => nil}) }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/list_parts.rb000066400000000000000000000045441437344660100233300ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/parsers/storage/list_parts' # List parts for a multipart upload # # @param bucket_name [String] Name of bucket to list parts for # @param object_name [String] Name of object to list parts for # @param upload_id [String] upload id to list objects for # @param options [Hash] config arguments for list. Defaults to {}. # @option options max-parts [Integer] limits number of parts returned # @option options part-number-marker [String] limits parts to only those that appear lexicographically after this part number. 
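      #
      # @example Sketch of listing uploaded parts (assumes `storage` is a Fog::AWS::Storage connection; the bucket, key and `upload_id` are placeholders)
      #   storage.list_parts('my-bucket', 'big-file.bin', upload_id, 'max-parts' => 1000).body['Part']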
# # @return [Excon::Response] response: # * body [Hash]: # * Bucket [string] Bucket where the multipart upload was initiated # * Initiator [Hash]: # * DisplayName [String] Display name of upload initiator # * ID [String] Id of upload initiator # * IsTruncated [Boolean] Whether or not the listing is truncated # * Key [String] Key where multipart upload was initiated # * MaxParts [String] maximum number of replies alllowed in response # * NextPartNumberMarker [String] last item in list, for further pagination # * Part [Array]: # * ETag [String] ETag of part # * LastModified [Timestamp] Last modified for part # * PartNumber [String] Part number for part # * Size [Integer] Size of part # * PartNumberMarker [String] Part number after which listing begins # * StorageClass [String] Storage class of object # * UploadId [String] upload id of upload containing part # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html # def list_parts(bucket_name, object_name, upload_id, options = {}) options['uploadId'] = upload_id request({ :expects => 200, :headers => {}, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'GET', :parser => Fog::Parsers::AWS::Storage::ListParts.new, :query => options.merge!({'uploadId' => upload_id}) }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/post_object_hidden_fields.rb000066400000000000000000000056401437344660100263160ustar00rootroot00000000000000module Fog module AWS class Storage module PostObjectHiddenFields # Get a hash of hidden fields for form uploading to S3, in the form {:field_name => :field_value} # Form should look like:
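      #   (a minimal sketch, assuming a bucket named 'my-bucket'):
      #   <form action="https://my-bucket.s3.amazonaws.com/" method="post" enctype="multipart/form-data">
      #     ...hidden fields from this helper...
      #     <input type="file" name="file" />
      #   </form>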
# These hidden fields should then appear, followed by a field named 'file' which is either a textarea or file input. # # @param options Hash: # @option options acl [String] access control list, in ['private', 'public-read', 'public-read-write', 'authenticated-read', 'bucket-owner-read', 'bucket-owner-full-control'] # @option options Cache-Control [String] same as REST header # @option options Content-Type [String] same as REST header # @option options Content-Disposition [String] same as REST header # @option options Content-Encoding [String] same as REST header # @option options Expires same as REST header # @option options key key for object, set to '${filename}' to use filename provided by user # @option options policy security policy for upload # @option options success_action_redirect url to redirct to upon success # @option options success_action_status status code to return on success, in [200, 201, 204] # @option options x-amz-security token devpay security token # @option options x-amz-meta... meta data tags # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/HTTPPOSTForms.html # def post_object_hidden_fields(options = {}) options = options.dup if policy = options['policy'] date = Fog::Time.now credential = "#{@aws_access_key_id}/#{@signer.credential_scope(date)}" extra_conditions = [ {'x-amz-date' => date.to_iso8601_basic}, {'x-amz-credential' => credential}, {'x-amz-algorithm' => Fog::AWS::SignatureV4::ALGORITHM} ] extra_conditions << {'x-amz-security-token' => @aws_session_token } if @aws_session_token policy_with_auth_fields = policy.merge('conditions' => policy['conditions'] + extra_conditions) options['policy'] = Base64.encode64(Fog::JSON.encode(policy_with_auth_fields)).gsub("\n", "") options['X-Amz-Credential'] = credential options['X-Amz-Date'] = date.to_iso8601_basic options['X-Amz-Algorithm'] = Fog::AWS::SignatureV4::ALGORITHM if @aws_session_token options['X-Amz-Security-Token'] = @aws_session_token end options['X-Amz-Signature'] = @signer.derived_hmac(date).sign(options['policy']).unpack('H*').first end options end end class Real include PostObjectHiddenFields end class Mock include PostObjectHiddenFields end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/post_object_restore.rb000066400000000000000000000036251437344660100252210ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Restore an object from Glacier to its original S3 path # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to restore # @option days [Integer] Number of days to restore object for. 
Defaults to 100000 (a very long time) # # @return [Excon::Response] response: # * status [Integer] 200 (OK) Object is previously restored # * status [Integer] 202 (Accepted) Object is not previously restored # * status [Integer] 409 (Conflict) Restore is already in progress # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPOSTrestore.html # def post_object_restore(bucket_name, object_name, days = 100000) raise ArgumentError.new('bucket_name is required') unless bucket_name raise ArgumentError.new('object_name is required') unless object_name data = '' + days.to_s + '' headers = {} headers['Content-MD5'] = Base64.encode64(OpenSSL::Digest::MD5.digest(data)).strip headers['Content-Type'] = 'application/xml' headers['Date'] = Fog::Time.now.to_date_header request({ :headers => headers, :bucket_name => bucket_name, :expects => [200, 202, 409], :body => data, :method => 'POST', :query => {'restore' => nil}, :object_name => object_name }) end end class Mock # :nodoc:all def post_object_restore(bucket_name, object_name, days = 100000) response = get_object(bucket_name, object_name) response.body = nil response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket.rb000066400000000000000000000051231437344660100233030ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Create an S3 bucket # # @param bucket_name [String] name of bucket to create # @option options [Hash] config arguments for bucket. Defaults to {}. # @option options LocationConstraint [Symbol] sets the location for the bucket # @option options x-amz-acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] # # @return [Excon::Response] response: # * status [Integer] 200 # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html # def put_bucket(bucket_name, options = {}) if location_constraint = options.delete('LocationConstraint') data = <<-DATA #{location_constraint} DATA else data = nil end request({ :expects => 200, :body => data, :headers => options, :idempotent => true, :bucket_name => bucket_name, :method => 'PUT' }) end end class Mock # :nodoc:all def put_bucket(bucket_name, options = {}) acl = options['x-amz-acl'] || 'private' if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') else self.data[:acls][:bucket][bucket_name] = self.class.acls(acl) end response = Excon::Response.new response.status = 200 bucket = { :objects => {}, 'Name' => bucket_name, 'CreationDate' => Time.now, 'Owner' => { 'DisplayName' => 'owner', 'ID' => 'some_id'}, 'Payer' => 'BucketOwner' } if options['LocationConstraint'] bucket['LocationConstraint'] = options['LocationConstraint'] else bucket['LocationConstraint'] = nil end if !self.data[:buckets][bucket_name] self.data[:buckets][bucket_name] = bucket elsif self.region != 'us-east-1' response.status = 409 Fog::Logger.warning "Your region '#{self.region}' does not match the default region 'us-east-1'" raise(Excon::Errors.status_error({:expects => 201}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_acl.rb000066400000000000000000000046261437344660100241310ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/requests/storage/acl_utils' # Change access control list for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param acl [Hash] # * Owner [Hash]: # * ID 
[String]: id of owner # * DisplayName [String]: display name of owner # * AccessControlList [Array]: # * Grantee [Hash]: # * DisplayName [String] Display name of grantee # * ID [String] Id of grantee # or # * EmailAddress [String] Email address of grantee # or # * URI [String] URI of group to grant access for # * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # * acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html def put_bucket_acl(bucket_name, acl) data = "" headers = {} if acl.is_a?(Hash) data = Fog::AWS::Storage.hash_to_acl(acl) else if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') end headers['x-amz-acl'] = acl end headers['Content-MD5'] = Base64.encode64(OpenSSL::Digest::MD5.digest(data)).strip headers['Content-Type'] = 'application/json' headers['Date'] = Fog::Time.now.to_date_header request({ :body => data, :expects => 200, :headers => headers, :bucket_name => bucket_name, :method => 'PUT', :query => {'acl' => nil} }) end end class Mock def put_bucket_acl(bucket_name, acl) if acl.is_a?(Hash) self.data[:acls][:bucket][bucket_name] = Fog::AWS::Storage.hash_to_acl(acl) else if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') end self.data[:acls][:bucket][bucket_name] = acl end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_cors.rb000066400000000000000000000036501437344660100243340ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/requests/storage/cors_utils' # Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it. # # @param bucket_name [String] name of bucket to modify # @param cors [Hash] # * CORSConfiguration [Array]: # * ID [String]: A unique identifier for the rule. # * AllowedMethod [String]: An HTTP method that you want to allow the origin to execute. # * AllowedOrigin [String]: An origin that you want to allow cross-domain requests from. # * AllowedHeader [String]: Specifies which headers are allowed in a pre-flight OPTIONS request via the Access-Control-Request-Headers header. # * MaxAgeSeconds [String]: The time in seconds that your browser is to cache the preflight response for the specified resource. # * ExposeHeader [String]: One or more headers in the response that you want customers to be able to access from their applications. 
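      #
      # @example A minimal CORS rule sketch (assumes `storage` is a Fog::AWS::Storage connection; the bucket name and origin are placeholders)
      #   storage.put_bucket_cors('my-bucket',
      #     'CORSConfiguration' => [{
      #       'AllowedOrigin' => ['https://example.com'],
      #       'AllowedMethod' => ['GET', 'PUT'],
      #       'AllowedHeader' => ['*'],
      #       'MaxAgeSeconds' => 3000
      #     }])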
# # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html def put_bucket_cors(bucket_name, cors) data = Fog::AWS::Storage.hash_to_cors(cors) headers = {} headers['Content-MD5'] = Base64.encode64(OpenSSL::Digest::MD5.digest(data)).strip headers['Content-Type'] = 'application/json' headers['Date'] = Fog::Time.now.to_date_header request({ :body => data, :expects => 200, :headers => headers, :bucket_name => bucket_name, :method => 'PUT', :query => {'cors' => nil} }) end end class Mock def put_bucket_cors(bucket_name, cors) self.data[:cors][:bucket][bucket_name] = Fog::AWS::Storage.hash_to_cors(cors) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_lifecycle.rb000066400000000000000000000124121437344660100253210ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change lifecycle configuration for an S3 bucket # # @param bucket_name [String] name of bucket to set lifecycle configuration for # * lifecycle [Hash]: # * Rules [Array] object expire rules # * ID [String] Unique identifier for the rule # * Prefix [String] Prefix identifying one or more objects to which the rule applies # * Enabled [Boolean] if rule is currently being applied # * [NoncurrentVersion]Expiration [Hash] Container for the object expiration rule. # * [Noncurrent]Days [Integer] lifetime, in days, of the objects that are subject to the rule # * Date [Date] Indicates when the specific rule take effect. # The date value must conform to the ISO 8601 format. The time is always midnight UTC. # * [NoncurrentVersion]Transition [Hash] Container for the transition rule that describes when objects transition # to the Glacier storage class # * [Noncurrent]Days [Integer] lifetime, in days, of the objects that are subject to the rule # * Date [Date] Indicates when the specific rule take effect. # The date value must conform to the ISO 8601 format. The time is always midnight UTC. # * StorageClass [String] Indicates the Amazon S3 storage class to which you want the object # to transition to. # # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html # def put_bucket_lifecycle(bucket_name, lifecycle) builder = Nokogiri::XML::Builder.new do LifecycleConfiguration { lifecycle['Rules'].each do |rule| Rule { ID rule['ID'] Prefix rule['Prefix'] Status rule['Enabled'] ? 'Enabled' : 'Disabled' unless (rule['Expiration'] or rule['Transition'] or rule['NoncurrentVersionExpiration'] or rule['NoncurrentVersionTransition']) Expiration { Days rule['Days'] } else if rule['Expiration'] if rule['Expiration']['Days'] Expiration { Days rule['Expiration']['Days'] } elsif rule['Expiration']['Date'] Expiration { Date rule['Expiration']['Date'].is_a?(Time) ? rule['Expiration']['Date'].utc.iso8601 : Time.parse(rule['Expiration']['Date']).utc.iso8601 } end end if rule['NoncurrentVersionExpiration'] if rule['NoncurrentVersionExpiration']['NoncurrentDays'] NoncurrentVersionExpiration { NoncurrentDays rule['NoncurrentVersionExpiration']['NoncurrentDays'] } elsif rule['NoncurrentVersionExpiration']['Date'] NoncurrentVersoinExpiration { if Date rule['NoncurrentVersionExpiration']['Date'].is_a?(Time) rule['NoncurrentVersionExpiration']['Date'].utc.iso8601 else Time.parse(rule['NoncurrentVersionExpiration']['Date']).utc.iso8601 end } end end if rule['Transition'] Transition { if rule['Transition']['Days'] Days rule['Transition']['Days'] elsif rule['Transition']['Date'] Date rule['Transition']['Date'].is_a?(Time) ? 
time.utc.iso8601 : Time.parse(time).utc.iso8601 end StorageClass rule['Transition']['StorageClass'].nil? ? 'GLACIER' : rule['Transition']['StorageClass'] } end if rule['NoncurrentVersionTransition'] NoncurrentVersionTransition { if rule['NoncurrentVersionTransition']['NoncurrentDays'] NoncurrentDays rule['NoncurrentVersionTransition']['NoncurrentDays'] elsif rule['NoncurrentVersionTransition']['Date'] Date rule['NoncurrentVersionTransition']['Date'].is_a?(Time) ? time.utc.iso8601 : Time.parse(time).utc.iso8601 end StorageClass rule['NoncurrentVersionTransition']['StorageClass'].nil? ? 'GLACIER' : rule['NoncurrentVersionTransition']['StorageClass'] } end end } end } end body = builder.to_xml body.gsub! /<([^<>]+)\/>/, '<\1>' request({ :body => body, :expects => 200, :headers => {'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(body)).chomp!, 'Content-Type' => 'application/xml'}, :bucket_name => bucket_name, :method => 'PUT', :query => {'lifecycle' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_logging.rb000066400000000000000000000052511437344660100250130ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change logging status for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param logging_status [Hash]: # * LoggingEnabled [Hash]: logging options or {} to disable # * Owner [Hash]: # * ID [String]: id of owner # * DisplayName [String]: display name of owner # * AccessControlList [Array]: # * Grantee [Hash]: # * DisplayName [String] Display name of grantee # * ID [String] Id of grantee # or # * EmailAddress [String] Email address of grantee # or # * URI [String] URI of group to grant access for # * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html def put_bucket_logging(bucket_name, logging_status) if logging_status['LoggingEnabled'].empty? 
data = <<-DATA DATA else data = <<-DATA #{logging_status['LoggingEnabled']['TargetBucket']} #{logging_status['LoggingEnabled']['TargetBucket']} DATA logging_status['AccessControlList'].each do |grant| data << " " type = case grant['Grantee'].keys.sort when ['DisplayName', 'ID'] 'CanonicalUser' when ['EmailAddress'] 'AmazonCustomerByEmail' when ['URI'] 'Group' end data << " " for key, value in grant['Grantee'] data << " <#{key}>#{value}" end data << " " data << " #{grant['Permission']}" data << " " end data << <<-DATA DATA end request({ :body => data, :expects => 200, :headers => {}, :bucket_name => bucket_name, :method => 'PUT', :query => {'logging' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_notification.rb000066400000000000000000000067451437344660100260640ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change notification configuration for an S3 bucket # # @param bucket_name [String] name of bucket to set notification configuration for # * notications [Hash]: # * Topics [Array] SNS topic configurations for the notification # * ID [String] Unique identifier for the configuration # * Topic [String] Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type # * Event [String] Bucket event for which to send notifications # * Queues [Array] SQS queue configurations for the notification # * ID [String] Unique identifier for the configuration # * Queue [String] Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type # * Event [String] Bucket event for which to send notifications # * CloudFunctions [Array] AWS Lambda notification configurations # * ID [String] Unique identifier for the configuration # * CloudFunction [String] Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type # * InvocationRole [String] IAM role ARN that Amazon S3 can assume to invoke the specified cloud function on your behalf # * Event [String] Bucket event for which to send notifications # # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTnotification.html # def put_bucket_notification(bucket_name, notification) builder = Nokogiri::XML::Builder.new do NotificationConfiguration do notification.fetch('Topics', []).each do |topic| TopicConfiguration do Id topic['Id'] Topic topic['Topic'] Event topic['Event'] end end notification.fetch('Queues', []).each do |queue| QueueConfiguration do Id queue['Id'] Queue queue['Queue'] Event queue['Event'] end end notification.fetch('CloudFunctions', []).each do |func| CloudFunctionConfiguration do Id func['Id'] CloudFunction func['CloudFunction'] InvocationRole func['InvocationRole'] Event func['Event'] end end end end body = builder.to_xml body.gsub!(/<([^<>]+)\/>/, '<\1>') request({ :body => body, :expects => 200, :headers => {'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(body)).chomp!, 'Content-Type' => 'application/xml'}, :bucket_name => bucket_name, :method => 'PUT', :query => {'notification' => nil} }) end end class Mock def put_bucket_notification(bucket_name, notification) response = Excon::Response.new if self.data[:buckets][bucket_name] self.data[:bucket_notifications][bucket_name] = notification response.status = 204 else response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) end response end end end end end 
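# Usage sketch for the request classes above: they are normally reached through a
# Fog::AWS::Storage connection rather than called directly. Assuming valid AWS
# credentials, an existing bucket, and an SNS topic (the bucket name and topic ARN
# below are hypothetical placeholders), configuring bucket notifications with
# put_bucket_notification might look like this:
#
#   storage = Fog::AWS::Storage.new(
#     :aws_access_key_id     => ENV['AWS_ACCESS_KEY_ID'],
#     :aws_secret_access_key => ENV['AWS_SECRET_ACCESS_KEY'],
#     :region                => 'us-east-1'
#   )
#   storage.put_bucket_notification('my-bucket', 'Topics' => [{
#     'Id'    => 'object-created-events',
#     'Topic' => 'arn:aws:sns:us-east-1:123456789012:my-topic',
#     'Event' => 's3:ObjectCreated:*'
#   }])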
fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_policy.rb000066400000000000000000000024361437344660100246650ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change bucket policy for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param policy [Hash] policy document # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html def put_bucket_policy(bucket_name, policy) request({ :body => Fog::JSON.encode(policy), :expects => 204, :headers => {}, :bucket_name => bucket_name, :method => 'PUT', :query => {'policy' => nil} }) end end class Mock #FIXME: You can't actually use the credentials for anything elsewhere in Fog #FIXME: Doesn't do any validation on the policy def put_bucket_policy(bucket_name, policy) if bucket = data[:buckets][bucket_name] bucket[:policy] = policy Excon::Response.new.tap do |response| response.body = { 'RequestId' => Fog::AWS::Mock.request_id } response.status = 200 end else raise Fog::AWS::IAM::NotFound.new("The bucket with name #{bucket_name} cannot be found.") end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_tagging.rb000066400000000000000000000027631437344660100250070ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change tag set for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param tags [Hash]: # * Key [String]: tag key # * Value [String]: tag value # # @see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTtagging.html def put_bucket_tagging(bucket_name, tags) tagging = tags.map do |k,v| "<Tag><Key>#{k}</Key><Value>#{v}</Value></Tag>" end.join("\n") data = <<-DATA <Tagging> <TagSet> #{tagging} </TagSet> </Tagging> DATA request({ :body => data, :expects => 204, :headers => {'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(data)).chomp!, 'Content-Type' => 'application/xml'}, :bucket_name => bucket_name, :method => 'PUT', :query => {'tagging' => nil} }) end end class Mock # :nodoc:all def put_bucket_tagging(bucket_name, tags) response = Excon::Response.new if self.data[:buckets][bucket_name] self.data[:bucket_tagging][bucket_name] = tags response.status = 204 else response.status = 404 raise(Excon::Errors.status_error({:expects => 204}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_versioning.rb000066400000000000000000000043321437344660100255470ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change versioning status for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param status [String] Status to change to in ['Enabled', 'Suspended'] # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html def put_bucket_versioning(bucket_name, status) data = <<-DATA <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Status>#{status}</Status> </VersioningConfiguration> DATA request({ :body => data, :expects => 200, :headers => {}, :bucket_name => bucket_name, :method => 'PUT', :query => {'versioning' => nil} }) end end class Mock def put_bucket_versioning(bucket_name, status) response = Excon::Response.new bucket = self.data[:buckets][bucket_name] if bucket if ['Enabled', 'Suspended'].include?(status) bucket[:versioning] = status response.status = 200 else response.status = 400 response.body = { 'Error' => { 'Code' => 'MalformedXML', 'Message' => 'The XML you provided was not well-formed or did not validate against our published schema', 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) end else response.status =
404 response.body = { 'Error' => { 'Code' => 'NoSuchBucket', 'Message' => 'The specified bucket does not exist', 'BucketName' => bucket_name, 'RequestId' => Fog::Mock.random_hex(16), 'HostId' => Fog::Mock.random_base64(65) } } raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_bucket_website.rb000066400000000000000000000064571437344660100250400ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change website configuration for an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param options [Hash] # @option options RedirectAllRequestsTo [String] Host name to redirect all requests to - if this is set, other options are ignored # @option options IndexDocument [String] suffix to append to requests for the bucket # @option options ErrorDocument [String] key to use for 4XX class errors # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html def put_bucket_website(bucket_name, options, options_to_be_deprecated = {}) options ||= {} # Method used to be called with the suffix as the second parameter. Warn user that this is not the case any more and move on if options.is_a?(String) Fog::Logger.deprecation("put_bucket_website with #{options.class} param is deprecated, use put_bucket_website('#{bucket_name}', :IndexDocument => '#{options}') instead [light_black](#{caller.first})[/]") options = { :IndexDocument => options } end # Parameter renamed from "key" to "ErrorDocument" if options_to_be_deprecated[:key] Fog::Logger.deprecation("put_bucket_website with three parameters is deprecated, use put_bucket_website('#{bucket_name}', :ErrorDocument => '#{options_to_be_deprecated[:key]}') instead [light_black](#{caller.first})[/]") options[:ErrorDocument] = options_to_be_deprecated[:key] end options.merge!(options_to_be_deprecated) { |key, o1, o2| o1 } data = "" if options[:RedirectAllRequestsTo] # Redirect precludes all other options data << <<-DATA #{options[:RedirectAllRequestsTo]} DATA else if options[:IndexDocument] data << <<-DATA #{options[:IndexDocument]} DATA end if options[:ErrorDocument] data << <<-DATA #{options[:ErrorDocument]} DATA end end data << '' request({ :body => data, :expects => 200, :headers => {}, :bucket_name => bucket_name, :method => 'PUT', :query => {'website' => nil} }) end end class Mock # :nodoc:all def put_bucket_website(bucket_name, suffix, options = {}) response = Excon::Response.new if self.data[:buckets][bucket_name] response.status = 200 else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_object.rb000066400000000000000000000110071437344660100232720ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Create an object in an S3 bucket # # @param bucket_name [String] Name of bucket to create object in # @param object_name [String] Name of object to create # @param data [File||String] File or String to create object from # @param options [Hash] # @option options Cache-Control [String] Caching behaviour # @option options Content-Disposition [String] Presentational information for the object # @option options Content-Encoding [String] Encoding of object data # @option options Content-Length [String] Size of object in bytes (defaults to object.read.length) # @option options Content-MD5 [String] Base64 encoded 128-bit MD5 digest of message # @option options 
Content-Type [String] Standard MIME type describing contents (defaults to MIME::Types.of.first) # @option options Expires [String] Cache expiry # @option options x-amz-acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] # @option options x-amz-storage-class [String] Default is 'STANDARD', set to 'REDUCED_REDUNDANCY' for non-critical, reproducable data # @option options x-amz-meta-#{name} Headers to be returned with object, note total size of request without body must be less than 8 KB. Each name, value pair must conform to US-ASCII. # @option options x-amz-server-side-encryption [String] Sets HTTP header for server-side encryption. Set to 'AES256' for SSE-S3 and SSE-C. Set to 'aws:kms' for SSE-KMS # @option options x-amz-server-side​-encryption​-customer-algorithm [String] Algorithm to use to when encrypting the object for SSE-C. # @option options x-amz-server-side​-encryption​-customer-key [String] Encryption customer key for SSE-C # @option options x-amz-server-side​-encryption​-customer-key-MD5 [String] MD5 digest of the encryption key for SSE-C # @option options x-amz-server-side-encryption-aws-kms-key-id [String] KMS key ID of the encryption key for SSE-KMS # @option options x-amz-server-side-encryption-context [String] Encryption context for SSE-KMS # # @return [Excon::Response] response: # * headers [Hash]: # * ETag [String] etag of new object # # @see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html def self.conforming_to_us_ascii!(keys, hash) keys.each do |k| v = hash[k] if !v.encode(::Encoding::US_ASCII, :undef => :replace).eql?(v) raise Excon::Errors::BadRequest.new("invalid #{k} header: value must be us-ascii") end end end def put_object(bucket_name, object_name, data, options = {}) data = Fog::Storage.parse_data(data) headers = data[:headers].merge!(options) self.class.conforming_to_us_ascii! headers.keys.grep(/^x-amz-meta-/), headers request({ :body => data[:body], :expects => 200, :headers => headers, :bucket_name => bucket_name, :object_name => object_name, :idempotent => true, :method => 'PUT', }) end end class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def put_object(bucket_name, object_name, data, options = {}) define_mock_acl(bucket_name, object_name, options) data = parse_mock_data(data) headers = data[:headers].merge!(options) Fog::AWS::Storage::Real.conforming_to_us_ascii! 
headers.keys.grep(/^x-amz-meta-/), headers bucket = verify_mock_bucket_exists(bucket_name) options['Content-Type'] ||= data[:headers]['Content-Type'] options['Content-Length'] ||= data[:headers]['Content-Length'] object = store_mock_object(bucket, object_name, data[:body], options) response = Excon::Response.new response.status = 200 response.headers = { 'Content-Length' => object['Content-Length'], 'Content-Type' => object['Content-Type'], 'ETag' => object['ETag'], 'Last-Modified' => object['Last-Modified'], } response.headers['x-amz-version-id'] = object['VersionId'] if object['VersionId'] != 'null' response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_object_acl.rb000066400000000000000000000055421437344660100241200ustar00rootroot00000000000000module Fog module AWS class Storage class Real require 'fog/aws/requests/storage/acl_utils' # Change access control list for an S3 object # # @param [String] bucket_name name of bucket to modify # @param [String] object_name name of object to get access control list for # @param [Hash] acl # * Owner [Hash] # * ID [String] id of owner # * DisplayName [String] display name of owner # * AccessControlList [Array] # * Grantee [Hash] # * DisplayName [String] Display name of grantee # * ID [String] Id of grantee # or # * EmailAddress [String] Email address of grantee # or # * URI [String] URI of group to grant access for # * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP] # @param [String] acl Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] # @param [Hash] options # @option options [String] versionId specify a particular version to retrieve # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html def put_object_acl(bucket_name, object_name, acl, options = {}) query = {'acl' => nil} if version_id = options.delete('versionId') query['versionId'] = version_id end data = "" headers = {} if acl.is_a?(Hash) data = Fog::AWS::Storage.hash_to_acl(acl) else if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') end headers['x-amz-acl'] = acl end headers['Content-MD5'] = Base64.encode64(OpenSSL::Digest::MD5.digest(data)).strip headers['Content-Type'] = 'application/json' headers['Date'] = Fog::Time.now.to_date_header request({ :body => data, :expects => 200, :headers => headers, :bucket_name => bucket_name, :object_name => object_name, :method => 'PUT', :query => query }) end end class Mock def put_object_acl(bucket_name, object_name, acl, options = {}) if acl.is_a?(Hash) self.data[:acls][:object][bucket_name][object_name] = Fog::AWS::Storage.hash_to_acl(acl) else if !['private', 'public-read', 'public-read-write', 'authenticated-read'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') end self.data[:acls][:object][bucket_name][object_name] = acl end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_object_tagging.rb000066400000000000000000000023331437344660100247740ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change tag set for an S3 object # # @param bucket_name [String] Name of bucket to modify object in # @param object_name [String] Name of object to modify # # @param tags [Hash]: # * Key [String]: tag key # * Value [String]: tag value # # @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html def put_object_tagging(bucket_name, object_name, tags) 
tagging = tags.map do |k,v| "<Tag><Key>#{k}</Key><Value>#{v}</Value></Tag>" end.join("\n") data = <<-DATA <Tagging> <TagSet> #{tagging} </TagSet> </Tagging> DATA request({ :body => data, :expects => 200, :headers => {'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(data)).chomp!, 'Content-Type' => 'application/xml'}, :bucket_name => bucket_name, :object_name => object_name, :method => 'PUT', :query => {'tagging' => nil} }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_object_url.rb000066400000000000000000000023101437344660100241520ustar00rootroot00000000000000module Fog module AWS class Storage module PutObjectUrl def put_object_url(bucket_name, object_name, expires, headers = {}, options = {}) unless bucket_name raise ArgumentError.new('bucket_name is required') end unless object_name raise ArgumentError.new('object_name is required') end signed_url(options.merge({ :bucket_name => bucket_name, :object_name => object_name, :method => 'PUT', :headers => headers, }), expires) end end class Real # Get an expiring object url from S3 for putting an object # # @param bucket_name [String] Name of bucket containing object # @param object_name [String] Name of object to get expiring url for # @param expires [Time] An expiry time for this url # # @return [Excon::Response] response: # * body [String] url for object # # @see http://docs.amazonwebservices.com/AmazonS3/latest/dev/S3_QSAuth.html include PutObjectUrl end class Mock # :nodoc:all include PutObjectUrl end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/put_request_payment.rb000066400000000000000000000024241437344660100252540ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Change who pays for requests to an S3 bucket # # @param bucket_name [String] name of bucket to modify # @param payer [String] valid values are BucketOwner or Requester # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html def put_request_payment(bucket_name, payer) data = <<-DATA <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> <Payer>#{payer}</Payer> </RequestPaymentConfiguration> DATA request({ :body => data, :expects => 200, :headers => {}, :bucket_name => bucket_name, :method => 'PUT', :query => {'requestPayment' => nil} }) end end class Mock # :nodoc:all def put_request_payment(bucket_name, payer) response = Excon::Response.new if bucket = self.data[:buckets][bucket_name] response.status = 200 bucket['Payer'] = payer else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/shared_mock_methods.rb000066400000000000000000000061771437344660100251460ustar00rootroot00000000000000module Fog module AWS class Storage module SharedMockMethods def define_mock_acl(bucket_name, object_name, options) acl = options['x-amz-acl'] || 'private' if !['private', 'public-read', 'public-read-write', 'authenticated-read', 'bucket-owner-read', 'bucket-owner-full-control'].include?(acl) raise Excon::Errors::BadRequest.new('invalid x-amz-acl') else self.data[:acls][:object][bucket_name] ||= {} self.data[:acls][:object][bucket_name][object_name] = self.class.acls(acl) end end def parse_mock_data(data) data = Fog::Storage.parse_data(data) unless data[:body].is_a?(String) data[:body].rewind if data[:body].eof?
data[:body] = data[:body].read end data end def verify_mock_bucket_exists(bucket_name) if (bucket = self.data[:buckets][bucket_name]) return bucket end response = Excon::Response.new response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end def get_upload_info(bucket_name, upload_id, delete = false) if delete upload_info = self.data[:multipart_uploads][bucket_name].delete(upload_id) else upload_info = self.data[:multipart_uploads][bucket_name][upload_id] end if !upload_info response = Excon::Response.new response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end upload_info end def store_mock_object(bucket, object_name, body, options) object = { :body => body, 'Content-Type' => options['Content-Type'], 'ETag' => OpenSSL::Digest::MD5.hexdigest(body), 'Key' => object_name, 'Last-Modified' => Fog::Time.now.to_date_header, 'Content-Length' => options['Content-Length'], 'StorageClass' => options['x-amz-storage-class'] || 'STANDARD', 'VersionId' => bucket[:versioning] == 'Enabled' ? Fog::Mock.random_base64(32) : 'null' } for key, value in options case key when 'Cache-Control', 'Content-Disposition', 'Content-Encoding', 'Content-MD5', 'Expires', /^x-amz-meta-/ object[key] = value end end if bucket[:versioning] bucket[:objects][object_name] ||= [] # When versioning is suspended, putting an object will create a new 'null' version if the latest version # is a value other than 'null', otherwise it will replace the latest version. if bucket[:versioning] == 'Suspended' && bucket[:objects][object_name].first['VersionId'] == 'null' bucket[:objects][object_name].shift end bucket[:objects][object_name].unshift(object) else bucket[:objects][object_name] = [object] end object end end end end end fog-aws-3.18.0/lib/fog/aws/requests/storage/sync_clock.rb000066400000000000000000000010431437344660100232620ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Sync clock against S3 to avoid skew errors # def sync_clock response = begin Excon.get("#{@scheme}://#{@host}") rescue Excon::Errors::HTTPStatusError => error error.response end Fog::Time.now = Time.parse(response.headers['Date']) end end # Real class Mock # :nodoc:all def sync_clock true end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/storage/upload_part.rb000066400000000000000000000042441437344660100234530ustar00rootroot00000000000000module Fog module AWS class Storage class Real # Upload a part for a multipart upload # # @param bucket_name [String] Name of bucket to add part to # @param object_name [String] Name of object to add part to # @param upload_id [String] Id of upload to add part to # @param part_number [String] Index of part in upload # @param data [File||String] Content for part # @param options [Hash] # @option options Content-MD5 [String] Base64 encoded 128-bit MD5 digest of message # # @return [Excon::Response] response # * headers [Hash]: # * ETag [String] etag of new object (will be needed to complete upload) # # @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html # def upload_part(bucket_name, object_name, upload_id, part_number, data, options = {}) data = Fog::Storage.parse_data(data) headers = options headers['Content-Length'] = data[:headers]['Content-Length'] request({ :body => data[:body], :expects => 200, :idempotent => true, :headers => headers, :bucket_name => bucket_name, :object_name => object_name, :method => 'PUT', :query => {'uploadId' => upload_id, 'partNumber' => 
part_number} }) end end # Real class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def upload_part(bucket_name, object_name, upload_id, part_number, data, options = {}) data = parse_mock_data(data) verify_mock_bucket_exists(bucket_name) upload_info = get_upload_info(bucket_name, upload_id) upload_info[:parts][part_number] = data[:body] response = Excon::Response.new response.status = 200 # just use the part number as the ETag, for simplicity response.headers["ETag"] = part_number.to_s response end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/storage/upload_part_copy.rb000066400000000000000000000120351437344660100245020ustar00rootroot00000000000000module Fog module AWS class Storage # From https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ALLOWED_UPLOAD_PART_OPTIONS = %i( x-amz-copy-source x-amz-copy-source-if-match x-amz-copy-source-if-modified-since x-amz-copy-source-if-none-match x-amz-copy-source-if-unmodified-since x-amz-copy-source-range x-amz-copy-source-server-side-encryption-customer-algorithm x-amz-copy-source-server-side-encryption-customer-key x-amz-copy-source-server-side-encryption-customer-key-MD5 x-amz-expected-bucket-owner x-amz-request-payer x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 x-amz-source-expected-bucket-owner ).freeze class Real require 'fog/aws/parsers/storage/upload_part_copy_object' # Upload a part for a multipart copy # # @param target_bucket_name [String] Name of bucket to create copy in # @param target_object_name [String] Name for new copy of object # @param upload_id [String] Id of upload to add part to # @param part_number [String] Index of part in upload # @param options [Hash]: # @option options [String] x-amz-metadata-directive Specifies whether to copy metadata from source or replace with data in request. 
Must be in ['COPY', 'REPLACE'] # @option options [String] x-amz-copy_source-if-match Copies object if its etag matches this value # @option options [Time] x-amz-copy_source-if-modified_since Copies object it it has been modified since this time # @option options [String] x-amz-copy_source-if-none-match Copies object if its etag does not match this value # @option options [Time] x-amz-copy_source-if-unmodified-since Copies object it it has not been modified since this time # @option options [Time] x-amz-copy-source-range Specifes the range of bytes to copy from the source object # # @return [Excon::Response] # * body [Hash]: # * ETag [String] - etag of new object # * LastModified [Time] - date object was last modified # # @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html # def upload_part_copy(target_bucket_name, target_object_name, upload_id, part_number, options = {}) headers = options request({ :expects => 200, :idempotent => true, :headers => headers, :bucket_name => target_bucket_name, :object_name => target_object_name, :method => 'PUT', :query => {'uploadId' => upload_id, 'partNumber' => part_number}, :parser => Fog::Parsers::AWS::Storage::UploadPartCopyObject.new, }) end end # Real class Mock # :nodoc:all require 'fog/aws/requests/storage/shared_mock_methods' include Fog::AWS::Storage::SharedMockMethods def upload_part_copy(target_bucket_name, target_object_name, upload_id, part_number, options = {}) validate_options!(options) copy_source = options['x-amz-copy-source'] copy_range = options['x-amz-copy-source-range'] raise 'No x-amz-copy-source header provided' unless copy_source raise 'No x-amz-copy-source-range header provided' unless copy_range source_bucket_name, source_object_name = copy_source.split('/', 2) verify_mock_bucket_exists(source_bucket_name) source_bucket = self.data[:buckets][source_bucket_name] source_object = source_bucket && source_bucket[:objects][source_object_name] && source_bucket[:objects][source_object_name].first upload_info = get_upload_info(target_bucket_name, upload_id) response = Excon::Response.new if source_object start_pos, end_pos = byte_range(copy_range, source_object[:body].length) upload_info[:parts][part_number] = source_object[:body][start_pos..end_pos] response.status = 200 response.body = { # just use the part number as the ETag, for simplicity 'ETag' => part_number.to_i, 'LastModified' => Time.parse(source_object['Last-Modified']) } response else response.status = 404 raise(Excon::Errors.status_error({:expects => 200}, response)) end end def byte_range(range, size) matches = range.match(/bytes=(\d*)-(\d*)/) return nil unless matches end_pos = [matches[2].to_i, size].min [matches[1].to_i, end_pos] end def validate_options!(options) options.keys.each do |key| raise "Invalid UploadPart option: #{key}" unless ::Fog::AWS::Storage::ALLOWED_UPLOAD_PART_OPTIONS.include?(key.to_sym) end end end # Mock end # Storage end # AWS end # Fog fog-aws-3.18.0/lib/fog/aws/requests/sts/000077500000000000000000000000001437344660100177555ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/sts/assume_role.rb000066400000000000000000000055451437344660100226310ustar00rootroot00000000000000module Fog module AWS class STS class Real require 'fog/aws/parsers/sts/assume_role' # Assume Role # # ==== Parameters # * role_session_name<~String> - An identifier for the assumed role. # * role_arn<~String> - The ARN of the role the caller is assuming. 
# * external_id<~String> - An optional unique identifier required by the assuming role's trust identity. # * policy<~String> - An optional JSON policy document # * duration<~Integer> - Duration (of seconds) for the assumed role credentials to be valid (default 3600) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Arn'<~String>: The ARN of the assumed role/user # * 'AccessKeyId'<~String>: The AWS access key of the temporary credentials for the assumed role # * 'SecretAccessKey'<~String>: The AWS secret key of the temporary credentials for the assumed role # * 'SessionToken'<~String>: The AWS session token of the temporary credentials for the assumed role # * 'Expiration'<~Time>: The expiration time of the temporary credentials for the assumed role # # ==== See Also # http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html # def assume_role(role_session_name, role_arn, external_id=nil, policy=nil, duration=3600) request({ 'Action' => 'AssumeRole', 'RoleSessionName' => role_session_name, 'RoleArn' => role_arn, 'Policy' => policy && Fog::JSON.encode(policy), 'DurationSeconds' => duration, 'ExternalId' => external_id, :idempotent => true, :parser => Fog::Parsers::AWS::STS::AssumeRole.new }) end end class Mock def assume_role(role_session_name, role_arn, external_id=nil, policy=nil, duration=3600) account_id = /[0-9]{12}/.match(role_arn) request_id = Fog::AWS::Mock.request_id Excon::Response.new.tap do |response| response.status = 200 response.body = { 'Arn' => "arn:aws:sts::#{account_id}:assumed-role/#{role_session_name}/#{role_session_name}", 'AssumedRoleId' => "#{Fog::Mock.random_base64(21)}:#{role_session_name}", 'AccessKeyId' => Fog::Mock.random_base64(20), 'SecretAccessKey' => Fog::Mock.random_base64(40), 'SessionToken' => Fog::Mock.random_base64(580), 'Expiration' => (Time.now + duration).utc.iso8601, 'RequestId' => request_id, } response.headers = { 'x-amzn-RequestId' => request_id, } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sts/assume_role_with_saml.rb000066400000000000000000000037431437344660100246760ustar00rootroot00000000000000module Fog module AWS class STS class Real require 'fog/aws/parsers/sts/assume_role_with_saml' # Assume Role with SAML # # ==== Parameters # * role_arn<~String> - The ARN of the role the caller is assuming. # * principal_arn<~String> - The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP. # * saml_assertion<~String> - The base-64 encoded SAML authentication response provided by the IdP. 
# * policy<~String> - An optional JSON policy document # * duration<~Integer> - Duration (of seconds) for the assumed role credentials to be valid (default 3600) # # ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'Arn'<~String>: The ARN of the assumed role/user # * 'AccessKeyId'<~String>: The AWS access key of the temporary credentials for the assumed role # * 'SecretAccessKey'<~String>: The AWS secret key of the temporary credentials for the assumed role # * 'SessionToken'<~String>: The AWS session token of the temporary credentials for the assumed role # * 'Expiration'<~Time>: The expiration time of the temporary credentials for the assumed role # # ==== See Also # http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html # def assume_role_with_saml(role_arn, principal_arn, saml_assertion, policy=nil, duration=3600) request_unsigned({ 'Action' => 'AssumeRoleWithSAML', 'RoleArn' => role_arn, 'PrincipalArn' => principal_arn, 'SAMLAssertion' => saml_assertion, 'Policy' => policy && Fog::JSON.encode(policy), 'DurationSeconds' => duration, :idempotent => true, :parser => Fog::Parsers::AWS::STS::AssumeRoleWithSAML.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sts/assume_role_with_web_identity.rb000066400000000000000000000032671437344660100264310ustar00rootroot00000000000000module Fog module AWS class STS class Real require 'fog/aws/parsers/sts/assume_role_with_web_identity' def assume_role_with_web_identity(role_arn, web_identity_token, role_session_name, options={}) request_unsigned( 'Action' => 'AssumeRoleWithWebIdentity', 'RoleArn' => role_arn, 'RoleSessionName' => role_session_name, 'WebIdentityToken' => web_identity_token, 'DurationSeconds' => options[:duration] || 3600, :idempotent => true, :parser => Fog::Parsers::AWS::STS::AssumeRoleWithWebIdentity.new ) end end class Mock def assume_role_with_web_identity(role_arn, web_identity_token, role_session_name, options={}) role = options[:iam].data[:roles].values.detect { |r| r[:arn] == role_arn } Excon::Response.new.tap do |response| response.body = { 'AssumedRoleUser' => { 'Arn' => role[:arn], 'AssumedRoleId' => role[:role_id] }, 'Audience' => 'fog', 'Credentials' => { 'AccessKeyId' => Fog::AWS::Mock.key_id(20), 'Expiration' => options[:expiration] || Time.now + 3600, 'SecretAccessKey' => Fog::AWS::Mock.key_id(40), 'SessionToken' => Fog::Mock.random_hex(8) }, 'Provider' => 'fog', 'SubjectFromWebIdentityToken' => Fog::Mock.random_hex(8) } response.status = 200 end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sts/get_federation_token.rb000066400000000000000000000046431437344660100244700ustar00rootroot00000000000000module Fog module AWS class STS class Real require 'fog/aws/parsers/sts/get_session_token' # Get federation token # # ==== Parameters # * name<~String>: The name of the federated user. # Minimum length of 2. Maximum length of 32. # * policy<~String>: Optional policy that specifies the permissions # that are granted to the federated user # Minimum length of 1. Maximum length of 2048. # * duration<~Integer>: Optional duration, in seconds, that the session # should last. 
# ==== Returns # * response<~Excon::Response>: # * body<~Hash>: # * 'SessionToken'<~String> - # * 'SecretAccessKey'<~String> - # * 'Expiration'<~String> - # * 'AccessKeyId'<~String> - # * 'Arn'<~String> - # * 'FederatedUserId'<~String> - # * 'PackedPolicySize'<~String> - # * 'RequestId'<~String> - Id of the request # # ==== See Also # http://docs.aws.amazon.com/STS/latest/APIReference/API_GetFederationToken.html def get_federation_token(name, policy, duration=43200) request({ 'Action' => 'GetFederationToken', 'Name' => name, 'Policy' => Fog::JSON.encode(policy), 'DurationSeconds' => duration, :idempotent => true, :parser => Fog::Parsers::AWS::STS::GetSessionToken.new }) end end class Mock def get_federation_token(name, policy, duration=43200) Excon::Response.new.tap do |response| response.status = 200 response.body = { 'SessionToken' => Fog::Mock.random_base64(580), 'SecretAccessKey' => Fog::Mock.random_base64(40), 'Expiration' => (DateTime.now + duration).strftime('%FT%TZ'), 'AccessKeyId' => Fog::AWS::Mock.key_id(20), 'Arn' => "arn:aws:sts::#{Fog::AWS::Mock.owner_id}:federated-user/#{name}", 'FederatedUserId' => "#{Fog::AWS::Mock.owner_id}:#{name}", 'PackedPolicySize' => Fog::Mock.random_numbers(2), 'RequestId' => Fog::AWS::Mock.request_id } end end end end end end fog-aws-3.18.0/lib/fog/aws/requests/sts/get_session_token.rb000066400000000000000000000006721437344660100240310ustar00rootroot00000000000000module Fog module AWS class STS class Real require 'fog/aws/parsers/sts/get_session_token' def get_session_token(duration=43200) request({ 'Action' => 'GetSessionToken', 'DurationSeconds' => duration, :idempotent => true, :parser => Fog::Parsers::AWS::STS::GetSessionToken.new }) end end end end end fog-aws-3.18.0/lib/fog/aws/requests/support/000077500000000000000000000000001437344660100206605ustar00rootroot00000000000000fog-aws-3.18.0/lib/fog/aws/requests/support/describe_trusted_advisor_check_result.rb000066400000000000000000000020511437344660100310170ustar00rootroot00000000000000module Fog module AWS class Support class Real # Describe Trusted Advisor Check Result # http://docs.aws.amazon.com/awssupport/latest/APIReference/API_DescribeTrustedAdvisorCheckResult.html # ==== Parameters # * checkId <~String> - Id of the check obtained from #describe_trusted_advisor_checks # * language <~String> - Language to return. Supported values are 'en' and 'jp' # ==== Returns # * response<~Excon::Response>: # * body<~Hash> def describe_trusted_advisor_check_result(options={}) request( 'Action' => 'DescribeTrustedAdvisorCheckResult', 'checkId' => options[:id], 'language' => options[:language] || 'en' ) end end class Mock def describe_trusted_advisor_check_result(options={}) response = Excon::Response.new response.body = {'result' => self.data[:trusted_advisor_check_results][options[:id]]} response end end end end end fog-aws-3.18.0/lib/fog/aws/requests/support/describe_trusted_advisor_checks.rb000066400000000000000000000015671437344660100276170ustar00rootroot00000000000000module Fog module AWS class Support class Real # Describe Trusted Advisor Checks # http://docs.aws.amazon.com/awssupport/latest/APIReference/API_DescribeTrustedAdvisorChecks.html # ==== Parameters # * language <~String> - Language to return. 
Supported values are 'en' and 'jp' # ==== Returns # * response<~Excon::Response>: # * body<~Hash> def describe_trusted_advisor_checks(options={}) request( 'Action' => 'DescribeTrustedAdvisorChecks', 'language' => options[:language] || 'en' ) end end class Mock def describe_trusted_advisor_checks(options={}) response = Excon::Response.new response.body = {'checks' => self.data[:trusted_advisor_checks].values} response end end end end end fog-aws-3.18.0/lib/fog/aws/service_mapper.rb000066400000000000000000000111651437344660100206260ustar00rootroot00000000000000module Fog module AWS # @api private # # This is a temporary lookup helper for extracting into external module. # # Cleaner provider/service registration will replace this code. # class ServiceMapper def self.class_for(key) case key when :auto_scaling Fog::AWS::AutoScaling when :beanstalk Fog::AWS::ElasticBeanstalk when :cdn Fog::AWS::CDN when :cloud_formation Fog::AWS::CloudFormation when :cloud_watch Fog::AWS::CloudWatch when :compute Fog::AWS::Compute when :data_pipeline Fog::AWS::DataPipeline when :ddb, :dynamodb Fog::AWS::DynamoDB when :dns Fog::AWS::DNS when :elasticache Fog::AWS::Elasticache when :elb Fog::AWS::ELB when :emr Fog::AWS::EMR when :glacier Fog::AWS::Glacier when :iam Fog::AWS::IAM when :redshift Fog::AWS::Redshift when :sdb, :simpledb Fog::AWS::SimpleDB when :ses Fog::AWS::SES when :sqs Fog::AWS::SQS when :eu_storage, :storage Fog::AWS::Storage when :rds Fog::AWS::RDS when :sns Fog::AWS::SNS when :sts Fog::AWS::STS else # @todo Replace most instances of ArgumentError with NotImplementedError # @todo For a list of widely supported Exceptions, see: # => http://www.zenspider.com/Languages/Ruby/QuickRef.html#35 raise ArgumentError, "Unsupported #{self} service: #{key}" end end def self.[](service) @@connections ||= Hash.new do |hash, key| hash[key] = case key when :auto_scaling Fog::AWS::AutoScaling.new when :beanstalk Fog::AWS::ElasticBeanstalk.new when :cdn Fog::Logger.warning("AWS[:cdn] is not recommended, use CDN[:aws] for portability") Fog::CDN.new(:provider => 'AWS') when :cloud_formation Fog::AWS::CloudFormation.new when :cloud_watch Fog::AWS::CloudWatch.new when :compute Fog::Logger.warning("AWS[:compute] is not recommended, use Compute[:aws] for portability") Fog::Compute.new(:provider => 'AWS') when :data_pipeline Fog::AWS::DataPipeline.new when :ddb, :dynamodb Fog::AWS::DynamoDB.new when :dns Fog::Logger.warning("AWS[:dns] is not recommended, use DNS[:aws] for portability") Fog::DNS.new(:provider => 'AWS') when :elasticache Fog::AWS::Elasticache.new when :elb Fog::AWS::ELB.new when :emr Fog::AWS::EMR.new when :glacier Fog::AWS::Glacier.new when :iam Fog::AWS::IAM.new when :redshift Fog::AWS::Redshift.new when :rds Fog::AWS::RDS.new when :eu_storage Fog::Storage.new(:provider => 'AWS', :region => 'eu-west-1') when :sdb, :simpledb Fog::AWS::SimpleDB.new when :ses Fog::AWS::SES.new when :sqs Fog::AWS::SQS.new when :storage Fog::Logger.warning("AWS[:storage] is not recommended, use Storage[:aws] for portability") Fog::Storage.new(:provider => 'AWS') when :sns Fog::AWS::SNS.new when :sts Fog::AWS::STS.new else raise ArgumentError, "Unrecognized service: #{key.inspect}" end end @@connections[service] end def self.services Fog::AWS.services end end end end fog-aws-3.18.0/lib/fog/aws/ses.rb000066400000000000000000000122301437344660100164060ustar00rootroot00000000000000module Fog module AWS class SES < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class InvalidParameterError < Fog::Errors::Error; end 
class MessageRejected < Fog::Errors::Error; end requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/ses' request :delete_verified_email_address request :verify_email_address request :verify_domain_identity request :get_send_quota request :get_send_statistics request :list_verified_email_addresses request :send_email request :send_raw_email class Mock def initialize(options={}) Fog::Mock.not_implemented end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to SES # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # ses = SES.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'us-east-1' and etc. # # ==== Returns # * SES object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.ses' @connection_options = options[:connection_options] || {} options[:region] ||= 'us-east-1' @host = options[:host] || "email.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @hmac = Fog::HMAC.new('sha256', @aws_secret_access_key) end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) headers = { 'Content-Type' => 'application/x-www-form-urlencoded', 'Date' => Fog::Time.now.to_date_header, } headers['x-amz-security-token'] = @aws_session_token if @aws_session_token #AWS3-HTTPS AWSAccessKeyId=, Algorithm=HmacSHA256, Signature= headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' headers['X-Amzn-Authorization'] << 'AWSAccessKeyId=' << @aws_access_key_id headers['X-Amzn-Authorization'] << ', Algorithm=HmacSHA256' headers['X-Amzn-Authorization'] << ', Signature=' << Base64.encode64(@hmac.sign(headers['Date'])).chomp! body = '' for key in params.keys.sort unless (value = params[key]).nil? body << "#{key}=#{CGI.escape(value.to_s).gsub(/\+/, '%20')}&" end end body.chop! # remove trailing '&' if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => headers, :idempotent => idempotent, :host => @host, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
raise case match[:code] when 'MessageRejected' Fog::AWS::SES::MessageRejected.slurp(error, match[:message]) when 'InvalidParameterValue' Fog::AWS::SES::InvalidParameterError.slurp(error, match[:message]) else Fog::AWS::SES::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/signaturev4.rb000066400000000000000000000100001437344660100200600ustar00rootroot00000000000000# See http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html module Fog module AWS class SignatureV4 ALGORITHM = 'AWS4-HMAC-SHA256' def initialize(aws_access_key_id, secret_key, region, service) @region = region @service = service @aws_access_key_id = aws_access_key_id @hmac = Fog::HMAC.new('sha256', 'AWS4' + secret_key) end def signature_parameters(params, date, body_sha = nil) params = params.dup.merge(:query => params[:query].merge( 'X-Amz-Algorithm' => ALGORITHM, 'X-Amz-Credential' => "#{@aws_access_key_id}/#{credential_scope(date)}", 'X-Amz-SignedHeaders' => signed_headers(params[:headers]) )) signature_components(params, date, body_sha) end def signature_header(params, date, body_sha = nil) components_to_header(signature_components(params, date, body_sha)) end def sign(params, date) #legacy method name signature_header(params, date) end def components_to_header components "#{components['X-Amz-Algorithm']} Credential=#{components['X-Amz-Credential']}, SignedHeaders=#{components['X-Amz-SignedHeaders']}, Signature=#{components['X-Amz-Signature']}" end def signature_components(params, date, body_sha) canonical_request = <<-DATA #{params[:method].to_s.upcase} #{canonical_path(params[:path])} #{canonical_query_string(params[:query])} #{canonical_headers(params[:headers])} #{signed_headers(params[:headers])} #{body_sha || OpenSSL::Digest::SHA256.hexdigest(params[:body] || '')} DATA canonical_request.chop! string_to_sign = <<-DATA #{ALGORITHM} #{date.to_iso8601_basic} #{credential_scope(date)} #{OpenSSL::Digest::SHA256.hexdigest(canonical_request)} DATA string_to_sign.chop! signature = derived_hmac(date).sign(string_to_sign) { 'X-Amz-Algorithm' => ALGORITHM, 'X-Amz-Credential' => "#{@aws_access_key_id}/#{credential_scope(date)}", 'X-Amz-SignedHeaders' => signed_headers(params[:headers]), 'X-Amz-Signature' => signature.unpack('H*').first } end def derived_hmac(date) kDate = @hmac.sign(date.utc.strftime('%Y%m%d')) kRegion = Fog::HMAC.new('sha256', kDate).sign(@region) kService = Fog::HMAC.new('sha256', kRegion).sign(@service) kSigning = Fog::HMAC.new('sha256', kService).sign('aws4_request') Fog::HMAC.new('sha256', kSigning) end def credential_scope(date) "#{date.utc.strftime('%Y%m%d')}/#{@region}/#{@service}/aws4_request" end protected def canonical_path(path) unless @service == 's3' #S3 implements signature v4 different - paths are not canonialized #leading and trailing repeated slashes are collapsed, but not ones that appear elsewhere path = path.gsub(%r{\A/+},'/').gsub(%r{/+\z},'/') components = path.split('/',-1) path = components.inject([]) do |acc, component| case component when '.' #canonicalize by removing . when '..' then acc.pop#canonicalize by reducing .. else acc << component end acc end.join('/') end path.empty? ? 
'/' : path end def canonical_query_string(query) canonical_query_string = [] for key in (query || {}).keys.sort_by {|k| k.to_s} component = "#{Fog::AWS.escape(key.to_s)}=#{Fog::AWS.escape(query[key].to_s)}" canonical_query_string << component end canonical_query_string.join("&") end def canonical_headers(headers) canonical_headers = '' for key in headers.keys.sort_by {|k| k.to_s.downcase} canonical_headers << "#{key.to_s.downcase}:#{headers[key].to_s.strip}\n" end canonical_headers end def signed_headers(headers) headers.keys.map {|key| key.to_s.downcase}.sort.join(';') end end end end fog-aws-3.18.0/lib/fog/aws/simpledb.rb000066400000000000000000000157201437344660100174220ustar00rootroot00000000000000module Fog module AWS class SimpleDB < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :nil_string, :path, :port, :scheme, :persistent, :region, :aws_session_token, :use_iam_profile, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/simpledb' request :batch_put_attributes request :create_domain request :delete_attributes request :delete_domain request :domain_metadata request :get_attributes request :list_domains request :put_attributes request :select class Mock def self.data @data ||= Hash.new do |hash, key| hash[key] = { :domains => {} } end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) end def data self.class.data[@aws_access_key_id] end def reset_data self.class.data.delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to SimpleDB # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # sdb = SimpleDB.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * SimpleDB object with connection to aws. 
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @connection_options = options[:connection_options] || {} @nil_string = options[:nil_string]|| 'nil' @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.simpledb' options[:region] ||= 'us-east-1' @host = options[:host] || case options[:region] when 'us-east-1' 'sdb.amazonaws.com' else "sdb.#{options[:region]}.amazonaws.com" end @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @hmac = Fog::HMAC.new('sha256', @aws_secret_access_key) end def encode_attributes(attributes, replace_attributes = [], expected_attributes = {}) encoded_attributes = {} if attributes expected_attributes.keys.each_with_index do |exkey, index| for value in Array(expected_attributes[exkey]) encoded_attributes["Expected.#{index}.Name"] = exkey.to_s encoded_attributes["Expected.#{index}.Value"] = sdb_encode(value) end end index = 0 for key in attributes.keys for value in Array(attributes[key]) encoded_attributes["Attribute.#{index}.Name"] = key.to_s if replace_attributes.include?(key) encoded_attributes["Attribute.#{index}.Replace"] = 'true' end encoded_attributes["Attribute.#{index}.Value"] = sdb_encode(value) index += 1 end end end encoded_attributes end def encode_attribute_names(attributes) Fog::AWS.indexed_param('AttributeName', attributes.map {|attribute| attributes.to_s}) end def encode_batch_attributes(items, replace_attributes = Hash.new([])) encoded_attributes = {} if items item_index = 0 for item_key in items.keys encoded_attributes["Item.#{item_index}.ItemName"] = item_key.to_s attribute_index = 0 for attribute_key in items[item_key].keys for value in Array(items[item_key][attribute_key]) encoded_attributes["Item.#{item_index}.Attribute.#{attribute_index}.Name"] = attribute_key.to_s if replace_attributes[item_key].include?(attribute_key) encoded_attributes["Item.#{item_index}.Attribute.#{attribute_index}.Replace"] = 'true' end encoded_attributes["Item.#{item_index}.Attribute.#{attribute_index}.Value"] = sdb_encode(value) attribute_index += 1 end end item_index += 1 end end encoded_attributes end def reload @connection.reset end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body = Fog::AWS.signed_params( params, { :aws_access_key_id => @aws_access_key_id, :aws_session_token => @aws_session_token, :hmac => @hmac, :host => @host, :path => @path, :port => @port, :version => '2009-04-15' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, idempotent, parser) end else _request(body, idempotent, parser) end end def _request(body, idempotent, parser) @connection.request({ :body => body, :expects => 200, :headers => { 'Content-Type' => 'application/x-www-form-urlencoded; charset=utf-8' }, :idempotent => idempotent, :method => 'POST', :parser => parser }) end def sdb_encode(value) if value.nil? 
@nil_string else value.to_s end end end end end end fog-aws-3.18.0/lib/fog/aws/sns.rb000066400000000000000000000117651437344660100164330ustar00rootroot00000000000000module Fog module AWS class SNS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :persistent, :region, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/sns' request :add_permission request :confirm_subscription request :create_topic request :delete_topic request :get_topic_attributes request :list_subscriptions request :list_subscriptions_by_topic request :list_topics request :publish request :remove_permission request :set_topic_attributes request :subscribe request :unsubscribe model_path 'fog/aws/models/sns' model :topic collection :topics class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :topics => {}, :subscriptions => {}, :permissions => {}, } end end end attr_reader :region attr_writer :account_id def initialize(options={}) @region = options[:region] || 'us-east-1' @aws_access_key_id = options[:aws_access_key_id] @account_id = Fog::AWS::Mock.owner_id @module = "sns" Fog::AWS.validate_region!(@region) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to SNS # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # sns = SNS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * SNS object with connection to AWS. 
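      #
      # ==== Regional example (illustrative)
      # A minimal sketch, not part of the original docs: it assumes valid
      # credentials; :region is optional and defaults to 'us-east-1'. The
      # topics collection and create_topic request mentioned below are the
      # ones registered for this service above.
      #   sns = Fog::AWS::SNS.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key,
      #     :region                => 'us-west-2'
      #   )
      #   sns.topics   # collection declared via `collection :topics`
      #   # sns.create_topic(...) -- see the create_topic request definition for arguments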
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.sns' options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "sns.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end attr_reader :region def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'sns') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :method => 'POST', :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) end end end end end fog-aws-3.18.0/lib/fog/aws/sqs.rb000066400000000000000000000122761437344660100164340ustar00rootroot00000000000000module Fog module AWS class SQS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :region, :host, :path, :port, :scheme, :persistent, :aws_session_token, :use_iam_profile, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/sqs' request :change_message_visibility request :create_queue request :delete_message request :delete_queue request :get_queue_attributes request :list_queues request :receive_message request :send_message request :set_queue_attributes class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :owner_id => Fog::AWS::Mock.owner_id, :queues => {} } end end end def self.reset @data = nil end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) @region = options[:region] || 'us-east-1' Fog::AWS.validate_region!(@region) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to SQS # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # sqs = SQS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * 
options<~Hash> - config arguments for connection. Defaults to {}. # * region<~String> - optional region to use. For instance, 'eu-west-1', 'us-east-1' and etc. # # ==== Returns # * SQS object with connection to AWS. def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.sqs' @connection_options = options[:connection_options] || {} options[:region] ||= 'us-east-1' @region = options[:region] @host = options[:host] || "sqs.#{options[:region]}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) setup_credentials(options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 'sqs') end def path_from_queue_url(queue_url) queue_url.split('.com', 2).last.sub(/^:[0-9]+/, '') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) path = params.delete(:path) body, headers = AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :method => 'POST', :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => path || @path, :port => @port, :version => '2012-11-05' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser, path) end else _request(body, headers, idempotent, parser, path) end end def _request(body, headers, idempotent, parser, path) args = { :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser, :path => path }.reject{|_,v| v.nil? 
} @connection.request(args) end end end end end fog-aws-3.18.0/lib/fog/aws/storage.rb000066400000000000000000000726271437344660100173000ustar00rootroot00000000000000module Fog module AWS class Storage < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods COMPLIANT_BUCKET_NAMES = /^(?:[a-z]|\d(?!\d{0,2}(?:\.\d{1,3}){3}$))(?:[a-z0-9]|\.(?![\.\-])|\-(?![\.])){1,61}[a-z0-9]$/ DEFAULT_REGION = 'us-east-1' ACCELERATION_HOST = 's3-accelerate.amazonaws.com' DEFAULT_SCHEME = 'https' DEFAULT_SCHEME_PORT = { 'http' => 80, 'https' => 443 } MIN_MULTIPART_CHUNK_SIZE = 5242880 MAX_SINGLE_PUT_SIZE = 5368709120 VALID_QUERY_KEYS = %w[ acl cors delete lifecycle location logging notification partNumber policy requestPayment response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires restore tagging torrent uploadId uploads versionId versioning versions website ] requires :aws_access_key_id, :aws_secret_access_key recognizes :endpoint, :region, :host, :port, :scheme, :persistent, :use_iam_profile, :aws_session_token, :aws_credentials_expire_at, :path_style, :acceleration, :instrumentor, :instrumentor_name, :aws_signature_version, :enable_signature_v4_streaming, :virtual_host, :cname, :max_put_chunk_size, :max_copy_chunk_size, :aws_credentials_refresh_threshold_seconds, :disable_content_md5_validation secrets :aws_secret_access_key, :hmac model_path 'fog/aws/models/storage' collection :directories model :directory collection :files model :file request_path 'fog/aws/requests/storage' request :abort_multipart_upload request :complete_multipart_upload request :copy_object request :delete_bucket request :delete_bucket_cors request :delete_bucket_lifecycle request :delete_bucket_policy request :delete_bucket_website request :delete_object request :delete_object_url request :delete_multiple_objects request :delete_bucket_tagging request :get_bucket request :get_bucket_acl request :get_bucket_cors request :get_bucket_lifecycle request :get_bucket_location request :get_bucket_logging request :get_bucket_object_versions request :get_bucket_policy request :get_bucket_tagging request :get_bucket_versioning request :get_bucket_website request :get_bucket_notification request :get_object request :get_object_acl request :get_object_torrent request :get_object_http_url request :get_object_https_url request :get_object_url request :get_object_tagging request :get_request_payment request :get_service request :head_bucket request :head_object request :head_object_url request :initiate_multipart_upload request :list_multipart_uploads request :list_parts request :post_object_hidden_fields request :post_object_restore request :put_bucket request :put_bucket_acl request :put_bucket_cors request :put_bucket_lifecycle request :put_bucket_logging request :put_bucket_policy request :put_bucket_tagging request :put_bucket_versioning request :put_bucket_website request :put_bucket_notification request :put_object request :put_object_acl request :put_object_url request :put_object_tagging request :put_request_payment request :sync_clock request :upload_part request :upload_part_copy module Utils attr_accessor :region attr_accessor :disable_content_md5_validation # Amazon S3 limits max chunk size that can be uploaded/copied in a single request to 5GB. # Other S3-compatible storages (like, Ceph) do not have such limit. # Ceph shows much better performance when file is copied as a whole, in a single request. 
# fog-aws user can use these settings to configure chunk sizes. # A non-positive value will tell fog-aws to use a single put/copy request regardless of file size. # # @return [Integer] # @see https://docs.aws.amazon.com/AmazonS3/latest/userguide/copy-object.html attr_reader :max_put_chunk_size attr_reader :max_copy_chunk_size def cdn @cdn ||= Fog::AWS::CDN.new( :aws_access_key_id => @aws_access_key_id, :aws_secret_access_key => @aws_secret_access_key, :use_iam_profile => @use_iam_profile ) end def http_url(params, expires) signed_url(params.merge(:scheme => 'http'), expires) end def https_url(params, expires) signed_url(params.merge(:scheme => 'https'), expires) end def url(params, expires) Fog::Logger.deprecation("Fog::AWS::Storage => #url is deprecated, use #https_url instead [light_black](#{caller.first})[/]") https_url(params, expires) end def require_mime_types begin # Use mime/types/columnar if available, for reduced memory usage require 'mime/types/columnar' rescue LoadError begin require 'mime/types' rescue LoadError Fog::Logger.warning("'mime-types' missing, please install and try again.") exit(1) end end end def request_url(params) params = request_params(params) params_to_url(params) end def signed_url(params, expires) refresh_credentials_if_expired #convert expires from a point in time to a delta to now expires = expires.to_i if @signature_version == 4 params = v4_signed_params_for_url(params, expires) else params = v2_signed_params_for_url(params, expires) end params_to_url(params) end # @param value [int] # @param description [str] def validate_chunk_size(value, description) raise "#{description} (#{value}) is less than minimum #{MIN_MULTIPART_CHUNK_SIZE}" unless value <= 0 || value >= MIN_MULTIPART_CHUNK_SIZE end private def validate_signature_version! 
unless @signature_version == 2 || @signature_version == 4 raise "Unknown signature version #{@signature_version}; valid versions are 2 or 4" end end def init_max_put_chunk_size!(options = {}) @max_put_chunk_size = options.fetch(:max_put_chunk_size, MAX_SINGLE_PUT_SIZE) validate_chunk_size(@max_put_chunk_size, 'max_put_chunk_size') end def init_max_copy_chunk_size!(options = {}) @max_copy_chunk_size = options.fetch(:max_copy_chunk_size, MAX_SINGLE_PUT_SIZE) validate_chunk_size(@max_copy_chunk_size, 'max_copy_chunk_size') end def v4_signed_params_for_url(params, expires) now = Fog::Time.now expires = expires - now.to_i params[:headers] ||= {} params[:query]||= {} params[:query]['X-Amz-Expires'] = expires params[:query]['X-Amz-Date'] = now.to_iso8601_basic if @aws_session_token params[:query]['X-Amz-Security-Token'] = @aws_session_token end params = request_params(params) params[:headers][:host] = params[:host] params[:headers][:host] += ":#{params[:port]}" if params.fetch(:port, nil) signature_query_params = @signer.signature_parameters(params, now, "UNSIGNED-PAYLOAD") params[:query] = (params[:query] || {}).merge(signature_query_params) params end def v2_signed_params_for_url(params, expires) if @aws_session_token params[:headers]||= {} params[:headers]['x-amz-security-token'] = @aws_session_token end signature = signature_v2(params, expires) params = request_params(params) signature_query_params = { 'AWSAccessKeyId' => @aws_access_key_id, 'Signature' => signature, 'Expires' => expires, } params[:query] = (params[:query] || {}).merge(signature_query_params) params[:query]['x-amz-security-token'] = @aws_session_token if @aws_session_token params end def region_to_host(region=nil) case region.to_s when DEFAULT_REGION, '' 's3.amazonaws.com' when %r{\Acn-.*} "s3.#{region}.amazonaws.com.cn" else "s3.#{region}.amazonaws.com" end end def object_to_path(object_name=nil) '/' + escape(object_name.to_s).gsub('%2F','/') end def bucket_to_path(bucket_name, path=nil) "/#{escape(bucket_name.to_s)}#{path}" end # NOTE: differs from Fog::AWS.escape by NOT escaping `/` def escape(string) string.gsub(/([^a-zA-Z0-9_.\-~\/]+)/) { "%" + $1.unpack("H2" * $1.bytesize).join("%").upcase } end # Transforms things like bucket_name, object_name, region # # Should yield the same result when called f*f def request_params(params) headers = params[:headers] || {} if params[:scheme] scheme = params[:scheme] port = params[:port] || DEFAULT_SCHEME_PORT[scheme] else scheme = @scheme port = @port end if DEFAULT_SCHEME_PORT[scheme] == port port = nil end if params[:region] region = params[:region] host = params[:host] || region_to_host(region) else region = @region || DEFAULT_REGION host = params[:host] || @host || region_to_host(region) end path = params[:path] || object_to_path(params[:object_name]) path = '/' + path if path[0..0] != '/' if params[:bucket_name] bucket_name = params[:bucket_name] if params[:bucket_cname] host = bucket_name else path_style = params.fetch(:path_style, @path_style) if !path_style if COMPLIANT_BUCKET_NAMES !~ bucket_name Fog::Logger.warning("fog: the specified s3 bucket name(#{bucket_name}) is not a valid dns name, which will negatively impact performance. For details see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html") path_style = true elsif scheme == 'https' && !path_style && bucket_name =~ /\./ Fog::Logger.warning("fog: the specified s3 bucket name(#{bucket_name}) contains a '.' 
so is not accessible over https as a virtual hosted bucket, which will negatively impact performance. For details see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html") path_style = true end end # uses the bucket name as host if `virtual_host: true`, you can also # manually specify the cname if required. if params[:virtual_host] host = params.fetch(:cname, bucket_name) elsif path_style path = bucket_to_path bucket_name, path elsif host.start_with?("#{bucket_name}.") # no-op else host = [bucket_name, host].join('.') end end end ret = params.merge({ :scheme => scheme, :host => host, :port => port, :path => path, :headers => headers }) # ret.delete(:path_style) ret.delete(:bucket_name) ret.delete(:object_name) ret.delete(:region) ret end def params_to_url(params) query = params[:query] && params[:query].map do |key, value| if value # URL parameters need / to be escaped [key, Fog::AWS.escape(value.to_s)].join('=') else key end end.join('&') URI::Generic.build({ :scheme => params[:scheme], :host => params[:host], :port => params[:port], :path => params[:path], :query => query, }).to_s end end class Mock include Utils include Fog::AWS::CredentialFetcher::ConnectionMethods def self.acls(type) case type when 'private' { "AccessControlList" => [ { "Permission" => "FULL_CONTROL", "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} } ], "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} } when 'public-read' { "AccessControlList" => [ { "Permission" => "FULL_CONTROL", "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} }, { "Permission" => "READ", "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"} } ], "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} } when 'public-read-write' { "AccessControlList" => [ { "Permission" => "FULL_CONTROL", "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} }, { "Permission" => "READ", "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"} }, { "Permission" => "WRITE", "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AllUsers"} } ], "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} } when 'authenticated-read' { "AccessControlList" => [ { "Permission" => "FULL_CONTROL", "Grantee" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} }, { "Permission" => "READ", "Grantee" => {"URI" => "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"} } ], "Owner" => {"DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0"} } end end def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| region_hash[key] = { :acls => { :bucket => {}, :object => {} }, :buckets => {}, :cors => { :bucket => {} }, :bucket_notifications => {}, :bucket_tagging => {}, :multipart_uploads => {} } end end end def self.reset @data = nil end def initialize(options={}) require_mime_types @use_iam_profile = options[:use_iam_profile] @region = options[:region] || DEFAULT_REGION if @endpoint = options[:endpoint] endpoint = URI.parse(@endpoint) @host = endpoint.host @scheme = endpoint.scheme @port = endpoint.port else @host = options[:host] 
|| region_to_host(@region) @scheme = options[:scheme] || DEFAULT_SCHEME @port = options[:port] || DEFAULT_SCHEME_PORT[@scheme] end @path_style = options[:path_style] || false init_max_put_chunk_size!(options) init_max_copy_chunk_size!(options) @disable_content_md5_validation = options[:disable_content_md5_validation] || false @signature_version = options.fetch(:aws_signature_version, 4) validate_signature_version! setup_credentials(options) end def data self.class.data[@region][@aws_access_key_id] end def reset_data self.class.data[@region].delete(@aws_access_key_id) end def setup_credentials(options) @aws_credentials_refresh_threshold_seconds = options[:aws_credentials_refresh_threshold_seconds] @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 's3') end def signature_v2(params, expires) 'foo' end end class Real include Utils include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to S3 # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # s3 = Fog::Storage.new( # :provider => "AWS", # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * S3 object with connection to aws. def initialize(options={}) require_mime_types @use_iam_profile = options[:use_iam_profile] @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.storage' @connection_options = options[:connection_options] || {} @persistent = options.fetch(:persistent, false) @acceleration = options.fetch(:acceleration, false) @signature_version = options.fetch(:aws_signature_version, 4) @enable_signature_v4_streaming = options.fetch(:enable_signature_v4_streaming, true) validate_signature_version! 
@path_style = options[:path_style] || false init_max_put_chunk_size!(options) init_max_copy_chunk_size!(options) @disable_content_md5_validation = options[:disable_content_md5_validation] || false @region = options[:region] || DEFAULT_REGION if @endpoint = options[:endpoint] endpoint = URI.parse(@endpoint) @host = endpoint.host @scheme = endpoint.scheme @port = endpoint.port else @host = options[:host] || region_to_host(@region) @scheme = options[:scheme] || DEFAULT_SCHEME @port = options[:port] || DEFAULT_SCHEME_PORT[@scheme] end @host = ACCELERATION_HOST if @acceleration setup_credentials(options) end def reload @connection.reset if @connection end private def setup_credentials(options) @aws_credentials_refresh_threshold_seconds = options[:aws_credentials_refresh_threshold_seconds] @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] if @signature_version == 4 @signer = Fog::AWS::SignatureV4.new( @aws_access_key_id, @aws_secret_access_key, @region, 's3') elsif @signature_version == 2 @hmac = Fog::HMAC.new('sha1', @aws_secret_access_key) end end def connection(scheme, host, port) uri = "#{scheme}://#{host}:#{port}" if @persistent unless uri == @connection_uri @connection_uri = uri reload @connection = nil end else @connection = nil end @connection ||= Fog::XML::Connection.new(uri, @persistent, @connection_options) end def request(params, &block) refresh_credentials_if_expired date = Fog::Time.now params = params.dup stringify_query_keys(params) params[:headers] = (params[:headers] || {}).dup params[:headers]['x-amz-security-token'] = @aws_session_token if @aws_session_token if @signature_version == 2 expires = date.to_date_header params[:headers]['Date'] = expires params[:headers]['Authorization'] = "AWS #{@aws_access_key_id}:#{signature_v2(params, expires)}" end params = request_params(params) scheme = params.delete(:scheme) host = params.delete(:host) port = params.delete(:port) || DEFAULT_SCHEME_PORT[scheme] params[:headers]['Host'] = host if @signature_version == 4 params[:headers]['x-amz-date'] = date.to_iso8601_basic if params[:body].respond_to?(:read) if @enable_signature_v4_streaming # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html # We ignore the bit about setting the content-encoding to aws-chunked because # this can cause s3 to serve files with a blank content encoding which causes problems with some CDNs # AWS have confirmed that s3 can infer that the content-encoding is aws-chunked from the x-amz-content-sha256 header # params[:headers]['x-amz-content-sha256'] = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' params[:headers]['x-amz-decoded-content-length'] = params[:headers].delete 'Content-Length' else params[:headers]['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD' end else params[:headers]['x-amz-content-sha256'] ||= OpenSSL::Digest::SHA256.hexdigest(params[:body] || '') end signature_components = @signer.signature_components(params, date, params[:headers]['x-amz-content-sha256']) params[:headers]['Authorization'] = @signer.components_to_header(signature_components) if params[:body].respond_to?(:read) && @enable_signature_v4_streaming body = params.delete :body params[:request_block] = S3Streamer.new(body, signature_components['X-Amz-Signature'], @signer, date) end end # FIXME: ToHashParser should make this not needed original_params = params.dup if @instrumentor 
@instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(scheme, host, port, params, original_params, &block) end else _request(scheme, host, port, params, original_params, &block) end end def _request(scheme, host, port, params, original_params, &block) connection(scheme, host, port).request(params, &block) rescue Excon::Errors::MovedPermanently, Excon::Errors::TemporaryRedirect => error headers = (error.response.is_a?(Hash) ? error.response[:headers] : error.response.headers) new_params = {} if headers.has_key?('Location') new_params[:host] = URI.parse(headers['Location']).host else body = error.response.is_a?(Hash) ? error.response[:body] : error.response.body # some errors provide info indirectly new_params[:bucket_name] = %r{([^<]*)}.match(body).captures.first new_params[:host] = %r{([^<]*)}.match(body).captures.first # some errors provide it directly @new_region = %r{([^<]*)}.match(body) ? Regexp.last_match.captures.first : nil end Fog::Logger.warning("fog: followed redirect to #{host}, connecting to the matching region will be more performant") original_region, original_signer = @region, @signer @region = @new_region || case new_params[:host] when /s3.amazonaws.com/, /s3-external-1.amazonaws.com/ DEFAULT_REGION else %r{s3[\.\-]([^\.]*).amazonaws.com}.match(new_params[:host]).captures.first end if @signature_version == 4 @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 's3') original_params[:headers].delete('Authorization') end response = request(original_params.merge(new_params), &block) @region, @signer = original_region, original_signer response end # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html class S3Streamer attr_accessor :body, :signature, :signer, :finished, :date, :initial_signature def initialize(body, signature, signer, date) self.body = body self.date = date self.signature = signature self.initial_signature = signature self.signer = signer if body.respond_to?(:binmode) body.binmode end if body.respond_to?(:pos=) body.pos = 0 end end #called if excon wants to retry the request. As well as rewinding the body #we must also reset the signature def rewind self.signature = initial_signature self.finished = false body.rewind end def call if finished '' else next_chunk end end def next_chunk data = body.read(0x10000) if data.nil? 
self.finished = true data = '' end self.signature = sign_chunk(data, signature) "#{data.length.to_s(16)};chunk-signature=#{signature}\r\n#{data}\r\n" end def sign_chunk(data, previous_signature) string_to_sign = <<-DATA AWS4-HMAC-SHA256-PAYLOAD #{date.to_iso8601_basic} #{signer.credential_scope(date)} #{previous_signature} #{OpenSSL::Digest::SHA256.hexdigest('')} #{OpenSSL::Digest::SHA256.hexdigest(data)} DATA hmac = signer.derived_hmac(date) hmac.sign(string_to_sign.strip).unpack('H*').first end end def signature_v2(params, expires) headers = params[:headers] || {} string_to_sign = <<-DATA #{params[:method].to_s.upcase} #{headers['Content-MD5']} #{headers['Content-Type']} #{expires} DATA amz_headers, canonical_amz_headers = {}, '' for key, value in headers if key[0..5] == 'x-amz-' amz_headers[key] = value end end amz_headers = amz_headers.sort {|x, y| x[0] <=> y[0]} for key, value in amz_headers canonical_amz_headers << "#{key}:#{value}\n" end string_to_sign << canonical_amz_headers query_string = '' if params[:query] query_args = [] for key in params[:query].keys.sort if VALID_QUERY_KEYS.include?(key) value = params[:query][key] if value query_args << "#{key}=#{value}" else query_args << key end end end if query_args.any? query_string = '?' + query_args.join('&') end end canonical_path = (params[:path] || object_to_path(params[:object_name])).to_s canonical_path = '/' + canonical_path if canonical_path[0..0] != '/' if params[:bucket_name] canonical_resource = "/#{params[:bucket_name]}#{canonical_path}" else canonical_resource = canonical_path end canonical_resource << query_string string_to_sign << canonical_resource signed_string = @hmac.sign(string_to_sign) Base64.encode64(signed_string).chomp! end def stringify_query_keys(params) params[:query] = Hash[params[:query].map { |k,v| [k.to_s, v] }] if params[:query] end end end end # @deprecated module Storage # @deprecated class AWS < Fog::AWS::Storage # @deprecated # @overrides Fog::Service.new (from the fog-core gem) def self.new(*) Fog::Logger.deprecation 'Fog::Storage::AWS is deprecated, please use Fog::AWS::Storage.' 
super end end end end fog-aws-3.18.0/lib/fog/aws/sts.rb000066400000000000000000000143401437344660100164310ustar00rootroot00000000000000module Fog module AWS class STS < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods class EntityAlreadyExists < Fog::AWS::STS::Error; end class ValidationError < Fog::AWS::STS::Error; end class AwsAccessKeysMissing < Fog::AWS::STS::Error; end recognizes :region, :aws_access_key_id, :aws_secret_access_key, :host, :path, :port, :scheme, :persistent, :aws_session_token, :use_iam_profile, :aws_credentials_expire_at, :instrumentor, :instrumentor_name request_path 'fog/aws/requests/sts' request :get_federation_token request :get_session_token request :assume_role request :assume_role_with_saml request :assume_role_with_web_identity class Mock def self.data @data ||= Hash.new do |hash, key| hash[key] = { :owner_id => Fog::AWS::Mock.owner_id, :server_certificates => {} } end end def self.reset @data = nil end def self.server_certificate_id Fog::Mock.random_hex(16) end def initialize(options={}) @use_iam_profile = options[:use_iam_profile] setup_credentials(options) end def data self.class.data[@aws_access_key_id] end def reset_data self.class.data.delete(@aws_access_key_id) end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods # Initialize connection to STS # # ==== Notes # options parameter must include values for :aws_access_key_id and # :aws_secret_access_key in order to create a connection # # ==== Examples # iam = STS.new( # :aws_access_key_id => your_aws_access_key_id, # :aws_secret_access_key => your_aws_secret_access_key # ) # # ==== Parameters # * options<~Hash> - config arguments for connection. Defaults to {}. # # ==== Returns # * STS object with connection to AWS. 
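      #
      # ==== Regional example (illustrative)
      # A minimal sketch, not part of the original docs: it assumes valid
      # long-lived credentials; :region is optional and defaults to 'us-east-1'.
      # Temporary credentials can then be obtained through the requests
      # registered above (get_session_token, assume_role, etc.).
      #   sts = Fog::AWS::STS.new(
      #     :aws_access_key_id     => your_aws_access_key_id,
      #     :aws_secret_access_key => your_aws_secret_access_key,
      #     :region                => 'us-east-1'
      #   )
      #   # sts.get_session_token -- argument handling follows the request definition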
def initialize(options={}) @use_iam_profile = options[:use_iam_profile] @region = options[:region] || 'us-east-1' setup_credentials(options) @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.sts' @connection_options = options[:connection_options] || {} @host = options[:host] || "sts.#{@region}.amazonaws.com" @path = options[:path] || '/' @persistent = options[:persistent] || false @port = options[:port] || 443 @scheme = options[:scheme] || 'https' @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) end def reload @connection.reset end private def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] if (@aws_access_key_id && @aws_secret_access_key) @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'sts') end end def request(params) if (@signer == nil) raise AwsAccessKeysMissing.new("Can't make unsigned requests, need aws_access_key_id and aws_secret_access_key") end idempotent = params.delete(:idempotent) parser = params.delete(:parser) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => 'application/x-www-form-urlencoded' }, { :method => 'POST', :aws_session_token => @aws_session_token, :signer => @signer, :host => @host, :path => @path, :port => @port, :version => '2011-06-15' } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def request_unsigned(params) idempotent = params.delete(:idempotent) parser = params.delete(:parser) params['Version'] = '2011-06-15' headers = { 'Content-Type' => 'application/x-www-form-urlencoded', 'Host' => @host } body = '' for key in params.keys.sort unless (value = params[key]).nil? body << "#{key}=#{Fog::AWS.escape(value.to_s)}&" end end body.chop! if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, headers, idempotent, parser) @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) rescue Excon::Errors::HTTPStatusError => error match = Fog::AWS::Errors.match_error(error) raise if match.empty? 
raise case match[:code] when 'EntityAlreadyExists', 'KeyPairMismatch', 'LimitExceeded', 'MalformedCertificate', 'ValidationError' Fog::AWS::STS.const_get(match[:code]).slurp(error, match[:message]) else Fog::AWS::STS::Error.slurp(error, "#{match[:code]} => #{match[:message]}") end end end end end end fog-aws-3.18.0/lib/fog/aws/support.rb000066400000000000000000000212451437344660100173360ustar00rootroot00000000000000module Fog module AWS class Support < Fog::Service extend Fog::AWS::CredentialFetcher::ServiceMethods requires :aws_access_key_id, :aws_secret_access_key recognizes :host, :path, :port, :scheme, :instrumentor, :instrumentor_name, :region, :persistent, :aws_session_token model_path 'fog/aws/models/support' request_path 'fog/aws/requests/support' collection :flagged_resources collection :trusted_advisor_checks model :flagged_resource model :trusted_advisor_check request :describe_trusted_advisor_checks request :describe_trusted_advisor_check_result class Mock def self.data @data ||= Hash.new do |hash, region| hash[region] = Hash.new do |region_hash, key| tac_id = Fog::Mock.random_hex(5) region_hash[key] = { :trusted_advisor_checks => { tac_id => { "category"=>"cost_optimizing", "description"=>"Checks the Amazon Elastic Compute Cloud (Amazon EC2) instances that were running at any time during the last 14 days and alerts you if the daily CPU utilization was 10% or less and network I/O was 5 MB or less on 4 or more days. Running instances generate hourly usage charges. Although some scenarios can result in low utilization by design, you can often lower your costs by managing the number and size of your instances.\n

\nEstimated monthly savings are calculated by using the current usage rate for On-Demand Instances and the estimated number of days the instance might be underutilized. Actual savings will vary if you are using Reserved Instances or Spot Instances, or if the instance is not running for a full day. To get daily utilization data, download the report for this check. \n\n\nAlert Criteria\nYellow: An instance had 10% or less daily average CPU utilization and 5 MB or less network I/O on at least 4 of the previous 14 days.\n\nRecommended Action\nConsider stopping or terminating instances that have low utilization, or scale the number of instances by using Auto Scaling. For more information, see Stop and Start Your Instance, Terminate Your Instance, and What is Auto Scaling?\n\nAdditional Resources\nMonitoring Amazon EC2\nInstance Metadata and User Data\nAmazon CloudWatch Developer Guide
\nAuto Scaling Developer Guide", "id"=>tac_id, "metadata"=>["Region/AZ", "Instance ID", "Instance Name", "Instance Type", "Estimated Monthly Savings", "Day 1", "Day 2", "Day 3", "Day 4", "Day 5", "Day 6", "Day 7", "Day 8", "Day 9", "Day 10", "Day 11", "Day 12", "Day 13", "Day 14", "14-Day Average CPU Utilization", "14-Day Average Network I/O", "Number of Days Low Utilization"], "name"=>"Low Utilization Amazon EC2 Instances" } }, :trusted_advisor_check_results => { tac_id => { 'checkId' => tac_id, 'status' => "warning", 'timestamp' => "2016-09-18T13:19:35Z", 'resourcesSummary' => { "resourcesFlagged" => 40, "resourcesIgnored" => 0, "resourcesProcessed" => 47, "resourcesSuppressed" => 0 }, 'categorySpecificSummary' => { "costOptimizing" => { "estimatedMonthlySavings" => 4156.920000000003, "estimatedPercentMonthlySavings" => 0.9918398900532555 } }, 'flaggedResources' => [{ "region" => "us-west-2", "resourceId" => Fog::Mock.random_hex(22), "status" => "warning", "isSuppressed" => false, "metadata" => ["us-west-2a", "i-#{Fog::Mock.random_hex(5)}", "instance_tags", "m3.large", "$95.76", "2.3% 0.23MB", "2.3% 0.20MB", "2.3% 0.21MB", "2.4% 0.28MB", "2.3% 0.20MB", "2.3% 0.20MB", "2.3% 0.20MB", "2.3% 0.20MB", "2.3% 0.20MB", "2.3% 0.20MB", "2.6% 0.54MB", "2.4% 0.31MB", "2.3% 0.21MB", "2.3% 0.20MB", "2.3%", "0.24MB", "14 days"] }] } } } end end end def self.reset @data = nil end def reset self.class.reset end attr_accessor :region def initialize(options={}) @region = 'us-east-1' end def data self.class.data[@region][@aws_access_key_id] end end class Real include Fog::AWS::CredentialFetcher::ConnectionMethods def initialize(options={}) @connection_options = options[:connection_options] || {} @instrumentor = options[:instrumentor] @instrumentor_name = options[:instrumentor_name] || 'fog.aws.support' @region = 'us-east-1' @host = options[:host] || "support.#{@region}.amazonaws.com" @path = options[:path] || "/" @port = options[:port] || 443 @scheme = options[:scheme] || "https" @persistent = options[:persistent] || false @connection = Fog::XML::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options) @version = options[:version] || '2013-04-15' setup_credentials(options) end def reload @connection.reset end def setup_credentials(options) @aws_access_key_id = options[:aws_access_key_id] @aws_secret_access_key = options[:aws_secret_access_key] @aws_session_token = options[:aws_session_token] @aws_credentials_expire_at = options[:aws_credentials_expire_at] #global services that have no region are signed with the us-east-1 region #the only exception is GovCloud, which requires the region to be explicitly specified as us-gov-west-1 @signer = Fog::AWS::SignatureV4.new(@aws_access_key_id, @aws_secret_access_key, @region, 'support') end def request(params) refresh_credentials_if_expired idempotent = params.delete(:idempotent) parser = params.delete(:parser) action = params.delete('Action') request_body = Fog::JSON.encode(params) body, headers = Fog::AWS.signed_params_v4( params, { 'Content-Type' => "application/x-amz-json-1.1", "X-Amz-Target" => "AWSSupport_#{@version.gsub("-", "")}.#{action}" }, { :host => @host, :path => @path, :port => @port, :version => @version, :signer => @signer, :aws_session_token => @aws_session_token, :method => 'POST', :body => request_body } ) if @instrumentor @instrumentor.instrument("#{@instrumentor_name}.request", params) do _request(body, headers, idempotent, parser) end else _request(body, headers, idempotent, parser) end end def _request(body, 
headers, idempotent, parser) response = @connection.request({ :body => body, :expects => 200, :idempotent => idempotent, :headers => headers, :method => 'POST', :parser => parser }) response.body = Fog::JSON.decode(response.body) response end end end end end fog-aws-3.18.0/lib/fog/aws/version.rb000066400000000000000000000000711437344660100173010ustar00rootroot00000000000000module Fog module AWS VERSION = "3.18.0" end end fog-aws-3.18.0/tests/000077500000000000000000000000001437344660100143205ustar00rootroot00000000000000fog-aws-3.18.0/tests/credentials_tests.rb000066400000000000000000000233551437344660100203740ustar00rootroot00000000000000# frozen_string_literal: true Shindo.tests('AWS | credentials', ['aws']) do old_mock_value = Excon.defaults[:mock] fog_was_mocked = Fog.mocking? Excon.stubs.clear Fog.unmock! begin Excon.defaults[:mock] = true Excon.stub({ method: :put, path: '/latest/api/token' }, { status: 200, body: 'token1234' }) Excon.stub({ method: :get, path: '/latest/meta-data/iam/security-credentials/' }, { status: 200, body: 'arole' }) Excon.stub({ method: :get, path: '/latest/meta-data/placement/availability-zone/' }, { status: 200, body: 'us-west-1a' }) expires_at = Time.at(Time.now.to_i + 500) credentials = { 'AccessKeyId' => 'dummykey', 'SecretAccessKey' => 'dummysecret', 'Token' => 'dummytoken', 'Expiration' => expires_at.xmlschema } Excon.stub({ method: :get, path: '/latest/meta-data/iam/security-credentials/arole' }, { status: 200, body: Fog::JSON.encode(credentials) }) tests('#fetch_credentials') do returns(aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', aws_credentials_expire_at: expires_at) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end tests('#fetch_credentials when the v2 token 404s') do Excon.stub({ method: :put, path: '/latest/api/token' }, { status: 404, body: 'not found' }) returns(aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', aws_credentials_expire_at: expires_at) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end tests('#fetch_credentials when the v2 disabled') do returns(aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', aws_credentials_expire_at: expires_at) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true, disable_imds_v2: true) } end ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] = '/v1/credentials?id=task_id' Excon.stub({ method: :get, path: '/v1/credentials?id=task_id' }, { status: 200, body: Fog::JSON.encode(credentials) }) tests('#fetch_credentials') do returns(aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', aws_credentials_expire_at: expires_at) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] = nil ENV['AWS_WEB_IDENTITY_TOKEN_FILE'] = File.dirname(__FILE__) + '/lorem.txt' ENV['AWS_ROLE_ARN'] = "dummyrole" ENV['AWS_ROLE_SESSION_NAME'] = "dummyrolesessionname" document = ''\ ''\ ''\ 'dummytoken'\ 'dummysecret'\ "#{expires_at.xmlschema}"\ 'dummykey'\ ''\ ''\ '' Excon.stub({method: :get, path: "/", idempotent: true}, { status: 200, body: document}) tests('#fetch_credentials token based') do returns( aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', sts_endpoint: 
"https://sts.amazonaws.com", aws_credentials_expire_at: expires_at ) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end ENV['AWS_ROLE_SESSION_NAME'] = nil tests('#fetch_credentials token based without session name') do returns( aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', sts_endpoint: "https://sts.amazonaws.com", aws_credentials_expire_at: expires_at ) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true, region: 'us-west-1') } end ENV["AWS_STS_REGIONAL_ENDPOINTS"] = "regional" tests('#fetch_credentials with no region specified') do returns( aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', sts_endpoint: "https://sts.amazonaws.com", aws_credentials_expire_at: expires_at ) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end tests('#fetch_credentials with regional STS endpoint') do returns( aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', sts_endpoint: "https://sts.us-west-1.amazonaws.com", aws_credentials_expire_at: expires_at ) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true, region: 'us-west-1') } end ENV["AWS_DEFAULT_REGION"] = "us-west-1" tests('#fetch_credentials with regional STS endpoint with region in env') do returns( aws_access_key_id: 'dummykey', aws_secret_access_key: 'dummysecret', aws_session_token: 'dummytoken', region: 'us-west-1', sts_endpoint: "https://sts.us-west-1.amazonaws.com", aws_credentials_expire_at: expires_at ) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end ENV["AWS_STS_REGIONAL_ENDPOINTS"] = nil ENV["AWS_DEFAULT_REGION"] = nil ENV['AWS_WEB_IDENTITY_TOKEN_FILE'] = nil storage = Fog::Storage.new( :provider => 'AWS', :region => 'us-east-1', :use_iam_profile => true, :aws_credentials_refresh_threshold_seconds => 30) tests('#credentials_refresh_threshold') do returns(30) { storage.send(:credentials_refresh_threshold) } end Fog::Time.now = storage.instance_variable_get(:@aws_credentials_expire_at) - 31 tests('#refresh_credentials_if_expired before credentials have expired and before refresh threshold') do returns(nil) { storage.refresh_credentials_if_expired } returns('dummykey') { storage.instance_variable_get(:@aws_access_key_id) } returns('dummysecret') { storage.instance_variable_get(:@aws_secret_access_key) } returns(expires_at) { storage.instance_variable_get(:@aws_credentials_expire_at) } end Fog::Time.now = Time.now credentials['AccessKeyId'] = 'newkey-1' credentials['SecretAccessKey'] = 'newsecret-1' credentials['Expiration'] = (expires_at + 10).xmlschema Excon.stub({ method: :get, path: '/latest/meta-data/iam/security-credentials/arole' }, { status: 200, body: Fog::JSON.encode(credentials) }) Fog::Time.now = storage.instance_variable_get(:@aws_credentials_expire_at) - 29 tests('#refresh_credentials_if_expired after refresh threshold is crossed but before expiration') do returns(true) { storage.refresh_credentials_if_expired } returns('newkey-1') { storage.instance_variable_get(:@aws_access_key_id) } returns('newsecret-1') { storage.instance_variable_get(:@aws_secret_access_key) } returns(expires_at + 10) { storage.instance_variable_get(:@aws_credentials_expire_at) } end Fog::Time.now = Time.now credentials['AccessKeyId'] = 'newkey-2' credentials['SecretAccessKey'] = 'newsecret-2' credentials['Expiration'] = (expires_at + 20).xmlschema Excon.stub({ method: :get, path: 
'/latest/meta-data/iam/security-credentials/arole' }, { status: 200, body: Fog::JSON.encode(credentials) }) Fog::Time.now = storage.instance_variable_get(:@aws_credentials_expire_at) + 1 tests('#refresh_credentials_if_expired after credentials have expired') do returns(true) { storage.refresh_credentials_if_expired } returns('newkey-2') { storage.instance_variable_get(:@aws_access_key_id) } returns('newsecret-2') { storage.instance_variable_get(:@aws_secret_access_key) } returns(expires_at + 20) { storage.instance_variable_get(:@aws_credentials_expire_at) } end Fog::Time.now = Time.now compute = Fog::AWS::Compute.new(use_iam_profile: true) tests('#credentials_refresh_threshold when "aws_credentials_refresh_threshold_seconds" is unspecified') do returns(15) { compute.send(:credentials_refresh_threshold) } end default_credentials = Fog::AWS::Compute.fetch_credentials({}) tests('#fetch_credentials when the url 404s') do Excon.stub({ method: :put, path: '/latest/api/token' }, { status: 404, body: 'not found' }) Excon.stub({ method: :get, path: '/latest/meta-data/iam/security-credentials/' }, { status: 404, body: 'not bound' }) Excon.stub({ method: :get, path: '/latest/meta-data/placement/availability-zone/' }, { status: 400, body: 'not found' }) returns(default_credentials) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end mocked_credentials = { aws_access_key_id: 'access-key-id', aws_secret_access_key: 'secret-access-key', aws_session_token: 'session-token', aws_credentials_expire_at: Time.at(Time.now.to_i + 500).xmlschema } tests('#fetch_credentials when mocking') do Fog.mock! Fog::AWS::Compute::Mock.data[:iam_role_based_creds] = mocked_credentials returns(mocked_credentials) { Fog::AWS::Compute.fetch_credentials(use_iam_profile: true) } end ensure ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] = nil ENV['AWS_WEB_IDENTITY_TOKEN_FILE'] = nil Excon.stubs.clear Excon.defaults[:mock] = old_mock_value Fog.mock! if fog_was_mocked end end fog-aws-3.18.0/tests/helper.rb000066400000000000000000000014231437344660100161240ustar00rootroot00000000000000begin require 'simplecov' SimpleCov.start SimpleCov.command_name 'Shindo' rescue LoadError => e $stderr.puts "not recording test coverage: #{e.inspect}" end require File.expand_path('../../lib/fog/aws', __FILE__) Bundler.require(:test) Excon.defaults.merge!(debug_request: true, debug_response: true) require File.expand_path(File.join(File.dirname(__FILE__), 'helpers', 'mock_helper')) # This overrides the default 600 seconds timeout during live test runs unless Fog.mocking? Fog.timeout = ENV['FOG_TEST_TIMEOUT'] || 2_000 Fog::Logger.warning "Setting default fog timeout to #{Fog.timeout} seconds" end def lorem_file File.open(File.dirname(__FILE__) + '/lorem.txt', 'r') end def array_differences(array_a, array_b) (array_a - array_b) | (array_b - array_a) end fog-aws-3.18.0/tests/helpers/000077500000000000000000000000001437344660100157625ustar00rootroot00000000000000fog-aws-3.18.0/tests/helpers/collection_helper.rb000066400000000000000000000044431437344660100220060ustar00rootroot00000000000000def collection_tests(collection, params = {}, mocks_implemented = true) tests('success') do tests("#new(#{params.inspect})").succeeds do pending if Fog.mocking? && !mocks_implemented collection.new(params) end tests("#create(#{params.inspect})").succeeds do pending if Fog.mocking? && !mocks_implemented @instance = collection.create(params) end # FIXME: work around for timing issue on AWS describe_instances mocks if Fog.mocking? 
&& @instance.respond_to?(:ready?) @instance.wait_for { ready? } end tests("#all").succeeds do pending if Fog.mocking? && !mocks_implemented collection.all end if !Fog.mocking? || mocks_implemented @identity = @instance.identity end tests("#get(#{@identity})").succeeds do pending if Fog.mocking? && !mocks_implemented collection.get(@identity) end tests('Enumerable') do pending if Fog.mocking? && !mocks_implemented methods = [ 'all?', 'any?', 'find', 'detect', 'collect', 'map', 'find_index', 'flat_map', 'collect_concat', 'group_by', 'none?', 'one?' ] methods.each do |enum_method| if collection.respond_to?(enum_method) tests("##{enum_method}").succeeds do block_called = false collection.send(enum_method) { block_called = true } block_called end end end [ 'max_by','min_by' ].each do |enum_method| if collection.respond_to?(enum_method) tests("##{enum_method}").succeeds do block_called = false collection.send(enum_method) { block_called = true; 0 } block_called end end end end if block_given? yield(@instance) end if !Fog.mocking? || mocks_implemented @instance.destroy end end tests('failure') do if !Fog.mocking? || mocks_implemented @identity = @identity.to_s @identity = @identity.gsub(/[a-zA-Z]/) { Fog::Mock.random_letters(1) } @identity = @identity.gsub(/\d/) { Fog::Mock.random_numbers(1) } @identity end tests("#get('#{@identity}')").returns(nil) do pending if Fog.mocking? && !mocks_implemented collection.get(@identity) end end end fog-aws-3.18.0/tests/helpers/compute/000077500000000000000000000000001437344660100174365ustar00rootroot00000000000000fog-aws-3.18.0/tests/helpers/compute/flavors_helper.rb000066400000000000000000000014571437344660100230050ustar00rootroot00000000000000def flavors_tests(connection, params = {}, mocks_implemented = true) tests('success') do tests('#all').succeeds do pending if Fog.mocking? && !mocks_implemented connection.flavors.all end if !Fog.mocking? || mocks_implemented @identity = connection.flavors.first.identity end tests("#get('#{@identity}')").succeeds do pending if Fog.mocking? && !mocks_implemented connection.flavors.get(@identity) end end tests('failure') do if !Fog.mocking? || mocks_implemented invalid_flavor_identity = connection.flavors.first.identity.to_s.gsub(/\w/, '0') end tests("#get('#{invalid_flavor_identity}')").returns(nil) do pending if Fog.mocking? && !mocks_implemented connection.flavors.get(invalid_flavor_identity) end end end fog-aws-3.18.0/tests/helpers/compute/server_helper.rb000066400000000000000000000012421437344660100226270ustar00rootroot00000000000000def server_tests(connection, params = {}, mocks_implemented = true) model_tests(connection.servers, params, mocks_implemented) do tests('#reload').returns(true) do pending if Fog.mocking? && !mocks_implemented @instance.wait_for { ready? } identity = @instance.identity !identity.nil? && identity == @instance.reload.identity end responds_to(%i[ready state]) yield if block_given? tests('#reboot').succeeds do pending if Fog.mocking? && !mocks_implemented @instance.wait_for { ready? } @instance.reboot end if !Fog.mocking? || mocks_implemented @instance.wait_for { ready? } end end end fog-aws-3.18.0/tests/helpers/compute/servers_helper.rb000066400000000000000000000004061437344660100230130ustar00rootroot00000000000000def servers_tests(connection, params = {}, mocks_implemented = true) collection_tests(connection.servers, params, mocks_implemented) do if !Fog.mocking? || mocks_implemented @instance.wait_for { ready? } yield if block_given? 
end end end fog-aws-3.18.0/tests/helpers/dns_helper.rb000066400000000000000000000017451437344660100204410ustar00rootroot00000000000000def dns_providers { aws: { mocked: false }, bluebox: { mocked: false, zone_attributes: { ttl: 60 } }, dnsimple: { mocked: false }, dnsmadeeasy: { mocked: false }, dynect: { mocked: false, zone_attributes: { email: 'fog@example.com' } }, linode: { mocked: false, zone_attributes: { email: 'fog@example.com' } }, zerigo: { mocked: false }, rackspace: { mocked: false, zone_attributes: { email: 'fog@example.com' } }, rage4: { mocked: false } } end def generate_unique_domain(with_trailing_dot = false) # get time (with 1/100th of sec accuracy) # want unique domain name and if provider is fast, # this can be called more than once per second time = (Time.now.to_f * 100).to_i domain = 'test-' + time.to_s + '.com' if with_trailing_dot domain += '.' end domain end fog-aws-3.18.0/tests/helpers/formats_helper.rb000066400000000000000000000067731437344660100213360ustar00rootroot00000000000000# frozen_string_literal: true require 'fog/schema/data_validator' # format related hackery # allows both true.is_a?(Fog::Boolean) and false.is_a?(Fog::Boolean) # allows both nil.is_a?(Fog::Nullable::String) and ''.is_a?(Fog::Nullable::String) module Fog module Boolean; end module Nullable module Boolean; end module Integer; end module String; end module Time; end module Float; end module Hash; end module Array; end end end [FalseClass, TrueClass].each { |klass| klass.send(:include, Fog::Boolean) } [FalseClass, TrueClass, NilClass, Fog::Boolean].each { |klass| klass.send(:include, Fog::Nullable::Boolean) } [NilClass, String].each { |klass| klass.send(:include, Fog::Nullable::String) } [NilClass, Time].each { |klass| klass.send(:include, Fog::Nullable::Time) } [Integer, NilClass].each { |klass| klass.send(:include, Fog::Nullable::Integer) } [Float, NilClass].each { |klass| klass.send(:include, Fog::Nullable::Float) } [Hash, NilClass].each { |klass| klass.send(:include, Fog::Nullable::Hash) } [Array, NilClass].each { |klass| klass.send(:include, Fog::Nullable::Array) } module Shindo # Generates a Shindo test that compares a hash schema to the result # of the passed in block returning true if they match. # # The schema that is passed in is a Hash or Array of hashes that # have Classes in place of values. When checking the schema the # value should match the Class. # # Strict mode will fail if the data has additional keys. Setting # +strict+ to +false+ will allow additional keys to appear. # # @param [Hash] schema A Hash schema # @param [Hash] options Options to change validation rules # @option options [Boolean] :allow_extra_keys # If +true+ does not fail when keys are in the data that are # not specified in the schema. This allows new values to # appear in API output without breaking the check. # @option options [Boolean] :allow_optional_rules # If +true+ does not fail if extra keys are in the schema # that do not match the data. Not recommended! 
# @yield Data to check with schema # # @example Using in a test # Shindo.tests("comparing welcome data against schema") do # data = {:welcome => "Hello" } # data_matches_schema(:welcome => String) { data } # end # # comparing welcome data against schema # + data matches schema # # @example Example schema # { # "id" => String, # "ram" => Integer, # "disks" => [ # { # "size" => Float # } # ], # "dns_name" => Fog::Nullable::String, # "active" => Fog::Boolean, # "created" => DateTime # } # # @return [Boolean] class Tests def data_matches_schema(schema, options = {}) test('data matches schema') do validator = Fog::Schema::DataValidator.new valid = validator.validate(yield, schema, options) @message = validator.message unless valid valid end end # @deprecated #formats is deprecated. Use #data_matches_schema instead def formats(format, strict = true) test('has proper format') do if strict options = { allow_extra_keys: false, allow_optional_rules: true } else options = { allow_extra_keys: true, allow_optional_rules: true } end validator = Fog::Schema::DataValidator.new valid = validator.validate(yield, format, options) @message = validator.message unless valid valid end end end end fog-aws-3.18.0/tests/helpers/formats_helper_tests.rb000066400000000000000000000070051437344660100225450ustar00rootroot00000000000000Shindo.tests('test_helper', 'meta') do tests('comparing welcome data against schema') do data = { welcome: 'Hello' } data_matches_schema(welcome: String) { data } end tests('#data_matches_schema') do tests('when value matches schema expectation') do data_matches_schema('key' => String) { { 'key' => 'Value' } } end tests('when values within an array all match schema expectation') do data_matches_schema('key' => [Integer]) { { 'key' => [1, 2] } } end tests('when nested values match schema expectation') do data_matches_schema('key' => { nested_key: String }) { { 'key' => { nested_key: 'Value' } } } end tests('when collection of values all match schema expectation') do data_matches_schema([{ 'key' => String }]) { [{ 'key' => 'Value' }, { 'key' => 'Value' }] } end tests('when collection is empty although schema covers optional members') do data_matches_schema([{ 'key' => String }], allow_optional_rules: true) { [] } end tests('when additional keys are passed and not strict') do data_matches_schema({ 'key' => String }, allow_extra_keys: true) { { 'key' => 'Value', extra: 'Bonus' } } end tests('when value is nil and schema expects NilClass') do data_matches_schema('key' => NilClass) { { 'key' => nil } } end tests('when value and schema match as hashes') do data_matches_schema({}) { {} } end tests('when value and schema match as arrays') do data_matches_schema([]) { [] } end tests('when value is a Time') do data_matches_schema('time' => Time) { { 'time' => Time.now } } end tests('when key is missing but value should be NilClass (#1477)') do data_matches_schema({ 'key' => NilClass }, allow_optional_rules: true) { {} } end tests('when key is missing but value is nullable (#1477)') do data_matches_schema({ 'key' => Fog::Nullable::String }, allow_optional_rules: true) { {} } end end tests('#formats backwards compatible changes') do tests('when value matches schema expectation') do formats('key' => String) { { 'key' => 'Value' } } end tests('when values within an array all match schema expectation') do formats('key' => [Integer]) { { 'key' => [1, 2] } } end tests('when nested values match schema expectation') do formats('key' => { nested_key: String }) { { 'key' => { nested_key: 'Value' } } } end 
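# Editor's note (illustrative, not part of the original suite): #formats is the deprecated wrapper around Fog::Schema::DataValidator defined in tests/helpers/formats_helper.rb, and its second argument toggles strictness. A hypothetical check with an extra key behaves like this:
#   formats({ 'key' => String }, true)  { { 'key' => 'Value', :extra => 'Bonus' } } # does not pass: extra keys rejected when strict
#   formats({ 'key' => String }, false) { { 'key' => 'Value', :extra => 'Bonus' } } # passes: extra keys allowed when not strict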
tests('when collection of values all match schema expectation') do formats([{ 'key' => String }]) { [{ 'key' => 'Value' }, { 'key' => 'Value' }] } end tests('when collection is empty although schema covers optional members') do formats([{ 'key' => String }]) { [] } end tests('when additional keys are passed and not strict') do formats({ 'key' => String }, false) { { 'key' => 'Value', :extra => 'Bonus' } } end tests('when value is nil and schema expects NilClass') do formats('key' => NilClass) { { 'key' => nil } } end tests('when value and schema match as hashes') do formats({}) { {} } end tests('when value and schema match as arrays') do formats([]) { [] } end tests('when value is a Time') do formats('time' => Time) { { 'time' => Time.now } } end tests('when key is missing but value should be NilClass (#1477)') do formats('key' => NilClass) { {} } end tests('when key is missing but value is nullable (#1477)') do formats('key' => Fog::Nullable::String) { {} } end end end fog-aws-3.18.0/tests/helpers/mock_helper.rb000066400000000000000000000136541437344660100206100ustar00rootroot00000000000000# Use so you can run in mock mode from the command line # # FOG_MOCK=true fog if ENV["FOG_MOCK"] == "true" Fog.mock! end # if in mocked mode, fill in some fake credentials for us if Fog.mock? Fog.credentials = { aws_access_key_id: 'aws_access_key_id', aws_secret_access_key: 'aws_secret_access_key', ia_access_key_id: 'aws_access_key_id', ia_secret_access_key: 'aws_secret_access_key', bluebox_api_key: 'bluebox_api_key', bluebox_customer_id: 'bluebox_customer_id', brightbox_client_id: 'brightbox_client_id', brightbox_secret: 'brightbox_secret', cloudstack_disk_offering_id: '', cloudstack_host: 'http://cloudstack.example.org', cloudstack_network_ids: '', cloudstack_service_offering_id: '4437ac6c-9fe3-477a-57ec-60a5a45896a4', cloudstack_template_id: '8a31cf9c-f248-0588-256e-9dbf58785216', cloudstack_zone_id: 'c554c592-e09c-9df5-7688-4a32754a4305', cloudstack_project_id: 'f1f1f1f1-f1f1-f1f1-f1f1-f1f1f1f1f1f1', clodo_api_key: 'clodo_api_key', clodo_username: 'clodo_username', digitalocean_api_key: 'digitalocean_api_key', digitalocean_client_id: 'digitalocean_client_id', dnsimple_email: 'dnsimple_email', dnsimple_password: 'dnsimple_password', dnsmadeeasy_api_key: 'dnsmadeeasy_api_key', dnsmadeeasy_secret_key: 'dnsmadeeasy_secret_key', glesys_username: 'glesys_username', glesys_api_key: 'glesys_api_key', go_grid_api_key: 'go_grid_api_key', go_grid_shared_secret: 'go_grid_shared_secret', google_storage_access_key_id: 'google_storage_access_key_id', google_storage_secret_access_key: 'google_storage_secret_access_key', google_project: 'google_project_name', google_client_email: 'fake@developer.gserviceaccount.com', google_key_location: '~/fake.p12', hp_access_key: 'hp_access_key', hp_secret_key: 'hp_secret_key', hp_tenant_id: 'hp_tenant_id', hp_avl_zone: 'hp_avl_zone', os_account_meta_temp_url_key: 'os_account_meta_temp_url_key', ibm_username: 'ibm_username', ibm_password: 'ibm_password', joyent_username: 'joyentuser', joyent_password: 'joyentpass', linode_api_key: 'linode_api_key', local_root: '~/.fog', bare_metal_cloud_password: 'bare_metal_cloud_password', bare_metal_cloud_username: 'bare_metal_cloud_username', ninefold_compute_key: 'ninefold_compute_key', ninefold_compute_secret: 'ninefold_compute_secret', ninefold_storage_secret: 'ninefold_storage_secret', ninefold_storage_token: 'ninefold_storage_token', # public_key_path: '~/.ssh/id_rsa.pub', # private_key_path: '~/.ssh/id_rsa', opennebula_endpoint: 
'http://opennebula:2633/RPC2', opennebula_username: 'oneadmin', opennebula_password: 'oneadmin', openstack_api_key: 'openstack_api_key', openstack_username: 'openstack_username', openstack_tenant: 'openstack_tenant', openstack_auth_url: 'http://openstack:35357/v2.0/tokens', ovirt_url: 'http://ovirt:8080/api', ovirt_username: 'admin@internal', ovirt_password: '123123', profitbricks_username: 'profitbricks_username', profitbricks_password: 'profitbricks_password', libvirt_uri: 'qemu://libvirt/system', rackspace_api_key: 'rackspace_api_key', rackspace_region: 'dfw', rackspace_username: 'rackspace_username', riakcs_access_key_id: 'riakcs_access_key_id', riakcs_secret_access_key: 'riakcs_secret_access_key', sakuracloud_api_token: 'sakuracloud_api_token', sakuracloud_api_token_secret: 'sakuracloud_api_token_secret', storm_on_demand_username: 'storm_on_demand_username', storm_on_demand_password: 'storm_on_demand_password', vcloud_host: 'vcloud_host', vcloud_password: 'vcloud_password', vcloud_username: 'vcloud_username', vcloud_director_host: 'vcloud-director-host', vcloud_director_password: 'vcloud_director_password', vcloud_director_username: 'vcd_user@vcd_org_name', zerigo_email: 'zerigo_email', zerigo_token: 'zerigo_token', dynect_customer: 'dynect_customer', dynect_username: 'dynect_username', dynect_password: 'dynect_password', vsphere_server: 'virtualcenter.lan', vsphere_username: 'apiuser', vsphere_password: 'apipassword', vsphere_expected_pubkey_hash: 'abcdef1234567890', libvirt_username: 'root', libvirt_password: 'password', cloudsigma_username: 'csuname', cloudsigma_password: 'cspass', docker_username: 'docker-fan', docker_password: 'i<3docker', docker_email: 'dockerfan@gmail.com', docker_url: 'unix://var/run/docker.sock' }.merge(Fog.credentials) end fog-aws-3.18.0/tests/helpers/model_helper.rb000066400000000000000000000014201437344660100207430ustar00rootroot00000000000000def model_tests(collection, params = {}, mocks_implemented = true) tests('success') do @instance = collection.new(params) tests('#save').succeeds do pending if Fog.mocking? && !mocks_implemented @instance.save end if block_given? yield(@instance) end tests('#destroy').succeeds do pending if Fog.mocking? && !mocks_implemented @instance.destroy end end end # Generates a unique identifier with a random differentiator. # Useful when rapidly re-running tests, so we don't have to wait # serveral minutes for deleted objects to disappear from the API # E.g. 
'fog-test-1234' def uniq_id(base_name = 'fog-test') # random_differentiator suffix = rand(65_536).to_s(16).rjust(4, '0') [base_name, suffix].join('-') end fog-aws-3.18.0/tests/helpers/responds_to_helper.rb000066400000000000000000000003771437344660100222140ustar00rootroot00000000000000module Shindo class Tests def responds_to(method_names) [*method_names].each do |method_name| tests("#respond_to?(:#{method_name})").returns(true) do @instance.respond_to?(method_name) end end end end end fog-aws-3.18.0/tests/helpers/schema_validator_tests.rb000066400000000000000000000072441437344660100230450ustar00rootroot00000000000000# frozen_string_literal: true Shindo.tests('Fog::Schema::DataValidator', 'meta') do validator = Fog::Schema::DataValidator.new tests('#validate') do tests('returns true') do returns(true, 'when value matches schema expectation') do validator.validate({ 'key' => 'Value' }, 'key' => String) end returns(true, 'when values within an array all match schema expectation') do validator.validate({ 'key' => [1, 2] }, 'key' => [Integer]) end returns(true, 'when nested values match schema expectation') do validator.validate({ 'key' => { nested_key: 'Value' } }, 'key' => { nested_key: String }) end returns(true, 'when collection of values all match schema expectation') do validator.validate([{ 'key' => 'Value' }, 'key' => 'Value'], [{ 'key' => String }]) end returns(true, 'when collection is empty although schema covers optional members') do validator.validate([], [{ 'key' => String }]) end returns(true, 'when additional keys are passed and not strict') do validator.validate({ 'key' => 'Value', extra: 'Bonus' }, { 'key' => String }, allow_extra_keys: true) end returns(true, 'when value is nil and schema expects NilClass') do validator.validate({ 'key' => nil }, 'key' => NilClass) end returns(true, 'when value and schema match as hashes') do validator.validate({}, {}) end returns(true, 'when value and schema match as arrays') do validator.validate([], []) end returns(true, 'when value is a Time') do validator.validate({ 'time' => Time.now }, 'time' => Time) end returns(true, 'when key is missing but value should be NilClass (#1477)') do validator.validate({}, { 'key' => NilClass }, allow_optional_rules: true) end returns(true, 'when key is missing but value is nullable (#1477)') do validator.validate({}, { 'key' => Fog::Nullable::String }, allow_optional_rules: true) end end tests('returns false') do returns(false, 'when value does not match schema expectation') do validator.validate({ 'key' => nil }, 'key' => String) end returns(false, 'when key formats do not match') do validator.validate({ 'key' => 'Value' }, key: String) end returns(false, 'when additional keys are passed and strict') do validator.validate({ 'key' => 'Missing' }, {}) end returns(false, 'when some keys do not appear') do validator.validate({}, 'key' => String) end returns(false, 'when collection contains a member that does not match schema') do validator.validate([{ 'key' => 'Value' }, 'key' => 5], ['key' => String]) end returns(false, 'when collection has multiple schema patterns') do validator.validate([{ 'key' => 'Value' }], [{ 'key' => Integer }, { 'key' => String }]) end returns(false, 'when hash and array are compared') do validator.validate({}, []) end returns(false, 'when array and hash are compared') do validator.validate([], {}) end returns(false, 'when a hash is expected but another data type is found') do validator.validate({ 'key' => { nested_key: [] } }, 'key' => { nested_key: {} }) end returns(false, 'when 
key is missing but value should be NilClass (#1477)') do validator.validate({}, { 'key' => NilClass }, allow_optional_rules: false) end returns(false, 'when key is missing but value is nullable (#1477)') do validator.validate({}, { 'key' => Fog::Nullable::String }, allow_optional_rules: false) end end end end fog-aws-3.18.0/tests/helpers/succeeds_helper.rb000066400000000000000000000002321437344660100214410ustar00rootroot00000000000000module Shindo class Tests def succeeds(&block) test('succeeds') do !instance_eval(&Proc.new(&block)).nil? end end end end fog-aws-3.18.0/tests/lorem.txt000066400000000000000000000006761437344660100162100ustar00rootroot00000000000000Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.fog-aws-3.18.0/tests/models/000077500000000000000000000000001437344660100156035ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/auto_scaling/000077500000000000000000000000001437344660100202535ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/auto_scaling/activities_tests.rb000066400000000000000000000003101437344660100241600ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | activities', ['aws', 'auto_scaling_m']) do pending # FIXME: activity#save is not implemented collection_tests(Fog::AWS[:auto_scaling].activities, {}, false) end fog-aws-3.18.0/tests/models/auto_scaling/configuration_test.rb000066400000000000000000000004551437344660100245120ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | configuration', ['aws', 'auto_scaling_m']) do params = { :id => uniq_id, :image_id => 'ami-8c1fece5', :instance_type => 't1.micro' } model_tests(Fog::AWS[:auto_scaling].configurations, params, false) do @instance.wait_for { ready? 
} end end fog-aws-3.18.0/tests/models/auto_scaling/configurations_tests.rb000066400000000000000000000004101437344660100250520ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | configurations', ['aws', 'auto_scaling_m']) do params = { :id => uniq_id, :image_id => 'ami-8c1fece5', :instance_type => 't1.micro' } collection_tests(Fog::AWS[:auto_scaling].configurations, params, false) end fog-aws-3.18.0/tests/models/auto_scaling/groups_test.rb000066400000000000000000000012711437344660100231570ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | group', ['aws', 'auto_scaling_m']) do params = { :id => uniq_id, :auto_scaling_group_name => "name", :availability_zones => [], :launch_configuration_name => "lc" } lc_params = { :id => params[:launch_configuration_name], :image_id => "image-id", :instance_type => "instance-type", } Fog::AWS[:auto_scaling].configurations.new(lc_params).save model_tests(Fog::AWS[:auto_scaling].groups, params, true) do @instance.update end test("setting attributes in the constructor") do group = Fog::AWS[:auto_scaling].groups.new(:min_size => 1, :max_size => 2) group.min_size == 1 && group.max_size == 2 end end fog-aws-3.18.0/tests/models/auto_scaling/helper.rb000066400000000000000000000000001437344660100220550ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/auto_scaling/instance_tests.rb000066400000000000000000000005741437344660100236340ustar00rootroot00000000000000require 'fog/aws/models/auto_scaling/instance' Shindo.tests("Fog::AWS::AutoScaling::Instance", 'aws') do @instance = Fog::AWS::AutoScaling::Instance.new test('#healthy? = true') do @instance.health_status = 'Healthy' @instance.healthy? == true end test('#healthy? = false') do @instance.health_status = 'Unhealthy' @instance.healthy? == false end end fog-aws-3.18.0/tests/models/auto_scaling/instances_tests.rb000066400000000000000000000003031437344660100240070ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | instances', ['aws', 'auto_scaling_m']) do pending # FIXME: instance#save is not defined #collection_tests(Fog::AWS[:auto_scaling].instances, {}, false) end fog-aws-3.18.0/tests/models/beanstalk/000077500000000000000000000000001437344660100175475ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/beanstalk/application_tests.rb000066400000000000000000000035211437344660100236220ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | application", ['aws', 'beanstalk']) do pending if Fog.mocking? @application_opts = { :name => uniq_id('fog-test-app'), :description => 'A nice description.' } model_tests(Fog::AWS[:beanstalk].applications, @application_opts, false) do test("#attributes") do @instance.name == @application_opts[:name] && @instance.description == @application_opts[:description] end test("#events") do # There should be some events now. @instance.events.length > 0 end version_name = uniq_id('fog-test-ver') @instance.versions.create( :application_name => @instance.name, :label => version_name ) test("#versions") do # We should have one version. @instance.versions.length == 1 end template_name = uniq_id('fog-test-template') @instance.templates.create( :application_name => @instance.name, :name => template_name, :solution_stack_name => '32bit Amazon Linux running Tomcat 7' ) test('#templates') do # We should have one template now.
@instance.templates.length == 1 end environment_name = uniq_id('fog-test-env') environment = @instance.environments.create( :application_name => @instance.name, :name => environment_name, :version_label => version_name, :solution_stack_name => '32bit Amazon Linux running Tomcat 7' ) # Go ahead an terminate immediately. environment.destroy # Create an environment test("#environments") do # We should have one environment now. @instance.environments.length == 1 end # Must wait for termination before destroying application tests("waiting for test environment to terminate").succeeds do environment.wait_for { terminated? } end end end fog-aws-3.18.0/tests/models/beanstalk/applications_tests.rb000066400000000000000000000003171437344660100240050ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | applications", ['aws', 'beanstalk']) do pending if Fog.mocking? collection_tests(Fog::AWS[:beanstalk].applications, {:name => uniq_id('fog-test-app')}, false) end fog-aws-3.18.0/tests/models/beanstalk/environment_tests.rb000066400000000000000000000064621437344660100236720ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | environment", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @environment_name = uniq_id('fog-test-env') @version_names = [] # Create two unique version names 2.times { @version_names << uniq_id('fog-test-version') } @application = @beanstalk.applications.create({:name => @application_name}) @versions = [] @version_names.each { |name| @versions << @beanstalk.versions.create({ :label => name, :application_name => @application_name, }) } @environment_opts = { :application_name => @application_name, :name => @environment_name, :version_label => @version_names[0], :solution_stack_name => '32bit Amazon Linux running Tomcat 7' } # Note: These model tests can take quite a bit of time to run, about 10 minutes typically. model_tests(@beanstalk.environments, @environment_opts, false) do # Wait for initial ready before next tests. tests("#ready?").succeeds do @instance.wait_for { ready? } end tests("#healthy?").succeeds do @instance.wait_for { healthy? } end test("#events") do # There should be some events now. @instance.events.length > 0 end test("#version") do @instance.version.label == @version_names[0] end test("#version= string") do # Set to version 2 @instance.version = @version_names[1] count = 0 if @instance.version.label == @version_names[1] @instance.events.each { |event| if event.message == "Environment update is starting." count = count + 1 end } end count == 1 end tests("#ready? after version= string").succeeds do @instance.wait_for { ready? } end test("#version= version object") do # reset back to first version using version object @instance.version = @versions[0] count = 0 if @instance.version.label == @version_names[0] @instance.events.each { |event| if event.message == "Environment update is starting." count = count + 1 end } end # Pass if we have two environment updating events count == 2 end tests("#ready? after version= version object").succeeds do @instance.wait_for { ready? } end test('#restart_app_server') do @instance.restart_app_server passed = false @instance.events.each { |event| if event.message == "restartAppServer is starting." passed = true end } passed end test('#rebuild') do @instance.rebuild passed = false @instance.events.each { |event| if event.message == "rebuildEnvironment is starting." 
passed = true end } passed end # Wait for ready or next tests may fail tests("#ready? after rebuild").succeeds do @instance.wait_for { ready? } end end # Wait for instance to terminate before deleting application tests('#terminated?').succeeds do @instance.wait_for { terminated? } end # delete application @application.destroy end fog-aws-3.18.0/tests/models/beanstalk/environments_tests.rb000066400000000000000000000020051437344660100240420ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | environments", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @environment_name = uniq_id('fog-test-env') @version_name = uniq_id('fog-test-version') # Create an application/version to use for testing. @version = @beanstalk.versions.create({ :label => @version_name, :application_name => @application_name, :auto_create_application => true }) @application = @beanstalk.applications.get(@application_name) @environment_opts = { :application_name => @application_name, :name => @environment_name, :version_label => @version_name, :solution_stack_name => '32bit Amazon Linux running Tomcat 7' } collection_tests(@beanstalk.environments, @environment_opts, false) # Wait for instance to terminate before deleting application @instance.wait_for { terminated? } # delete application @application.destroy end fog-aws-3.18.0/tests/models/beanstalk/template_tests.rb000066400000000000000000000024431437344660100231340ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | template", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @template_name = uniq_id('fog-test-template') @template_description = 'A nice description' @application = @beanstalk.applications.create({:name => @application_name}) @template_opts = { :application_name => @application_name, :name => @template_name, :description => @template_description, :solution_stack_name => '32bit Amazon Linux running Tomcat 7' } model_tests(@beanstalk .templates, @template_opts, false) do test("#attributes") do @instance.name == @template_name && @instance.description == @template_description && @instance.application_name == @application_name && @instance.solution_stack_name == @template_opts[:solution_stack_name] end test("#options") do options = @instance.options passed = false if options.each { |option| # See if we recognize at least one option if option["Name"] == 'LoadBalancerHTTPPort' && option["Namespace"] == 'aws:elb:loadbalancer' passed = true end } end passed end end # delete application @application.destroy end fog-aws-3.18.0/tests/models/beanstalk/templates_tests.rb000066400000000000000000000026501437344660100233170ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | templates", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @template_name = uniq_id('fog-test-template') @template_description = 'A nice description' @application = @beanstalk.applications.create({:name => @application_name}) params = { :application_name => @application_name, :name => @template_name, :description => @template_description, :solution_stack_name => '32bit Amazon Linux running Tomcat 7' } collection = @beanstalk.templates tests('success') do tests("#new(#{params.inspect})").succeeds do pending if Fog.mocking? collection.new(params) end tests("#create(#{params.inspect})").succeeds do pending if Fog.mocking? 
@instance = collection.create(params) end tests("#all").succeeds do pending if Fog.mocking? collection.all end tests("#get(#{@application_name}, #{@template_name})").succeeds do pending if Fog.mocking? collection.get(@application_name, @template_name) end if !Fog.mocking? @instance.destroy end end tests('failure') do tests("#get(#{@application_name}, #{@template_name})").returns(nil) do pending if Fog.mocking? collection.get(@application_name, @template_name) end end # delete application @application.destroy end fog-aws-3.18.0/tests/models/beanstalk/version_tests.rb000066400000000000000000000034771437344660100230160ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | version", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @version_name = uniq_id('fog-test-version') @version_description = 'A nice description' @application = @beanstalk.applications.create({:name => @application_name}) @version_opts = { :application_name => @application_name, :label => @version_name, :description => @version_description } model_tests(@beanstalk.versions, @version_opts, false) do test("attributes") do @instance.label == @version_name && @instance.description == @version_description && @instance.application_name == @application_name end test("#events") do # There should be some events now. @instance.events.length > 0 end test("#update description") do new_description = "A completely new description." @instance.description = new_description @instance.update passed = false if @instance.description == new_description # reload version from AWS to verify save is committed to server, not just on local object if @beanstalk.versions.get(@application_name, @version_name).description == new_description passed = true end end passed end test("#update description empty") do @instance.description = '' # Set to empty to nil out @instance.update passed = false if @instance.description == nil # reload version from AWS to verify save is committed to server, not just on local object if @beanstalk.versions.get(@application_name, @version_name).description == nil passed = true end end passed end end # delete application @application.destroy end fog-aws-3.18.0/tests/models/beanstalk/versions_tests.rb000066400000000000000000000025301437344660100231660ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:beanstalk] | versions", ['aws', 'beanstalk']) do pending if Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] @application_name = uniq_id('fog-test-app') @version_name = uniq_id('fog-test-version') @version_description = 'A nice description' @application = @beanstalk.applications.create({:name => @application_name}) params = { :application_name => @application_name, :label => @version_name, :description => @version_description } collection = @beanstalk.versions tests('success') do tests("#new(#{params.inspect})").succeeds do pending if Fog.mocking? collection.new(params) end tests("#create(#{params.inspect})").succeeds do pending if Fog.mocking? @instance = collection.create(params) end tests("#all").succeeds do pending if Fog.mocking? collection.all end tests("#get(#{@application_name}, #{@version_name})").succeeds do pending if Fog.mocking? collection.get(@application_name, @version_name) end if !Fog.mocking? @instance.destroy end end tests('failure') do tests("#get(#{@application_name}, #{@version_name})").returns(nil) do pending if Fog.mocking? 
collection.get(@application_name, @version_name) end end # delete application @application.destroy end fog-aws-3.18.0/tests/models/cdn/000077500000000000000000000000001437344660100163475ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/cdn/distribution_tests.rb000066400000000000000000000011121437344660100226300ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | distribution", ['aws', 'cdn']) do params = { :s3_origin => { 'DNSName' => 'fog_test_cdn.s3.amazonaws.com'}, :enabled => true } model_tests(Fog::CDN[:aws].distributions, params, true) do # distribution needs to be ready before being disabled tests("#ready? - may take 15 minutes to complete...").succeeds do @instance.wait_for { ready? } end # and disabled before being destroyed tests("#disable - may take 15 minutes to complete...").succeeds do @instance.disable @instance.wait_for { ready? } end end end fog-aws-3.18.0/tests/models/cdn/distributions_tests.rb000066400000000000000000000011171437344660100230200ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | distributions", ['aws', 'cdn']) do params = { :s3_origin => { 'DNSName' => 'fog_test_cdn.s3.amazonaws.com'}, :enabled => true} collection_tests(Fog::CDN[:aws].distributions, params, true) do # distribution needs to be ready before being disabled tests("#ready? - may take 15 minutes to complete...").succeeds do @instance.wait_for { ready? } end # and disabled before being destroyed tests("#disable - may take 15 minutes to complete...").succeeds do @instance.disable @instance.wait_for { ready? } end end end fog-aws-3.18.0/tests/models/cdn/invalidation_tests.rb000066400000000000000000000016351437344660100226040ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | invalidation", ['aws', 'cdn']) do tests("distributions#create").succeeds do @distribution = Fog::CDN[:aws].distributions.create(:s3_origin => {'DNSName' => 'fog_test.s3.amazonaws.com'}, :enabled => true) end params = { :paths => [ '/index.html', '/path/to/index.html' ] } model_tests(@distribution.invalidations, params, true) do tests("#id") do returns(true) { @instance.identity != nil } end tests("#paths") do returns([ '/index.html', '/path/to/index.html' ].sort) { @instance.paths.sort } end tests("#ready? - may take 15 minutes to complete...").succeeds do @instance.wait_for { ready? } end end tests("distribution#destroy - may take around 15/20 minutes to complete...").succeeds do @distribution.wait_for { ready? } @distribution.disable @distribution.wait_for { ready? } @distribution.destroy end end fog-aws-3.18.0/tests/models/cdn/invalidations_tests.rb000066400000000000000000000010531437344660100227610ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | invalidations", ['aws', 'cdn']) do tests("distributions#create").succeeds do @distribution = Fog::CDN[:aws].distributions.create(:s3_origin => {'DNSName' => 'fog_test.s3.amazonaws.com'}, :enabled => true) end collection_tests(@distribution.invalidations, { :paths => [ '/index.html' ]}, true) tests("distribution#destroy - may take 15/20 minutes to complete").succeeds do @distribution.wait_for { ready?
} @distribution.destroy end end fog-aws-3.18.0/tests/models/cdn/streaming_distribution_tests.rb000066400000000000000000000011361437344660100247070ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | streaming_distribution", ['aws', 'cdn']) do params = { :s3_origin => { 'DNSName' => 'fog_test_cdn.s3.amazonaws.com'}, :enabled => true } model_tests(Fog::CDN[:aws].streaming_distributions, params, true) do # distribution needs to be ready before being disabled tests("#ready? - may take 15 minutes to complete...").succeeds do @instance.wait_for { ready? } end # and disabled before being destroyed tests("#disable - may take 15 minutes to complete...").succeeds do @instance.disable @instance.wait_for { ready? } end end end fog-aws-3.18.0/tests/models/cdn/streaming_distributions_tests.rb000066400000000000000000000011431437344660100250700ustar00rootroot00000000000000Shindo.tests("Fog::CDN[:aws] | streaming_distributions", ['aws', 'cdn']) do params = { :s3_origin => { 'DNSName' => 'fog_test_cdn.s3.amazonaws.com'}, :enabled => true} collection_tests(Fog::CDN[:aws].streaming_distributions, params, true) do # distribution needs to be ready before being disabled tests("#ready? - may take 15 minutes to complete...").succeeds do @instance.wait_for { ready? } end # and disabled before being destroyed tests("#disable - may take 15 minutes to complete...").succeeds do @instance.disable @instance.wait_for { ready? } end end end fog-aws-3.18.0/tests/models/cloud_watch/000077500000000000000000000000001437344660100200775ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/cloud_watch/alarm_data_tests.rb000066400000000000000000000021351437344660100237340ustar00rootroot00000000000000Shindo.tests("AWS::CloudWatch | alarm_data", ['aws', 'cloudwatch']) do pending if Fog.mocking? tests('success') do tests("#all").succeeds do Fog::AWS[:cloud_watch].alarm_data.all end alarm_name_prefix = {'AlarmNamePrefix'=>'tmp'} tests("#all_by_prefix").succeeds do Fog::AWS[:cloud_watch].alarm_data.all(alarm_name_prefix) end namespace = 'AWS/EC2' metric_name = 'CPUUtilization' tests("#get").succeeds do Fog::AWS[:cloud_watch].alarm_data.get(namespace, metric_name).to_json end new_attributes = { :alarm_name => 'tmp-alarm', :comparison_operator => 'GreaterThanOrEqualToThreshold', :evaluation_periods => 1, :metric_name => 'tmp-metric-alarm', :namespace => 'fog-0.11.0', :period => 60, :statistic => 'Sum', :threshold => 5 } tests('#new').returns(new_attributes) do Fog::AWS[:cloud_watch].alarm_data.new(new_attributes).attributes end tests('#create').returns(new_attributes) do Fog::AWS[:cloud_watch].alarm_data.create(new_attributes).attributes end end end fog-aws-3.18.0/tests/models/cloud_watch/alarm_history_tests.rb000066400000000000000000000010221437344660100245200ustar00rootroot00000000000000Shindo.tests("AWS::CloudWatch | alarm_histories", ['aws', 'cloudwatch']) do pending if Fog.mocking? tests('success') do tests("#all").succeeds do Fog::AWS[:cloud_watch].alarm_histories.all end new_attributes = { :alarm_name => 'tmp-alarm', :end_date => '', :history_item_type => 'StateUpdate', :max_records => 1, :start_date => '' } tests('#new').returns(new_attributes) do Fog::AWS[:cloud_watch].alarm_histories.new(new_attributes).attributes end end end fog-aws-3.18.0/tests/models/cloud_watch/metric_statistics_tests.rb000066400000000000000000000031031437344660100254010ustar00rootroot00000000000000Shindo.tests("AWS::CloudWatch | metric_statistics", ['aws', 'cloudwatch']) do tests('success') do pending if Fog.mocking?
instanceId = 'i-420c352f' metricName = 'DiskReadBytes' namespace = 'AWS/EC2' startTime = (Time.now-600).iso8601 endTime = Time.now.iso8601 period = 60 statisticTypes = ['Minimum','Maximum','Sum','SampleCount','Average'] tests("#all").succeeds do @statistics = Fog::AWS[:cloud_watch].metric_statistics.all({'Statistics' => statisticTypes, 'StartTime' => startTime, 'EndTime' => endTime, 'Period' => period, 'MetricName' => metricName, 'Namespace' => namespace, 'Dimensions' => [{'Name' => 'InstanceId', 'Value' => instanceId}]}) end tests("#all_not_empty").returns(true) do @statistics.length > 0 end new_attributes = { :namespace => 'Custom/Test', :metric_name => 'ModelTest', :value => 9, :unit => 'None' } tests('#new').returns(new_attributes) do Fog::AWS[:cloud_watch].metric_statistics.new(new_attributes).attributes end tests('#create').returns(new_attributes) do Fog::AWS[:cloud_watch].metric_statistics.create(new_attributes).attributes end stats_set_attributes = { :namespace => 'Custom/Test', :metric_name => 'ModelTest', :minimum => 0, :maximum => 4, :sum => 10, :sample_count => 5, :average => 2, :unit => 'None' } tests('#create_stats_set').returns(stats_set_attributes) do Fog::AWS[:cloud_watch].metric_statistics.create(stats_set_attributes).attributes end end end fog-aws-3.18.0/tests/models/cloud_watch/metrics_tests.rb000066400000000000000000000017001437344660100233120ustar00rootroot00000000000000Shindo.tests("AWS::CloudWatch | metrics", ['aws', 'cloudwatch']) do tests('success') do pending # FIXME: the hardcoded instance id won't be available tests("#all").succeeds do Fog::AWS[:cloud_watch].metrics.all end instanceId = 'i-fd713391' metricName = 'CPUUtilization' namespace = 'AWS/EC2' tests("#get").returns({:dimensions=>[{"Name"=>"InstanceId", "Value"=>instanceId}], :name=>metricName, :namespace=>namespace}) do Fog::AWS[:cloud_watch].metrics.get(namespace, metricName, {'InstanceId' => instanceId}).attributes end end tests('#each') do Fog.mock! tests("handle NextToken").returns(1001) do count = 0 Fog::AWS[:cloud_watch].metrics.each {|e| count += 1 } count end tests("yields Metrics instances").succeeds do all = [] Fog::AWS[:cloud_watch].metrics.each {|e| all << e } all.all? {|e| e.is_a?(Fog::AWS::CloudWatch::Metric) } end end end fog-aws-3.18.0/tests/models/compute/000077500000000000000000000000001437344660100172575ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/compute/address_tests.rb000066400000000000000000000017551437344660100224630ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | address", ['aws']) do model_tests(Fog::Compute[:aws].addresses, {}, true) do @server = Fog::Compute[:aws].servers.create @server.wait_for { ready? 
} tests('#server=').succeeds do @instance.server = @server end tests('#server') do test(' == @server') do @server.reload @instance.server.public_ip_address == @instance.public_ip end end tests("#change_scope") do test('to vpc') do @instance.change_scope @instance.domain == 'vpc' end test('to classic') do @instance.change_scope @instance.domain == 'standard' end # merge_attributes requires this @instance = Fog::Compute[:aws].addresses.get(@instance.identity) end @server.destroy end model_tests(Fog::Compute[:aws].addresses, { :domain => "vpc" }, true) do tests("#change_scope").raises(Fog::AWS::Compute::Error) do @instance.change_scope end end end fog-aws-3.18.0/tests/models/compute/addresses_tests.rb000066400000000000000000000001741437344660100230050ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | addresses", ['aws']) do collection_tests(Fog::Compute[:aws].addresses, {}, true) end fog-aws-3.18.0/tests/models/compute/dhcp_option_tests.rb000066400000000000000000000003371437344660100233370ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | dhcp_options", ['aws']) do model_tests(Fog::Compute[:aws].dhcp_options, {'dhcp_configuration_set' => {'domain-name' => 'example.com', 'domain-name-servers' => '10.10.10.10'}}, true) end fog-aws-3.18.0/tests/models/compute/dhcp_options_tests.rb000066400000000000000000000003441437344660100235200ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | dhcp_options", ['aws']) do collection_tests(Fog::Compute[:aws].dhcp_options, {'dhcp_configuration_set' => {'domain-name' => 'example.com', 'domain-name-servers' => '10.10.10.10'}}, true) end fog-aws-3.18.0/tests/models/compute/internet_gateway_tests.rb000066400000000000000000000002051437344660100243740ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | internet_gateway", ['aws']) do model_tests(Fog::Compute[:aws].internet_gateways , {}, true) end fog-aws-3.18.0/tests/models/compute/internet_gateways_tests.rb000066400000000000000000000002121437344660100245550ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | internet_gateways", ['aws']) do collection_tests(Fog::Compute[:aws].internet_gateways, {}, true) end fog-aws-3.18.0/tests/models/compute/key_pair_tests.rb000066400000000000000000000010771437344660100226360ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | key_pair", ['aws']) do model_tests(Fog::Compute[:aws].key_pairs, {:name => 'fogkeyname'}, true) after do @keypair.destroy end tests("new keypair") do @keypair = Fog::Compute[:aws].key_pairs.create(:name => 'testkey') test ("writable?") do @keypair.writable? == true end end tests("existing keypair") do Fog::Compute[:aws].key_pairs.create(:name => 'testkey') @keypair = Fog::Compute[:aws].key_pairs.get('testkey') test("writable?") do @keypair.writable? 
== false end end end fog-aws-3.18.0/tests/models/compute/key_pairs_tests.rb000066400000000000000000000002211437344660100230070ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | key_pairs", ['aws']) do collection_tests(Fog::Compute[:aws].key_pairs, {:name => 'fogkeyname'}, true) end fog-aws-3.18.0/tests/models/compute/network_acl_tests.rb000066400000000000000000000067421437344660100233470ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | network_acl", ['aws']) do @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @subnet = Fog::Compute[:aws].subnets.create('vpc_id' => @vpc.id, 'cidr_block' => '10.0.10.16/28') model_tests(Fog::Compute[:aws].network_acls, { :vpc_id => @vpc.id }, true) tests("associate_with") do @new_nacl = Fog::Compute[:aws].network_acls.create(:vpc_id => @vpc.id) @default_nacl = Fog::Compute[:aws].network_acls.all('vpc-id' => @vpc.id, 'default' => true).first test("associate_with new_nacl") do @new_nacl.associate_with(@subnet) end @new_nacl.reload test("associate_with correctly updates new_nacl") do @new_nacl.associations.map { |a| a['subnetId'] } == [@subnet.subnet_id] end @default_nacl.associate_with(@subnet) @new_nacl.reload @default_nacl.reload test("associate_with correctly updates new_nacl after removal") do @new_nacl.associations.map { |a| a['subnetId'] } == [] end test("associate_with correctly updates default_nacl after removal") do @default_nacl.associations.map { |a| a['subnetId'] } == [@subnet.subnet_id] end @new_nacl.destroy end tests("add_rule and remove_rule") do @new_nacl = Fog::Compute[:aws].network_acls.create(:vpc_id => @vpc.id) default_rules = @new_nacl.entries.dup test("add a new inbound rule") do @new_nacl.add_inbound_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0', 'PortRange.From' => 22, 'PortRange.To' => 22) @new_nacl.reload (@new_nacl.entries - default_rules) == [{ "icmpTypeCode" => {}, "portRange" => { "from" => 22, "to" => 22 }, "ruleNumber" => 100, "protocol" => 6, "ruleAction" => "allow", "egress" => false, "cidrBlock" => "0.0.0.0/0" }] end test("remove inbound rule") do @new_nacl.remove_inbound_rule(100) @new_nacl.reload @new_nacl.entries == default_rules end test("add a new outbound rule") do @new_nacl.add_outbound_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0', 'PortRange.From' => 22, 'PortRange.To' => 22) @new_nacl.reload (@new_nacl.entries - default_rules) == [{ "icmpTypeCode" => {}, "portRange" => { "from" => 22, "to" => 22 }, "ruleNumber" => 100, "protocol" => 6, "ruleAction" => "allow", "egress" => true, "cidrBlock" => "0.0.0.0/0" }] end test("remove outbound rule") do @new_nacl.remove_outbound_rule(100) @new_nacl.reload @new_nacl.entries == default_rules end test("update rule") do @new_nacl.add_inbound_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '0.0.0.0/0', 'PortRange.From' => 22, 'PortRange.To' => 22) @new_nacl.update_inbound_rule(100, Fog::AWS::Compute::NetworkAcl::TCP, 'allow', '10.0.0.0/8', 'PortRange.From' => 22, 'PortRange.To' => 22) @new_nacl.reload (@new_nacl.entries - default_rules) == [{ "icmpTypeCode" => {}, "portRange" => { "from" => 22, "to" => 22 }, "ruleNumber" => 100, "protocol" => 6, "ruleAction" => "allow", "egress" => false, "cidrBlock" => "10.0.0.0/8" }] end @new_nacl.destroy end @subnet.destroy @vpc.destroy end fog-aws-3.18.0/tests/models/compute/network_acls_tests.rb000066400000000000000000000011131437344660100235150ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | network_acls", ['aws']) do @vpc = 
Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') collection_tests(Fog::Compute[:aws].network_acls, { :vpc_id => @vpc.id }, true) tests('tags') do test_tags = {'foo' => 'bar'} @acl = Fog::Compute[:aws].network_acls.create(:vpc_id => @vpc.id, :tags => test_tags) tests('@acl.tags').returns(test_tags) do @acl.reload.tags end unless Fog.mocking? Fog::Compute[:aws].tags.all('resource-id' => @acl.identity).each {|tag| tag.destroy} end end @vpc.destroy end fog-aws-3.18.0/tests/models/compute/network_interfaces_test.rb000066400000000000000000000007641437344660100245460ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | network_interfaces", ['aws']) do @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @subnet = Fog::Compute[:aws].subnets.create('vpc_id' => @vpc.id, 'cidr_block' => '10.0.10.16/28') @subnet_id = @subnet.subnet_id collection_tests(Fog::Compute[:aws].network_interfaces, {:description => 'nic_desc', :name => 'nic_name', :subnet_id => @subnet_id}, true) @subnet.destroy @vpc.destroy end fog-aws-3.18.0/tests/models/compute/security_group_tests.rb000066400000000000000000000065561437344660100241250ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | security_group", ['aws']) do model_tests(Fog::Compute[:aws].security_groups, {:description => 'foggroupdescription', :name => 'foggroupname'}, true) tests("authorize and revoke helpers") do @group = Fog::Compute[:aws].security_groups.create(:name => "foggroup", :description => "fog group desc") @other_group = Fog::Compute[:aws].security_groups.create(:name => 'fog other group', :description => 'another fog group') @other_group.reload @other_user_id = Fog::AWS::Mock.owner_id @other_users_group_id = Fog::AWS::Mock.security_group_id test("authorize access by another security group") do @group.authorize_group_and_owner(@other_group.name) @group.reload @group.ip_permissions.size == 3 end test("revoke access from another security group") do @group.revoke_group_and_owner(@other_group.name) @group.reload @group.ip_permissions.empty? end test("authorize access to a port range") do @group.authorize_port_range(5000..6000) @group.reload @group.ip_permissions.size == 1 end test("revoke access to a port range") do @group.revoke_port_range(5000..6000) @group.reload @group.ip_permissions.empty? end test("authorize access at a port range (egress rule)") do @group.authorize_port_range(5000..6000, :direction => 'egress') @group.reload ip_permission_egress = @group.ip_permissions_egress.find do |permission| permission['fromPort'] == 5000 && permission['toPort'] == 6000 && permission['ipProtocol'] == 'tcp' && permission['ipRanges'] == [{ 'cidrIp' => '0.0.0.0/0' }] end !ip_permission_egress.nil? end test("revoke access at a port range (egress rule)") do @group.revoke_port_range(5000..6000, :direction => 'egress') @group.reload ip_permission_egress = @group.ip_permissions_egress.find do |permission| permission['fromPort'] == 5000 && permission['toPort'] == 6000 && permission['ipProtocol'] == 'tcp' && permission['ipRanges'] == [{ 'cidrIp' => '0.0.0.0/0' }] end ip_permission_egress.nil? 
end group_forms = [ "#{@other_group.owner_id}:#{@other_group.group_id}", # deprecated form @other_group.group_id, {@other_group.owner_id => @other_group.group_id}, ] group_forms.each do |group_arg| test("authorize port range access by another security group #{group_arg.inspect}") do @other_group.reload @group.authorize_port_range(5000..6000, {:group => group_arg}) @group.reload @group.ip_permissions.size == 1 end test("revoke port range access by another security group") do @other_group.reload @group.revoke_port_range(5000..6000, {:group => group_arg}) @group.reload @group.ip_permissions.empty? end end [ { @other_user_id => @other_users_group_id } ].each do |group_arg| test("does not authorize port range access by an invalid security group #{group_arg.inspect}") do raises(Fog::AWS::Compute::NotFound, "The security group '#{@other_users_group_id}' does not exist") { @other_group.reload @group.authorize_port_range(5000..6000, {:group => group_arg}) } end end @other_group.destroy @group.destroy end end fog-aws-3.18.0/tests/models/compute/security_groups_tests.rb000066400000000000000000000003061437344660100242730ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | security_groups", ['aws']) do collection_tests(Fog::Compute[:aws].security_groups, {:description => 'foggroupdescription', :name => 'foggroupname'}, true) end fog-aws-3.18.0/tests/models/compute/server_tests.rb000066400000000000000000000043441437344660100223410ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | monitor", ['aws']) do @instance = Fog::Compute[:aws].servers.new [:addresses, :flavor, :key_pair, :key_pair=, :volumes, :associate_public_ip].each do |association| responds_to(association) end tests('new instance') do test('#monitor = true') do @instance.monitor = true @instance.attributes[:monitoring] == true end test('#monitor = false') do @instance.monitor = false @instance.attributes[:monitoring] == false end test('#associate_public_ip = true') do @instance.associate_public_ip = true @instance.attributes[:associate_public_ip] == true end test('#associate_public_ip = false') do @instance.associate_public_ip = false @instance.associate_public_ip == false end end tests('existing instance') do @instance.save [:id, :availability_zone, :flavor_id, :kernel_id, :image_id, :state].each do |attr| test("instance##{attr} should not contain whitespace") do nil == @instance.send(attr).match(/\s/) end end test('#monitor = true') do @instance.monitor = true @instance.monitoring == true end test('#monitor = false') do @instance.monitor = false @instance.monitoring == false end test('#associate_public_ip = true') do @instance.associate_public_ip = true @instance.attributes[:associate_public_ip] == true end test('#associate_public_ip = false') do @instance.associate_public_ip = false @instance.associate_public_ip == false end test('#stop') do @instance.stop @instance.wait_for { state == "stopped" } @instance.state == "stopped" end test("#start") do @instance.start @instance.wait_for { ready? } @instance.state == "running" end end @instance.destroy tests('tags') do @instance = Fog::Compute[:aws].servers.create(:tags => {'key' => 'value'}) @instance.wait_for { ready? } tests('@instance.reload.tags').returns({'key' => 'value'}) do @instance.reload.tags end unless Fog.mocking? 
Fog::Compute[:aws].tags.all('resource-id' => @instance.identity).each {|tag| tag.destroy} end @instance.destroy end end fog-aws-3.18.0/tests/models/compute/snapshot_tests.rb000066400000000000000000000004441437344660100226670ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | snapshot", ['aws']) do @volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1a', :size => 1) @volume.wait_for { ready? } model_tests(Fog::Compute[:aws].snapshots, {:volume_id => @volume.identity}, true) @volume.destroy end fog-aws-3.18.0/tests/models/compute/snapshots_tests.rb000066400000000000000000000004521437344660100230510ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | snapshots", ['aws']) do @volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1a', :size => 1) @volume.wait_for { ready? } collection_tests(Fog::Compute[:aws].snapshots, {:volume_id => @volume.identity}, true) @volume.destroy end fog-aws-3.18.0/tests/models/compute/subnet_tests.rb000066400000000000000000000010551437344660100223270ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | subnet", ['aws']) do @vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') model_tests(Fog::Compute[:aws].subnets, {:vpc_id => @vpc.id, :cidr_block => '10.0.10.0/28', :availability_zone => 'us-east-1b'}, true) do @ni = Fog::Compute[:aws].network_interfaces.create(:description => 'fog eni', :name => uniq_id('fog-eni'), :subnet_id => @instance.identity) tests("#network_interfaces") do returns([@ni]) { @instance.network_interfaces.to_a } end @ni.destroy end @vpc.destroy end fog-aws-3.18.0/tests/models/compute/subnets_tests.rb000066400000000000000000000004411437344660100225100ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | subnets", ['aws']) do @vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/28') collection_tests(Fog::Compute[:aws].subnets, { :vpc_id => @vpc.id, :cidr_block => '10.0.10.0/28', :availability_zone => 'us-east-1c'}, true) @vpc.destroy end fog-aws-3.18.0/tests/models/compute/volume_tests.rb000066400000000000000000000030341437344660100223350ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | volume', ['aws']) do @server = Fog::Compute[:aws].servers.create @server.wait_for { ready? } model_tests( Fog::Compute[:aws].volumes, { availability_zone: @server.availability_zone, size: 1, tags: { 'key' => 'value' }, type: 'gp2', server: @server, device: '/dev/sdz1' }, true ) do tests('attached').succeeds do @instance.server == @server end tests('#detach').succeeds do @instance.detach @instance.wait_for { ready? } @instance.server.nil? end tests('#server=').raises(NoMethodError, 'use Fog::AWS::Compute::Volume#attach(server, device)') do @instance.server = @server end tests('#attach(server, device)').succeeds do @instance.attach(@server, '/dev/sdz1') @instance.server == @server end tests('#force_detach').succeeds do @instance.force_detach @instance.wait_for { ready? } @instance.server.nil? end @instance.type = 'io1' @instance.iops = 5000 @instance.size = 100 @instance.save returns(true) { @instance.modification_in_progress? } @instance.wait_for { !modification_in_progress? 
} # avoid weirdness with merge_attributes @instance = Fog::Compute[:aws].volumes.get(@instance.identity) returns('io1') { @instance.type } returns(5000) { @instance.iops } returns(100) { @instance.size } tests('@instance.tags').returns({'key' => 'value'}) do @instance.tags end end @server.destroy end fog-aws-3.18.0/tests/models/compute/volumes_tests.rb000066400000000000000000000002761437344660100225250ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | volumes", ['aws']) do collection_tests(Fog::Compute[:aws].volumes, {:availability_zone => 'us-east-1a', :size => 1, :device => '/dev/sdz1'}, true) end fog-aws-3.18.0/tests/models/compute/vpc_tests.rb000066400000000000000000000016261437344660100216230ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | vpc", ['aws']) do model_tests(Fog::Compute[:aws].vpcs, {:cidr_block => '10.0.10.0/28'}, true) do tests("#enable_classic_link") do returns(false) { @instance.classic_link_enabled? } returns(true) { @instance.enable_classic_link } returns(true) { @instance.classic_link_enabled? } end tests("#disable_classic_link") do returns(true) { @instance.disable_classic_link } returns(false) { @instance.classic_link_enabled? } end tests("#enable_classic_link_dns") do returns(false) { @instance.classic_link_dns_enabled? } returns(true) { @instance.enable_classic_link_dns } returns(true) { @instance.classic_link_dns_enabled? } end tests("#disable_classic_link") do returns(true) { @instance.disable_classic_link_dns } returns(false) { @instance.classic_link_dns_enabled? } end end end fog-aws-3.18.0/tests/models/compute/vpcs_tests.rb000066400000000000000000000007711437344660100220060ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:aws] | vpcs", ['aws']) do collection_tests(Fog::Compute[:aws].vpcs, {:cidr_block => '10.0.10.0/28'}, true) tests('tags') do test_tags = {'foo' => 'bar'} @vpc = Fog::Compute[:aws].vpcs.create(:cidr_block => '1.2.3.4/24', :tags => test_tags) tests('@vpc.tags').returns(test_tags) do @vpc.reload.tags end unless Fog.mocking? Fog::Compute[:aws].tags.all('resource-id' => @vpc.id).each {|tag| tag.destroy} end @vpc.destroy end end fog-aws-3.18.0/tests/models/data_pipeline/000077500000000000000000000000001437344660100204015ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/data_pipeline/pipeline_tests.rb000066400000000000000000000004511437344660100237550ustar00rootroot00000000000000Shindo.tests("AWS::DataPipeline | pipelines", ['aws', 'data_pipeline']) do pending if Fog.mocking? unique_id = uniq_id model_tests(Fog::AWS[:data_pipeline].pipelines, { :id => unique_id, :name => "#{unique_id}-name", :unique_id => unique_id }) do @instance.wait_for { state } end end fog-aws-3.18.0/tests/models/data_pipeline/pipelines_tests.rb000066400000000000000000000004561437344660100241450ustar00rootroot00000000000000Shindo.tests("AWS::DataPipeline | pipelines", ['aws', 'data_pipeline']) do pending if Fog.mocking? 
unique_id = uniq_id collection_tests(Fog::AWS[:data_pipeline].pipelines, { :id => unique_id, :name => "#{unique_id}-name", :unique_id => unique_id }) do @instance.wait_for { state } end end fog-aws-3.18.0/tests/models/dns/000077500000000000000000000000001437344660100163675ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/dns/record_tests.rb000066400000000000000000000016101437344660100214120ustar00rootroot00000000000000Shindo.tests("Fog::Dns[:aws] | record", ['aws', 'dns']) do tests("zones#create").succeeds do @zone = Fog::DNS[:aws].zones.create(:domain => generate_unique_domain) end params = { :name => @zone.domain, :type => 'A', :ttl => 3600, :value => ['1.2.3.4'] } model_tests(@zone.records, params) do # Newly created records should have a change id tests("#change_id") do returns(true) { @instance.change_id != nil } end # Waits for changes to sync to all Route 53 DNS servers. Usually takes ~30 seconds to complete. tests("#ready? - may take a minute to complete...").succeeds do @instance.wait_for { ready? } end tests("#modify") do new_value = ['5.5.5.5'] returns(true) { @instance.modify(:value => new_value) } returns(new_value) { @instance.value } end end tests("zones#destroy").succeeds do @zone.destroy end end fog-aws-3.18.0/tests/models/dns/records_tests.rb000066400000000000000000000020641437344660100216010ustar00rootroot00000000000000Shindo.tests("Fog::DNS[:aws] | records", ['aws', 'dns']) do tests("zones#create").succeeds do @zone = Fog::DNS[:aws].zones.create(:domain => generate_unique_domain) end param_groups = [ # A record { :name => @zone.domain, :type => 'A', :ttl => 3600, :value => ['1.2.3.4'] }, # CNAME record { :name => "www.#{@zone.domain}", :type => "CNAME", :ttl => 300, :value => @zone.domain} ] param_groups.each do |params| collection_tests(@zone.records, params) end records = [] 100.times do |i| records << @zone.records.create(:name => "#{i}.#{@zone.domain}", :type => "A", :ttl => 3600, :value => ['1.2.3.4']) end records << @zone.records.create(:name => "*.#{@zone.domain}", :type => "A", :ttl => 3600, :value => ['1.2.3.4']) tests("#all!").returns(101) do @zone.records.all!.size end tests("#all wildcard parsing").returns(true) do @zone.records.map(&:name).include?("*.#{@zone.domain}") end records.each do |record| record.destroy end tests("zones#destroy").succeeds do @zone.destroy end end fog-aws-3.18.0/tests/models/dns/zone_tests.rb000066400000000000000000000002311437344660100211050ustar00rootroot00000000000000Shindo.tests("Fog::DNS[:aws] | zone", ['aws', 'dns']) do params = {:domain => generate_unique_domain } model_tests(Fog::DNS[:aws].zones, params) end fog-aws-3.18.0/tests/models/dns/zones_tests.rb000066400000000000000000000002371437344660100212760ustar00rootroot00000000000000Shindo.tests("Fog::DNS[:aws] | zones", ['aws', 'dns']) do params = {:domain => generate_unique_domain } collection_tests(Fog::DNS[:aws].zones, params) end fog-aws-3.18.0/tests/models/efs/000077500000000000000000000000001437344660100163605ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/efs/file_system_tests.rb000066400000000000000000000005571437344660100224610ustar00rootroot00000000000000Shindo.tests("AWS::EFS | file system", ["aws", "efs"]) do file_system_params = { :creation_token => "fogtoken#{rand(999).to_s}" } model_tests(Fog::AWS[:efs].file_systems, file_system_params, true) file_system_params = { :creation_token => "fogtoken#{rand(999).to_s}" } collection_tests(Fog::AWS[:efs].file_systems, file_system_params, true) end 
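# Illustrative sketch only (not from the original suite): the EFS file system model exercised above
# can be driven directly in mock mode; the creation token below is an arbitrary example value.
#
#   Fog.mock!
#   fs = Fog::AWS[:efs].file_systems.create(:creation_token => "example-token")
#   fs.wait_for { ready? }
#   fs.destroy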
fog-aws-3.18.0/tests/models/efs/mount_target_tests.rb000066400000000000000000000032121437344660100226350ustar00rootroot00000000000000Shindo.tests("AWS::EFS | mount target", ["aws", "efs"]) do @file_system = Fog::AWS[:efs].file_systems.create(:creation_token => "fogtoken#{rand(999).to_s}") @file_system.wait_for { ready? } if Fog.mocking? vpc = Fog::Compute[:aws].vpcs.create(:cidr_block => "10.0.0.0/16") subnet = Fog::Compute[:aws].subnets.create(:vpc_id => vpc.id, :cidr_block => "10.0.1.0/24") default_security_group_data = Fog::Compute[:aws].data[:security_groups].values.find do |sg| sg['groupDescription'] == 'default_elb security group' end default_security_group = Fog::Compute[:aws].security_groups.new(default_security_group_data) else vpc = Fog::Compute[:aws].vpcs.first subnet = vpc.subnets.first default_security_group = Fog::Compute[:aws].security_groups.detect { |sg| sg.description == 'default VPC security group' } end security_group = Fog::Compute[:aws].security_groups.create( :vpc_id => vpc.id, :name => "fog#{rand(999).to_s}", :description => "fog#{rand(999).to_s}" ) mount_target_params = { :file_system_id => @file_system.identity, :subnet_id => subnet.identity, } model_tests(Fog::AWS[:efs].mount_targets, mount_target_params, true) do @instance.wait_for { ready? } tests("#security_groups") do returns([default_security_group.group_id]) { @instance.security_groups } end tests("#security_groups=") do @instance.security_groups = [security_group.group_id] returns([security_group.group_id]) { @instance.security_groups } end end @file_system.wait_for { number_of_mount_targets == 0 } @file_system.destroy security_group.destroy end fog-aws-3.18.0/tests/models/efs/mount_targets_tests.rb000066400000000000000000000020251437344660100230210ustar00rootroot00000000000000Shindo.tests("AWS::EFS | mount targets", ["aws", "efs"]) do @file_system = Fog::AWS[:efs].file_systems.create(:creation_token => "fogtoken#{rand(999).to_s}") @file_system.wait_for { ready? } if Fog.mocking? vpc = Fog::Compute[:aws].vpcs.create(:cidr_block => "10.0.0.0/16") subnet = Fog::Compute[:aws].subnets.create(:vpc_id => vpc.id, :cidr_block => "10.0.1.0/24") else vpc = Fog::Compute[:aws].vpcs.first subnet = vpc.subnets.first end security_group = Fog::Compute[:aws].security_groups.create( :vpc_id => vpc.id, :name => "fog#{rand(999).to_s}", :description => "fog#{rand(999).to_s}" ) mount_target_params = { :file_system_id => @file_system.identity, :subnet_id => subnet.identity, :security_groups => [security_group.group_id] } collection_tests(Fog::AWS[:efs].mount_targets(:file_system_id => @file_system.identity), mount_target_params, true) @file_system.wait_for { number_of_mount_targets == 0 } @file_system.destroy security_group.destroy end fog-aws-3.18.0/tests/models/elasticache/000077500000000000000000000000001437344660100200505ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/elasticache/cluster_tests.rb000066400000000000000000000023001437344660100232730ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | cache clusters', ['aws', 'elasticache']) do cluster_params = { :id => "fog-test-cluster-#{rand(999).to_s}", :node_type => 'cache.m1.large', :security_groups => ['default'], :engine => 'memcached', :num_nodes => 1 } pending if Fog.mocking? Fog::Formatador.display_line "Creating cluster #{cluster_params[:id]}..." 
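# Live-only: provisioning a cache.m1.large memcached cluster is slow, so each block below reloads the model and waits until the cluster reports ready before continuing.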
model_tests(Fog::AWS[:elasticache].clusters, cluster_params, false) do @instance.reload # Reload to get the cluster info from AWS Fog::Formatador.display_line "Waiting for #{@instance.id} "+ "to become available (#{@instance.status})..." @instance.wait_for {ready?} end # Single model is still deleting, so re-randomize the cluster ID cluster_params[:id] = "fog-test-cluster-#{rand(999).to_s}" Fog::Formatador.display_line "Creating cluster #{cluster_params[:id]}..." collection_tests(Fog::AWS[:elasticache].clusters, cluster_params, false) do @instance.reload # Reload to get the cluster info from AWS Fog::Formatador.display_line "Waiting for #{@instance.id} "+ "to become available (#{@instance.status})..." @instance.wait_for {ready?} end end fog-aws-3.18.0/tests/models/elasticache/parameter_groups_tests.rb000066400000000000000000000006061437344660100252000ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | parameter groups', ['aws', 'elasticache']) do group_name = 'fog-test' description = 'Fog Test' model_tests( Fog::AWS[:elasticache].parameter_groups, {:id => group_name, :description => description}, true ) collection_tests( Fog::AWS[:elasticache].parameter_groups, {:id => group_name, :description => description}, true ) end fog-aws-3.18.0/tests/models/elasticache/security_groups_tests.rb000066400000000000000000000025501437344660100250670ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | security groups', ['aws', 'elasticache']) do group_name = 'fog-test' description = 'Fog Test' pending if Fog.mocking? model_tests( Fog::AWS[:elasticache].security_groups, {:id => group_name, :description => description}, false ) do # An EC2 group to authorize ec2_group = Fog::Compute.new(:provider => 'AWS').security_groups.create( :name => 'fog-test-elasticache', :description => 'fog test' ) # Reload to get the instance owner_id @instance.reload tests('#authorize_ec2_group') do @instance.authorize_ec2_group(ec2_group.name) returns('authorizing') do group = @instance.ec2_groups.find do |g| g['EC2SecurityGroupName'] == ec2_group.name end group['Status'] end returns(false, 'not ready') { @instance.ready? } end @instance.wait_for { ready? } tests('#revoke_ec2_group') do @instance.revoke_ec2_group(ec2_group.name) returns('revoking') do group = @instance.ec2_groups.find do |g| g['EC2SecurityGroupName'] == ec2_group.name end group['Status'] end returns(false, 'not ready') { @instance.ready? 
} end ec2_group.destroy end collection_tests( Fog::AWS[:elasticache].security_groups, {:id => group_name, :description => description}, false ) end fog-aws-3.18.0/tests/models/elasticache/subnet_groups_tests.rb000066400000000000000000000025661437344660100245270ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | subnet group', ['aws', 'elasticache']) do # random_differentiator # Useful when rapidly re-running tests, so we don't have to wait # serveral minutes for deleted VPCs/subnets to disappear suffix = rand(65536).to_s(16) @subnet_group_name = "fog-test-#{suffix}" vpc_range = rand(245) + 10 @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => "10.#{vpc_range}.0.0/16") # Create 4 subnets in this VPC, each one in a different AZ subnet_az = 'us-east-1a' subnet_range = 8 @subnets = (1..3).map do result = Fog::Compute[:aws].create_subnet(@vpc.id, "10.#{vpc_range}.#{subnet_range}.0/24", 'AvailabilityZone' => subnet_az) subnet = result.body['subnet'] subnet_az = subnet_az.succ subnet_range *= 2 subnet end tests('success') do group_name = 'fog-test' description = 'Fog Test' subnet_ids = @subnets.map { |sn| sn['subnetId'] }.to_a model_tests( Fog::AWS[:elasticache].subnet_groups, {:name => group_name, :subnet_ids => subnet_ids, :description => description}, true ) collection_tests( Fog::AWS[:elasticache].subnet_groups, {:name => group_name, :subnet_ids => subnet_ids, :description => description}, true ) end @subnets.each do |sn| Fog::Compute[:aws].delete_subnet(sn['subnetId']) end @vpc.destroy end fog-aws-3.18.0/tests/models/elb/000077500000000000000000000000001437344660100163455ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/elb/model_tests.rb000066400000000000000000000337721437344660100212300ustar00rootroot00000000000000Shindo.tests('AWS::ELB | models', ['aws', 'elb']) do Fog::AWS::Compute::Mock.reset if Fog.mocking? 
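# Fixtures for the ELB model tests: discover the available zones, then build a throwaway VPC, subnet and attached internet gateway for the VPC and internal-scheme cases below.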
@availability_zones = Fog::Compute[:aws].describe_availability_zones('state' => 'available').body['availabilityZoneInfo'].map{ |az| az['zoneName'] } @key_name = 'fog-test-model' @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @vpc_id = @vpc.id @subnet = Fog::Compute[:aws].subnets.create({:vpc_id => @vpc_id, :cidr_block => '10.0.10.0/24'}) @subnet_id = @subnet.subnet_id @scheme = 'internal' @igw = Fog::Compute[:aws].internet_gateways.create @igw_id = @igw.id @igw.attach(@vpc_id) tests('success') do tests('load_balancers') do tests('getting a missing elb') do returns(nil) { Fog::AWS[:elb].load_balancers.get('no-such-elb') } end end tests('listeners') do tests("default attributes") do listener = Fog::AWS[:elb].listeners.new tests('instance_port is 80').returns(80) { listener.instance_port } tests('instance_protocol is HTTP').returns('HTTP') { listener.instance_protocol } tests('lb_port is 80').returns(80) { listener.lb_port } tests('protocol is HTTP').returns('HTTP') { listener.protocol } tests('policy_names is empty').returns([]) { listener.policy_names } end tests("specifying attributes") do attributes = {:instance_port => 2000, :instance_protocol => 'SSL', :lb_port => 2001, :protocol => 'SSL', :policy_names => ['fake'] } listener = Fog::AWS[:elb].listeners.new(attributes) tests('instance_port is 2000').returns(2000) { listener.instance_port } tests('instance_protocol is SSL').returns('SSL') { listener.instance_protocol } tests('lb_port is 2001').returns(2001) { listener.lb_port } tests('protocol is SSL').returns('SSL') { listener.protocol } tests('policy_names is [ fake ]').returns(['fake']) { listener.policy_names } end end elb = nil elb_id = 'fog-test' tests('create') do tests('without availability zones') do elb = Fog::AWS[:elb].load_balancers.create(:id => elb_id, :availability_zones => @availability_zones) tests("availability zones are correct").returns(@availability_zones.sort) { elb.availability_zones.sort } tests("dns names is set").returns(true) { elb.dns_name.is_a?(String) } tests("created_at is set").returns(true) { Time === elb.created_at } tests("policies is empty").returns([]) { elb.policies } tests("default listener") do tests("1 listener").returns(1) { elb.listeners.size } tests("params").returns(Fog::AWS[:elb].listeners.new.to_params) { elb.listeners.first.to_params } end end tests('with vpc') do elb2 = Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-2", :subnet_ids => [@subnet_id]) tests("elb source group should be default").returns('default') { elb2.source_group["GroupName"] } tests("subnet ids are correct").returns(@subnet_id) { elb2.subnet_ids.first } elb2.destroy end tests('with vpc internal') do elb2 = Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-2", :subnet_ids => [@subnet_id], :scheme => 'internal') tests("scheme is internal").returns(@scheme) { elb2.scheme } elb2.destroy end tests('with default vpc') do Fog::Compute[:aws].disable_ec2_classic if Fog.mocking? if Fog::Compute[:aws].supported_platforms.include?("EC2") Fog::Formatador.display_line("[yellow]Skipping test [bold]with default vpc[/][yellow] due to AWS account having EC2 available[/]") else elb2 = Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-2", :availability_zones => @availability_zones[0]) tests("elb source group should start with default_elb_").returns(true) { !!(elb2.source_group["GroupName"] =~ /default_elb_/) } elb2.destroy end Fog::Compute[:aws].enable_ec2_classic if Fog.mocking? end if !Fog.mocking? 
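# Against real AWS, tear the VPC fixtures down in dependency order: detach and destroy the internet gateway, drop the subnet, pause briefly, then delete the VPC.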
@igw.detach(@vpc_id) @igw.destroy @subnet.destroy sleep 5 @vpc.destroy end tests('with availability zones') do azs = @availability_zones[1..-1] elb2 = Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-2", :availability_zones => azs) if Fog::Compute[:aws].supported_platforms.include?("EC2") tests("elb source group should be amazon-elb-sg").returns('amazon-elb-sg') { elb2.source_group["GroupName"] } else tests("elb source group should match default_elb_").returns(true) { !!(elb2.source_group["GroupName"] =~ /default_elb_/) } end tests("availability zones are correct").returns(azs.sort) { elb2.availability_zones.sort } elb2.destroy end # Need to sleep here for IAM changes to propgate tests('with ListenerDescriptions') do @certificate = Fog::AWS[:iam].upload_server_certificate(AWS::IAM::SERVER_CERT, AWS::IAM::SERVER_CERT_PRIVATE_KEY, @key_name).body['Certificate'] sleep(10) unless Fog.mocking? listeners = [{ 'Listener' => { 'LoadBalancerPort' => 2030, 'InstancePort' => 2030, 'Protocol' => 'HTTP' }, 'PolicyNames' => [] }, { 'Listener' => { 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'Protocol' => 'HTTPS', 'InstanceProtocol' => 'HTTPS', 'SSLCertificateId' => @certificate['Arn'] }, 'PolicyNames' => [] }] elb3 = Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-3", 'ListenerDescriptions' => listeners, :availability_zones => @availability_zones) tests('there are 2 listeners').returns(2) { elb3.listeners.count } tests('instance_port is 2030').returns(2030) { elb3.listeners.first.instance_port } tests('lb_port is 2030').returns(2030) { elb3.listeners.first.lb_port } tests('protocol is HTTP').returns('HTTP') { elb3.listeners.first.protocol } tests('protocol is HTTPS').returns('HTTPS') { elb3.listeners.last.protocol } tests('instance_protocol is HTTPS').returns('HTTPS') { elb3.listeners.last.instance_protocol } elb3.destroy end tests('with invalid Server Cert ARN').raises(Fog::AWS::IAM::NotFound) do listeners = [{ 'Listener' => { 'LoadBalancerPort' => 443, 'InstancePort' => 80, 'Protocol' => 'HTTPS', 'InstanceProtocol' => 'HTTPS', "SSLCertificateId" => "fakecert"} }] Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-4", "ListenerDescriptions" => listeners, :availability_zones => @availability_zones) end end tests('all') do elb_ids = Fog::AWS[:elb].load_balancers.all.map{|e| e.id} tests("contains elb").returns(true) { elb_ids.include? elb_id } end if Fog.mocking? tests('all marker support') do extra_elb_ids = (1..1000).map {|n| Fog::AWS[:elb].load_balancers.create(:id => "#{elb_id}-extra-#{n}").id } tests('returns all elbs').returns(true) { (extra_elb_ids - Fog::AWS[:elb].load_balancers.all.map {|e| e.id }).empty? } end end tests('get') do tests('ids match').returns(elb_id) { Fog::AWS[:elb].load_balancers.get(elb_id).id } tests('nil id').returns(nil) { Fog::AWS[:elb].load_balancers.get(nil) } end tests('creating a duplicate elb') do raises(Fog::AWS::ELB::IdentifierTaken) do Fog::AWS[:elb].load_balancers.create(:id => elb_id, :availability_zones => ['us-east-1d']) end end tests('registering an invalid instance') do raises(Fog::AWS::ELB::InvalidInstance) { elb.register_instances('i-00000000') } end tests('deregistering an invalid instance') do raises(Fog::AWS::ELB::InvalidInstance) { elb.deregister_instances('i-00000000') } end server = Fog::Compute[:aws].servers.create server.wait_for { ready? 
} tests('register instance') do begin elb.register_instances(server.id) rescue Fog::AWS::ELB::InvalidInstance # It may take a moment for a newly created instances to be visible to ELB requests raise if @retried_registered_instance @retried_registered_instance = true sleep 1 retry end returns([server.id]) { elb.instances } end tests('instance_health') do returns('OutOfService') do elb.instance_health.find{|hash| hash['InstanceId'] == server.id}['State'] end returns([server.id]) { elb.instances_out_of_service } end tests('deregister instance') do elb.deregister_instances(server.id) returns([]) { elb.instances } end server.destroy tests('disable_availability_zones') do elb.disable_availability_zones(@availability_zones[1..-1]) returns(@availability_zones[0..0]) { elb.availability_zones.sort } end tests('enable_availability_zones') do elb.enable_availability_zones(@availability_zones[1..-1]) returns(@availability_zones) { elb.availability_zones.sort } end tests('connection_draining') do returns(false) { elb.connection_draining? } returns(300) { elb.connection_draining_timeout } elb.set_connection_draining(true, 60) returns(true) { elb.connection_draining? } returns(60) { elb.connection_draining_timeout } end tests('cross_zone_load_balancing') do returns(false) {elb.cross_zone_load_balancing?} elb.cross_zone_load_balancing = true returns(true) {elb.cross_zone_load_balancing?} end tests('idle_connection_settings') do returns(60) { elb.connection_settings_idle_timeout } elb.set_connection_settings_idle_timeout(180) returns(180) { elb.connection_settings_idle_timeout } end tests('default health check') do default_health_check = { "HealthyThreshold"=>10, "Timeout"=>5, "UnhealthyThreshold"=>2, "Interval"=>30, "Target"=>"TCP:80" } returns(default_health_check) { elb.health_check } end tests('configure_health_check') do new_health_check = { "HealthyThreshold"=>5, "Timeout"=>10, "UnhealthyThreshold"=>3, "Interval"=>15, "Target"=>"HTTP:80/index.html" } elb.configure_health_check(new_health_check) returns(new_health_check) { elb.health_check } end tests('listeners') do tests('default') do returns(1) { elb.listeners.size } listener = elb.listeners.first returns([80,80,'HTTP','HTTP', []]) { [listener.instance_port, listener.lb_port, listener.protocol, listener.instance_protocol, listener.policy_names] } end tests('#get') do returns(80) { elb.listeners.get(80).lb_port } end tests('create') do elb.listeners.create(:instance_port => 443, :lb_port => 443, :protocol => 'TCP', :instance_protocol => 'TCP') returns(2) { elb.listeners.size } returns(443) { elb.listeners.get(443).lb_port } end tests('destroy') do elb.listeners.get(443).destroy returns(nil) { elb.listeners.get(443) } end end tests('policies') do app_policy_id = 'my-app-policy' tests 'are empty' do returns([]) { elb.policies.to_a } end tests('#all') do returns([]) { elb.policies.all.to_a } end tests('create app policy') do elb.policies.create(:id => app_policy_id, :cookie => 'my-app-cookie', :cookie_stickiness => :app) returns(app_policy_id) { elb.policies.first.id } returns("my-app-cookie") { elb.policies.get(app_policy_id).cookie } end tests('get policy') do returns(app_policy_id) { elb.policies.get(app_policy_id).id } end tests('destroy app policy') do elb.policies.first.destroy returns([]) { elb.policies.to_a } end lb_policy_id = 'my-lb-policy' tests('create lb policy') do elb.policies.create(:id => lb_policy_id, :expiration => 600, :cookie_stickiness => :lb) returns(lb_policy_id) { elb.policies.first.id } end tests('setting a listener policy') do 
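# Attach the LB-cookie stickiness policy created above to the port 80 listener, then confirm the listener reports it and the policy keeps its 600 second expiration.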
elb.set_listener_policy(80, lb_policy_id) returns([lb_policy_id]) { elb.listeners.get(80).policy_names } returns(600) { elb.policies.get(lb_policy_id).expiration } end tests('unsetting a listener policy') do elb.unset_listener_policy(80) returns([]) { elb.listeners.get(80).policy_names } end public_key_policy_id = 'fog-public-key-policy' tests('create public key policy') do elb.policies.create(:id => public_key_policy_id, :type_name => 'PublicKeyPolicyType', :policy_attributes => {'PublicKey' => AWS::IAM::SERVER_CERT_PUBLIC_KEY}) policy = elb.policies.get(public_key_policy_id) returns(public_key_policy_id) { policy.id } returns("PublicKeyPolicyType") { policy.type_name } returns(AWS::IAM::SERVER_CERT_PUBLIC_KEY) { policy.policy_attributes["PublicKey"] } end tests('a malformed policy') do raises(ArgumentError) { elb.policies.create(:id => 'foo', :cookie_stickiness => 'invalid stickiness') } end end tests('backend server descriptions') do tests('default') do returns(0) { elb.backend_server_descriptions.size } end tests('with a backend policy') do policy = "EnableProxyProtocol" port = 80 elb.policies.create(:id => policy, :type_name => 'ProxyProtocolPolicyType', :policy_attributes => { "ProxyProtocol" => true }) Fog::AWS[:elb].set_load_balancer_policies_for_backend_server(elb.id, port, [policy]).body elb.reload returns([policy]) { elb.backend_server_descriptions.get(port).policy_names } end end tests('setting a new ssl certificate id') do elb.listeners.create(:instance_port => 443, :lb_port => 443, :protocol => 'HTTPS', :instance_protocol => 'HTTPS', :ssl_id => @certificate['Arn']) elb.set_listener_ssl_certificate(443, @certificate['Arn']) end tests('destroy') do elb.destroy end Fog::AWS[:iam].delete_server_certificate(@key_name) end end fog-aws-3.18.0/tests/models/elb/tagging_tests.rb000066400000000000000000000010411437344660100215300ustar00rootroot00000000000000Shindo.tests("AWS::ELB | tagging", ['aws', 'elb']) do @elb5 = Fog::AWS[:elb].load_balancers.create(:id => "fog-test-elb-tagging") tags1 = {'key1' => 'val1'} tags2 = {'key2' => 'val2'} tests "add and remove tags from an ELB" do returns({}) { @elb5.tags } returns(tags1) { @elb5.add_tags tags1 } returns(tags1.merge tags2) { @elb5.add_tags tags2 } returns(tags2) { @elb5.remove_tags tags1.keys } returns(tags2) { @elb5.tags } @elb5.destroy end end fog-aws-3.18.0/tests/models/glacier/000077500000000000000000000000001437344660100172115ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/glacier/model_tests.rb000066400000000000000000000027471437344660100220720ustar00rootroot00000000000000Shindo.tests('AWS::Glacier | models', ['aws', 'glacier']) do pending if Fog.mocking? 
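# Live-only: the vault and archive tests below (including a 2MB multipart archive upload) talk to the real Glacier API.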
tests('success') do tests('vaults') do tests('getting a missing vault') do returns(nil) { Fog::AWS[:glacier].vaults.get('no-such-vault') } end vault = nil tests('creating a vault') do vault = Fog::AWS[:glacier].vaults.create :id => 'Fog-Test-Vault' tests("id is Fog-Test-Vault").returns('Fog-Test-Vault') {vault.id} end tests('all') do tests('contains vault').returns(true) { Fog::AWS[:glacier].vaults.map {|vault| vault.id}.include?(vault.id)} end tests('destroy') do vault.destroy tests('removes vault').returns(nil) {Fog::AWS[:glacier].vaults.get(vault.id)} end end tests("archives") do vault = Fog::AWS[:glacier].vaults.create :id => 'Fog-Test-Vault-upload' tests('create') do archive = vault.archives.create(:body => 'data') tests('sets id').returns(true) {!archive.id.nil?} archive.destroy end tests('create multipart') do body = StringIO.new('x'*1024*1024*2) body.rewind archive = vault.archives.create(:body => body, :multipart_chunk_size => 1024*1024) tests('sets id').returns(true) {!archive.id.nil?} archive.destroy end end vault = Fog::AWS[:glacier].vaults.create :id => 'Fog-Test-Vault' tests("jobs") do tests('all').returns([]) {vault.jobs} end vault.destroy end end fog-aws-3.18.0/tests/models/iam/000077500000000000000000000000001437344660100163515ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/iam/access_keys_tests.rb000066400000000000000000000025211437344660100224140ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | access_keys", ['aws','iam']) do iam = Fog::AWS[:iam] @username = 'fake_user' @user = iam.users.create(:id => @username) tests('#all', 'there are no access keys for a new user').succeeds do @user.access_keys.empty? end tests('#create','an access key').succeeds do access_key = @user.access_keys.create access_key.id =~ /[A-Z0-9]{20}/ access_key.secret_access_key =~ /[\S]{40}/ access_key.status == "Active" access_key.username == @username @access_key_id = access_key.id end @user.access_keys.create tests('#all','there are two access keys').succeeds do @user.access_keys.size == 2 end tests('#get') do tests('a valid access key id').succeeds do access_key = @user.access_keys.get(@access_key_id) access_key.id == @access_key_id access_key.secret_access_key == nil access_key.status == "Active" access_key.username == @username end tests('an invalid access key').succeeds do @user.access_keys.get('non-existing') == nil end end tests('#destroy', 'decrease by one the number of access keys').succeeds do size = @user.access_keys.size @user.access_keys.get(@access_key_id).destroy @user.access_keys.size == ( size - 1 ) end # clean up @user.access_keys.map(&:destroy) @user.destroy end fog-aws-3.18.0/tests/models/iam/groups_tests.rb000066400000000000000000000025331437344660100214420ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | groups", ['aws','iam']) do service = Fog::AWS[:iam] group_name = uniq_id('fog-test-group') policy_name = uniq_id('fog-test-policy') group = nil document = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} tests('#create').succeeds do group = service.groups.create(:name => group_name) group.name == group_name end tests('#all').succeeds do service.groups.all.map(&:name).include?(group_name) end tests('update').succeeds do new_path = group.path = "/newpath/" group.save group.reload.path == new_path end tests('group') do policy = nil tests('#policies', '#create') do policy = group.policies.create(:id => policy_name, :document => document) end tests('#policies', '#get').succeeds do group.policies.get(policy_name) != 
nil end tests('#policies', '#all').succeeds do group.policies.all.map(&:id).include?(policy.id) end tests('#users', 'when none').succeeds do group.users.empty? end user = nil tests('#add_user').succeeds do user = service.users.create(:id => 'fog-test') group.add_user(user) group.users.include?(user) end tests('#users').succeeds do group.reload.users.map(&:identity).include?(user.identity) end end end fog-aws-3.18.0/tests/models/iam/instance_profile_tests.rb000066400000000000000000000011431437344660100234430ustar00rootroot00000000000000Shindo.tests("Fog::AWS[:iam] | instance_profiles", ['aws', 'iam']) do model_tests(Fog::AWS[:iam].instance_profiles, {:name => uniq_id('fog-instance-profile')}) do @role = Fog::AWS[:iam].roles.create(:rolename => uniq_id('fog-role')) tests("#add_role('#{@role.rolename}')") do returns(true) { @instance.add_role(@role.rolename) } end returns(1) { @role.instance_profiles.count } returns(@instance) { @role.instance_profiles.first } tests("#remove_role('#{@role.rolename}')") do returns(true) { @instance.remove_role(@role.rolename) } end @role.destroy end end fog-aws-3.18.0/tests/models/iam/managed_policies_tests.rb000066400000000000000000000036661437344660100234160ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | managed_policies", ['aws','iam']) do iam = Fog::AWS[:iam] tests('#all').succeeds do iam.managed_policies.size == 100 end tests('#each').succeeds do policies = [] iam.managed_policies.each { |policy| policies << policy } policies.size > 100 end policy = iam.managed_policies.get("arn:aws:iam::aws:policy/IAMReadOnlyAccess") tests("#document").succeeds do policy.document == { "Version" => "2012-10-17", "Statement" => [ { "Effect" => "Allow", "Action" => [ "iam:GenerateCredentialReport", "iam:GenerateServiceLastAccessedDetails", "iam:Get*", "iam:List*" ], "Resource" => "*" } ] } end tests("users") do user = iam.users.create(:id => uniq_id("fog-test-user")) tests("#attach").succeeds do user.attach(policy) user.attached_policies.map(&:identity) == [policy.identity] end returns(1) { policy.reload.attachments} tests("#detach").succeeds do user.detach(policy) user.attached_policies.map(&:identity) == [] end user.destroy end tests("groups") do group = iam.groups.create(:name => uniq_id("fog-test-group")) tests("#attach").succeeds do group.attach(policy) group.attached_policies.map(&:identity) == [policy.identity] end returns(1) { policy.reload.attachments} tests("#detach").succeeds do group.detach(policy) group.attached_policies.map(&:identity) == [] end group.destroy end tests("roles") do role = iam.roles.create(:rolename => uniq_id("fog-test-role")) tests("#attach").succeeds do role.attach(policy) role.attached_policies.map(&:identity) == [policy.identity] end returns(1) { policy.reload.attachments} tests("#detach").succeeds do role.detach(policy) role.attached_policies.map(&:identity) == [] end role.destroy end end fog-aws-3.18.0/tests/models/iam/policies_tests.rb000066400000000000000000000030261437344660100217300ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | policies", ['aws','iam']) do iam = Fog::AWS[:iam] @username = 'fake_user' @user = iam.users.create(:id => @username) @policy_document = {"Statement"=>[{"Action"=>["sqs:*"], "Effect"=>"Allow", "Resource"=>"*"}]} @policy_name = 'fake-sqs-policy' tests('#all', 'there is no policies').succeeds do @user.policies.empty? 
end tests('#create') do tests('a valid policy').succeeds do policy = @user.policies.create(:id => @policy_name, :document => @policy_document) policy.id == @policy_name policy.username == @username policy.document == @policy_document end # The mocking doesn't validate the document policy #tests('an invalid valid policy').succeeds do # raises(Fog::AWS::IAM::Error) { @user.policies.create(id: 'non-valid-document', document: 'invalid json blob') } #end end @user.policies.create(:id => 'another-policy', :document => {}) tests('#all','there are two policies').succeeds do @user.policies.size == 2 end tests('#get') do tests('a valid policy').succeeds do policy = @user.policies.get(@policy_name) policy.id == @polic_name policy.username == @username policy.document == @policy_document end tests('an invalid policy').succeeds do @user.policies.get('non-existing') == nil end end tests('#destroy').succeeds do @user.policies.get(@policy_name).destroy end # clean up @user.access_keys.map(&:destroy) @user.policies.map(&:destroy) @user.destroy end fog-aws-3.18.0/tests/models/iam/roles_tests.rb000066400000000000000000000036411437344660100212500ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | roles", ['aws','iam']) do @iam = Fog::AWS[:iam] @role_one_name = 'fake_role_one' @role_two_name = 'fake_role_two' @role_three_name = 'fake_role_three' @role_three_path = '/path/to/fake_role_three/' @role_four_name = 'fake_role_four' tests('#create').succeeds do @role_one = @iam.roles.create(:rolename => @role_one_name) @role_one.rolename == @role_one_name end tests('#all','there is only one role').succeeds do @iam.roles.size == 1 end tests('#all','the only role should match').succeeds do @iam.roles.first.rolename == @role_one_name end tests('#create','a second role').succeeds do @role_two = @iam.roles.create(:rolename => @role_two_name) @role_two.rolename == @role_two_name end tests('#all','there are two roles').succeeds do @iam.roles.size == 2 end tests('#get','an existing role').succeeds do @iam.roles.get(@role_one_name).rolename == @role_one_name end tests('#get',"returns nil if the role doesn't exists").succeeds do @iam.roles.get('blah').nil? 
end tests('#create', 'assigns path').succeeds do @role_three = @iam.roles.create(:rolename => @role_three_name, :path => @role_three_path) @role_three.path == @role_three_path end tests('#create', 'defaults path to /').succeeds do @role_four = @iam.roles.create(:rolename => @role_four_name) @role_four.path == '/' end tests('#destroy','an existing role').succeeds do @iam.roles.get(@role_one_name).destroy end tests('#all', 'limit 1').succeeds do 1 == @iam.roles.all(:limit => 1).size end tests('#all', 'each_entry').succeeds do roles = []; @iam.roles.each(:limit => 1) { |r| roles << r } 3 == roles.size end tests('#destroy','clean up remaining roles').succeeds do @iam.roles.get(@role_two_name).destroy @iam.roles.get(@role_three_name).destroy @iam.roles.get(@role_four_name).destroy end end fog-aws-3.18.0/tests/models/iam/users_tests.rb000066400000000000000000000056761437344660100212770ustar00rootroot00000000000000Shindo.tests("Fog::Compute[:iam] | users", ['aws','iam']) do iam = Fog::AWS[:iam] user_one_name = 'fake_user_one' user_two_name = 'fake_user_two' user_three_name = 'fake_user_three' user_three_path = '/path/to/fake_user_three/' user_four_name = 'fake_user_four' def all_users Fog::AWS[:iam].users.all.select{|user| user.id =~ /^fake_user/ } end tests('#create').succeeds do user_one = iam.users.create(:id => user_one_name) user_one.id == user_one_name end tests('#all','there is only one user').succeeds do all_users.size == 1 end tests('#all','the only user should match').succeeds do all_users.first.id == user_one_name end tests('#create','a second user').succeeds do user_two = iam.users.create(:id => user_two_name) user_two.id == user_two_name end tests('#all','there are two users').succeeds do all_users.size == 2 end user = iam.users.get(user_one_name) tests('#get','an existing user').succeeds do user.id == user_one_name end tests('#current').succeeds do iam.users.current end tests('#get',"returns nil if the user doesn't exists").succeeds do iam.users.get('non-exists') == nil end tests('#policies','it has no policies').succeeds do user.policies.empty? end tests('#access_keys','it has no keys').succeeds do user.access_keys.empty? end # test that users create in mock and be signed in via access key and share data if Fog.mocking? tests("mocking access key usage") do access_key = user.access_keys.create user_client = Fog::AWS::IAM.new( :aws_access_key_id => access_key.identity, :aws_secret_access_key => access_key.secret_access_key ) tests("sets correct data").succeeds do user_client.users.size > 1 end tests("set current user name").succeeds do user_client.current_user_name == user.identity end end end tests('#password=nil', 'without a password').succeeds do user.password = nil user.password_created_at.nil? end tests('#password=(password)').succeeds do user.password = SecureRandom.base64(10) user.password_created_at.is_a?(Time) end tests('#password=(update_password)').succeeds do user.password = SecureRandom.base64(10) user.password_created_at.is_a?(Time) end tests('#password=nil', 'with a password').succeeds do user.password = nil user.password_created_at.nil? 
end tests('#create', 'assigns path').succeeds do user_three = iam.users.create(:id => user_three_name, :path => user_three_path) user_three.path == user_three_path end tests('#create', 'defaults path to /').succeeds do user_four = iam.users.create(:id => user_four_name) user_four.path == '/' end tests('#destroy','an existing user').succeeds do iam.users.get(user_one_name).destroy end tests('#destroy','clean up remaining user').succeeds do iam.users.get(user_two_name).destroy end end fog-aws-3.18.0/tests/models/rds/000077500000000000000000000000001437344660100163735ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/rds/cluster_tests.rb000066400000000000000000000036151437344660100216300ustar00rootroot00000000000000Shindo.tests("AWS::RDS | cluster", ["aws", "rds"]) do model_tests(Fog::AWS[:rds].clusters, rds_default_cluster_params) do @cluster_id = @instance.id @instance.wait_for(20*60) { ready? } @cluster_with_final_snapshot = Fog::AWS[:rds].clusters.create(rds_default_cluster_params.merge(:id => uniq_id("fog-snapshot-test"), :backup_retention_period => 1)) tests("#servers") do returns([]) { @instance.servers } end @server = Fog::AWS[:rds].servers.create(rds_default_server_params.reject { |k,v| [:allocated_storage, :master_username, :password, :backup_retention_period].include?(k) }.merge(:engine => "aurora", :cluster_id => @instance.id, :flavor_id => "db.r3.large")) @server.wait_for(20*60) { ready? } tests("#servers") do @instance.reload returns([{"DBInstanceIdentifier" => @server.id, "master" => true}]) { @instance.db_cluster_members } returns([@server]) { @instance.servers } end tests("#snapshots") do returns([]) { @instance.snapshots } snapshot_id = uniq_id("manual-snapshot") snapshot = @instance.snapshots.create(:id => snapshot_id) returns(snapshot_id) { snapshot.id } snapshot.wait_for { ready? } returns([snapshot.id]) { @instance.snapshots.map(&:id) } snapshot.destroy end tests("#destroy") do snapshot_id = uniq_id("fog-snapshot") @instance.servers.map(&:destroy) @cluster_with_final_snapshot.wait_for(20*60) { ready? } @cluster_with_final_snapshot.destroy(snapshot_id) snapshot = Fog::AWS[:rds].cluster_snapshots.get(snapshot_id) snapshot.wait_for { ready? } returns(snapshot_id) { snapshot.id } snapshot.destroy end after do if cluster = Fog::AWS[:rds].clusters.get(@cluster_id) unless cluster.state = 'deleting' cluster.servers.map(&:destroy) cluster.destroy end end end end end fog-aws-3.18.0/tests/models/rds/clusters_tests.rb000066400000000000000000000002561437344660100220110ustar00rootroot00000000000000Shindo.tests("AWS::RDS | clusters", ["aws", "rds"]) do collection_tests(Fog::AWS[:rds].clusters, rds_default_cluster_params) do @instance.wait_for { ready? } end end fog-aws-3.18.0/tests/models/rds/event_subscription_tests.rb000066400000000000000000000004131437344660100240650ustar00rootroot00000000000000Shindo.tests("AWS::RDS | event_subscription", ['aws', 'rds']) do pending unless Fog.mocking? name = 'fog' params = {:id => name, :sns_topic_arn => 'arn:aws:sns:us-east-1:12345678910:fog'} model_tests(Fog::AWS[:rds].event_subscriptions, params) do end end fog-aws-3.18.0/tests/models/rds/event_subscriptions_tests.rb000066400000000000000000000003711437344660100242530ustar00rootroot00000000000000Shindo.tests("AWS::RDS | event subscriptions", ['aws', 'rds']) do pending unless Fog.mocking? 
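# Mock-only: these event subscription tests reuse a fixed example SNS topic ARN, so they are skipped against a real account.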
params = {:id => "fog", :sns_topic_arn => 'arn:aws:sns:us-east-1:12345678910:fog'} collection_tests(Fog::AWS[:rds].event_subscriptions, params) end fog-aws-3.18.0/tests/models/rds/helper.rb000066400000000000000000000013671437344660100202060ustar00rootroot00000000000000def rds_default_server_params { :allocated_storage => 5, :backup_retention_period => 0, :engine => 'mysql', :version => '5.6.22', :id => uniq_id, :master_username => 'foguser', :password => 'fogpassword', :flavor_id => 'db.m3.medium', } end def rds_default_cluster_params { :allocated_storage => 50, :backup_retention_period => 10, :engine => "aurora", :version => "5.6.10a", :id => uniq_id, :master_username => "fogclusteruser", :password => "fogpassword", :flavor_id => "db.r3.large" } end fog-aws-3.18.0/tests/models/rds/instance_option_tests.rb000066400000000000000000000004731437344660100233420ustar00rootroot00000000000000Shindo.tests("AWS::RDS | db instance options", ['aws', 'rds']) do params = {:engine => 'mysql'} pending if Fog.mocking? tests('#options') do tests 'contains options' do @instance = Fog::AWS[:rds].instance_options.new(params) returns(true) { @instance.engine == 'mysql' } end end end fog-aws-3.18.0/tests/models/rds/parameter_group_tests.rb000066400000000000000000000013741437344660100233430ustar00rootroot00000000000000Shindo.tests("AWS::RDS | parameter_group", ['aws', 'rds']) do group_name = 'fog-test' params = {:id => group_name, :family => 'mysql5.1', :description => group_name} pending if Fog.mocking? model_tests(Fog::AWS[:rds].parameter_groups, params, false) do tests('#parameters') do #search for a sample parameter tests 'contains parameters' do returns(true){ @instance.parameters.any? {|p| p.name == 'query_cache_size'}} end end tests('#modify') do @instance.modify([{:name => 'query_cache_size', :value => '6553600', :apply_method => 'immediate'}]) tests 'parameter has changed' do returns('6553600'){@instance.parameters.find {|p| p.name == 'query_cache_size'}.value} end end end end fog-aws-3.18.0/tests/models/rds/parameter_groups_tests.rb000066400000000000000000000004161437344660100235220ustar00rootroot00000000000000Shindo.tests("AWS::RDS | parameter_groups", ['aws', 'rds']) do group_name = 'fog-test' params = {:id => group_name, :family => 'mysql5.1', :description => group_name} pending if Fog.mocking? collection_tests(Fog::AWS[:rds].parameter_groups, params, false) end fog-aws-3.18.0/tests/models/rds/security_group_tests.rb000066400000000000000000000051701437344660100232300ustar00rootroot00000000000000Shindo.tests("AWS::RDS | security_group", ['aws', 'rds']) do group_name = 'fog-test' params = {:id => group_name, :description => 'fog test'} model_tests(Fog::AWS[:rds].security_groups, params) do tests("#description").returns('fog test') { @instance.description } @ec2_sec_group = Fog::Compute[:aws].security_groups.create(:name => uniq_id("fog-rds-test"), :description => 'fog test') tests("#authorize_ec2_security_group('#{@ec2_sec_group.name}')").succeeds do @instance.authorize_ec2_security_group(@ec2_sec_group.name) returns('authorizing') do @instance.ec2_security_groups.find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group.name}['Status'] end end @instance.wait_for { ready? } tests("#revoke_ec2_security_group('#{@ec2_sec_group.name}')").succeeds do @instance.revoke_ec2_security_group(@ec2_sec_group.name) returns('revoking') do @instance.ec2_security_groups.find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group.name}['Status'] end @instance.wait_for { ready? 
} returns(false) { @instance.ec2_security_groups.any?{|h| h['EC2SecurityGroupName'] == @ec2_sec_group.name} } end @instance.wait_for { ready? } tests("#authorize_ec2_security_group('#{@ec2_sec_group.group_id}')").succeeds do @instance.authorize_ec2_security_group(@ec2_sec_group.group_id) returns('authorizing') do @instance.ec2_security_groups.find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group.name}['Status'] end end @instance.wait_for { ready? } tests("#revoke_ec2_security_group('#{@ec2_sec_group.group_id}')").succeeds do @instance.revoke_ec2_security_group(@ec2_sec_group.group_id) returns('revoking') do @instance.ec2_security_groups.find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group.name}['Status'] end @instance.wait_for { ready? } returns(false) { @instance.ec2_security_groups.any?{|h| h['EC2SecurityGroupId'] == @ec2_sec_group.group_id} } end @instance.wait_for { ready? } @ec2_sec_group.destroy tests("#authorize_cidrip").succeeds do @cidr = '127.0.0.1/32' @instance.authorize_cidrip(@cidr) returns('authorizing') { @instance.ip_ranges.find{|h| h['CIDRIP'] == @cidr}['Status'] } end tests("#revoke_cidrip").succeeds do pending if Fog.mocking? @instance.wait_for { ready? } @instance.revoke_cidrip(@cidr) returns('revoking') { @instance.ip_ranges.find{|h| h['CIDRIP'] == @cidr}['Status'] } @instance.wait_for { ready? } returns(false) { @instance.ip_ranges.any?{|h| h['CIDRIP'] == @cidr} } end end end fog-aws-3.18.0/tests/models/rds/security_groups_tests.rb000066400000000000000000000002711437344660100234100ustar00rootroot00000000000000Shindo.tests("AWS::RDS | security groups", ['aws', 'rds']) do params = {:id => 'fog-test', :description => 'fog test'} collection_tests(Fog::AWS[:rds].security_groups, params) end fog-aws-3.18.0/tests/models/rds/server_tests.rb000066400000000000000000000107231437344660100214530ustar00rootroot00000000000000Shindo.tests("AWS::RDS | server", ['aws', 'rds']) do model_tests(Fog::AWS[:rds].servers, rds_default_server_params) do # We'll need this later; create it early to avoid waiting @instance_with_final_snapshot = Fog::AWS[:rds].servers.create(rds_default_server_params.merge(:id => uniq_id("fog-snapshot-test"), :backup_retention_period => 1)) @instance_with_encrypted_storage = Fog::AWS[:rds].servers.create(rds_default_server_params.merge(:storage_encrypted => true)) @instance.wait_for(20*60) { ready? } @instance_with_encrypted_storage.wait_for(20*60) { ready? } @final_snapshot_id = uniq_id('fog-test-snapshot') tests("#storage_encrypted") do returns(true) { @instance_with_encrypted_storage.storage_encrypted } end test('#read_replica_identifiers is []') do returns([]) { @instance.read_replica_identifiers } end tests('#snapshots') do snapshot = nil tests('#create').succeeds do snapshot = @instance.snapshots.create(:id => uniq_id('fog-snapshot-test')) end snapshot.wait_for { ready?} @instance.wait_for { ready? } returns(true) { @instance.snapshots.map{ |s| s.id }.include?(snapshot.id) } snapshot.destroy end tests("#modify").succeeds do pending if Fog.mocking? 
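# Live-only: #modify swaps in a freshly created parameter group and security group, reboots the instance, then restores the original groups and waits until they report active and in-sync.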
engine = rds_default_server_params.fetch(:engine) version = rds_default_server_params.fetch(:version).match(/\d+\.\d+/).to_s orig_parameter_group = @instance.db_parameter_groups.first['DBParameterGroupName'] parameter_group = Fog::AWS[:rds].parameter_groups.create(:id => uniq_id, :family => "#{engine}#{version}", :description => 'fog-test') orig_security_groups = @instance.db_security_groups.map{|h| h['DBSecurityGroupName']} security_group = Fog::AWS[:rds].security_groups.create(:id => uniq_id, :description => 'fog-test') modify_options = { 'DBParameterGroupName' => parameter_group.id, 'DBSecurityGroups' => orig_security_groups + [security_group.id] } @instance.modify(true, modify_options) @instance.wait_for { ready? } returns(parameter_group.id, 'new parameter group') do @instance.db_parameter_groups.first['DBParameterGroupName'] end returns(true, "new security group") do @instance.db_security_groups.any?{|hash| hash['DBSecurityGroupName'] == security_group.id} end @instance.reboot @instance.wait_for { state == 'rebooting' } @instance.wait_for { ready? } # Restore back to original state using symbols restore_options = { :parameter_group_name => orig_parameter_group, :security_group_names => orig_security_groups } @instance.modify(true, restore_options) @instance.reboot @instance.wait_for { state == 'rebooting' } @instance.wait_for do ready? && db_security_groups.all? {|hash| hash['Status'] == 'active'} && db_parameter_groups.all? {|hash| hash['ParameterApplyStatus'] == 'in-sync' } end parameter_group.destroy security_group.destroy end tests("#reboot").succeeds do @instance.reboot end @instance.wait_for { state == 'rebooting' } @instance.wait_for { ready? } replica = nil tests('#create_read_replica').succeeds do replica = @instance_with_final_snapshot.create_read_replica(uniq_id('fog-replica')) @instance_with_final_snapshot.reload returns([replica.id]) { @instance_with_final_snapshot.read_replica_identifiers } returns(@instance_with_final_snapshot.id) { replica.read_replica_source } replica.wait_for { ready? } # FinalDBSnapshotIdentifier can not be specified when deleting a replica instance raises(Fog::AWS::RDS::Error) { replica.destroy("foobar") } end tests('#promote_read_replica').succeeds do replica.promote.wait_for { state != "modifying" } replica.read_replica_source == nil end tests('#promote_read_replica', 'master').raises(Fog::AWS::RDS::Error) { @instance_with_final_snapshot.promote } replica && replica.destroy test("Destroying with a final snapshot") do @instance_with_final_snapshot.wait_for { ready? } @instance_with_final_snapshot.destroy(@final_snapshot_id) returns(true, "Final snapshot created") do @final_snapshot = Fog::AWS[:rds].snapshots.get(@final_snapshot_id) !@final_snapshot.nil? end @final_snapshot.wait_for { ready? } @final_snapshot.destroy end end end fog-aws-3.18.0/tests/models/rds/servers_tests.rb000066400000000000000000000015361437344660100216400ustar00rootroot00000000000000Shindo.tests("AWS::RDS | servers", ['aws', 'rds']) do collection_tests(Fog::AWS[:rds].servers, rds_default_server_params) do @instance.wait_for { ready? 
} end tests("#restore").succeeds do instance = Fog::AWS[:rds].servers.create(rds_default_server_params.merge(:id => uniq_id("fog-snapshot-test"))) snapshot_id = uniq_id('fog-snapshot-test') @snapshot = instance.snapshots.create(:id => snapshot_id ) instance.destroy db_name = uniq_id('fog-db-name') @restore_instance = Fog::AWS[:rds].servers.restore('master_username' => instance.master_username, 'flavor_id' => 'db.m3.medium', 'source_snapshot_id' => snapshot_id, 'id' => uniq_id('restored-instance')) end if Fog.mocking? && @restore_instance.respond_to?(:ready?) @restore_instance.wait_for { ready? } end @snapshot.destroy @restore_instance.destroy end fog-aws-3.18.0/tests/models/rds/snapshot_tests.rb000066400000000000000000000005061437344660100220020ustar00rootroot00000000000000Shindo.tests("AWS::RDS | snapshot", ['aws', 'rds']) do @server = Fog::AWS[:rds].servers.create(rds_default_server_params) @server.wait_for { ready? } params = {:id => uniq_id, :instance_id => @server.id} model_tests(Fog::AWS[:rds].snapshots, params) do @instance.wait_for { ready? } end @server.destroy end fog-aws-3.18.0/tests/models/rds/snapshots_tests.rb000066400000000000000000000005141437344660100221640ustar00rootroot00000000000000Shindo.tests("AWS::RDS | snapshots", ['aws', 'rds']) do @server = Fog::AWS[:rds].servers.create(rds_default_server_params) @server.wait_for { ready? } params = {:id => uniq_id, :instance_id => @server.id} collection_tests(Fog::AWS[:rds].snapshots, params) do @instance.wait_for { ready? } end @server.destroy end fog-aws-3.18.0/tests/models/rds/tagging_tests.rb000066400000000000000000000013361437344660100215650ustar00rootroot00000000000000Shindo.tests("AWS::RDS | tagging", ['aws', 'rds']) do @server = Fog::AWS[:rds].servers.create(rds_default_server_params) Fog::Formatador.display_line "Creating RDS instance #{@server.id}" Fog::Formatador.display_line "Waiting for instance #{@server.id} to be ready" @server.wait_for { ready? } tags1 = {'key1' => 'val1'} tags2 = {'key2' => 'val2'} tests "add and remove tags from a running RDS model" do returns({}) { @server.tags } returns(tags1) { @server.add_tags tags1 } returns(tags1.merge tags2) { @server.add_tags tags2 } returns(tags2) { @server.remove_tags tags1.keys } returns(tags2) { @server.tags } end @server.destroy end fog-aws-3.18.0/tests/models/sns/000077500000000000000000000000001437344660100164065ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/sns/topic_tests.rb000066400000000000000000000006571437344660100213030ustar00rootroot00000000000000Shindo.tests("AWS::SNS | topic", ['aws', 'sns']) do params = {:id => 'fog'} model_tests(Fog::AWS[:sns].topics, params) do @instance.wait_for { ready? } tests("#display_name").returns('fog') { @instance.display_name } tests("#update_topic_attribute") do @instance.update_topic_attribute("DisplayName", "new-fog") tests("#display_name").returns('new-fog') { @instance.display_name } end end end fog-aws-3.18.0/tests/models/sns/topics_tests.rb000066400000000000000000000003061437344660100214550ustar00rootroot00000000000000Shindo.tests("AWS::SNS | topics", ['aws', 'sns']) do pending unless Fog.mocking? 
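# Mock-only: these collection tests reuse a fixed example topic ARN rather than creating a real SNS topic.
# Illustrative mock-mode sketch (not from the original suite; values are examples):
#
#   Fog.mock!
#   topic = Fog::AWS[:sns].topics.create(:id => 'fog')
#   topic.update_topic_attribute("DisplayName", "new-fog")
#   topic.destroy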
params = {:id => 'arn:aws:sns:us-east-1:12345678910:fog'} collection_tests(Fog::AWS[:sns].topics, params) end fog-aws-3.18.0/tests/models/storage/000077500000000000000000000000001437344660100172475ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/storage/directory_tests.rb000066400000000000000000000154461437344660100230340ustar00rootroot00000000000000Shindo.tests("Storage[:aws] | directory", ["aws"]) do tests('Fog::Storage[:aws]', "#request_params") do def slice(hash, *args) hash.select { |k, _| args.include?(k) } end instance = Fog::Storage[:aws] method = instance.method(:request_params) params = {bucket_name: 'profile-uploads', host: 'profile-uploads.s3.us-west-2.amazonaws.com'} tests("given #{params}, request_params[:host]").returns("profile-uploads.s3.us-west-2.amazonaws.com") do method.call(params)[:host] end params = {bucket_name: 'profile-uploads.johnsmith.net', cname: 'profile-uploads.johnsmith.net', virtual_host: true} tests("given #{params}, request_params[:host]").returns("profile-uploads.johnsmith.net") do method.call(params)[:host] end params = {bucket_name: 'profile-uploads.johnsmith.net', cname: 'profile-uploads.johnsmith.net', virtual_host: false} tests("given #{params}, request_params[:host], request_params[:path]"). returns({host: "s3.amazonaws.com", path: "/profile-uploads.johnsmith.net/"}) do slice(method.call(params), :host, :path) end params = {bucket_name: 'profile-uploads.johnsmith.net', bucket_cname: 'profile-uploads.johnsmith.net'} tests("given #{params}, request_params[:host]").returns("profile-uploads.johnsmith.net") do method.call(params)[:host] end params = {bucket_name: 'profile-uploads'} tests("given #{params}, request_params[:path], request_params[:host]"). returns({path: "/", host: "profile-uploads.s3.amazonaws.com"}) do slice(method.call(params), :path, :host) end params = {bucket_name: 'profile-uploads', path_style: true} tests("given #{params}, request_params[:path], request_params[:host]"). returns({path: "/profile-uploads/", host: "s3.amazonaws.com"}) do slice(method.call(params), :path, :host) end params = {bucket_name: 'profile-uploads', path_style: false} tests("given #{params}, request_params[:path], request_params[:host]"). returns({path: "/", host: "profile-uploads.s3.amazonaws.com"}) do slice(method.call(params), :path, :host) end params = {scheme: 'https', bucket_name: 'profile.uploads', path_style: false} tests("given #{params}, request_params[:path], request_params[:host]"). 
returns(path: "/profile.uploads/", host: "s3.amazonaws.com") do slice(method.call(params), :path, :host) end params = {:headers=>{:"Content-Type"=>"application/json"}} tests("given #{params}, request_params[:headers]").returns({:"Content-Type"=>"application/json"}) do method.call(params)[:headers] end params = {headers: {}} tests("given #{params}, request_params[:headers]").returns({}) do method.call(params)[:headers] end params = {scheme: 'http'} tests("given #{params}, request_params[:scheme]").returns('http') do method.call(params)[:scheme] end params = {} tests("given #{params}, request_params[:scheme]").returns('https') do method.call(params)[:scheme] end params = {scheme: 'http', port: 8080} tests("given #{params} (default scheme), request_params[:port]").returns(8080) do method.call(params)[:port] end params = {scheme: 'https', port: 443} tests("given #{params}, request_params[:port]").returns(nil) do method.call(params)[:port] end params = {} tests("given #{params}, request_params[:host]").returns("s3.amazonaws.com") do method.call(params)[:host] end params = {region: 'us-east-1'} tests("given #{params}, request_params[:host]").returns("s3.amazonaws.com") do method.call(params)[:host] end params = {region: 'us-west-2'} tests("given #{params}, request_params[:host]").returns("s3.us-west-2.amazonaws.com") do method.call(params)[:host] end params= {region: 'us-east-1', host: 's3.us-west-2.amazonaws.com'} tests("given #{params}, request_params[:host]").returns("s3.us-west-2.amazonaws.com") do method.call(params)[:host] end params = {object_name: 'image.png'} tests("given #{params}, request_params[:host]").returns("/image.png") do method.call(params)[:path] end params = {object_name: 'image.png', path: '/images/image.png'} tests("given #{params}, request_params[:host]").returns("/images/image.png") do method.call(params)[:path] end end directory_attributes = { :key => uniq_id('fogdirectorytests') } model_tests(Fog::Storage[:aws].directories, directory_attributes, Fog.mocking?) do tests("#public_url").returns(nil) do @instance.public_url end tests('#location').returns('us-east-1') do # == Fog::AWS::Storage::DEFAULT_REGION @instance.location end @instance.acl = 'public-read' @instance.save tests("#public_url").returns(true) do if @instance.public_url =~ %r[\Ahttps://fogdirectorytests-[\da-f]+\.s3\.amazonaws\.com/\z] true else @instance.public_url end end end directory_attributes = { :key => uniq_id('different-region'), :location => 'eu-west-1', } model_tests(Fog::Storage[:aws].directories, directory_attributes, Fog.mocking?) do tests("#location").returns('eu-west-1') do @instance.location end tests("#location").returns('eu-west-1') do Fog::Storage[:aws].directories.get(@instance.identity).location end end directory_attributes = { :key => uniq_id('fogdirectorytests') } model_tests(Fog::Storage[:aws].directories, directory_attributes, Fog.mocking?) do tests("#versioning=") do tests("#versioning=(true)").succeeds do @instance.versioning = true end tests("#versioning=(true) sets versioning to 'Enabled'").returns('Enabled') do @instance.versioning = true @instance.service.get_bucket_versioning(@instance.key).body['VersioningConfiguration']['Status'] end tests("#versioning=(false)").succeeds do (@instance.versioning = false).equal? 
false end tests("#versioning=(false) sets versioning to 'Suspended'").returns('Suspended') do @instance.versioning = false @instance.service.get_bucket_versioning(@instance.key).body['VersioningConfiguration']['Status'] end end end model_tests(Fog::Storage[:aws].directories, directory_attributes, Fog.mocking?) do tests("#versioning?") do tests("#versioning? false if not enabled").returns(false) do @instance.versioning? end tests("#versioning? true if enabled").returns(true) do @instance.service.put_bucket_versioning(@instance.key, 'Enabled') @instance.versioning? end tests("#versioning? false if suspended").returns(false) do @instance.service.put_bucket_versioning(@instance.key, 'Suspended') @instance.versioning? end end end end fog-aws-3.18.0/tests/models/storage/file_tests.rb000066400000000000000000000073331437344660100217430ustar00rootroot00000000000000Shindo.tests("Storage[:aws] | file", ["aws"]) do require 'tempfile' file_attributes = { :key => 'fog_file_tests', :body => lorem_file, :public => true } directory_attributes = { :key => uniq_id("fogfilestests") } @directory = Fog::Storage[:aws].directories.create(directory_attributes) model_tests(@directory.files, file_attributes, Fog.mocking?) do tests("#version") do tests("#version should be null if versioning isn't enabled").returns(nil) do @instance.version end end end @directory.versioning = true model_tests(@directory.files, file_attributes, Fog.mocking?) do tests("#version") do tests("#version should not be null if versioning is enabled").returns(false) do @instance.version == nil end end @directory.files.create(:key => @instance.key) @instance.destroy tests("#versions") do tests('#versions.size includes versions (including DeleteMarkers) for all keys').returns(3) do @instance.versions.size end tests('#versions are all for the correct key').returns(true) do @instance.versions.all? { |v| v.key == @instance.key } end end tests("#destroy") do tests("#destroy a specific version should delete the version, not create a DeleteMarker").returns(2) do @instance.destroy('versionId' => @instance.version) @instance.versions.all.size end end tests("multipart upload") do pending if Fog.mocking? # A 6MB file @large_file = Tempfile.new("fog-test-aws-s3-multipart") 6.times { @large_file.write("x" * (1024**2)) } @large_file.rewind tests("#save(:multipart_chunk_size => 5242880)").succeeds do @directory.files.create(:key => 'multipart-upload', :body => @large_file, :multipart_chunk_size => 5242880) end @large_file.close end tests("multipart upload with empty file") do pending if Fog.mocking? @empty_file = Tempfile.new("fog-test-aws-s3-multipart-empty") tests("#save(:multipart_chunk_size => 5242880)").succeeds do @directory.files.create(:key => 'empty-multipart-upload', :body => @empty_file, :multipart_chunk_size => 5242880) end @empty_file.close end tests("multipart upload with customer encryption").returns(true) do pending if Fog.mocking? 
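      # Descriptive note on the flow exercised below (SSE-C, customer-provided keys):
      # a random 256-bit key is generated locally, handed to #create via
      # :encryption => "AES256" and :encryption_key, and then the same key material
      # must be re-supplied (base64-encoded, together with its MD5) in the
      # x-amz-server-side-encryption-customer-* headers when the object is fetched
      # back for verification.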
encryption_key = OpenSSL::Cipher.new("AES-256-ECB").random_key # A 6MB file @large_file = Tempfile.new("fog-test-aws-s3-multipart") 6.times { @large_file.write("x" * (1024**2)) } @large_file.rewind tests("#save(:multipart_chunk_size => 5242880)").succeeds do @directory.files.create( :key => 'multipart-encrypted-upload', :body => @large_file, :multipart_chunk_size => 5242880, :encryption => "AES256", :encryption_key => encryption_key ) end @large_file.close @directory.files.get('multipart-encrypted-upload', 'x-amz-server-side-encryption-customer-algorithm' => 'AES256', 'x-amz-server-side-encryption-customer-key' => Base64.encode64(encryption_key).chomp!, 'x-amz-server-side-encryption-customer-key-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(encryption_key.to_s)).chomp! ).body == "x" * 6*1024**2 end acl = Fog::Storage[:aws].get_object_acl(@directory.key, @instance.key).body["AccessControlList"] tests("#acl").returns(acl) do @instance.acl end tests("#public?").returns(acl.any? {|grant| grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' && grant['Permission'] == 'READ'}) do @instance.public? end end @directory.versions.each(&:destroy) @directory.destroy end fog-aws-3.18.0/tests/models/storage/files_tests.rb000066400000000000000000000057351437344660100221320ustar00rootroot00000000000000Shindo.tests("Storage[:aws] | files", ["aws"]) do file_attributes = { :key => 'fog_file_tests', :body => lorem_file, :public => true } directory_attributes = { :key => uniq_id('fogfilestests') } @directory = Fog::Storage[:aws].directories.create(directory_attributes) @directory.versioning = true model_tests(@directory.files, file_attributes, Fog.mocking?) do v1 = @instance.version v2 = @directory.service.put_object(@directory.key, @instance.key, 'version 2 content').headers['x-amz-version-id'] v3 = @directory.service.delete_object(@directory.key, @instance.key).headers['x-amz-version-id'] v4 = @directory.service.put_object(@directory.key, @instance.key, 'version 3 content').headers['x-amz-version-id'] tests("#get") do tests("#get without version fetches the latest version").returns(v4) do @directory.files.get(@instance.key).version end tests("#get with version fetches that exact version").returns(v2) do @directory.files.get(@instance.key, 'versionId' => v2).version end tests("#get with a deleted version returns nil").returns(nil) do pending # getting 405 Method Not Allowed @directory.files.get(@instance.key, 'versionId' => v3) end end tests("#head") do tests("#head without version fetches the latest version").returns(v4) do @directory.files.head(@instance.key).version end tests("#head with version fetches that exact version").returns(v2) do @directory.files.head(@instance.key, 'versionId' => v2).version end tests("#head with a deleted version returns nil").returns(nil) do pending # getting 405 Method Not Allowed @directory.files.head(@instance.key, 'versionId' => v3) end end tests('#normalize_headers') do files = @directory.files response = Excon::Response.new current_time = Time.new(2021, 02, 21) response.headers['last-modified'] = current_time.to_s response.headers['etag'] = '12345' response.headers['ETAG'] = '12345' response.headers['Cache-Control'] = 'no-cache' response.headers['Content-disposition'] = 'attachment' response.headers['content-length'] = 100 response.headers['content-Encoding'] = 'gzip' response.headers['content-md5'] = 'ABCDEAB' response.headers['content-Md5'] = 'ABCDEAB' response.headers['ConTent-Type'] = 'application/json' expected = { 'Last-Modified' => 
current_time, 'ETag' => '12345', 'Cache-Control' => 'no-cache', 'Content-Disposition' => 'attachment', 'Content-Length' => 100, 'Content-Encoding' => 'gzip', 'Content-MD5' => 'ABCDEAB', 'Content-Type' => 'application/json' } tests('header keys are normalized').returns(expected) do files.normalize_headers(response) response.headers end end end @directory.versions.each(&:destroy) @directory.destroy end fog-aws-3.18.0/tests/models/storage/url_tests.rb000066400000000000000000000011451437344660100216210ustar00rootroot00000000000000# encoding: utf-8 Shindo.tests('AWS | url', ["aws"]) do @storage = Fog::Storage.new( :provider => 'AWS', :aws_access_key_id => '123', :aws_secret_access_key => 'abc', :region => 'us-east-1' ) @file = @storage.directories.new(:key => 'fognonbucket').files.new(:key => 'test.txt') now = Fog::Time.now @storage = Fog::Storage.new( :provider => 'AWS', :aws_access_key_id => '123', :aws_secret_access_key => 'abc', :aws_signature_version => 2, :region => 'us-east-1' ) @file = @storage.directories.new(:key => 'fognonbucket').files.new(:key => 'test.txt') end fog-aws-3.18.0/tests/models/storage/version_tests.rb000066400000000000000000000026121437344660100225040ustar00rootroot00000000000000Shindo.tests("Storage[:aws] | version", ["aws"]) do file_attributes = { :key => 'fog_file_tests', :body => lorem_file, :public => true } directory_attributes = { :key => uniq_id('fogfilestests') } @directory = Fog::Storage[:aws].directories.create(directory_attributes) @directory.versioning = true model_tests(@directory.files, file_attributes, Fog.mocking?) do @version_instance = @instance.versions.first @directory.service.put_object(@directory.key, @instance.key, 'second version content') tests("#file") do tests("#file should return the object associated with the version").returns(@version_instance.version) do @version_instance.file.version end end tests("#delete_marker") do tests("#delete_marker should be false if the version isn't a DeleteMarker'").returns(false) do @version_instance.delete_marker end tests("#delete_marker should be true if the version is a DeleteMarker'").returns(true) do @instance.destroy @instance.versions.all.first.delete_marker end end tests("#destroy") do tests("#destroy removes the specific version").returns(false) do @version_instance.destroy @instance.versions.all.map(&:version).include?(@version_instance.version) end end end @directory.versions.each(&:destroy) @directory.destroy end fog-aws-3.18.0/tests/models/storage/versions_tests.rb000066400000000000000000000032161437344660100226700ustar00rootroot00000000000000Shindo.tests("Storage[:aws] | versions", ["aws"]) do file_attributes = { :key => 'fog_file_tests', :body => lorem_file, :public => true } directory_attributes = { :key => uniq_id('fogfilestests') } model_tests(Fog::Storage[:aws].directories, directory_attributes, Fog.mocking?) do @instance.versioning = true versions = [] versions << @instance.service.put_object(@instance.key, 'one', 'abcde').headers['x-amz-version-id'] versions << @instance.service.put_object(@instance.key, 'one', '32423').headers['x-amz-version-id'] versions << @instance.service.delete_object(@instance.key, 'one').headers['x-amz-version-id'] versions.reverse! 
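    # The three versions pushed for key 'one' above are reversed here, presumably
    # because S3 lists object versions newest-first within a key; the single
    # version for key 'two' is then appended so the expected ordering matches
    # what #versions.all returns below.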
versions << @instance.service.put_object(@instance.key, 'two', 'aoeu').headers['x-amz-version-id'] tests('#versions') do tests('#versions.size includes versions (including DeleteMarkers) for all keys').returns(4) do @instance.versions.all.size end tests('#versions returns the correct versions').returns(versions) do @instance.versions.all.map(&:version) end end tests("#all") do tests("#all for a directory returns all versions, regardless of key").returns(versions) do @instance.versions.all.map(&:version) end tests("#all for file returns only versions for that file").returns(1) do @instance.files.get('two').versions.all.map(&:version).size end tests("#all for file returns only versions for that file").returns(versions.last) do @instance.files.get('two').versions.all.map(&:version).first end end @instance.versions.each(&:destroy) end end fog-aws-3.18.0/tests/models/support/000077500000000000000000000000001437344660100173175ustar00rootroot00000000000000fog-aws-3.18.0/tests/models/support/trusted_advisor_tests.rb000066400000000000000000000015171437344660100243130ustar00rootroot00000000000000Shindo.tests("AWS::Support | trusted_advisor_checks", ["aws", "support"]) do tests("collection#all").succeeds do Fog::AWS[:support].trusted_advisor_checks.all end @identity = Fog::AWS[:support].trusted_advisor_checks.all.first.identity tests("collection#get(#{@identity})").returns(@identity) do Fog::AWS[:support].trusted_advisor_checks.get(@identity).identity end @model = Fog::AWS[:support].trusted_advisor_checks.all.detect { |tac| tac.id == @identity } tests("model#flagged_resources").returns(nil) do @model.flagged_resources end tests("model#flagged_resources").returns(true) do @model.flagged_resources(false).is_a?(Fog::AWS::Support::FlaggedResources) end tests("model#flagged_resources").returns(true) do @model.flagged_resources.first.metadata.keys.sort == @model.metadata.sort end end fog-aws-3.18.0/tests/parsers/000077500000000000000000000000001437344660100157775ustar00rootroot00000000000000fog-aws-3.18.0/tests/parsers/compute/000077500000000000000000000000001437344660100174535ustar00rootroot00000000000000fog-aws-3.18.0/tests/parsers/compute/describe_images_tests.rb000066400000000000000000000024731437344660100243350ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/compute/describe_images' DESCRIBE_IMAGES_RESULT = <<-EOF 180a8433-ade0-4a6c-b35b-107897579572 aki-02486376 ec2-public-images-eu/vmlinuz-2.6.21-2.fc8xen-ec2-v1.0.i386.aki.manifest.xml available 206029621532 true i386 kernel amazon instance-store paravirtual xen EOF Shindo.tests('AWS::Compute | parsers | describe_images', %w[compute aws parser]) do tests('parses the xml').formats(AWS::Compute::Formats::DESCRIBE_IMAGES) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::Compute::DescribeImages.new) parser.parse(DESCRIBE_IMAGES_RESULT) parser.document.response end end fog-aws-3.18.0/tests/parsers/elb/000077500000000000000000000000001437344660100165415ustar00rootroot00000000000000fog-aws-3.18.0/tests/parsers/elb/describe_load_balancers.rb000066400000000000000000000045501437344660100236630ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/elb/describe_load_balancers' DESCRIBE_LOAD_BALANCERS_RESULT = <<-EOF 2013-08-01T15:47:20.930Z fog-test-elb 30 TCP:80 10 5 2 HTTP 80 HTTP 80 us-east-1a fog-test-elb-1965660309.us-east-1.elb.amazonaws.com Z3DZXE0Q79N41H internet-facing amazon-elb amazon-elb-sg fog-test-elb-1965660309.us-east-1.elb.amazonaws.com a6ea2117-fac1-11e2-abd3-1740ab4ef14e EOF 
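# Illustrative only: a rough sketch of how one of these SAX parsers can be driven
# by hand against the fixture above, mirroring the pattern used by the test below
# (kept commented out so nothing extra runs when the suite loads this file):
#
#   parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELB::DescribeLoadBalancers.new)
#   parser.parse(DESCRIBE_LOAD_BALANCERS_RESULT)
#   parser.document.response  # => nested hash matching AWS::ELB::Formats::DESCRIBE_LOAD_BALANCERS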
Shindo.tests('AWS::ELB | parsers | describe_load_balancers', %w[aws elb parser]) do tests('parses the xml').formats(AWS::ELB::Formats::DESCRIBE_LOAD_BALANCERS) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELB::DescribeLoadBalancers.new) parser.parse(DESCRIBE_LOAD_BALANCERS_RESULT) parser.document.response end end fog-aws-3.18.0/tests/parsers/elbv2/000077500000000000000000000000001437344660100170115ustar00rootroot00000000000000fog-aws-3.18.0/tests/parsers/elbv2/create_load_balancer_tests.rb000066400000000000000000000034561437344660100246610ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/elbv2/create_load_balancer' CREATE_LOAD_BALANCER_RESULT = <<-EOF arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-internal-load-balancer/50dc6c495c0c9188 internet-facing my-load-balancer vpc-3ac0fb5f Z2P70J7EXAMPLE 2016-03-25T21:29:48.850Z subnet-8360a9e7 us-west-2a subnet-b7d581c0 us-west-2b sg-5943793c my-load-balancer-424835706.us-west-2.elb.amazonaws.com provisioning application 32d531b2-f2d0-11e5-9192-3fff33344cfa EOF Shindo.tests('AWS::ELBV2 | parsers | create_load_balancer', %w[aws elb parser]) do tests('parses the xml').formats(AWS::ELBV2::Formats::CREATE_LOAD_BALANCER) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELBV2::CreateLoadBalancer.new) parser.parse(CREATE_LOAD_BALANCER_RESULT) parser.document.response end end fog-aws-3.18.0/tests/parsers/elbv2/describe_listeners_tests.rb000066400000000000000000000064521437344660100244370ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/elbv2/describe_listeners' DESCRIBE_LISTENERS_RESULT = <<-EOF arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 HTTPS 80 arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2 polucy forward 1 arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 HTTPS 443 \#{path} \#{query} \#{host} HTTP_301 redirect arn:aws:elasticloadbalancing:us-west-2:123456789012:certificate/56d36256-1245-40d6-916e-6f5a95e2b4c6 arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 HTTPS 80 arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2 polucy forward 2 arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 true 1 arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 arn:aws:elasticloadbalancing:us-west-2:123456789012:certificate/56d36256-1245-40d6-916e-6f5a95e2b4c6 18e470d3-f39c-11e5-a53c-67205c0d10fd EOF Shindo.tests('AWS::ELBV2 | parsers | describe_listeners', %w[aws elb parser]) do tests('parses the xml').formats(AWS::ELBV2::Formats::DESCRIBE_LISTENERS) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELBV2::DescribeListeners.new) parser.parse(DESCRIBE_LISTENERS_RESULT) parser.document.response end end fog-aws-3.18.0/tests/parsers/elbv2/describe_load_balancers_tests.rb000066400000000000000000000040561437344660100253560ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/elbv2/describe_load_balancers' DESCRIBE_LOAD_BALANCERS_RESULT = <<-EOF arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 internet-facing my-load-balancer vpc-3ac0fb5f Z2P70J7EXAMPLE 2016-03-25T21:26:12.920Z subnet-8360a9e7 us-west-2a subnet-b7d581c0 us-west-2b 
127.0.0.1 eipalloc-1c2ab192c131q2377 sg-5943793c my-load-balancer-424835706.us-west-2.elb.amazonaws.com active application 6581c0ac-f39f-11e5-bb98-57195a6eb84a EOF Shindo.tests('AWS::ELBV2 | parsers | describe_load_balancers', %w[aws elb parser]) do tests('parses the xml').formats(AWS::ELBV2::Formats::DESCRIBE_LOAD_BALANCERS) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELBV2::DescribeLoadBalancers.new) parser.parse(DESCRIBE_LOAD_BALANCERS_RESULT) parser.document.response end end fog-aws-3.18.0/tests/parsers/elbv2/describe_tags_tests.rb000066400000000000000000000021701437344660100233560ustar00rootroot00000000000000require 'fog/xml' require 'fog/aws/parsers/elbv2/describe_tags' DESCRIBE_TAGS_RESULT = <<-EOF arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188 lima project digital-media department 34f144db-f2d9-11e5-a53c-67205c0d10fd EOF Shindo.tests('AWS::ELBV2 | parsers | describe_tags', %w[aws elb parser]) do tests('parses the xml').formats(AWS::ELBV2::Formats::DESCRIBE_TAGS) do parser = Nokogiri::XML::SAX::Parser.new(Fog::Parsers::AWS::ELBV2::DescribeTags.new) parser.parse(DESCRIBE_TAGS_RESULT) parser.document.response end end fog-aws-3.18.0/tests/requests/000077500000000000000000000000001437344660100161735ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/auto_scaling/000077500000000000000000000000001437344660100206435ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/auto_scaling/auto_scaling_tests.rb000066400000000000000000000125631437344660100250710ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | auto_scaling_tests', ['aws', 'auto_scaling']) do @asg_name = 'fog-test-asg' @lc_name = 'fog-test-lc' tests('success') do tests("#create_launch_configuration").formats(AWS::AutoScaling::Formats::BASIC) do image_id = 'ami-8c1fece5' instance_type = 't1.micro' #listeners = [{'LoadBalancerPort' => 80, 'InstancePort' => 80, 'Protocol' => 'http'}] Fog::AWS[:auto_scaling].create_launch_configuration(image_id, instance_type, @lc_name).body end tests("#describe_launch_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_LAUNCH_CONFIGURATIONS) do Fog::AWS[:auto_scaling].describe_launch_configurations().body end tests("#describe_launch_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_LAUNCH_CONFIGURATIONS) do Fog::AWS[:auto_scaling].describe_launch_configurations('LaunchConfigurationNames' => @lc_name).body end tests("#describe_launch_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_LAUNCH_CONFIGURATIONS) do Fog::AWS[:auto_scaling].describe_launch_configurations('LaunchConfigurationNames' => [@lc_name]).body end tests("#create_auto_scaling_group").formats(AWS::AutoScaling::Formats::BASIC) do zones = ['us-east-1d'] max_size = 0 min_size = 0 Fog::AWS[:auto_scaling].create_auto_scaling_group(@asg_name, zones, @lc_name, max_size, min_size).body end if Fog.mocking? 
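      # The requests in this mocking-only block reference a load balancer name
      # ('elb-test-fog') and an instance id ('i-deadbeef') that this suite never
      # actually creates, which is why they are exercised only against the mocked
      # implementation.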
tests("#attach_load_balancers").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].attach_load_balancers(@asg_name, 'LoadBalancerNames' => 'elb-test-fog').body end tests("#detach_load_balancers").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].detach_load_balancers(@asg_name, 'LoadBalancerNames' => 'elb-test-fog').body end tests("#attach_load_balancer_target_groups").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].attach_load_balancer_target_groups(@asg_name, 'TargetGroupARNs' => 'elb-test-fog').body end tests("#detach_load_balancer_target_groups").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].detach_load_balancer_target_groups(@asg_name, 'TargetGroupARNs' => 'elb-test-fog').body end tests("#detach_instances").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].detach_instances(@asg_name, 'InstanceIds' => 'i-deadbeef').body end tests("#attach_instances").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].attach_instances(@asg_name, 'InstanceIds' => 'i-deadbeef').body end tests("#set_instance_protection").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].set_instance_protection( @asg_name, 'InstanceIds' => 'i-deadbeef', 'ProtectedFromScaleIn' => true ).body end end tests("#describe_auto_scaling_groups").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_GROUPS) do Fog::AWS[:auto_scaling].describe_auto_scaling_groups().body end tests("#describe_auto_scaling_groups").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_GROUPS) do Fog::AWS[:auto_scaling].describe_auto_scaling_groups('AutoScalingGroupNames' => @asg_name).body end tests("#describe_auto_scaling_groups").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_GROUPS) do Fog::AWS[:auto_scaling].describe_auto_scaling_groups('AutoScalingGroupNames' => [@asg_name]).body end tests("#describe_auto_scaling_instances").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_INSTANCES) do Fog::AWS[:auto_scaling].describe_auto_scaling_instances().body end tests("#describe_scaling_activities").formats(AWS::AutoScaling::Formats::DESCRIBE_SCALING_ACTIVITIES) do pending if Fog.mocking? Fog::AWS[:auto_scaling].describe_scaling_activities().body end tests("#describe_scaling_activities").formats(AWS::AutoScaling::Formats::DESCRIBE_SCALING_ACTIVITIES) do pending if Fog.mocking? Fog::AWS[:auto_scaling].describe_scaling_activities('ActivityIds' => '1').body end tests("#describe_scaling_activities").formats(AWS::AutoScaling::Formats::DESCRIBE_SCALING_ACTIVITIES) do pending if Fog.mocking? Fog::AWS[:auto_scaling].describe_scaling_activities('ActivityIds' => ['1', '2']).body end tests("#describe_scaling_activities").formats(AWS::AutoScaling::Formats::DESCRIBE_SCALING_ACTIVITIES) do pending if Fog.mocking? 
Fog::AWS[:auto_scaling].describe_scaling_activities('AutoScalingGroupName' => @asg_name).body end tests("#set_desired_capacity").formats(AWS::AutoScaling::Formats::BASIC) do desired_capacity = 0 Fog::AWS[:auto_scaling].set_desired_capacity(@asg_name, desired_capacity).body end tests("#delete_auto_scaling_group").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].delete_auto_scaling_group(@asg_name, 'ForceDelete' => true).body end tests("#delete_auto_scaling_group that does not exists").raises(Fog::AWS::AutoScaling::ValidationError) do Fog::AWS[:auto_scaling].delete_auto_scaling_group("group that does not exist") end tests("#delete_launch_configuration").formats(AWS::AutoScaling::Formats::BASIC) do Fog::AWS[:auto_scaling].delete_launch_configuration(@lc_name).body end end end fog-aws-3.18.0/tests/requests/auto_scaling/describe_types_tests.rb000066400000000000000000000063371437344660100254270ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | describe types requests', ['aws', 'auto_scaling']) do tests('success') do tests("#describe_adjustment_types").formats(AWS::AutoScaling::Formats::DESCRIBE_ADJUSTMENT_TYPES) do body = Fog::AWS[:auto_scaling].describe_adjustment_types.body [ 'ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity' ].each do |v| returns(true, "AdjustmentTypes contains #{v}") do body['DescribeAdjustmentTypesResult']['AdjustmentTypes'].any? {|t| t['AdjustmentType'] == v} end end body end tests("#describe_auto_scaling_notification_types").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_NOTIFICATION_TYPES) do body = Fog::AWS[:auto_scaling].describe_auto_scaling_notification_types.body [ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', 'autoscaling:TEST_NOTIFICATION' ].each do |v| returns(true, "AutoScalingNotificationTypes contains #{v}") do body['DescribeAutoScalingNotificationTypesResult']['AutoScalingNotificationTypes'].include?(v) end end body end tests("#describe_metric_collection_types").formats(AWS::AutoScaling::Formats::DESCRIBE_METRIC_COLLECTION_TYPES) do body = Fog::AWS[:auto_scaling].describe_metric_collection_types.body [ 'GroupDesiredCapacity', 'GroupInServiceInstances', 'GroupMaxSize', 'GroupMinSize', 'GroupPendingInstances', 'GroupTerminatingInstances', 'GroupTotalInstances' ].each do |v| returns(true, "Metrics contains #{v}") do body['DescribeMetricCollectionTypesResult']['Metrics'].any? {|t| t['Metric'] == v} end end [ '1Minute' ].each do |v| returns(true, "Granularities contains #{v}") do body['DescribeMetricCollectionTypesResult']['Granularities'].any? {|t| t['Granularity'] == v} end end body end tests("#describe_scaling_process_types").formats(AWS::AutoScaling::Formats::DESCRIBE_SCALING_PROCESS_TYPES) do body = Fog::AWS[:auto_scaling].describe_scaling_process_types.body [ 'AZRebalance', 'AddToLoadBalancer', 'AlarmNotification', 'HealthCheck', 'Launch', 'ReplaceUnhealthy', 'ScheduledActions', 'Terminate' ].each do |v| returns(true, "Processes contains #{v}") do body['DescribeScalingProcessTypesResult']['Processes'].any? 
{|t| t['ProcessName'] == v} end end body end tests("#describe_termination_policy_types").formats(AWS::AutoScaling::Formats::DESCRIBE_TERMINATION_POLICY_TYPES) do body = Fog::AWS[:auto_scaling].describe_termination_policy_types.body [ 'ClosestToNextInstanceHour', 'Default', 'NewestInstance', 'OldestInstance', 'OldestLaunchConfiguration' ].each do |v| returns(true, "TerminationPolicyTypes contains #{v}") do body['DescribeTerminationPolicyTypesResult']['TerminationPolicyTypes'].include?(v) end end body end end end fog-aws-3.18.0/tests/requests/auto_scaling/helper.rb000066400000000000000000000150161437344660100224520ustar00rootroot00000000000000class AWS module AutoScaling module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } PAGINATED = { 'NextToken' => Fog::Nullable::String } ACTIVITY = { 'ActivityId' => String, 'AutoScalingGroupName' => String, 'Cause' => Fog::Nullable::String, 'Description' => String, 'EndTime' => Time, 'Progress' => Integer, 'StartTime' => Time, 'StatusCode' => String, 'StatusMessage' => Fog::Nullable::String } ALARM = { 'AlarmARN' => String, 'AlarmName' => String } BLOCK_DEVICE_MAPPING = { 'DeviceName' => String, 'Ebs' => {'SnapshotId' => String, 'VolumeSize' => Integer}, 'VirtualName' => String } ENABLED_METRIC = { 'Granularity' => Array, 'Metric' => Array } INSTANCE = { 'AvailabilityZone' => String, 'HealthStatus' => String, 'InstanceId' => String, 'LaunchConfigurationName' => String, 'LifecycleState' => String } NOTIFICATION_CONFIGURATION = { 'AutoScalingGroupName' => String, 'NotificationType' => String, 'TopicARN' => String } SCHEDULED_UPDATE_GROUP_ACTION = { 'AutoScalingGroupName' => String, 'DesiredCapacity' => Integer, 'EndTime' => Time, 'MaxSize' => Integer, 'MinSize' => Integer, 'Recurrence' => String, 'ScheduledActionARN' => String, 'ScheduledActionName' => String, 'StartTime' => Time, } PROCESS_TYPE = { 'ProcessName' => String } SUSPENDED_PROCESS = PROCESS_TYPE.merge({ 'SuspensionReason' => String }) TAG_DESCRIPTION = { 'Key' => String, 'PropagateAtLaunch' => Fog::Boolean, 'ResourceId' => String, 'ResourceType' => String, 'Value' => Fog::Nullable::String } AUTO_SCALING_GROUP = { 'AutoScalingGroupARN' => String, 'AutoScalingGroupName' => String, 'AvailabilityZones' => Array, 'CreatedTime' => Time, 'DefaultCooldown' => Integer, 'DesiredCapacity' => Integer, 'EnabledMetrics' => [ENABLED_METRIC], 'HealthCheckGracePeriod' => Integer, 'HealthCheckType' => String, 'Instances' => [INSTANCE], 'LaunchConfigurationName' => String, 'LoadBalancerNames' => Array, 'MaxSize' => Integer, 'MinSize' => Integer, 'PlacementGroup' => Fog::Nullable::String, 'Status' => Fog::Nullable::String, 'SuspendedProcesses' => [SUSPENDED_PROCESS], 'Tags' => [TAG_DESCRIPTION], 'TargetGroupARNs' => Array, 'TerminationPolicies' => [String], 'VPCZoneIdentifier' => Fog::Nullable::String } AUTO_SCALING_INSTANCE_DETAILS = INSTANCE.merge({ 'AutoScalingGroupName' => String }) LAUNCH_CONFIGURATION = { 'BlockDeviceMappings' => [BLOCK_DEVICE_MAPPING], 'CreatedTime' => Time, 'ImageId' => String, 'InstanceMonitoring' => {'Enabled' => Fog::Boolean}, 'InstanceType' => String, 'KernelId' => Fog::Nullable::String, 'KeyName' => Fog::Nullable::String, 'LaunchConfigurationARN' => String, 'LaunchConfigurationName' => String, 'RamdiskId' => Fog::Nullable::String, 'SpotPrice' => Fog::Nullable::String, 'SecurityGroups' => Array, 'UserData' => Fog::Nullable::String } SCALING_POLICY = { 'AdjustmentType' => String, 'Alarms' => [ALARM], 'AutoScalingGroupName' => String, 'Cooldown' => Integer, 
'MinAdjustmentStep' => Integer, 'PolicyARN' => String, 'PolicyName' => String, 'ScalingAdjustment' => Integer } DESCRIBE_ADJUSTMENT_TYPES = BASIC.merge({ 'DescribeAdjustmentTypesResult' => { 'AdjustmentTypes' => [{'AdjustmentType' => String}] } }) DESCRIBE_AUTO_SCALING_GROUPS = BASIC.merge({ 'DescribeAutoScalingGroupsResult' => PAGINATED.merge({ 'AutoScalingGroups' => [AUTO_SCALING_GROUP], }) }) DESCRIBE_AUTO_SCALING_INSTANCES = BASIC.merge({ 'DescribeAutoScalingInstancesResult' => PAGINATED.merge({ 'AutoScalingInstances' => [AUTO_SCALING_INSTANCE_DETAILS], }) }) DESCRIBE_AUTO_SCALING_NOTIFICATION_TYPES = BASIC.merge({ 'DescribeAutoScalingNotificationTypesResult' => { 'AutoScalingNotificationTypes' => [String] } }) DESCRIBE_LAUNCH_CONFIGURATIONS = BASIC.merge({ 'DescribeLaunchConfigurationsResult' => PAGINATED.merge({ 'LaunchConfigurations' => [LAUNCH_CONFIGURATION], }) }) DESCRIBE_METRIC_COLLECTION_TYPES = BASIC.merge({ 'DescribeMetricCollectionTypesResult' => { 'Granularities' => [{'Granularity' => String}], 'Metrics' => [{'Metric' => String}] } }) DESCRIBE_NOTIFICATION_CONFIGURATIONS = BASIC.merge({ 'DescribeNotificationConfigurationsResult' => PAGINATED.merge({ 'NotificationConfigurations' => [NOTIFICATION_CONFIGURATION] }) }) DESCRIBE_POLICIES = BASIC.merge({ 'DescribePoliciesResult' => PAGINATED.merge({ 'ScalingPolicies' => [SCALING_POLICY] }) }) DESCRIBE_SCALING_ACTIVITIES = BASIC.merge({ 'DescribeScalingActivitiesResult' => PAGINATED.merge({ 'Activities' => [ACTIVITY], }) }) DESCRIBE_SCALING_PROCESS_TYPES = BASIC.merge({ 'DescribeScalingProcessTypesResult' => { 'Processes' => [PROCESS_TYPE] } }) DESCRIBE_SCHEDULED_ACTIONS = BASIC.merge({ 'DescribeScheduledActionsResult' => PAGINATED.merge({ 'ScheduledUpdateGroupActions' => [SCHEDULED_UPDATE_GROUP_ACTION] }) }) DESCRIBE_TAGS = BASIC.merge({ 'DescribeTagsResult' => PAGINATED.merge({ 'Tags' => [TAG_DESCRIPTION] }) }) DESCRIBE_TERMINATION_POLICY_TYPES = BASIC.merge({ 'DescribeTerminationPolicyTypesResult' => { 'TerminationPolicyTypes' => [String] } }) PUT_SCALING_POLICY = BASIC.merge({ 'PutScalingPolicyResult' => { 'PolicyARN' => String } }) TERMINATE_INSTANCE_IN_AUTO_SCALING_GROUP = BASIC.merge({ 'TerminateInstanceInAutoScalingGroupResult' => { 'Activity' => [ACTIVITY] } }) end end end fog-aws-3.18.0/tests/requests/auto_scaling/model_tests.rb000066400000000000000000000172011437344660100235130ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | model_tests', ['aws', 'auto_scaling']) do tests('success') do lc = nil lc_id = 'fog-model-lc' tests('configurations') do tests('getting a missing configuration') do returns(nil) { Fog::AWS[:auto_scaling].configurations.get('fog-no-such-lc') } end tests('create configuration') do lc = Fog::AWS[:auto_scaling].configurations.create(:id => lc_id, :image_id => 'ami-8c1fece5', :instance_type => 't1.micro') #tests("dns names is set").returns(true) { lc.dns_name.is_a?(String) } tests("created_at is set").returns(true) { Time === lc.created_at } #tests("policies is empty").returns([]) { lc.policies } end tests('all configurations') do lc_ids = Fog::AWS[:auto_scaling].configurations.all.map{|e| e.id} tests("contains lc").returns(true) { lc_ids.include? 
lc_id } end tests('get configuration') do lc2 = Fog::AWS[:auto_scaling].configurations.get(lc_id) tests('ids match').returns(lc_id) { lc2.id } end tests('creating a duplicate configuration') do raises(Fog::AWS::AutoScaling::IdentifierTaken) do Fog::AWS[:auto_scaling].configurations.create(:id => lc_id, :image_id => 'ami-8c1fece5', :instance_type => 't1.micro') end end end tests('groups') do tests('getting a missing group') do returns(nil) { Fog::AWS[:auto_scaling].groups.get('fog-no-such-asg') } end asg = nil asg_id = 'fog-model-asg' tests('create') do asg = Fog::AWS[:auto_scaling].groups.create(:id => asg_id, :availability_zones => ['us-east-1d'], :launch_configuration_name => lc_id) #tests("dns names is set").returns(true) { asg.dns_name.is_a?(String) } tests("created_at is set").returns(true) { Time === asg.created_at } #tests("policies is empty").returns([]) { asg.policies } end tests('all') do asg_ids = Fog::AWS[:auto_scaling].groups.all.map{|e| e.id} tests("contains asg").returns(true) { asg_ids.include? asg_id } end tests('get') do asg2 = Fog::AWS[:auto_scaling].groups.get(asg_id) tests('ids match').returns(asg_id) { asg2.id } end tests('suspend processes') do asg.suspend_processes() if Fog.mocking? tests('processes suspended').returns([]) { asg.suspended_processes } end end tests('resume processes') do asg.resume_processes() tests('no processes suspended').returns([]) { asg.suspended_processes } end tests('creating a duplicate group') do raises(Fog::AWS::AutoScaling::IdentifierTaken) do Fog::AWS[:auto_scaling].groups.create(:id => asg_id, :availability_zones => ['us-east-1d'], :launch_configuration_name => lc_id) end end tests('destroy group') do asg.destroy asg = nil end #tests('registering an invalid instance') do # raises(Fog::AWS::AutoScaling::InvalidInstance) { asg.register_instances('i-00000000') } #end #tests('deregistering an invalid instance') do # raises(Fog::AWS::AutoScaling::InvalidInstance) { asg.deregister_instances('i-00000000') } #end end tests('configurations') do tests('destroy configuration') do lc.destroy lc = nil end end #server = Fog::AWS[:compute].servers.create #tests('register instance') do # begin # elb.register_instances(server.id) # rescue Fog::AWS::ELB::InvalidInstance # # It may take a moment for a newly created instances to be visible to ELB requests # raise if @retried_registered_instance # @retried_registered_instance = true # sleep 1 # retry # end # # returns([server.id]) { elb.instances } #end #tests('instance_health') do # returns('OutOfService') do # elb.instance_health.detect{|hash| hash['InstanceId'] == server.id}['State'] # end # # returns([server.id]) { elb.instances_out_of_service } #end #tests('deregister instance') do # elb.deregister_instances(server.id) # returns([]) { elb.instances } #end #server.destroy #tests('disable_availability_zones') do # elb.disable_availability_zones(%w{us-east-1c us-east-1d}) # returns(%w{us-east-1a us-east-1b}) { elb.availability_zones.sort } #end #tests('enable_availability_zones') do # elb.enable_availability_zones(%w{us-east-1c us-east-1d}) # returns(%w{us-east-1a us-east-1b us-east-1c us-east-1d}) { elb.availability_zones.sort } #end #tests('default health check') do # default_health_check = { # "HealthyThreshold"=>10, # "Timeout"=>5, # "UnhealthyThreshold"=>2, # "Interval"=>30, # "Target"=>"TCP:80" # } # returns(default_health_check) { elb.health_check } #end #tests('configure_health_check') do # new_health_check = { # "HealthyThreshold"=>5, # "Timeout"=>10, # "UnhealthyThreshold"=>3, # "Interval"=>15, 
# "Target"=>"HTTP:80/index.html" # } # elb.configure_health_check(new_health_check) # returns(new_health_check) { elb.health_check } #end #tests('listeners') do # default_listener_description = [{"Listener"=>{"InstancePort"=>80, "Protocol"=>"HTTP", "LoadBalancerPort"=>80}, "PolicyNames"=>[]}] # tests('default') do # returns(1) { elb.listeners.size } # # listener = elb.listeners.first # returns([80,80,'HTTP', []]) { [listener.instance_port, listener.lb_port, listener.protocol, listener.policy_names] } # # end # # tests('#get') do # returns(80) { elb.listeners.get(80).lb_port } # end # # tests('create') do # new_listener = { 'InstancePort' => 443, 'LoadBalancerPort' => 443, 'Protocol' => 'TCP'} # elb.listeners.create(:instance_port => 443, :lb_port => 443, :protocol => 'TCP') # returns(2) { elb.listeners.size } # returns(443) { elb.listeners.get(443).lb_port } # end # # tests('destroy') do # elb.listeners.get(443).destroy # returns(nil) { elb.listeners.get(443) } # end #end #tests('policies') do # app_policy_id = 'my-app-policy' # # tests 'are empty' do # returns([]) { elb.policies.to_a } # end # # tests('#all') do # returns([]) { elb.policies.all.to_a } # end # # tests('create app policy') do # elb.policies.create(:id => app_policy_id, :cookie => 'my-app-cookie', :cookie_stickiness => :app) # returns(app_policy_id) { elb.policies.first.id } # end # # tests('get policy') do # returns(app_policy_id) { elb.policies.get(app_policy_id).id } # end # # tests('destroy app policy') do # elb.policies.first.destroy # returns([]) { elb.policies.to_a } # end # # lb_policy_id = 'my-lb-policy' # tests('create lb policy') do # elb.policies.create(:id => lb_policy_id, :expiration => 600, :cookie_stickiness => :lb) # returns(lb_policy_id) { elb.policies.first.id } # end # # tests('setting a listener policy') do # elb.set_listener_policy(80, lb_policy_id) # returns([lb_policy_id]) { elb.listeners.get(80).policy_names } # end # # tests('unsetting a listener policy') do # elb.unset_listener_policy(80) # returns([]) { elb.listeners.get(80).policy_names } # end # # tests('a malformed policy') do # raises(ArgumentError) { elb.policies.create(:id => 'foo', :cookie_stickiness => 'invalid stickiness') } # end #end end end fog-aws-3.18.0/tests/requests/auto_scaling/notification_configuration_tests.rb000066400000000000000000000126121437344660100300310ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | notification configuration requests', ['aws', 'auto_scaling']) do image_id = { # Ubuntu 12.04 LTS 64-bit EBS 'ap-northeast-1' => 'ami-60c77761', 'ap-southeast-1' => 'ami-a4ca8df6', 'ap-southeast-2' => 'ami-fb8611c1', 'eu-west-1' => 'ami-e1e8d395', 'sa-east-1' => 'ami-8cd80691', 'us-east-1' => 'ami-a29943cb', 'us-west-1' => 'ami-87712ac2', 'us-west-2' => 'ami-20800c10' } now = Time.now.utc.to_i lc_name = "fog-test-#{now}" asg_name = "fog-test-#{now}" topic_name = "fog-test-#{now}" begin topic = Fog::AWS[:sns].create_topic(topic_name).body topic_arn = topic['TopicArn'] rescue Fog::Errors::MockNotImplemented topic_arn = Fog::AWS::Mock.arn('sns', Fog::AWS[:auto_scaling].data[:owner_id], "fog-test-#{now}", Fog::AWS[:auto_scaling].region) end lc = Fog::AWS[:auto_scaling].create_launch_configuration(image_id[Fog::AWS[:auto_scaling].region], 't1.micro', lc_name) asg = Fog::AWS[:auto_scaling].create_auto_scaling_group(asg_name, "#{Fog::AWS[:auto_scaling].region}a", lc_name, 0, 0) tests('raises') do tests("#put_notification_configuration(non-existent-group)").raises(Fog::AWS::AutoScaling::ValidationError) do 
Fog::AWS[:auto_scaling].put_notification_configuration('fog-test-nonexistent-group', 'autoscaling:TEST_NOTIFICATION', topic_arn) end tests("#put_notification_configuration(null-types)").raises(Fog::AWS::AutoScaling::ValidationError) do Fog::AWS[:auto_scaling].put_notification_configuration(asg_name, [], topic_arn) end end tests('success') do tests("#put_notification_configuration(string)").formats(AWS::AutoScaling::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:auto_scaling].put_notification_configuration(asg_name, 'autoscaling:TEST_NOTIFICATION', topic_arn).body end tests("#describe_notification_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_NOTIFICATION_CONFIGURATIONS) do pending if Fog.mocking? body = Fog::AWS[:auto_scaling].describe_notification_configurations('AutoScalingGroupNames' => asg_name).body notification_configurations = body['DescribeNotificationConfigurationsResult']['NotificationConfigurations'] returns(true, 'exactly 1 configurations') do notification_configurations.size == 1 end returns(true) do config = notification_configurations.first config['AutoScalingGroupName'] == asg_name && config['TopicARN'] == topic_arn && config['NotificationType'] == 'autoscaling:TEST_NOTIFICATION' end body end tests("#put_notification_configuration(array)").formats(AWS::AutoScaling::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:auto_scaling].put_notification_configuration(asg_name, ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE'], topic_arn).body end tests("#describe_notification_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_NOTIFICATION_CONFIGURATIONS) do pending if Fog.mocking? body = Fog::AWS[:auto_scaling].describe_notification_configurations('AutoScalingGroupName' => asg_name).body notification_configurations = body['DescribeNotificationConfigurationsResult']['NotificationConfigurations'] returns(true, 'exactly 2 configurations') do notification_configurations.size == 2 end [ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE'].each do |type| returns(true) do notification_configurations.any? do |config| config['AutoScalingGroupName'] == asg_name && config['TopicARN'] == topic_arn && config['NotificationType'] == type end end end body end tests("#describe_notification_configurations(all)").formats(AWS::AutoScaling::Formats::DESCRIBE_NOTIFICATION_CONFIGURATIONS) do pending if Fog.mocking? body = Fog::AWS[:auto_scaling].describe_notification_configurations().body notification_configurations = body['DescribeNotificationConfigurationsResult']['NotificationConfigurations'] returns(true, 'at least 2 configurations') do notification_configurations.size >= 2 end [ 'autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE'].each do |type| returns(true) do notification_configurations.any? do |config| config['AutoScalingGroupName'] == asg_name && config['TopicARN'] == topic_arn && config['NotificationType'] == type end end end body end tests("#delete_notification_configuration").formats(AWS::AutoScaling::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:auto_scaling].delete_notification_configuration(asg_name, topic_arn).body end tests("#describe_notification_configurations").formats(AWS::AutoScaling::Formats::DESCRIBE_NOTIFICATION_CONFIGURATIONS) do pending if Fog.mocking? body = Fog::AWS[:auto_scaling].describe_notification_configurations('AutoScalingGroupNames' => asg_name).body returns(true) do body['DescribeNotificationConfigurationsResult']['NotificationConfigurations'].empty? 
end body end end Fog::AWS[:auto_scaling].delete_auto_scaling_group(asg_name) Fog::AWS[:auto_scaling].delete_launch_configuration(lc_name) if topic begin Fog::AWS[:sns].delete_topic(topic_arn) rescue Fog::Errors::MockNotImplemented end end end fog-aws-3.18.0/tests/requests/auto_scaling/tag_tests.rb000066400000000000000000000044261437344660100231730ustar00rootroot00000000000000Shindo.tests('AWS::AutoScaling | tag requests', ['aws', 'auto_scaling']) do image_id = { # Ubuntu 12.04 LTS 64-bit EBS 'ap-northeast-1' => 'ami-60c77761', 'ap-southeast-1' => 'ami-a4ca8df6', 'ap-southeast-2' => 'ami-fb8611c1', 'eu-west-1' => 'ami-e1e8d395', 'sa-east-1' => 'ami-8cd80691', 'us-east-1' => 'ami-a29943cb', 'us-west-1' => 'ami-87712ac2', 'us-west-2' => 'ami-20800c10' } now = Time.now.utc.to_i lc_name = "fog-test-#{now}" asg_name = "fog-test-#{now}" asg_tag = { 'Key' => 'Name', 'PropagateAtLaunch' => true, 'ResourceId' => asg_name, 'ResourceType' => 'auto-scaling-group', 'Value' => asg_name } Fog::AWS[:auto_scaling].create_launch_configuration(image_id[Fog::AWS[:auto_scaling].region], 't1.micro', lc_name) Fog::AWS[:auto_scaling].create_auto_scaling_group(asg_name, "#{Fog::AWS[:auto_scaling].region}a", lc_name, 0, 0, 'Tags' => [asg_tag]) tests('raises') do tests("#create_or_update_tags(empty)").raises(Fog::AWS::AutoScaling::ValidationError) do Fog::AWS[:auto_scaling].create_or_update_tags([]) end tests("#delete_tags(empty)").raises(Fog::AWS::AutoScaling::ValidationError) do Fog::AWS[:auto_scaling].delete_tags([]) end end tests('success') do tests("#describe_auto_scaling_groups(#{asg_name}").formats(AWS::AutoScaling::Formats::DESCRIBE_AUTO_SCALING_GROUPS) do body = Fog::AWS[:auto_scaling].describe_auto_scaling_groups('AutoScalingGroupNames' => asg_name).body auto_scaling_group = body['DescribeAutoScalingGroupsResult']['AutoScalingGroups'].first returns(true) { auto_scaling_group.key?('Tags') } returns(true) { auto_scaling_group['Tags'].size == 1 } returns(true) { auto_scaling_group['Tags'].first == asg_tag } body end tests("#describe_tags").formats(AWS::AutoScaling::Formats::DESCRIBE_TAGS) do pending if Fog.mocking? body = Fog::AWS[:auto_scaling].describe_tags.body tags = body['DescribeTagsResult']['Tags'] returns(true) { tags.any? {|tag| tag == asg_tag} } body end # TODO: more tests! end Fog::AWS[:auto_scaling].delete_auto_scaling_group(asg_name) Fog::AWS[:auto_scaling].delete_launch_configuration(lc_name) end fog-aws-3.18.0/tests/requests/beanstalk/000077500000000000000000000000001437344660100201375ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/beanstalk/application_tests.rb000066400000000000000000000101471437344660100242140ustar00rootroot00000000000000Shindo.tests('AWS::ElasticBeanstalk | application_tests', ['aws', 'beanstalk']) do def unique_name(prefix) #get time (with 1/100th of sec accuracy) #want unique domain name and if provider is fast, this can be called more than once per second time = Time.now.to_i.to_s prefix + time end unless Fog.mocking? @beanstalk = Fog::AWS[:beanstalk] end @test_description = "A unique description." @test_app_name = unique_name("fog-test-app-") tests('success') do pending if Fog.mocking? 
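  # The assertions below are pending under mocking, so they only run against the
  # live ElasticBeanstalk API; they walk one application through its lifecycle:
  # create_application, describe_applications (unfiltered and then filtered by name),
  # update_application with a new description, and finally delete_application.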
@describe_applications_format = { 'DescribeApplicationsResult' => { 'Applications' => [ 'ApplicationName' => String, 'ConfigurationTemplates' => [String], 'Description' => Fog::Nullable::String, 'DateCreated' => Time, 'DateUpdated' => Time, 'Versions' => [String] ]}, 'ResponseMetadata' => {'RequestId'=> String}, } tests("#describe_applications format").formats(@describe_applications_format) do result = @beanstalk.describe_applications.body end test("#create_application") { response = @beanstalk.create_application({ 'ApplicationName' => @test_app_name, 'Description' => @test_description }) result = false if response.status == 200 app_info = response.body['CreateApplicationResult']['Application'] if app_info if app_info['ApplicationName'] == @test_app_name && app_info['Description'] == @test_description && app_info['ConfigurationTemplates'].empty? && app_info['Versions'].empty? result = true end end end result } test("#describe_applications all") { response = @beanstalk.describe_applications result = false if response.status == 200 apps = response.body['DescribeApplicationsResult']['Applications'] apps.each { |app_info| if app_info if app_info['ApplicationName'] == @test_app_name && app_info['Description'] == @test_description && app_info['ConfigurationTemplates'].empty? && app_info['Versions'].empty? result = true end end } end result } test("#create_application filter") { # Test for a specific app response = @beanstalk.describe_applications([@test_app_name]) result = false if response.status == 200 apps = response.body['DescribeApplicationsResult']['Applications'] if apps && apps.length == 1 app_info = apps.first if app_info['ApplicationName'] == @test_app_name && app_info['Description'] == @test_description && app_info['ConfigurationTemplates'].empty? && app_info['Versions'].empty? result = true end end end result } test("#update_application description") { @test_description = "A completely new description." response = @beanstalk.update_application({ 'ApplicationName' => @test_app_name, 'Description' => @test_description }) result = false if response.status == 200 app_info = response.body['UpdateApplicationResult']['Application'] if app_info if app_info['ApplicationName'] == @test_app_name && app_info['Description'] == @test_description && app_info['ConfigurationTemplates'].empty? && app_info['Versions'].empty? result = true end end end result } test("#delete_application") { response = @beanstalk.delete_application(@test_app_name) result = false if response.status == 200 result = true end result } end end fog-aws-3.18.0/tests/requests/beanstalk/solution_stack_tests.rb000066400000000000000000000012171437344660100247500ustar00rootroot00000000000000Shindo.tests('AWS::ElasticBeanstalk | solution_stack_tests', ['aws', 'beanstalk']) do tests('success') do pending if Fog.mocking? 
@solution_stack_result_format = { 'ListAvailableSolutionStacksResult' => { 'SolutionStackDetails' => [ 'SolutionStackName' => String, 'PermittedFileTypes' => [String] ], 'SolutionStacks' => [String] }, 'ResponseMetadata' => {'RequestId'=> String}, } tests("#list_available_solution_stacks").formats(@solution_stack_result_format) do Fog::AWS[:beanstalk].list_available_solution_stacks.body end end end fog-aws-3.18.0/tests/requests/cdn/000077500000000000000000000000001437344660100167375ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/cdn/cdn_tests.rb000066400000000000000000000147001437344660100212540ustar00rootroot00000000000000Shindo.tests('Fog::CDN[:aws] | CDN requests', ['aws', 'cdn']) do @cf_connection = Fog::CDN[:aws] tests('distributions success') do test('get current ditribution list count') do @count= 0 response = @cf_connection.get_distribution_list if response.status == 200 @distributions = response.body['DistributionSummary'] @count = @distributions.count end response.status == 200 end test('create distribution') { result = false response = @cf_connection.post_distribution('S3Origin' => { 'DNSName' => 'test_cdn.s3.amazonaws.com'}, 'Enabled' => true) if response.status == 201 @dist_id = response.body['Id'] @etag = response.headers['ETag'] @caller_reference = response.body['DistributionConfig']['CallerReference'] if (@dist_id.length > 0) result = true end end result } test("get info on distribution #{@dist_id}") { result = false response = @cf_connection.get_distribution(@dist_id) if response.status == 200 @etag = response.headers['ETag'] status = response.body['Status'] if ((status == 'Deployed') or (status == 'InProgress')) and not @etag.nil? result = true end end result } test('list distributions') do result = false response = @cf_connection.get_distribution_list if response.status == 200 distributions = response.body['DistributionSummary'] if (distributions.count > 0) dist = distributions[0] dist_id = dist['Id'] end max_items = response.body['MaxItems'] if (dist_id.length > 0) and (max_items > 0) result = true end end result end test("invalidate paths") { response = @cf_connection.post_invalidation(@dist_id, ["/test.html", "/path/to/file.html"]) if response.status == 201 @invalidation_id = response.body['Id'] end response.status == 201 } test("list invalidations") { result = false response = @cf_connection.get_invalidation_list(@dist_id) if response.status == 200 if response.body['InvalidationSummary'].find { |f| f['Id'] == @invalidation_id } result = true end end result } test("get invalidation information") { result = false response = @cf_connection.get_invalidation(@dist_id, @invalidation_id) if response.status == 200 paths = response.body['InvalidationBatch']['Path'].sort status = response.body['Status'] if status.length > 0 and paths == [ '/test.html', '/path/to/file.html' ].sort result = true end end result } test("disable distribution #{@dist_id} - can take 15 minutes to complete...") { result = false response = @cf_connection.put_distribution_config(@dist_id, @etag, 'S3Origin' => { 'DNSName' => 'test_cdn.s3.amazonaws.com'}, 'Enabled' => false, 'CallerReference' => @caller_reference) if response.status == 200 @etag = response.headers['ETag'] unless @etag.nil? 
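      # CloudFront treats config updates as conditional writes: put_distribution_config
      # needs the distribution's current ETag plus the original CallerReference, and the
      # later delete_distribution call likewise needs whatever ETag is current once the
      # distribution reports 'Deployed' (the removal test re-reads it via get_distribution).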
result = true end end result } test("remove distribution #{@dist_id}") { result = true # unfortunately you can delete only after a distribution becomes Deployed Fog.wait_for { response = @cf_connection.get_distribution(@dist_id) @etag = response.headers['ETag'] response.status == 200 and response.body['Status'] == 'Deployed' } response = @cf_connection.delete_distribution(@dist_id, @etag) if response.status != 204 result = false end result } end tests('streaming distributions success') do test('get current streaming ditribution list count') do @count= 0 response = @cf_connection.get_streaming_distribution_list if response.status == 200 @distributions = response.body['StreamingDistributionSummary'] @count = @distributions.count end response.status == 200 end test('create distribution') { result = false response = @cf_connection.post_streaming_distribution('S3Origin' => { 'DNSName' => 'test_cdn.s3.amazonaws.com'}, 'Enabled' => true) if response.status == 201 @dist_id = response.body['Id'] @etag = response.headers['ETag'] @caller_reference = response.body['StreamingDistributionConfig']['CallerReference'] if (@dist_id.length > 0) result = true end end result } test("get info on distribution #{@dist_id}") { result = false response = @cf_connection.get_streaming_distribution(@dist_id) if response.status == 200 @etag = response.headers['ETag'] status = response.body['Status'] if ((status == 'Deployed') or (status == 'InProgress')) and not @etag.nil? result = true end end result } test('list streaming distributions') do result = false response = @cf_connection.get_streaming_distribution_list if response.status == 200 distributions = response.body['StreamingDistributionSummary'] if (distributions.count > 0) dist = distributions[0] dist_id = dist['Id'] end max_items = response.body['MaxItems'] if (dist_id.length > 0) and (max_items > 0) result = true end end result end test("disable distribution #{@dist_id} - can take 15 minutes to complete...") { result = false response = @cf_connection.put_streaming_distribution_config(@dist_id, @etag, 'S3Origin' => { 'DNSName' => 'test_cdn.s3.amazonaws.com'}, 'Enabled' => false, 'CallerReference' => @caller_reference) if response.status == 200 @etag = response.headers['ETag'] unless @etag.nil? 
result = true end end result } test("remove distribution #{@dist_id}") { result = true # unfortunately you can delete only after a distribution becomes Deployed Fog.wait_for { response = @cf_connection.get_streaming_distribution(@dist_id) @etag = response.headers['ETag'] response.status == 200 and response.body['Status'] == 'Deployed' } response = @cf_connection.delete_streaming_distribution(@dist_id, @etag) if response.status != 204 result = false end result } end end fog-aws-3.18.0/tests/requests/cloud_formation/000077500000000000000000000000001437344660100213575ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/cloud_formation/stack_tests.rb000066400000000000000000000114131437344660100242330ustar00rootroot00000000000000Shindo.tests('AWS::CloudFormation | stack requests', ['aws', 'cloudformation']) do @validate_template_format = { 'Description' => String, 'Parameters' => [ { 'DefaultValue' => Fog::Nullable::String, 'Description' => String, 'NoEcho' => Fog::Boolean, 'ParameterKey' => String, } ], 'RequestId' => String } @create_stack_format = { 'RequestId' => String, 'StackId' => String } @update_stack_format = { 'RequestId' => String, 'StackId' => String } @get_template_format = { 'RequestId' => String, 'TemplateBody' => String } @describe_stacks_format = { 'RequestId' => String, 'Stacks' => [ { 'CreationTime' => Time, 'DisableRollback' => Fog::Boolean, 'Outputs' => [ { 'OutputKey' => String, 'OutputValue' => String } ], 'Parameters' => [ { 'ParameterKey' => String, 'ParameterValue' => String, } ], 'StackId' => String, 'StackName' => String, 'StackStatus' => String, } ] } @describe_stack_events_format = { 'RequestId' => String, 'StackEvents' => [ { 'EventId' => String, 'LogicalResourceId' => String, 'PhysicalResourceId' => String, 'ResourceProperties' => String, 'ResourceStatus' => String, 'ResourceStatusReason' => Fog::Nullable::String, 'ResourceType' => String, 'StackId' => String, 'StackName' => String, 'Timestamp' => Time } ] } @describe_stack_resources_format = { 'RequestId' => String, 'StackResources' => [ { 'LogicalResourceId' => String, 'PhysicalResourceId' => String, 'ResourceStatus' => String, 'ResourceType' => String, 'StackId' => String, 'StackName' => String, 'Timestamp' => Time } ] } tests('success') do unless Fog.mocking? @stack_name = 'fogstack' << Time.now.to_i.to_s @keypair = Fog::Compute[:aws].key_pairs.create(:name => 'cloudformation') @template_url = 'https://s3.amazonaws.com/cloudformation-templates-us-east-1/EC2InstanceSample-1.0.0.template' end tests("validate_template('TemplateURL' => '#{@template_url}')").formats(@validate_template_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].validate_template('TemplateURL' => @template_url).body end tests("create_stack('#{@stack_name}', 'TemplateURL' => '#{@template_url}', Parameters => {'KeyName' => 'cloudformation'})").formats(@create_stack_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].create_stack( @stack_name, 'TemplateURL' => @template_url, 'Parameters' => {'KeyName' => 'cloudformation'} ).body end tests("update_stack('#{@stack_name}', 'TemplateURL' => '#{@template_url}', Parameters => {'KeyName' => 'cloudformation'})").formats(@update_stack_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].update_stack( @stack_name, 'TemplateURL' => @template_url, 'Parameters' => {'KeyName' => 'cloudformation'} ).body end tests("get_template('#{@stack_name})").formats(@get_template_format) do pending if Fog.mocking? 
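    # The read-only calls that follow (get_template, describe_stacks,
    # describe_stack_events, describe_stack_resources) all operate on the stack
    # created above; a one second sleep is slipped in between them purely to avoid
    # tripping CloudFormation's request throttling.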
Fog::AWS[:cloud_formation].get_template(@stack_name).body end tests("describe_stacks").formats(@describe_stacks_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].describe_stacks.body end sleep(1) # avoid throttling tests("describe_stack_events('#{@stack_name}')").formats(@describe_stack_events_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].describe_stack_events(@stack_name).body end tests("describe_stack_resources('StackName' => '#{@stack_name}')").formats(@describe_stack_resources_format) do pending if Fog.mocking? Fog::AWS[:cloud_formation].describe_stack_resources('StackName' => @stack_name).body end tests("delete_stack('#{@stack_name}')").succeeds do pending if Fog.mocking? Fog::AWS[:cloud_formation].delete_stack(@stack_name) end tests("list_stacks").succeeds do pending if Fog.mocking? Fog::AWS[:cloud_formation].list_stacks.body end tests("list_stack_resources").succeeds do pending if Fog.mocking? Fog::AWS[:cloud_formation].list_stack_resources("StackName"=>@stack_name).body end unless Fog.mocking? @keypair.destroy end end tests('failure') do end end fog-aws-3.18.0/tests/requests/cloud_watch/000077500000000000000000000000001437344660100204675ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/cloud_watch/get_metric_statistics_tests.rb000066400000000000000000000017771437344660100266460ustar00rootroot00000000000000Shindo.tests('AWS::CloudWatch | metric requests', ['aws', 'cloudwatch']) do tests('success') do @metrics_statistic_format = { 'GetMetricStatisticsResult' => { 'Label' => String, 'Datapoints' => [{ "Timestamp" => Time, 'Unit' => String, 'Minimum' => Float, 'Maximum' => Float, 'Average' => Float, 'Sum' => Float, 'SampleCount' => Float }], }, 'ResponseMetadata' => { 'RequestId' => String } } tests("#get_metric_statistics").formats(@metrics_statistic_format) do pending if Fog.mocking? instanceId = 'i-420c352f' Fog::AWS[:cloud_watch].get_metric_statistics({'Statistics' => ['Minimum','Maximum','Sum','SampleCount','Average'], 'StartTime' => (Time.now-600).iso8601, 'EndTime' => Time.now.iso8601, 'Period' => 60, 'MetricName' => 'DiskReadBytes', 'Namespace' => 'AWS/EC2', 'Dimensions' => [{'Name' => 'InstanceId', 'Value' => instanceId}]}).body end end end fog-aws-3.18.0/tests/requests/cloud_watch/list_metrics_test.rb000066400000000000000000000037401437344660100245600ustar00rootroot00000000000000Shindo.tests('AWS::CloudWatch | metric requests', ['aws', 'cloudwatch']) do tests('success') do @metrics_list_format = { 'ListMetricsResult' => { 'Metrics' => [{ 'Dimensions' => [{ 'Name' => String, 'Value' => String }], "MetricName" => String, "Namespace" => String }], 'NextToken' => Fog::Nullable::String, }, 'ResponseMetadata' => {"RequestId"=> String}, } @instanceId = 'i-2f3eab59' @dimension_filtered_metrics_list_format = { 'ListMetricsResult' => { 'Metrics' => [{ 'Dimensions' => [{ 'Name' => 'InstanceId', 'Value' => @instanceId }], "MetricName" => String, "Namespace" => String }], 'NextToken' => Fog::Nullable::String, }, 'ResponseMetadata' => {"RequestId"=> String}, } tests("#list_metrics").formats(@metrics_list_format) do pending if Fog.mocking? Fog::AWS[:cloud_watch].list_metrics.body end tests("#dimension_filtered_list_metrics").formats(@dimension_filtered_metrics_list_format) do pending if Fog.mocking? Fog::AWS[:cloud_watch].list_metrics('Dimensions' => [{'Name' => 'InstanceId', 'Value' => @instanceId}]).body end tests("#metric_name_filtered_list_metrics").returns(true) do pending if Fog.mocking? 
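      # Sketch (not exercised by this assertion): the ListMetrics filters used individually
      # in this file can also be combined to narrow the result to a single instance's
      # metric. The hash below is illustrative and unused.
      example_filters = {
        'Namespace'  => 'AWS/EC2',
        'MetricName' => 'CPUUtilization',
        'Dimensions' => [{'Name' => 'InstanceId', 'Value' => @instanceId}]
      }
      # e.g. Fog::AWS[:cloud_watch].list_metrics(example_filters)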
metricName = "CPUUtilization" Fog::AWS[:cloud_watch].list_metrics('MetricName' => metricName).body['ListMetricsResult']['Metrics'].all? do |metric| metric['MetricName'] == metricName end end tests("#namespace_filtered_list_metrics").returns(true) do pending if Fog.mocking? namespace = "AWS/EC2" Fog::AWS[:cloud_watch].list_metrics('Namespace' => namespace).body['ListMetricsResult']['Metrics'].all? do |metric| metric['Namespace'] == namespace end end end end fog-aws-3.18.0/tests/requests/cloud_watch/put_metric_data_tests.rb000066400000000000000000000025751437344660100254130ustar00rootroot00000000000000Shindo.tests('AWS::CloudWatch | metric requests', ['aws', 'cloudwatch']) do tests('success') do namespace = 'Custom/Test' @puts_format = {'ResponseMetadata' => {'RequestId' => String}} tests('#puts_value').formats(@puts_format) do pending if Fog.mocking? Fog::AWS[:cloud_watch].put_metric_data(namespace, [{'MetricName' => 'RequestTest', 'Unit' => 'None', 'Value' => 1}]).body end tests('#puts_statistics_set').succeeds do pending if Fog.mocking? Fog::AWS[:cloud_watch].put_metric_data(namespace, [{'MetricName' => 'RequestTest', 'Unit' => 'None', 'StatisticValues' => {'Minimum' => 0, 'Maximum' => 9, 'Sum' => 45, 'SampleCount' => 10, 'Average' => 4.5}}]).body end tests('#puts with dimensions').succeeds do pending if Fog.mocking? dimensions = [{}] Fog::AWS[:cloud_watch].put_metric_data(namespace, [{'MetricName' => 'RequestTest', 'Unit' => 'None', 'Value' => 1, 'Dimensions' => dimensions}]).body end tests('#puts more than one').succeeds do pending if Fog.mocking? datapoints = (0...3).map do |i| dp = {'MetricName' => "#{i}RequestTest", 'Unit' => 'None', 'Value' => i} if i%2==0 dp['Dimensions'] = [{'Name' => 'Ruler', 'Value' => "measurement_#{i}"}] end dp end Fog::AWS[:cloud_watch].put_metric_data(namespace, datapoints).body end end end fog-aws-3.18.0/tests/requests/compute/000077500000000000000000000000001437344660100176475ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/compute/address_tests.rb000066400000000000000000000131641437344660100230500ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | address requests', ['aws']) do compute = Fog::Compute[:aws] @addresses_format = { 'addressesSet' => [{ 'allocationId' => Fog::Nullable::String, 'associationId' => Fog::Nullable::String, 'domain' => String, 'instanceId' => Fog::Nullable::String, 'publicIp' => String }], 'requestId' => String } @server = compute.servers.create @server.wait_for { ready? } @ip_address = @server.public_ip_address tests('success') do @public_ip = nil @vpc_public_ip = nil @vpc_allocation_id = nil tests('#allocate_address').formats({'domain' => String, 'publicIp' => String, 'requestId' => String}) do data = compute.allocate_address.body @public_ip = data['publicIp'] data end tests("#allocate_address('vpc')").formats({'domain' => String, 'publicIp' => String, 'allocationId' => String, 'requestId' => String}) do data = compute.allocate_address('vpc').body @vpc_public_ip = data['publicIp'] @vpc_allocation_id = data['allocationId'] data end # the following 2 tests imply that your account is old enough that the tested region does not have a default VPC. These methods do not work with an ip created in a vpc. 
this probably means that they will probably fail if they aren't mocked tests("#move_address_to_vpc('#{@public_ip}')").formats({'status' => String, 'allocationId' => String, 'requestId' => String}) do compute.move_address_to_vpc(@public_ip).body end tests("#restore_address_to_classic('#{@public_ip}')").formats({'status' => String, 'publicIp' => String, 'requestId' => String}) do compute.restore_address_to_classic(@public_ip).body end tests('#describe_addresses').formats(@addresses_format) do compute.describe_addresses.body end tests("#describe_addresses('public-ip' => #{@public_ip}')").formats(@addresses_format) do compute.describe_addresses('public-ip' => @public_ip).body end tests("#associate_address('#{@server.identity}', '#{@public_ip}')").formats(AWS::Compute::Formats::BASIC) do compute.associate_address(@server.identity, @public_ip).body end tests("#associate_address({:instance_id=>'#{@server.identity}', :public_ip=>'#{@public_ip}'})").formats(AWS::Compute::Formats::BASIC) do compute.associate_address({:instance_id=>@server.identity,:public_ip=> @public_ip}).body end tests("#dissassociate_address('#{@public_ip}')").formats(AWS::Compute::Formats::BASIC) do compute.disassociate_address(@public_ip).body end tests("#associate_address('#{@server.id}', nil, nil, '#{@vpc_allocation_id}')").formats(AWS::Compute::Formats::BASIC) do compute.associate_address(@server.id, nil, nil, @vpc_allocation_id).body end $pry = true tests("#associate_address({:instance_id=>'#{@server.id}', :allocation_id=>'#{@vpc_allocation_id}'})").formats(AWS::Compute::Formats::BASIC) do compute.associate_address({:instance_id=>@server.id, :allocation_id=>@vpc_allocation_id}).body end tests("#disassociate_address('#{@vpc_public_ip}')").raises(Fog::AWS::Compute::Error) do compute.disassociate_address(@vpc_public_ip) end tests("#release_address('#{@public_ip}')").formats(AWS::Compute::Formats::BASIC) do compute.release_address(@public_ip).body end tests("#disassociate_address('#{@vpc_public_ip}', '#{@vpc_allocation_id}')").formats(AWS::Compute::Formats::BASIC) do address = compute.describe_addresses('public-ip' => @vpc_public_ip).body['addressesSet'].first compute.disassociate_address(@vpc_public_ip, address['associationId']).body end tests("#release_address('#{@vpc_allocation_id}')").formats(AWS::Compute::Formats::BASIC) do compute.release_address(@vpc_allocation_id).body end end tests('failure') do @address = compute.addresses.create @vpc_address = compute.addresses.create(:domain => 'vpc') tests("#associate_addresses({:instance_id =>'i-00000000', :public_ip => '#{@address.identity}')}").raises(Fog::AWS::Compute::NotFound) do compute.associate_address({:instance_id => 'i-00000000', :public_ip => @address.identity}) end tests("#associate_addresses({:instance_id =>'#{@server.identity}', :public_ip => '127.0.0.1'})").raises(Fog::AWS::Compute::Error) do compute.associate_address({:instance_id => @server.identity, :public_ip => '127.0.0.1'}) end tests("#associate_addresses({:instance_id =>'i-00000000', :public_ip => '127.0.0.1'})").raises(Fog::AWS::Compute::NotFound) do compute.associate_address({:instance_id =>'i-00000000', :public_ip =>'127.0.0.1'}) end tests("#restore_address_to_classic('#{@vpc_address.identity}')").raises(Fog::AWS::Compute::Error) do compute.restore_address_to_classic(@vpc_address.identity) end tests("#disassociate_addresses('127.0.0.1') raises BadRequest error").raises(Fog::AWS::Compute::Error) do compute.disassociate_address('127.0.0.1') end 
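    # Sketch mirroring the success tests above: releasing an Elastic IP takes the public IP
    # for EC2-Classic ('standard' domain) addresses but the allocation id for VPC-domain
    # addresses. This helper is illustrative only and not called by the assertions below.
    release_argument_for = lambda do |address|
      address.domain == 'vpc' ? address.allocation_id : address.public_ip
    end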
tests("#release_address('127.0.0.1')").raises(Fog::AWS::Compute::Error) do compute.release_address('127.0.0.1') end tests("#release_address('#{@vpc_address.identity}')").raises(Fog::AWS::Compute::Error) do compute.release_address(@vpc_address.identity) end if Fog.mocking? old_limit = compute.data[:limits][:addresses] tests("#allocate_address", "limit exceeded").raises(Fog::AWS::Compute::Error) do compute.data[:limits][:addresses] = 0 compute.allocate_address end compute.data[:limits][:addresses] = old_limit end @address.destroy @vpc_address.destroy end @server.destroy end fog-aws-3.18.0/tests/requests/compute/assign_private_ip_tests.rb000066400000000000000000000045001437344660100251230ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | internet_gateway requests', ['aws']) do tests('success') do Fog::AWS::Compute::Mock.reset if Fog.mocking? @vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @vpc_id = @vpc.id @subnet=Fog::Compute[:aws].subnets.create('vpc_id' => @vpc_id, 'cidr_block' => '10.0.10.0/24') @subnet_id = @subnet.subnet_id @network_interface = Fog::Compute[:aws].network_interfaces.new(:subnet_id => @subnet_id) @network_interface.save @network_interface_id = @network_interface.network_interface_id @ip_address = Fog::AWS::Mock.ip_address @second_ip_address = Fog::AWS::Mock.ip_address tests("#assign_private_ip_addresses('#{@network_interface_id}', {'PrivateIpAddresses'=>['#{@ip_address}','#{@second_ip_address}']})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].assign_private_ip_addresses(@network_interface_id, { 'PrivateIpAddresses' =>[@ip_address, @second_ip_address]}).body end tests("#assign_private_ip_addresses('#{@network_interface_id}', {'SecondaryPrivateIpAddressCount'=>4})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].assign_private_ip_addresses(@network_interface_id, {'SecondaryPrivateIpAddressCount'=>4}).body end @network_interface.destroy @subnet.destroy @vpc.destroy end tests('failure') do Fog::AWS::Compute::Mock.reset if Fog.mocking? 
@vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @vpc_id = @vpc.id @subnet=Fog::Compute[:aws].subnets.create('vpc_id' => @vpc_id, 'cidr_block' => '10.0.10.0/24') @subnet_id = @subnet.subnet_id @network_interface = Fog::Compute[:aws].network_interfaces.new(:subnet_id => @subnet_id) @network_interface.save @network_interface_id = @network_interface.network_interface_id @ip_address = Fog::AWS::Mock.ip_address tests("#assign_private_ip_addresses('#{@network_interface_id}', {'PrivateIpAddresses'=>['#{@ip_address}','#{@second_ip_address}'], 'SecondaryPrivateIpAddressCount'=>4 })").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].assign_private_ip_addresses(@network_interface_id, { 'PrivateIpAddresses' =>[@ip_address, @second_ip_address], 'SecondaryPrivateIpAddressCount'=>4 }).body end @network_interface.destroy @subnet.destroy @vpc.destroy end end fog-aws-3.18.0/tests/requests/compute/availability_zone_tests.rb000066400000000000000000000012761437344660100251310ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | availability zone requests', ['aws']) do @availability_zones_format = { 'availabilityZoneInfo' => [{ 'messageSet' => [], 'regionName' => String, 'zoneName' => String, 'zoneState' => String }], 'requestId' => String } tests('success') do tests('#describe_availability_zones').formats(@availability_zones_format) do Fog::Compute[:aws].describe_availability_zones.body end tests("#describe_availability_zones('zone-name' => 'us-east-1a')").formats(@availability_zones_format) do Fog::Compute[:aws].describe_availability_zones('zone-name' => 'us-east-1a').body end end end fog-aws-3.18.0/tests/requests/compute/client_tests.rb000066400000000000000000000020641437344660100226760ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | account tests', ['aws']) do if Fog.mocking? tests('check for vpc') do tests('supports both vpc and ec2 in compatibility mode').succeeds do client = Fog::Compute[:aws] client.enable_ec2_classic data = Fog::Compute[:aws].describe_account_attributes.body data['accountAttributeSet'].any? { |s| [*s["values"]].include?("VPC") && [*s["values"]].include?("EC2") } end tests('supports VPC in vpc mode').succeeds do client = Fog::Compute[:aws] client.enable_ec2_classic data = Fog::Compute[:aws].describe_account_attributes.body data['accountAttributeSet'].any? { |s| [*s["values"]].include?("VPC") } end tests('does not support VPC and EC2 in vpc mode').succeeds do client = Fog::Compute[:aws] client.disable_ec2_classic data = Fog::Compute[:aws].describe_account_attributes.body !data['accountAttributeSet'].any? 
{ |s| [*s["values"]].include?("VPC") && [*s["values"]].include?("EC2") } end end end end fog-aws-3.18.0/tests/requests/compute/dhcp_options_tests.rb000066400000000000000000000025671437344660100241210ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | dhcp_options requests', ['aws']) do @dhcp_options_format = { 'dhcpOptionsSet' => [{ 'dhcpOptionsId' => String, 'dhcpConfigurationSet' => Hash, 'tagSet' => Fog::Nullable::Hash, }], 'requestId' => String } tests('success') do @vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @vpc_id = @vpc.id tests('#create_dhcp_options').formats(@dhcp_options_format) do data = Fog::Compute[:aws].create_dhcp_options({'domain-name' => 'example.com', 'domain-name-servers' => '10.10.10.10'}).body @dopt_id = data['dhcpOptionsSet'].first['dhcpOptionsId'] data end tests('#describe_dhcp_options').formats(@dhcp_options_format) do Fog::Compute[:aws].describe_dhcp_options.body end tests("#associate_dhcp_options('#{@dopt_id}, #{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].associate_dhcp_options(@dopt_id, @vpc_id).body end tests("#associate_default_dhcp_options('default', #{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].associate_dhcp_options('default', @vpc_id).body end tests("#delete_dhcp_options('#{@dopt_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_dhcp_options(@dopt_id).body end @vpc.destroy end end fog-aws-3.18.0/tests/requests/compute/helper.rb000066400000000000000000000012761437344660100214610ustar00rootroot00000000000000class AWS module Compute module Formats BASIC = { 'requestId' => String } DESCRIBE_IMAGES = BASIC.merge({ "imagesSet" => [{ "imageId" => String, "imageLocation" => String, "imageState" => String, "imageOwnerId" => String, "creationDate" => Fog::Nullable::String, "isPublic" => Fog::Nullable::Boolean, "architecture" => String, "imageType" => String, "imageOwnerAlias" => String, "rootDeviceType" => String, "blockDeviceMapping" => Array, "virtualizationType" => String, "hypervisor" => String }] }) end end end fog-aws-3.18.0/tests/requests/compute/image_tests.rb000066400000000000000000000163161437344660100225070ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | image requests', ['aws']) do @describe_images_format = { 'imagesSet' => [{ 'architecture' => String, 'blockDeviceMapping' => [Fog::Nullable::Hash], 'description' => Fog::Nullable::String, 'hypervisor' => String, 'imageId' => String, 'imageLocation' => String, 'imageOwnerAlias' => Fog::Nullable::String, 'imageOwnerId' => String, 'imageState' => String, 'imageType' => String, 'isPublic' => Fog::Boolean, 'kernelId' => String, 'name' => String, 'platform' => Fog::Nullable::String, 'productCodes' => [], 'ramdiskId' => Fog::Nullable::String, 'rootDeviceName' => String, 'rootDeviceType' => String, 'stateReason' => {}, 'tagSet' => {}, 'virtualizationType' => String, 'creationDate' => Fog::Nullable::Time, 'enaSupport' => Fog::Nullable::Boolean }], 'requestId' => String, } @register_image_format = { 'imageId' => String, 'requestId' => String } @modify_image_attribute_format = { 'return' => Fog::Boolean, 'requestId' => String } @describe_image_attribute_format = { 'requestId' => String, 'imageId' => String, 'launchPermission' => [Fog::Nullable::String] } @create_image_format = { 'requestId' => String, 'imageId' => String } @image_copy_result = { 'requestId' => String, 'imageId' => String } tests('success') do # the result for this is HUGE and relatively uninteresting... 
# tests("#describe_images").formats(@images_format) do # Fog::Compute[:aws].describe_images.body # end @image_id = 'ami-1aad5273' if Fog.mocking? @other_account = Fog::AWS::Compute.new(:aws_access_key_id => 'other', :aws_secret_access_key => 'account') @server = Fog::Compute[:aws].servers.create @server.wait_for{state == 'running'} @created_image tests("#create_image").formats(@create_image_format) do result = Fog::Compute[:aws].create_image(@server.id, 'Fog-Test-Image', 'Fog Test Image', false).body @created_image = Fog::Compute[:aws].images.get(result['imageId']) result end tests("#create_image - no reboot").formats(@create_image_format) do result = Fog::Compute[:aws].create_image(@server.id, 'Fog-Test-Image', 'Fog Test Image', true).body @created_image = Fog::Compute[:aws].images.get(result['imageId']) result end tests("#create_image - automatic ebs image registration").returns(true) do create_image_response = Fog::Compute[:aws].create_image(@server.id, 'Fog-Test-Image', 'Fog Test Image') Fog::Compute[:aws].images.get(create_image_response.body['imageId']) != nil end @server.destroy tests("#copy_image (#{@image_id}, 'eu-west-1')").formats(@image_copy_result) do data = Fog::Compute.new(:provider => :aws, :region => "us-west-1", :version => "2013-02-01").copy_image(@image_id, "eu-east-1").body @eu_image_id = data['imageId'] data end tests("#register_image").formats(@register_image_format) do @image = Fog::Compute[:aws].register_image('image', 'image', '/dev/sda1').body end tests("#register_image - with ebs block device mapping").formats(@register_image_format) do @ebs_image = Fog::Compute[:aws].register_image('image', 'image', '/dev/sda1', [ { 'DeviceName' => '/dev/sdh', "SnapshotId" => "snap-123456789", "VolumeSize" => "10G", "DeleteOnTermination" => true}]).body end tests("#register_image - with ephemeral block device mapping").formats(@register_image_format) do @ephemeral_image = Fog::Compute[:aws].register_image('image', 'image', '/dev/sda1', [ { 'VirtualName' => 'ephemeral0', "DeviceName" => "/dev/sdb"} ]).body end @image_id = @image['imageId'] sleep 1 tests("#describe_images('Owner' => 'self')").formats(@describe_images_format) do Fog::Compute[:aws].describe_images('Owner' => 'self').body end tests("#describe_images('state' => 'available')").formats(@describe_images_format) do Fog::Compute[:aws].describe_images('state' => 'available').body end tests("other_account#describe_images('image-id' => '#{@image_id}')").returns([]) do @other_account.describe_images('image-id' => @image_id).body['imagesSet'] end tests("#modify_image_attribute('#{@image_id}', 'Add.UserId' => ['#{@other_account.data[:owner_id]}'])").formats(@modify_image_attribute_format) do Fog::Compute[:aws].modify_image_attribute(@image_id, { 'Add.UserId' => [@other_account.data[:owner_id]] }).body end tests("#describe_image_attribute('#{@image_id}', 'launchPermission'])").formats(@describe_image_attribute_format) do Fog::Compute[:aws].describe_image_attribute(@image_id, 'launchPermission' ).body end tests("other_account#describe_images('image-id' => '#{@image_id}')").returns([@image_id]) do @other_account.describe_images('image-id' => @image_id).body['imagesSet'].map {|i| i['imageId'] } end tests("#modify_image_attribute('#{@image_id}', 'Remove.UserId' => ['#{@other_account.data[:owner_id]}'])").formats(@modify_image_attribute_format) do Fog::Compute[:aws].modify_image_attribute(@image_id, { 'Remove.UserId' => [@other_account.data[:owner_id]] }).body end tests("other_account#describe_images('image-id' => 
'#{@image_id}')").returns([]) do @other_account.describe_images('image-id' => @image_id).body['imagesSet'] end end tests("#describe_images('image-id' => '#{@image_id}')").formats(@describe_images_format) do @other_image = Fog::Compute[:aws].describe_images('image-id' => @image_id).body end unless Fog.mocking? tests("#describe_images('Owner' => '#{@other_image['imageOwnerAlias']}', 'image-id' => '#{@image_id}')").formats(@describe_images_format) do Fog::Compute[:aws].describe_images('Owner' => @other_image['imageOwnerAlias'], 'image-id' => @image_id).body end end #NOTE: waiting for the image to complete can sometimes take up to 1 hour # for quicker tests: uncomment the rest of this block #Fog.wait_for { Fog::Compute.new(:provider => :aws, :region => "us-west-1").snapshots.get(@eu_image_id) } #tests("#delete_snapshots(#{@eu_image_id})").formats(AWS::Compute::Formats::BASIC) do # Fog::Compute.new(:provider => :aws, :region => "us-west-1").delete_snapshot(@eu_image_id).body #end end tests('failure') do tests("#modify_image_attribute(nil, { 'Add.Group' => ['all'] })").raises(ArgumentError) do Fog::Compute[:aws].modify_image_attribute(nil, { 'Add.Group' => ['all'] }).body end tests("#modify_image_attribute('ami-00000000', { 'Add.UserId' => ['123456789012'] })").raises(Fog::AWS::Compute::NotFound) do pending unless Fog.mocking? Fog::Compute[:aws].modify_image_attribute('ami-00000000', { 'Add.UserId' => ['123456789012'] }).body end end end fog-aws-3.18.0/tests/requests/compute/instance_attrib_tests.rb000066400000000000000000000170021437344660100245670ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | describe_instance_attribute request', ['aws']) do @instance_attributes = [ 'instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination', 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'productCodes', 'groupSet', 'ebsOptimized', 'sourceDestCheck', 'sriovNetSupport' ] @instance_attribute_common_format = { "requestId" => String, "instanceId" => String } @instance_attribute_format = { "instanceType" => Fog::Nullable::String, "kernelId" => Fog::Nullable::String, "ramdiskId" => Fog::Nullable::String, "userData" => Fog::Nullable::String, "disableApiTermination" => Fog::Nullable::Boolean, "instanceInitiatedShutdownBehavior" => Fog::Nullable::String, "rootDeviceName" => Fog::Nullable::String, "blockDeviceMapping" => [Fog::Nullable::Hash], "productCodes" => Fog::Nullable::Array, "ebsOptimized" => Fog::Nullable::Boolean, "sriovNetSupport" => Fog::Nullable::String, "sourceDestCheck" => Fog::Nullable::Boolean, "groupSet" => [Fog::Nullable::Hash] } tests('success') do # In mocking the groupSet attribute is returned as nil if Fog.mocking? @instance_attribute_format["groupSet"] = Fog::Nullable::Array end # Setting up the environment @instance_id = nil @ami = 'ami-79c0ae10' key_name = uniq_id('fog-test-key') @key = Fog::Compute[:aws].key_pairs.create(:name => key_name) instance_type = "t1.micro" @az = "us-east-1a" vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/16') subnet = Fog::Compute[:aws].subnets.create('vpc_id' => vpc.id, 'cidr_block' => '10.0.10.0/16', "availability_zone" => @az) security_groups = Fog::Compute[:aws].security_groups.all @launch_config = { :image_id => @ami, :flavor_id => instance_type, :key_name => key_name, :subnet_id => subnet.subnet_id, :disable_api_termination => false } if !Fog.mocking? 
security_group = security_groups.select { |group| group.vpc_id == vpc.id } security_group_ids = security_group.collect { |group| group.group_id } @launch_config[:security_group_ids] = security_group_ids block_device_mapping = [{"DeviceName" => "/dev/sdp1", "VirtualName" => nil, "Ebs.VolumeSize" => 15}] @launch_config[:block_device_mapping] = block_device_mapping else security_group_ids = [nil] # In mocking the first device provided in block_device_mapping is set as the root device. There is no root device by default. So setting the root device here so that the tests for rootDeviceName and blockDeviceMapping attribute get passed block_device_mapping = [{"DeviceName" => "/dev/sda1", "VirtualName" => nil, "Ebs.VolumeSize" => 15},{"DeviceName" => "/dev/sdp1", "VirtualName" => nil, "Ebs.VolumeSize" => 15}] @launch_config[:block_device_mapping] = block_device_mapping end server = Fog::Compute[:aws].servers.create(@launch_config) server.wait_for { ready? } server.reload @instance_id = server.id ################ # BEGIN TESTS # ################ @instance_attributes.each do |attrib| # Creating format schema for each attribute describe_instance_attribute_format = @instance_attribute_common_format.clone if attrib == "kernel" key = "kernelId" elsif attrib == "ramdisk" key = "ramdiskId" else key = attrib end describe_instance_attribute_format[key] = @instance_attribute_format[key] # Running format check tests("#describe_instance_attribute('#{@instance_id}', #{attrib})").formats(describe_instance_attribute_format,false) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, attrib).body end # Running test to see proper instance Id is get in each response tests("#describe_instance_attribute('#{@instance_id}', #{attrib})").returns(@instance_id) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, attrib).body['instanceId'] end end # Test for instanceType attribute tests("#describe_instance_attribute(#{@instance_id}, 'instanceType')").returns(instance_type) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'instanceType').body["instanceType"] end # Test for disableApiTermination attribute tests("#describe_instance_attribute(#{@instance_id}, 'disableApiTermination')").returns(false) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'disableApiTermination').body["disableApiTermination"] end # Test for instanceInitiatedShutdownBehavior attribute tests("#describe_instance_attribute(#{@instance_id}, 'instanceInitiatedShutdownBehavior')").returns('stop') do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'instanceInitiatedShutdownBehavior').body["instanceInitiatedShutdownBehavior"] end # Test for rootDeviceName attribute tests("#describe_instance_attribute(#{@instance_id}, 'rootDeviceName')").returns('/dev/sda1') do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'rootDeviceName').body["rootDeviceName"] end # Test to see there are two devices for blockDeviceMapping attribute tests("#describe_instance_attribute(#{@instance_id}, 'blockDeviceMapping')").returns(2) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'blockDeviceMapping').body["blockDeviceMapping"].count end # Test to check the device name /dev/sdp1 passed in block_device_mapping is returned correctly tests("#describe_instance_attribute(#{@instance_id}, 'blockDeviceMapping')").returns("/dev/sdp1") do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'blockDeviceMapping').body["blockDeviceMapping"].last["deviceName"] end # Test for groupSet attribute 
tests("#describe_instance_attribute(#{@instance_id}, 'groupSet')").returns(security_group_ids) do group_set = Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'groupSet').body["groupSet"] group_set.collect { |g| g["groupId"]} end # Test for sourceDestCheck attribute (This attribute is set only for VPC instances. So created the instance in a VPC during setup process) tests("#describe_instance_attribute(#{@instance_id}, 'sourceDestCheck')").returns(true) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'sourceDestCheck').body["sourceDestCheck"] end # Test for ebsOptimized attribute tests("#describe_instance_attribute(#{@instance_id}, 'ebsOptimized')").returns(false) do Fog::Compute[:aws].describe_instance_attribute(@instance_id, 'ebsOptimized').body["ebsOptimized"] end ############### # END OF TEST # ############### # Tear down if !Fog.mocking? @key.destroy server.destroy until server.state == "terminated" sleep 5 #Wait for the server to be terminated server.reload end subnet.destroy vpc.destroy end end tests('failure') do @instance_attributes.each do |attrib| tests("#describe_instance_attribute('i-00000000', #{attrib})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].describe_instance_attribute('i-00000000', attrib) end end end end fog-aws-3.18.0/tests/requests/compute/instance_tests.rb000066400000000000000000000277041437344660100232340ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | instance requests', ['aws']) do @instance_format = { 'architecture' => String, 'amiLaunchIndex' => Integer, 'associatePublicIP' => Fog::Nullable::Boolean, 'attachmentId' => Fog::Nullable::String, 'blockDeviceMapping' => [Fog::Nullable::Hash], 'networkInterfaces' => [Fog::Nullable::Hash], 'clientToken' => Fog::Nullable::String, 'dnsName' => NilClass, 'ebsOptimized' => Fog::Boolean, 'imageId' => String, 'instanceId' => String, 'instanceState' => {'code' => Integer, 'name' => String}, 'instanceType' => String, 'kernelId' => Fog::Nullable::String, 'keyName' => Fog::Nullable::String, 'launchTime' => Time, 'monitoring' => {'state' => Fog::Boolean}, 'networkInterfaceId' => Fog::Nullable::String, 'placement' => { 'availabilityZone' => String, 'groupName' => Fog::Nullable::String, 'tenancy' => String }, 'platform' => Fog::Nullable::String, 'privateDnsName' => NilClass, 'productCodes' => Array, 'reason' => Fog::Nullable::String, 'rootDeviceName' => Fog::Nullable::String, 'rootDeviceType' => String, 'sourceDestCheck' => Fog::Nullable::Boolean, 'subnetId' => Fog::Nullable::String, 'vpcId' => Fog::Nullable::String } @run_instances_format = { 'groupSet' => [String], 'instancesSet' => [@instance_format], 'ownerId' => Fog::Nullable::String, 'requestId' => String, 'reservationId' => String } @describe_instances_format = { 'reservationSet' => [{ 'groupSet' => [String], 'groupIds' => [String], 'instancesSet' => [@instance_format.merge( 'architecture' => String, 'dnsName' => Fog::Nullable::String, 'hypervisor' => String, 'iamInstanceProfile' => Hash, 'ipAddress' => Fog::Nullable::String, 'networkInterfaces' => Array, 'ownerId' => String, 'privateDnsName' => Fog::Nullable::String, 'privateIpAddress' => Fog::Nullable::String, 'stateReason' => Hash, 'tagSet' => Hash, 'virtualizationType' => String )], 'ownerId' => Fog::Nullable::String, 'reservationId' => String }], 'requestId' => String } @get_console_output_format = { 'instanceId' => String, 'output' => Fog::Nullable::String, 'requestId' => String, 'timestamp' => Time } @get_password_data_format = { 'instanceId' => String, 
'passwordData' => Fog::Nullable::String, 'requestId' => String, 'timestamp' => Time } @instance_state_change_format = { 'instancesSet' => [{ 'currentState' => {'code' => Integer, 'name' => String}, 'instanceId' => String, 'previousState' => {'code' => Integer, 'name' => String}, }], 'requestId' => String } @describe_reserved_instances_offerings_format = { 'reservedInstancesOfferingsSet' => [{ 'reservedInstancesOfferingId' => String, 'instanceType' => String, 'availabilityZone' => String, 'duration' => Integer, 'fixedPrice' => Float, 'offeringType' => String, 'usagePrice' => Float, 'productDescription' => String, 'instanceTenancy' => String, 'currencyCode' => String }], 'requestId' => String } @purchase_reserved_instances_offering_format = { 'reservedInstancesId' => String, 'requestId' => String } @describe_reserved_instances_format = { 'reservedInstancesSet' => [{ 'reservedInstancesId' => String, 'instanceType' => String, 'availabilityZone' => String, 'start' => Time, 'end' => Time, 'duration' => Integer, 'fixedPrice' => Float, 'usagePrice' => Float, 'instanceCount' => Integer, 'offeringType' => String, 'productDescription' => String, 'state' => String, 'tagSet' => [{ 'key' => String, 'value' => String }], 'instanceTenancy' => String, 'currencyCode' => String }], 'requestId' => String } @describe_instance_status_format = { 'requestId' => String, 'instanceStatusSet' => [{ 'instanceId' => String, 'availabilityZone' => String, 'instanceState' => { 'code' => Integer, 'name' => String }, 'systemStatus' => { 'status' => String, 'details' => [{ 'name' => String, 'status' => String }] }, 'instanceStatus' => { 'status' => String, 'details' => [{ 'name' => String, 'status' => String }] }, 'eventsSet' => [Fog::Nullable::Hash], }] } tests('success') do @instance_id = nil @ami = if ENV['FASTER_TEST_PLEASE'] 'ami-79c0ae10' # ubuntu 12.04 daily build 20120728 else # Use a MS Windows AMI to test #get_password_data 'ami-71b50018' # Amazon Public Images - Windows_Server-2008-SP2-English-64Bit-Base-2012.07.11 end # Create a keypair for decrypting the password key_name = uniq_id('fog-test-key') key = Fog::Compute[:aws].key_pairs.create(:name => key_name) tests("#run_instances").formats(@run_instances_format) do data = Fog::Compute[:aws].run_instances(@ami, 1, 1, 'InstanceType' => 't1.micro', 'KeyName' => key_name, 'BlockDeviceMapping' => [{"DeviceName" => "/dev/sdp1", "VirtualName" => nil, "Ebs.VolumeSize" => 15}]).body @instance_id = data['instancesSet'].first['instanceId'] data end server = Fog::Compute[:aws].servers.get(@instance_id) while server.nil? do # It may take a moment to get the server after launching it sleep 0.1 server = Fog::Compute[:aws].servers.get(@instance_id) end server.wait_for { ready? 
} tests("#describe_instances").formats(@describe_instances_format) do Fog::Compute[:aws].describe_instances('instance-state-name' => 'running').body end # Launch another instance to test filters another_server = Fog::Compute[:aws].servers.create tests("#describe_instances('instance-id' => '#{@instance_id}'").formats(@describe_instances_format) do body = Fog::Compute[:aws].describe_instances('instance-id' => "#{@instance_id}").body tests("returns 1 instance").returns(1) { body['reservationSet'].size } body end # Test network interface attachment tests('#describe_instances networkInterfaces') do vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/16') subnet = Fog::Compute[:aws].subnets.create('vpc_id' => vpc.id, 'cidr_block' => '10.0.10.0/16') data = Fog::Compute[:aws].create_network_interface(subnet.subnet_id).body @network_interface_id = data['networkInterface']['networkInterfaceId'] Fog::Compute[:aws].attach_network_interface(@network_interface_id, @instance_id, '1') body = Fog::Compute[:aws].describe_instances('instance-id' => "#{@instance_id}").body tests("returns 1 attachment").returns(1) { body['reservationSet'].first['instancesSet'].first['networkInterfaces'].size } subnet.destroy vpc.destroy end another_server.destroy tests("#run_instances_with_tags").formats(@describe_instances_format) do svr1 = Fog::Compute[:aws].servers.create( :availability_zone => 'eu-west-1a', :tags => { "Name" => "test::test::test", "Stack" => "test", "Stage" => "test", "App" => "test1", }, :image_id => 'ami-3d7e2e54', :flavor_id => 't1.micro' ) svr2 = Fog::Compute[:aws].servers.create( :availability_zone => 'eu-west-1b', :tags => { "Name" => "test::test::dev", "Stack" => "test", "Stage" => "test", "App" => "test2", }, :image_id => 'ami-3d7e2e54', :flavor_id => 't1.micro' ) body = Fog::Compute[:aws].describe_instances('tag:App' => ['test1', 'test2']).body tests("returns 2 hosts").returns(2) { body['reservationSet'].size } svr1.destroy svr2.destroy body end tests("#get_console_output('#{@instance_id}')").formats(@get_console_output_format) do Fog::Compute[:aws].get_console_output(@instance_id).body end tests("#get_password_data('#{@instance_id}')").formats(@get_password_data_format) do result = Fog::Compute[:aws].get_password_data(@instance_id).body tests("key can decrypt passwordData").returns(true) do pending if Fog.mocking? 
password_data = result['passwordData'] Fog.wait_for do password_data ||= Fog::Compute[:aws].get_password_data(@instance_id).body['passwordData'] end decoded_password = Base64.decode64(password_data) pkey = OpenSSL::PKey::RSA.new(key.private_key) String === pkey.private_decrypt(decoded_password) end result end unless ENV['FASTER_TEST_PLEASE'] key.destroy tests("#reboot_instances('#{@instance_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].reboot_instances(@instance_id).body end tests("#stop_instances('#{@instance_id}')").formats(@instance_state_change_format) do Fog::Compute[:aws].stop_instances(@instance_id).body end tests("#start_instances('#{@instance_id}')").formats(@instance_state_change_format) do Fog::Compute[:aws].start_instances(@instance_id).body end tests("#terminate_instances('#{@instance_id}')").formats(@instance_state_change_format) do Fog::Compute[:aws].terminate_instances(@instance_id).body end tests("#describe_reserved_instances_offerings").formats(@describe_reserved_instances_offerings_format) do @reserved_instances = Fog::Compute[:aws].describe_reserved_instances_offerings.body @reserved_instances end tests('#describe_instance_status').formats(@describe_instance_status_format) do Fog::Compute[:aws].describe_instance_status.body end if Fog.mocking? @reserved_instance_offering_id = @reserved_instances["reservedInstancesOfferingsSet"].first["reservedInstancesOfferingId"] tests("#purchase_reserved_instances_offering('#{@reserved_instance_offering_id}')").formats(@purchase_reserved_instances_offering_format) do Fog::Compute[:aws].purchase_reserved_instances_offering(@reserved_instance_offering_id, 1).body end tests("#describe_reserved_instances").formats(@describe_reserved_instances_format) do Fog::Compute[:aws].describe_reserved_instances.body end end end tests('failure') do tests("#run_instances(nil, 1, 1, {'SubnetId'=>'subnet-00000000'}").raises(::Fog::AWS::Compute::Error) do Fog::Compute[:aws].run_instances(nil, 1, 1, {'SubnetId' => 'subnet-000000'}) end tests("#get_console_output('i-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].get_console_output('i-00000000') end tests("#get_password_data('i-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].get_password_data('i-00000000') end tests("#reboot_instances('i-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].reboot_instances('i-00000000') end tests("#terminate_instances('i-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].terminate_instances('i-00000000') end end end fog-aws-3.18.0/tests/requests/compute/internet_gateway_tests.rb000066400000000000000000000034461437344660100247760ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | internet_gateway requests', ['aws']) do @internet_gateways_format = { 'internetGatewaySet' => [{ 'internetGatewayId' => String, 'attachmentSet' => Hash, 'tagSet' => Fog::Nullable::Hash, }], 'requestId' => String } tests('success') do Fog::AWS::Compute::Mock.reset if Fog.mocking? 
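    # Note: the requests below walk the full gateway lifecycle. An internet gateway is
    # created detached, must be attached to a VPC before that VPC can route through it,
    # and must be detached again before delete_internet_gateway will succeed.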
@vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @vpc_id = @vpc.id @subnet=Fog::Compute[:aws].subnets.create('vpc_id' => @vpc_id, 'cidr_block' => '10.0.10.0/24') @subnet_id = @subnet.subnet_id @igw_id = nil tests('#create_internet_gateway').formats(@internet_gateways_format) do data = Fog::Compute[:aws].create_internet_gateway().body @igw_id = data['internetGatewaySet'].first['internetGatewayId'] data end tests('#describe_internet_gateways').formats(@internet_gateways_format) do Fog::Compute[:aws].describe_internet_gateways.body end tests('#describe_internet_gateways with tags').formats(@internet_gateways_format) do Fog::Compute[:aws].create_tags @igw_id, {"environment" => "production"} Fog::Compute[:aws].describe_internet_gateways.body end tests("#attach_internet_gateway('#{@igw_id}, #{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].attach_internet_gateway(@igw_id, @vpc_id).body end tests("#detach_internet_gateway('#{@igw_id}, #{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].detach_internet_gateway(@igw_id, @vpc_id).body end tests("#delete_internet_gateway('#{@igw_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_internet_gateway(@igw_id).body end @subnet.destroy @vpc.destroy end end fog-aws-3.18.0/tests/requests/compute/key_pair_tests.rb000066400000000000000000000046061437344660100232270ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | key pair requests', ['aws']) do tests('success') do @keypair_format = { 'keyFingerprint' => String, 'keyName' => String, 'requestId' => String } @keypairs_format = { 'keySet' => [{ 'keyFingerprint' => String, 'keyName' => String }], 'requestId' => String } @key_pair_name = 'fog_create_key_pair' @public_key_material = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA1SL+kgze8tvSFW6Tyj3RyZc9iFVQDiCKzjgwn2tS7hyWxaiDhjfY2mBYSZwFdKN+ZdsXDJL4CPutUg4DKoQneVgIC1zuXrlpPbaT0Btu2aFd4qNfJ85PBrOtw2GrWZ1kcIgzZ1mMbQt6i1vhsySD2FEj+5kGHouNxQpI5dFR5K+nGgcTLFGnzb/MPRBk136GVnuuYfJ2I4va/chstThoP8UwnoapRHcBpwTIfbmmL91BsRVqjXZEUT73nxpxFeXXidYwhHio+5dXwE0aM/783B/3cPG6FVoxrBvjoNpQpAcEyjtRh9lpwHZtSEW47WNzpIW3PhbQ8j4MryznqF1Rhw==' tests("#create_key_pair('#{@key_pair_name}')").formats(@keypair_format.merge({'keyMaterial' => String})) do body = Fog::Compute[:aws].create_key_pair(@key_pair_name).body tests("key material").returns(OpenSSL::PKey::RSA, "is a valid private RSA key") do OpenSSL::PKey::RSA.new(body['keyMaterial']).class end body end tests('#describe_key_pairs').formats(@keypairs_format) do Fog::Compute[:aws].describe_key_pairs.body end tests("#describe_key_pairs('key-name' => '#{@key_pair_name}')").formats(@keypairs_format) do Fog::Compute[:aws].describe_key_pairs('key-name' => @key_pair_name).body end tests("#delete_key_pair('#{@key_pair_name}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_key_pair(@key_pair_name).body end tests("#import_key_pair('fog_import_key_pair', '#{@public_key_material}')").formats(@keypair_format) do Fog::Compute[:aws].import_key_pair('fog_import_key_pair', @public_key_material).body end tests("#delete_key_pair('fog_import_key_pair)").succeeds do Fog::Compute[:aws].delete_key_pair('fog_import_key_pair') end tests("#delete_key_pair('not_a_key_name')").succeeds do Fog::Compute[:aws].delete_key_pair('not_a_key_name') end end tests('failure') do @key_pair = Fog::Compute[:aws].key_pairs.create(:name => 'fog_key_pair') tests("duplicate #create_key_pair('#{@key_pair.name}')").raises(Fog::AWS::Compute::Error) do 
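      # Note: CreateKeyPair is not idempotent; re-using an existing name raises, as asserted
      # here. Callers typically delete_key_pair first, pick a unique name such as
      # uniq_id('fog-test-key'), or use import_key_pair to supply their own public key.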
Fog::Compute[:aws].create_key_pair(@key_pair.name) end @key_pair.destroy end end fog-aws-3.18.0/tests/requests/compute/network_acl_tests.rb000066400000000000000000000103231437344660100237250ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | network acl requests', ['aws']) do @network_acl_format = { 'networkAclId' => String, 'vpcId' => String, 'default' => Fog::Boolean, 'entrySet' => [{ 'ruleNumber' => Integer, 'protocol' => Integer, 'ruleAction' => String, 'egress' => Fog::Boolean, 'cidrBlock' => String, 'icmpTypeCode' => { 'code' => Fog::Nullable::Integer, 'type' => Fog::Nullable::Integer }, 'portRange' => { 'from' => Fog::Nullable::Integer, 'to' => Fog::Nullable::Integer } }], 'associationSet' => Array, 'tagSet' => Hash } @network_acls_format = { 'requestId' => String, 'networkAclSet' => [ @network_acl_format ] } @network_acl_replace_association = { 'requestId' => String, 'newAssociationId' => String } tests('success') do Fog::AWS::Compute::Mock.reset if Fog.mocking? @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @subnet = Fog::Compute[:aws].subnets.create('vpc_id' => @vpc.id, 'cidr_block' => '10.0.10.16/28') @network_acl = nil # Describe network interfaces tests('#describe_network_acls').formats(@network_acls_format) do Fog::Compute[:aws].describe_network_acls.body end tests('#create_network_acl').formats(@network_acl_format) do data = Fog::Compute[:aws].create_network_acl(@vpc.id).body @network_acl = data['networkAcl'] data['networkAcl'] end tests("#create_network_acl_entry").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_network_acl_entry(@network_acl['networkAclId'], 100, 6, 'allow', '0.0.0.0/8', false, 'PortRange.From' => 22, 'PortRange.To' => 22).body end tests("#replace_network_acl_entry").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].replace_network_acl_entry(@network_acl['networkAclId'], 100, 6, 'deny', '0.0.0.0/8', false, 'PortRange.From' => 22, 'PortRange.To' => 22).body end tests("#delete_network_acl_entry").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_network_acl_entry(@network_acl['networkAclId'], 100, false).body end default_acl = Fog::Compute[:aws].describe_network_acls('vpc-id' => @vpc.id, 'default' => true).body['networkAclSet'].first @assoc_id = default_acl['associationSet'].first['networkAclAssociationId'] tests("#replace_network_acl_association").formats(@network_acl_replace_association) do data = Fog::Compute[:aws].replace_network_acl_association(@assoc_id, @network_acl['networkAclId']).body @assoc_id = data['newAssociationId'] data end tests("#replace_network_acl_association").formats(@network_acl_replace_association) do Fog::Compute[:aws].replace_network_acl_association(@assoc_id, default_acl['networkAclId']).body end # Create another network acl to test tag filters test_tags = {'foo' => 'bar'} @another_acl = Fog::Compute[:aws].network_acls.create :vpc_id => @vpc.id, :tags => test_tags tests("#describe_network_acls('tag-key' => 'foo')").formats(@network_acls_format) do body = Fog::Compute[:aws].describe_network_acls('tag-key' => 'foo').body tests("returns 1 acl").returns(1) { body['networkAclSet'].size } body end tests("#describe_network_acls('tag-value' => 'bar')").formats(@network_acls_format) do body = Fog::Compute[:aws].describe_network_acls('tag-value' => 'bar').body tests("returns 1 acl").returns(1) { body['networkAclSet'].size } body end tests("#describe_network_acls('tag:foo' => 'bar')").formats(@network_acls_format) do body = 
Fog::Compute[:aws].describe_network_acls('tag:foo' => 'bar').body tests("returns 1 acl").returns(1) { body['networkAclSet'].size } body end tests('#delete_network_acl').formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_network_acl(@network_acl['networkAclId']).body end # Clean up Fog::Compute[:aws].delete_tags(@another_acl.identity, test_tags) @another_acl.destroy @subnet.destroy @vpc.destroy Fog::AWS::Compute::Mock.reset if Fog.mocking? end end fog-aws-3.18.0/tests/requests/compute/network_interface_tests.rb000066400000000000000000000245121437344660100251330ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | network interface requests', ['aws']) do @network_interface_format = { 'networkInterfaceId' => String, 'subnetId' => String, 'vpcId' => String, 'availabilityZone' => String, 'description' => Fog::Nullable::String, 'ownerId' => String, 'requesterId' => Fog::Nullable::String, 'requesterManaged' => String, 'status' => String, 'macAddress' => String, 'privateIpAddress' => String, 'privateDnsName' => Fog::Nullable::String, 'sourceDestCheck' => Fog::Boolean, 'groupSet' => Fog::Nullable::Hash, 'attachment' => Hash, 'association' => Hash, 'tagSet' => Hash } @network_interface_create_format = { 'networkInterface' => @network_interface_format, 'requestId' => String } @network_interfaces_format = { 'requestId' => String, 'networkInterfaceSet' => [ @network_interface_format ] } @attach_network_interface_format = { 'requestId' => String, 'attachmentId' => String } tests('success') do Fog::AWS::Compute::Mock.reset if Fog.mocking? # Create environment @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @subnet = Fog::Compute[:aws].subnets.create('vpc_id' => @vpc.id, 'cidr_block' => '10.0.10.16/28') @security_group = Fog::Compute[:aws].security_groups.create('name' => 'sg_name', 'description' => 'sg_desc', 'vpc_id' => @vpc.id) @owner_id = Fog::Compute[:aws].describe_security_groups('group-name' => 'default').body['securityGroupInfo'].first['ownerId'] @subnet_id = @subnet.subnet_id @security_group_id = @security_group.group_id DESCRIPTION = "Small and green" tests("#create_network_interface(#{@subnet_id})").formats(@network_interface_create_format) do data = Fog::Compute[:aws].create_network_interface(@subnet_id, {"PrivateIpAddress" => "10.0.10.23"}).body @nic_id = data['networkInterface']['networkInterfaceId'] data end # Describe network interfaces tests('#describe_network_interfaces').formats(@network_interfaces_format) do Fog::Compute[:aws].describe_network_interfaces.body end # Describe network interface attribute tests("#describe_network_interface_attribute(#{@nic_id}, 'description')").returns(nil) do Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, 'description').body['description'] end # test describe of all supported attributes [ 'description', 'groupSet', 'sourceDestCheck', 'attachment'].each do |attrib| tests("#describe_network_interface_attribute(#{@nic_id}, #{attrib})").returns(@nic_id) do Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, attrib).body['networkInterfaceId'] end end # Modify network interface description attribute tests("#modify_network_interface_attribute(#{@nic_id}, 'description', '#{DESCRIPTION}')").returns(true) do Fog::Compute[:aws].modify_network_interface_attribute(@nic_id, 'description', DESCRIPTION).body["return"] end # Describe network interface attribute again tests("#describe_network_interface_attribute(#{@nic_id}, 'description')").returns(DESCRIPTION) do 
Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, 'description').body["description"] end # Restore network interface description attribute tests("#modify_network_interface_attribute(#{@nic_id}, 'description', '')").returns(true) do Fog::Compute[:aws].modify_network_interface_attribute(@nic_id, 'description', '').body["return"] end # Check modifying the group set tests("#modify_network_interface_attribute(#{@nic_id}, 'groupSet', [#{@security_group_id}])").returns(true) do Fog::Compute[:aws].modify_network_interface_attribute(@nic_id, 'groupSet', [@security_group_id]).body["return"] end tests("#describe_network_interface_attribute(#{@nic_id}, 'groupSet')").returns({ @security_group_id => "sg_name" }) do Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, 'groupSet').body["groupSet"] end # Check modifying the source dest check (and reset) tests("#modify_network_interface_attribute(#{@nic_id}, 'sourceDestCheck', false)").returns(true) do Fog::Compute[:aws].modify_network_interface_attribute(@nic_id, 'sourceDestCheck', false).body["return"] end tests("#describe_network_interface_attribute(#{@nic_id}, 'sourceDestCheck')").returns(false) do Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, 'sourceDestCheck').body["sourceDestCheck"] end tests("#reset_network_interface_attribute(#{@nic_id}, 'sourceDestCheck')").returns(true) do Fog::Compute[:aws].reset_network_interface_attribute(@nic_id, 'sourceDestCheck').body["return"] end tests("#describe_network_interface_attribute(#{@nic_id}, 'sourceDestCheck')").returns(true) do Fog::Compute[:aws].describe_network_interface_attribute(@nic_id, 'sourceDestCheck').body["sourceDestCheck"] end @server = Fog::Compute[:aws].servers.create({:flavor_id => 'm1.small', :subnet_id => @subnet_id }) @server.wait_for { ready? } @instance_id=@server.id # attach @device_index = 1 tests('#attach_network_interface').formats(@attach_network_interface_format) do data = Fog::Compute[:aws].attach_network_interface(@nic_id, @instance_id, @device_index).body @attachment_id = data['attachmentId'] data end # Check modifying the attachment attach_attr = { 'attachmentId' => @attachment_id, 'deleteOnTermination' => true } tests("#modify_network_interface_attribute(#{@nic_id}, 'attachment', #{attach_attr.inspect})").returns(true) do Fog::Compute[:aws].modify_network_interface_attribute(@nic_id, 'attachment', attach_attr).body["return"] end # detach tests('#detach_network_interface').returns(true) do Fog::Compute[:aws].detach_network_interface(@attachment_id,true).body["return"] end if !Fog.mocking? 
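      # Why this wait exists: against real AWS, detach_network_interface is asynchronous,
      # so the ENI must drop back to status 'available' before it can be deleted or
      # re-attached; under mocking the transition is immediate. An equivalent way to
      # express the wait below, shown only as a sketch and not invoked:
      eni_available = lambda { Fog::Compute[:aws].network_interfaces.get(@nic_id).status == 'available' }
      # e.g. Fog.wait_for(&eni_available)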
Fog::Compute[:aws].network_interfaces.get(@nic_id).wait_for { status == 'available'} end # Create network interface with arguments options = { "PrivateIpAddress" => "10.0.10.24", "Description" => DESCRIPTION, "GroupSet" => [@security_group_id] } tests("#create_network_interface(#{@subnet_id}), #{options.inspect}").returns("10.0.10.24") do data = Fog::Compute[:aws].create_network_interface(@subnet_id, options).body @nic2_id = data['networkInterface']['networkInterfaceId'] data['networkInterface']['privateIpAddress'] end # Check assigned values tests("#describe_network_interface_attribute(#{@nic2_id}, 'description')").returns(DESCRIPTION) do Fog::Compute[:aws].describe_network_interface_attribute(@nic2_id, 'description').body["description"] end tests("#describe_network_interface_attribute(#{@nic2_id}, 'groupSet')").returns({ @security_group_id => @security_group.name }) do Fog::Compute[:aws].describe_network_interface_attribute(@nic2_id, 'groupSet').body["groupSet"] end # Delete network interfaces tests("#delete_network_interface('#{@nic2_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_network_interface(@nic2_id).body end tests("#delete_network_interface('#{@nic_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_network_interface(@nic_id).body end @server.destroy if !Fog.mocking? @server.wait_for { state == 'terminated' } # despite the fact that the state goes to 'terminated' we need a little delay for aws to do its thing sleep 5 end # Bring up another server to test vpc public IP association @server = Fog::Compute[:aws].servers.create(:flavor_id => 'm1.small', :subnet_id => @subnet_id, :associate_public_ip => true) @server.wait_for { ready? } @instance_id = @server.id test("#associate_public_ip") do server = Fog::Compute[:aws].servers.get(@instance_id) server.public_ip_address.nil? == false end # Clean up resources @server.destroy if !Fog.mocking? @server.wait_for { state == 'terminated' } # despite the fact that the state goes to 'terminated' we need a little delay for aws to do its thing sleep 5 end @security_group.destroy @subnet.destroy @vpc.destroy end tests('failure') do # Attempt to attach a nonexistent interface tests("#attach_network_interface('eni-00000000', 'i-00000000', '1')").raises(::Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].attach_network_interface('eni-00000000', 'i-00000000', '1') end # Create environment @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @subnet = Fog::Compute[:aws].subnets.create('vpc_id' => @vpc.id, 'cidr_block' => '10.0.10.16/28') @subnet_id = @subnet.subnet_id data = Fog::Compute[:aws].create_network_interface(@subnet_id).body @nic_id = data['networkInterface']['networkInterfaceId'] # Attempt to re-use an existing IP for another ENI tests("#create_network_interface('#{@subnet_id}', " \ "{'PrivateIpAddress' => " \ "'#{data['networkInterface']['privateIpAddress']}'}").raises(::Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_network_interface(@subnet_id, {'PrivateIpAddress' => data['networkInterface']['privateIpAddress']}) end # Attempt to attach a valid ENI to a nonexistent instance. tests("#attach_network_interface('#{@nic_id}', 'i-00000000', '0')").raises(::Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].attach_network_interface(@nic_id, 'i-00000000', '0') end @server = Fog::Compute[:aws].servers.create({:flavor_id => 'm1.small', :subnet_id => @subnet_id }) @server.wait_for { ready? 
} @instance_id=@server.id @device_index = 1 data = Fog::Compute[:aws].attach_network_interface(@nic_id, @instance_id, @device_index).body # Attempt to attach two ENIs to the same instance with the same device # index. tests("#attach_network_interface('#{@nic_id}', '#{@instance_id}', '#{@device_index}')").raises(::Fog::AWS::Compute::Error) do Fog::Compute[:aws].attach_network_interface(@nic_id, @instance_id, @device_index) end Fog::AWS::Compute::Mock.reset if Fog.mocking? end end fog-aws-3.18.0/tests/requests/compute/placement_group_tests.rb000066400000000000000000000033371437344660100246100ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | placement group requests', ['aws']) do @placement_group_format = { 'requestId' => String, 'placementGroupSet' => [{ 'groupName' => String, 'state' => String, 'strategy' => String }] } tests('success') do tests("#create_placement_group('fog_placement_group', 'cluster')").formats(AWS::Compute::Formats::BASIC) do pending if Fog.mocking? Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster').body end tests("#describe_placement_groups").formats(@placement_group_format) do pending if Fog.mocking? Fog::Compute[:aws].describe_placement_groups.body end tests("#describe_placement_groups('group-name' => 'fog_placement_group')").formats(@placement_group_format) do pending if Fog.mocking? Fog::Compute[:aws].describe_placement_groups('group-name' => 'fog_placement_group').body end tests("#delete_placement_group('fog_placement_group')").formats(AWS::Compute::Formats::BASIC) do pending if Fog.mocking? Fog::Compute[:aws].delete_placement_group('fog_placement_group').body end end tests('failure') do pending if Fog.mocking? Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster') tests("duplicate #create_placement_group('fog_placement_group', 'cluster')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_placement_group('fog_placement_group', 'cluster') end tests("#delete_placement_group('not_a_group_name')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].delete_placement_group('not_a_group_name') end Fog::Compute[:aws].delete_placement_group('fog_placement_group') end end fog-aws-3.18.0/tests/requests/compute/region_tests.rb000066400000000000000000000033221437344660100227010ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | region requests', ['aws']) do @regions_format = { 'regionInfo' => [{ 'regionEndpoint' => String, 'regionName' => String }], 'requestId' => String } tests('success') do tests("#describe_regions").formats(@regions_format) do Fog::Compute[:aws].describe_regions.body end tests("#describe_regions('region-name' => 'us-east-1')").formats(@regions_format) do Fog::Compute[:aws].describe_regions('region-name' => 'us-east-1').body end tests("#incorrect_region") do raises(ArgumentError, "Unknown region: world-antarctica-1") do Fog::AWS::Compute.new({:aws_access_key_id => 'dummykey', :aws_secret_access_key => 'dummysecret', :aws_session_token => 'dummytoken', :region => 'world-antarctica-1'}) end end tests("#unknown_endpoint").formats(@regions_format) do Fog::AWS::Compute.new({:aws_access_key_id => 'dummykey', :aws_secret_access_key => 'dummysecret', :aws_session_token => 'dummytoken', :region => 'world-antarctica-1', :endpoint => 'http://aws-clone.example'}).describe_regions.body end tests("#invalid_endpoint") do raises(Fog::AWS::Compute::InvalidURIError) do Fog::AWS::Compute.new({:aws_access_key_id => 'dummykey', :aws_secret_access_key => 'dummysecret', :aws_session_token =>
'dummytoken', :region => 'world-antarctica-1', :endpoint => 'aws-clone.example'}) end end end end fog-aws-3.18.0/tests/requests/compute/route_tests.rb000066400000000000000000000410571437344660100225630ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | route table requests', ['aws']) do @route_table_format = { 'routeTable' => [{ 'routeSet' => [{ 'destinationCidrBlock' => String, 'gatewayId' => String, 'state' => String, }], 'tagSet' => Hash, 'associationSet' => Array, 'routeTableId' => String, 'vpcId' => String, }], 'requestId' => String } @route_tables_format = { 'routeTableSet' => [{ 'associationSet' => [{ 'routeTableAssociationId' => Fog::Nullable::String, 'routeTableId' => String, 'subnetId' => Fog::Nullable::String, 'main' => Fog::Boolean }], 'tagSet' => Hash, 'routeSet' => [{ 'destinationCidrBlock' => String, 'gatewayId' => Fog::Nullable::String, 'instanceId' => Fog::Nullable::String, 'instanceOwnerId' => Fog::Nullable::String, 'networkInterfaceId' => Fog::Nullable::String, 'vpcPeeringConnectionId' => Fog::Nullable::String, 'natGatewayId' => Fog::Nullable::String, 'state' => String, 'origin' => String }], 'routeTableId' => String, 'vpcId' => String, }], 'requestId' => String } Fog::AWS::Compute::Mock.reset if Fog.mocking? vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') if !Fog.mocking? vpc.wait_for { state.eql? "available" } end @subnet_id = Fog::Compute[:aws].create_subnet(vpc.id, '10.0.10.0/24').body['subnet']['subnetId'] @network_interface = Fog::Compute[:aws].create_network_interface(@subnet_id, {"PrivateIpAddress" => "10.0.10.23"}).body @internet_gateway_id = Fog::Compute[:aws].create_internet_gateway.body['internetGatewaySet'].first['internetGatewayId'] @alt_internet_gateway_id = Fog::Compute[:aws].create_internet_gateway.body['internetGatewaySet'].first['internetGatewayId'] @network_interface_id = @network_interface['networkInterface']['networkInterfaceId'] key_name = uniq_id('fog-test-key') key = Fog::Compute[:aws].key_pairs.create(:name => key_name) @cidr_block = '10.0.10.0/24' @destination_cidr_block = '10.0.10.0/23' @ami = 'ami-79c0ae10' # ubuntu 12.04 daily build 20120728 tests('success') do # Test create_route_table # tests("#create_route_table('#{vpc.id}')").formats(@route_table_format) do data = Fog::Compute[:aws].create_route_table(vpc.id).body @route_table_id = data['routeTable'].first['routeTableId'] data end # Test associate_route_table # tests("#associate_route_table('#{@route_table_id}', '#{@subnet_id}')").formats({'requestId'=>String, 'associationId'=>String}) do data = Fog::Compute[:aws].associate_route_table(@route_table_id, @subnet_id).body @association_id = data['associationId'] data end # Tests create_route # - using internet gateway # - using instance id # - using network interface # Fog::Compute[:aws].attach_internet_gateway(@internet_gateway_id, vpc.id).body tests("#create_route('#{@route_table_id}', '#{@destination_cidr_block}', '#{@internet_gateway_id}', 'nil')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, @internet_gateway_id, nil).body end instance = Fog::Compute[:aws].servers.create(:image_id => @ami, :flavor_id => 't1.micro', :key_name => key_name, :subnet_id => @subnet_id) instance.wait_for { state.eql? 
"running" } tests("#create_route('#{@route_table_id}', '10.0.10.0/22', 'nil', '#{instance.id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_route(@route_table_id, '10.0.10.0/22', nil, instance.id).body end tests("#create_route('#{@route_table_id}', '10.0.10.0/21', 'nil', 'nil', '#{@network_interface_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_route(@route_table_id, '10.0.10.0/21', nil, nil, @network_interface_id).body end # Tests replace_route # - using internet gateway # - using instance id # - using network interface # Fog::Compute[:aws].attach_internet_gateway(@alt_internet_gateway_id, vpc.id).body tests("#replace_route('#{@route_table_id}', '#{@destination_cidr_block}', {'gatewayId' => '#{@alt_internet_gateway_id}'})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].replace_route(@route_table_id, @destination_cidr_block, {'gatewayId' => @alt_internet_gateway_id}).body end instance = Fog::Compute[:aws].servers.create(:image_id => @ami, :flavor_id => 't1.micro', :key_name => key_name, :subnet_id => @subnet_id) instance.wait_for { state.eql? "running" } tests("#replace_route('#{@route_table_id}', '10.0.10.0/22', {'instanceId' => '#{instance.id}'})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].replace_route(@route_table_id, '10.0.10.0/22', {'instanceId' => instance.id}).body end tests("#replace_route('#{@route_table_id}', '10.0.10.0/21', {'networkInterfaceId' => '#{@network_interface_id}'})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].replace_route(@route_table_id, '10.0.10.0/21', {'networkInterfaceId' => @network_interface_id}).body end # Tests describe_route_tables # - no parameters # - filter: vpc-id => vpc_id # - filter: vpc-id => ['all'] # tests('#describe_route_tables').formats(@route_tables_format) do Fog::Compute[:aws].describe_route_tables.body end tests("#describe_route_tables('vpc-id' => #{vpc.id})").formats(@route_tables_format) do Fog::Compute[:aws].describe_route_tables('vpc-id' => vpc.id).body end tests("#describe_route_tables('vpc-id' => ['all'])").formats(@route_tables_format) do Fog::Compute[:aws].describe_route_tables('vpc-id' => ['all']).body end # Test delete_route(route_table_id, cidr_block) # tests("#delete_route('#{@route_table_id}', '10.0.10.0/21')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_route(@route_table_id, '10.0.10.0/21').body end tests("#delete_route('#{@route_table_id}', '10.0.10.0/22')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_route(@route_table_id, '10.0.10.0/22').body end Fog::Compute[:aws].servers.all('instance-id'=>instance.id).first.destroy if !Fog.mocking? instance.wait_for { state.eql? 
"terminated" } end tests("#delete_route('#{@route_table_id}', '#{@destination_cidr_block}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_route(@route_table_id, @destination_cidr_block).body end # Test disassociate_route_table(association_id) # tests("#disassociate_route_table('#{@association_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].disassociate_route_table(@association_id).body end # Test delete_route_table(route_table_id) # tests("#delete_route_table('#{@route_table_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_route_table(@route_table_id).body end end tests('failure') do @route_table_id = Fog::Compute[:aws].create_route_table(vpc.id).body['routeTable'].first['routeTableId'] @association_id = Fog::Compute[:aws].associate_route_table(@route_table_id, @subnet_id).body['associationId'] Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, @internet_gateway_id, nil) instance = Fog::Compute[:aws].servers.create(:image_id => @ami, :flavor_id => 't1.micro', :key_name => key_name, :subnet_id => @subnet_id) instance.wait_for { state.eql? "running" } # Tests create_route_table # - no parameters # - passing a nonexisting vpc # tests('#create_route_table').raises(ArgumentError) do Fog::Compute[:aws].create_route_table end tests("#create_route_table('vpc-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route_table('vpc-00000000') end # Tests associate_route_table # - no parameters # - passing a nonexisiting route table # - passing a nonexisiting subnet # tests('#associate_route_table').raises(ArgumentError) do Fog::Compute[:aws].associate_route_table end tests("#associate_route_table('rtb-00000000', '#{@subnet_id}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].associate_route_table('rtb-00000000', @subnet_id) end tests("#associate_route_table('#{@route_table_id}', 'subnet-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].associate_route_table(@route_table_id, 'subnet-00000000') end # Tests create_route # - no parameters # - passing a nonexisiting route table and an exisiting internet gateway # - passing a nonexisiting internet gateway # - passing a nonexisting route table and an exisiting instance # - passing a nonexisiting instance # - passing a nonexsiting route table and an exisiting network interface # - passing a nonexisiting network interface # - attempting to add a route at the same destination cidr block as another # - attempting to add a route at a less specific destination cidr block # tests('#create_route').raises(ArgumentError) do Fog::Compute[:aws].create_route end tests("#create_route('rtb-00000000', '#{@destination_cidr_block}', '#{@internet_gateway_id}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route('rtb-00000000', @destination_cidr_block, @internet_gateway_id) end tests("#create_route('#{@route_table_id}', '#{@destination_cidr_block}', 'igw-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, 'igw-00000000') end tests("#create_route('rtb-00000000', '#{@destination_cidr_block}', 'nil', '#{instance.id}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route('rtb-00000000', @destination_cidr_block, instance.id) end tests("#create_route('#{@route_table_id}', '#{@destination_cidr_block}', 'nil', 'i-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route(@route_table_id, 
@destination_cidr_block, nil, 'i-00000000') end tests("#create_route('#{@route_table_id}', '#{@destination_cidr_block}', 'nil', 'nil', 'eni-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, nil, nil, 'eni-00000000') end tests("#create_route('rtb-00000000', '#{@destination_cidr_block}', 'nil', 'nil', '#{@network_interface_id}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_route('rtb-00000000', @destination_cidr_block, nil, nil, @network_interface_id) end tests("#create_route same destination_cidr_block").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, @internet_gateway_id) Fog::Compute[:aws].create_route(@route_table_id, @destination_cidr_block, nil, nil, @network_interface_id).body end if !Fog.mocking? tests("#create_route less specific destination_cidr_block").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_route(@route_table_id, '10.0.10.0/25', @internet_gateway_id) Fog::Compute[:aws].delete_route(@route_table_id, @destination_cidr_block).body end end # Tests replace_route # - no parameters # - passing a nonexistent route table and an existing internet gateway # - passing a nonexistent route table # - passing a nonexistent route table and an existing instance # - passing a nonexistent instance # - passing a nonexistent route table and an existing network interface # - passing a nonexistent network interface # - attempting to add a route at a less specific destination cidr block # tests('#replace_route').raises(ArgumentError) do Fog::Compute[:aws].replace_route end tests("#replace_route('rtb-00000000', '#{@destination_cidr_block}', {'internetGatewayId' => '#{@internet_gateway_id}'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route('rtb-00000000', @destination_cidr_block, {'internetGatewayId' => @internet_gateway_id}) end tests("#replace_route('rtb-00000000', '#{@destination_cidr_block}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route('rtb-00000000', @destination_cidr_block) end tests("#replace_route('#{@route_table_id}', '#{@destination_cidr_block}', {'gatewayId' => 'igw-00000000'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route(@route_table_id, @destination_cidr_block, {'gatewayId' => 'igw-00000000'}) end tests("#replace_route('rtb-00000000', '#{@destination_cidr_block}', {'instanceId' => '#{instance.id}'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route('rtb-00000000', @destination_cidr_block, {'instanceId' => instance.id}) end tests("#replace_route('#{@route_table_id}', '#{@destination_cidr_block}', {'instanceId' => 'i-00000000'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route(@route_table_id, @destination_cidr_block, {'instanceId' => 'i-00000000'}) end tests("#replace_route('#{@route_table_id}', '#{@destination_cidr_block}', {'networkInterfaceId' => 'eni-00000000'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route(@route_table_id, @destination_cidr_block, {'networkInterfaceId' => 'eni-00000000'}) end tests("#replace_route('rtb-00000000', '#{@destination_cidr_block}', {'networkInterfaceId' => '#{@network_interface_id}'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].replace_route('rtb-00000000', @destination_cidr_block, {'networkInterfaceId' => @network_interface_id}) end if !Fog.mocking?
tests("#replace_route less specific destination_cidr_block").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].replace_route(@route_table_id, '10.0.10.0/25', {'gatewayId' => @internet_gateway_id}) end end # Test describe_route_tables # - passing a nonexisiting vpc # tests("#describe_route_tables('vpc-id' => 'vpc-00000000").formats({'routeTableSet'=>Array, 'requestId'=>String}) do Fog::Compute[:aws].describe_route_tables('vpc-id' => 'vpc-00000000').body end # Tests delete_route # - no parameters # - passing a nonexisiting route table # tests('#delete_route').raises(ArgumentError) do Fog::Compute[:aws].delete_route end tests("#delete_route('rtb-00000000', '#{@destination_cidr_block}')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].delete_route('rtb-00000000', @destination_cidr_block) end # Tests disassociate_route_table # - no parameters # - passing a nonexisiting route table association id # tests('#disassociate_route_table').raises(ArgumentError) do Fog::Compute[:aws].disassociate_route_table end tests("#disassociate_route_table('rtbassoc-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].disassociate_route_table('rtbassoc-00000000') end # Tests delete_route_table # - no parameters # - passing a nonexisiting route table # tests('#delete_route_table').raises(ArgumentError) do Fog::Compute[:aws].delete_route_table end tests("#delete_route_table('rtb-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].delete_route_table('rtb-00000000') end # Dependency Tests # - route is depending on route_table, so route_table cannot be deleted # tests("#delete_route_table('#{@route_table_id}')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].delete_route_table(@route_table_id) end Fog::Compute[:aws].servers.all('instance-id'=>instance.id).first.destroy if !Fog.mocking? instance.wait_for { state.eql? 
"terminated" } end Fog::Compute[:aws].delete_route(@route_table_id, @destination_cidr_block) Fog::Compute[:aws].disassociate_route_table(@association_id) Fog::Compute[:aws].delete_route_table(@route_table_id) end Fog::Compute[:aws].delete_network_interface(@network_interface_id) Fog::Compute[:aws].detach_internet_gateway(@internet_gateway_id, vpc.id) Fog::Compute[:aws].delete_internet_gateway(@internet_gateway_id) Fog::Compute[:aws].delete_subnet(@subnet_id) vpc.destroy key.destroy end fog-aws-3.18.0/tests/requests/compute/security_group_tests.rb000066400000000000000000000502441437344660100245060ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | security group requests', ['aws']) do # See https://github.com/fog/fog/issues/2932hj0 pending @create_security_group_format = { 'requestId' => String, 'groupId' => String, 'return' => Fog::Boolean } @security_groups_format = { 'requestId' => String, 'securityGroupInfo' => [{ 'groupDescription' => String, 'groupId' => Fog::Nullable::String, 'groupName' => String, 'ipPermissions' => [{ 'fromPort' => Fog::Nullable::Integer, 'groups' => [{ 'groupName' => Fog::Nullable::String, 'userId' => String, 'groupId' => String }], 'ipProtocol' => String, 'ipRanges' => [Fog::Nullable::Hash], 'ipv6Ranges' => [Fog::Nullable::Hash], 'toPort' => Fog::Nullable::Integer, }], 'ipPermissionsEgress' => [], 'ownerId' => String, 'vpcId' => Fog::Nullable::String }] } @owner_id = Fog::Compute[:aws].describe_security_groups('group-name' => 'default').body['securityGroupInfo'].first['ownerId'] @group_id_default = Fog::Compute[:aws].describe_security_groups('group-name' => 'default').body['securityGroupInfo'].first['groupId'] tests('success') do tests("#create_security_group('fog_security_group', 'tests group')").formats(@create_security_group_format) do Fog::Compute[:aws].create_security_group('fog_security_group', 'tests group').body end tests("#create_security_group('fog_security_group_two', 'tests group')").formats(@create_security_group_format) do Fog::Compute[:aws].create_security_group('fog_security_group_two', 'tests group').body end @group_id_two = Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group_two').body['securityGroupInfo'].first['groupId'] group_id = Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['groupId'] to_be_revoked = [] expected_permissions = [] permission = { 'SourceSecurityGroupName' => 'default' } tests("#authorize_security_group_ingress('fog_security_group', #{permission.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permission).body end to_be_revoked.push([permission, expected_permissions.dup]) expected_permissions = [ {"groups"=>[{"groupName"=>"default", "userId"=>@owner_id, "groupId"=>@group_id_default}], "fromPort"=>1, "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"tcp", "toPort"=>65535}, {"groups"=>[{"groupName"=>"default", "userId"=>@owner_id, "groupId"=>@group_id_default}], "fromPort"=>1, "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"udp", "toPort"=>65535}, {"groups"=>[{"groupName"=>"default", "userId"=>@owner_id, "groupId"=>@group_id_default}], "fromPort"=>-1, "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"icmp", "toPort"=>-1} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 
'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end tests("#describe_security_groups('group-id' => '#{group_id}')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-id' => group_id).body['securityGroupInfo'].first['ipPermissions']) end permission = { 'SourceSecurityGroupName' => 'fog_security_group_two', 'SourceSecurityGroupOwnerId' => @owner_id } tests("#authorize_security_group_ingress('fog_security_group', #{permission.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permission).body end to_be_revoked.push([permission, expected_permissions.dup]) expected_permissions = [ {"groups"=> [{"userId"=>@owner_id, "groupName"=>"default", "groupId"=>@group_id_default}, {"userId"=>@owner_id, "groupName"=>"fog_security_group_two", "groupId"=>@group_id_two}], "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"tcp", "fromPort"=>1, "toPort"=>65535}, {"groups"=> [{"userId"=>@owner_id, "groupName"=>"default", "groupId"=>@group_id_default}, {"userId"=>@owner_id, "groupName"=>"fog_security_group_two", "groupId"=>@group_id_two}], "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"udp", "fromPort"=>1, "toPort"=>65535}, {"groups"=> [{"userId"=>@owner_id, "groupName"=>"default", "groupId"=>@group_id_default}, {"userId"=>@owner_id, "groupName"=>"fog_security_group_two", "groupId"=>@group_id_two}], "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"icmp", "fromPort"=>-1, "toPort"=>-1} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end permission = { 'IpProtocol' => 'tcp', 'FromPort' => '22', 'ToPort' => '22' } tests("#authorize_security_group_ingress('fog_security_group', #{permission.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permission).body end to_be_revoked.push([permission, expected_permissions.dup]) # previous did nothing tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end permission = { 'IpProtocol' => 'tcp', 'FromPort' => '22', 'ToPort' => '22', 'CidrIp' => '10.0.0.0/8' } tests("#authorize_security_group_ingress('fog_security_group', #{permission.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permission).body end to_be_revoked.push([permission, expected_permissions.dup]) expected_permissions += [ {"groups"=>[], "ipRanges"=>[{"cidrIp"=>"10.0.0.0/8"}], "ipv6Ranges"=>[], "ipProtocol"=>"tcp", "fromPort"=>22, "toPort"=>22} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end # authorize with nested IpProtocol without IpRanges or Groups does nothing permissions = { 'IpPermissions' => [ { 'IpProtocol' => 'tcp', 'FromPort' => '22', 'ToPort' => '22' } ] } tests("#authorize_security_group_ingress('fog_security_group', 
#{permissions.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permissions).body end to_be_revoked.push([permissions, expected_permissions.dup]) # previous did nothing tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end # authorize with nested IpProtocol with IpRanges permissions = { 'IpPermissions' => [ { 'IpProtocol' => 'tcp', 'FromPort' => '80', 'ToPort' => '80', 'IpRanges' => [{ 'CidrIp' => '192.168.0.0/24' }], 'Ipv6Ranges' => [] } ] } tests("#authorize_security_group_ingress('fog_security_group', #{permissions.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permissions).body end to_be_revoked.push([permissions, expected_permissions.dup]) expected_permissions += [ {"groups"=>[], "ipRanges"=>[{"cidrIp"=>"192.168.0.0/24"}], "ipv6Ranges"=>[], "ipProtocol"=>"tcp", "fromPort"=>80, "toPort"=>80} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end # authorize with nested IpProtocol with Groups permissions = { 'IpPermissions' => [ { 'IpProtocol' => 'tcp', 'FromPort' => '8000', 'ToPort' => '8000', 'Groups' => [{ 'GroupName' => 'fog_security_group_two' }] } ] } tests("#authorize_security_group_ingress('fog_security_group', #{permissions.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permissions).body end to_be_revoked.push([permissions, expected_permissions.dup]) expected_permissions += [ {"groups"=>[{"userId"=>@owner_id, "groupName"=>"fog_security_group_two", "groupId"=>@group_id_two}], "ipRanges"=>[], "ipv6Ranges"=>[], "ipProtocol"=>"tcp", "fromPort"=>8000, "toPort"=>8000} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end # authorize with nested IpProtocol with IpRanges and Groups # try integers on this one instead of strings permissions = { 'IpPermissions' => [ { 'IpProtocol' => 'tcp', 'FromPort' => 9000, 'ToPort' => 9000, 'IpRanges' => [{ 'CidrIp' => '172.16.0.0/24' }], 'Groups' => [{ 'GroupName' => 'fog_security_group_two' }] } ] } tests("#authorize_security_group_ingress('fog_security_group', #{permissions.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', permissions).body end to_be_revoked.push([permissions, expected_permissions.dup]) expected_permissions += [ {"groups"=> [{"userId"=>@owner_id, "groupName"=>"fog_security_group_two", "groupId"=>@group_id_two}], "ipRanges"=>[{"cidrIp"=>"172.16.0.0/24"}], "ipProtocol"=>"tcp", "fromPort"=>9000, "toPort"=>9000} ] tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end 
tests("#describe_security_groups").formats(@security_groups_format) do Fog::Compute[:aws].describe_security_groups.body end tests("#describe_security_groups('group-name' => 'fog_security_group')").formats(@security_groups_format) do Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body end to_be_revoked.reverse.each do |permission, expected_permissions_after| tests("#revoke_security_group_ingress('fog_security_group', #{permission.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].revoke_security_group_ingress('fog_security_group', permission).body end tests("#describe_security_groups('group-name' => 'fog_security_group')").returns([]) do array_differences(expected_permissions_after, Fog::Compute[:aws].describe_security_groups('group-name' => 'fog_security_group').body['securityGroupInfo'].first['ipPermissions']) end end tests("#delete_security_group('fog_security_group')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_security_group('fog_security_group').body end tests("#delete_security_group('fog_security_group_two')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_security_group('fog_security_group_two').body end vpc_id = Fog::Compute[:aws].create_vpc('10.255.254.64/28').body['vpcSet'].first['vpcId'] # Create security group in VPC tests("#create_security_group('vpc_security_group', 'tests group')").formats(@create_security_group_format) do Fog::Compute[:aws].create_security_group('vpc_security_group', 'tests group', vpc_id).body end group_id = Fog::Compute[:aws].describe_security_groups('group-name' => 'vpc_security_group').body['securityGroupInfo'].first['groupId'] permissions = { 'IpPermissions' => [ { 'IpProtocol' => '42', 'IpRanges' => [{ 'CidrIp' => '10.0.0.0/8' }], } ] } expected_permissions = [ {"groups"=>[], "ipRanges"=>[{"cidrIp"=>"10.0.0.0/8"}], "ipProtocol"=>"42"} ] options = permissions.clone options['GroupId'] = group_id tests("#authorize_security_group_ingress(#{options.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress(options).body end tests("#describe_security_groups('group-name' => 'vpc_security_group')").returns([]) do array_differences(expected_permissions, Fog::Compute[:aws].describe_security_groups('group-name' => 'vpc_security_group').body['securityGroupInfo'].first['ipPermissions']) end tests("#revoke_security_group_ingress(#{options.inspect})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].revoke_security_group_ingress(options).body end vpc_group=Fog::Compute[:aws].security_groups.get_by_id(group_id) vpc_group.destroy Fog::Compute[:aws].delete_vpc(vpc_id) end ## Rate limiting seems to want us to take a break otherwise it will throw errors tests('failure') do @security_group = Fog::Compute[:aws].security_groups.create(:description => 'tests group', :name => 'fog_security_group') @other_security_group = Fog::Compute[:aws].security_groups.create(:description => 'tests group', :name => 'fog_other_security_group') tests("duplicate #create_security_group(#{@security_group.name}, #{@security_group.description})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_security_group(@security_group.name, @security_group.description) end tests("#authorize_security_group_ingress('not_a_group_name', {'FromPort' => 80, 'IpProtocol' => 'tcp', 'toPort' => 80})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].authorize_security_group_ingress( 'not_a_group_name', { 'FromPort' => 80, 'IpProtocol' => 
'tcp', 'ToPort' => 80, } ) end tests("#authorize_security_group_ingress('not_a_group_name', {'SourceSecurityGroupName' => 'not_a_group_name', 'SourceSecurityGroupOwnerId' => '#{@owner_id}'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].authorize_security_group_ingress( 'not_a_group_name', { 'SourceSecurityGroupName' => 'not_a_group_name', 'SourceSecurityGroupOwnerId' => @owner_id } ) end tests("#authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'IpProtocol' => 'tcp', 'FromPort' => 80, 'ToPort' => 80, 'IpRanges' => [{'CidrIp' => '10.0.0.0/8'}]}]})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'IpProtocol' => 'tcp', 'FromPort' => 80, 'ToPort' => 80, 'IpRanges' => [{'CidrIp' => '10.0.0.0/8'}]}]}).body end tests("#authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'IpProtocol' => 'tcp', 'FromPort' => 80, 'ToPort' => 80, 'IpRanges' => [{'CidrIp' => '10.0.0.0/8'}]}]})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'IpProtocol' => 'tcp', 'FromPort' => 80, 'ToPort' => 80, 'IpRanges' => [{'CidrIp' => '10.0.0.0/8'}]}]}) end tests("#authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'Groups' => [{'GroupName' => '#{@other_security_group.name}'}], 'FromPort' => 80, 'ToPort' => 80, 'IpProtocol' => 'tcp'}]})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', {'IpPermissions' => [{'Groups' => [{'GroupName' => @other_security_group.name}], 'FromPort' => 80, 'ToPort' => 80, 'IpProtocol' => 'tcp'}]}).body end tests("#delete_security_group('#{@other_security_group.name}')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].delete_security_group(@other_security_group.name) end broken_params = [ {}, { "IpProtocol" => "what" }, { "IpProtocol" => "tcp" }, { "IpProtocol" => "what", "FromPort" => 1, "ToPort" => 1 }, ] broken_params += broken_params.map do |broken_params_item| { "IpPermissions" => [broken_params_item] } end broken_params += [ { "IpPermissions" => [] }, { "IpPermissions" => nil } ] broken_params.each do |broken_params_item| tests("#authorize_security_group_ingress('fog_security_group', #{broken_params_item.inspect})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].authorize_security_group_ingress('fog_security_group', broken_params_item) end tests("#revoke_security_group_ingress('fog_security_group', #{broken_params_item.inspect})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].revoke_security_group_ingress('fog_security_group', broken_params_item) end end tests("#revoke_security_group_ingress('not_a_group_name', {'FromPort' => 80, 'IpProtocol' => 'tcp', 'toPort' => 80})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].revoke_security_group_ingress( 'not_a_group_name', { 'FromPort' => 80, 'IpProtocol' => 'tcp', 'ToPort' => 80, } ) end tests("#revoke_security_group_ingress('not_a_group_name', {'SourceSecurityGroupName' => 'not_a_group_name', 'SourceSecurityGroupOwnerId' => '#{@owner_id}'})").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].revoke_security_group_ingress( 'not_a_group_name', { 'SourceSecurityGroupName' => 'not_a_group_name', 'SourceSecurityGroupOwnerId' => @owner_id } ) end tests("#delete_security_group('not_a_group_name')").raises(Fog::AWS::Compute::NotFound) do 
Fog::Compute[:aws].delete_security_group('not_a_group_name') end @rds_security_group = Fog::AWS[:rds].security_groups.create(:id => "rdsgroup", :description => 'fog rds test') tests("#delete_security_group('when authorized to an rds firewall')").raises(Fog::AWS::Compute::Error) do @rds_security_group.authorize_ec2_security_group(@security_group.name) Fog::Compute[:aws].delete_security_group(@security_group.name) end @rds_security_group.destroy @security_group.destroy @other_security_group.destroy tests("#delete_security_group('default')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].delete_security_group('default') end broken_params = [ ['fog_security_group', { 'GroupName' => 'fog_security_group' }], [nil, nil], [nil, { 'GroupId' => nil }], [nil, { 'GroupName' => nil, 'GroupId' => nil }] ] broken_params.each do |list_elem| tests("#authorize_security_group_ingress(#{list_elem[0].inspect}, #{list_elem[1].inspect})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].authorize_security_group_ingress(list_elem[0], list_elem[1]) end tests("#revoke_security_group_ingress(#{list_elem[0].inspect}, #{list_elem[1].inspect})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].revoke_security_group_ingress(list_elem[0], list_elem[1]) end end end end fog-aws-3.18.0/tests/requests/compute/snapshot_tests.rb000066400000000000000000000050311437344660100232540ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | snapshot requests', ['aws']) do @snapshot_format = { 'description' => Fog::Nullable::String, 'encrypted' => Fog::Boolean, 'ownerId' => String, 'progress' => String, 'snapshotId' => String, 'startTime' => Time, 'status' => String, 'volumeId' => String, 'volumeSize' => Integer } @snapshots_format = { 'requestId' => String, 'snapshotSet' => [@snapshot_format.merge('tagSet' => {})] } @snapshot_copy_result = { 'requestId' => String, 'snapshotId' => String } @volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1a', :size => 1) tests('success') do @snapshot_id = nil tests("#create_snapshot(#{@volume.identity})").formats(@snapshot_format.merge('progress' => NilClass, 'requestId' => String)) do data = Fog::Compute[:aws].create_snapshot(@volume.identity).body @snapshot_id = data['snapshotId'] data end Fog.wait_for { Fog::Compute[:aws].snapshots.get(@snapshot_id) } Fog::Compute[:aws].snapshots.get(@snapshot_id).wait_for { ready? 
} tests("#describe_snapshots").formats(@snapshots_format) do Fog::Compute[:aws].describe_snapshots.body end tests("#describe_snapshots('snapshot-id' => '#{@snapshot_id}')").formats(@snapshots_format) do Fog::Compute[:aws].describe_snapshots('snapshot-id' => @snapshot_id).body end tests("#copy_snapshot (#{@snapshot_id}, 'us-east-1')").formats(@snapshot_copy_result) do data = Fog::Compute.new(:provider => :aws, :region => "us-west-1").copy_snapshot(@snapshot_id, "us-east-1").body @west_snapshot_id = data['snapshotId'] data end tests("#delete_snapshots(#{@snapshot_id})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_snapshot(@snapshot_id).body end #NOTE: waiting for the copy to complete can sometimes take up to 5 minutes (but sometimes it's nearly instant) #for faster tests: comment out the rest of this block Fog.wait_for { Fog::Compute.new(:provider => :aws, :region => "us-west-1").snapshots.get(@west_snapshot_id) } tests("#delete_snapshots(#{@west_snapshot_id})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute.new(:provider => :aws, :region => "us-west-1").delete_snapshot(@west_snapshot_id).body end end tests('failure') do tests("#delete_snapshot('snap-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].delete_snapshot('snap-00000000') end end @volume.destroy end fog-aws-3.18.0/tests/requests/compute/spot_datafeed_subscription_tests.rb000066400000000000000000000033311437344660100270240ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | spot datafeed subscription requests', ['aws']) do @spot_datafeed_subscription_format = { 'spotDatafeedSubscription' => { 'bucket' => String, 'ownerId' => String, 'prefix' => String, 'state' => String }, 'requestId' => String } @directory = Fog::Storage[:aws].directories.create(:key => 'fogspotdatafeedsubscriptiontests') tests('success') do pending if Fog.mocking? tests("#create_spot_datafeed_subscription('fogspotdatafeedsubscriptiontests', 'fogspotdatafeedsubscription/')").formats(@spot_datafeed_subscription_format) do Fog::Compute[:aws].create_spot_datafeed_subscription('fogspotdatafeedsubscriptiontests', 'fogspotdatafeedsubscription/').body end tests("duplicate #create_spot_datafeed_subscription('fogspotdatafeedsubscriptiontests', 'fogspotdatafeedsubscription/')").succeeds do Fog::Compute[:aws].create_spot_datafeed_subscription('fogspotdatafeedsubscriptiontests', 'fogspotdatafeedsubscription/') end tests("#describe_spot_datafeed_subscription").formats(@spot_datafeed_subscription_format) do Fog::Compute[:aws].describe_spot_datafeed_subscription.body end tests("#delete_spot_datafeed_subscription").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_spot_datafeed_subscription.body end tests("duplicate #delete_spot_datafeed_subscription").succeeds do Fog::Compute[:aws].delete_spot_datafeed_subscription end end tests('failure') do pending if Fog.mocking? 
tests("#describe_spot_datafeed_subscription").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].describe_spot_datafeed_subscription end end @directory.destroy end fog-aws-3.18.0/tests/requests/compute/spot_instance_tests.rb000066400000000000000000000041521437344660100242710ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | spot instance requests', ['aws']) do @spot_instance_requests_format = { 'spotInstanceRequestSet' => [{ 'createTime' => Time, 'instanceId' => Fog::Nullable::String, 'launchedAvailabilityZone' => Fog::Nullable::String, 'launchSpecification' => { 'blockDeviceMapping' => [], 'groupSet' => [String], 'keyName' => Fog::Nullable::String, 'imageId' => String, 'instanceType' => String, 'monitoring' => Fog::Boolean, 'ebsOptimized' => Fog::Boolean, 'subnetId' => Fog::Nullable::String, 'iamInstanceProfile' => Fog::Nullable::Hash, }, 'productDescription' => String, 'spotInstanceRequestId' => String, 'spotPrice' => Float, 'state' => String, 'type' => String, 'fault' => Fog::Nullable::Hash, }], 'requestId' => String } @cancel_spot_instance_request_format = { 'spotInstanceRequestSet' => [{ 'spotInstanceRequestId' => String, 'state' => String }], 'requestId' => String } tests('success') do tests("#request_spot_instances('ami-3202f25b', 't1.micro', '0.001')").formats(@spot_instance_requests_format) do data = Fog::Compute[:aws].request_spot_instances('ami-3202f25b', 't1.micro', '0.001',{'LaunchSpecification.EbsOptimized' => false}).body @spot_instance_request_id = data['spotInstanceRequestSet'].first['spotInstanceRequestId'] data end tests("#describe_spot_instance_requests").formats(@spot_instance_requests_format) do data = Fog::Compute[:aws].describe_spot_instance_requests('spot-instance-request-id' => [@spot_instance_request_id]).body end tests("#cancel_spot_instance_requests('#{@spot_instance_request_id}')").formats(@cancel_spot_instance_request_format) do Fog::Compute[:aws].cancel_spot_instance_requests(@spot_instance_request_id).body end end end fog-aws-3.18.0/tests/requests/compute/spot_price_history_tests.rb000066400000000000000000000011271437344660100253470ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | spot price history requests', ['aws']) do @spot_price_history_format = { 'spotPriceHistorySet' => [{ 'availabilityZone' => String, 'instanceType' => String, 'spotPrice' => Float, 'productDescription' => String, 'timestamp' => Time }], 'requestId' => String, 'nextToken' => Fog::Nullable::String } tests('success') do tests("#describe_spot_price_history").formats(@spot_price_history_format) do Fog::Compute[:aws].describe_spot_price_history.body end end end fog-aws-3.18.0/tests/requests/compute/subnet_tests.rb000066400000000000000000000054721437344660100227260ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | subnet requests', ['aws']) do @subnet_format = { 'subnetId' => String, 'state' => String, 'vpcId' => String, 'cidrBlock' => String, 'availableIpAddressCount' => String, 'availabilityZone' => String, 'tagSet' => Hash, 'mapPublicIpOnLaunch' => Fog::Boolean, 'defaultForAz' => Fog::Boolean, } @single_subnet_format = { 'subnet' => @subnet_format, 'requestId' => String, } @subnets_format = { 'subnetSet' => [@subnet_format], 'requestId' => String } @modify_subnet_format = { 'requestId' => String, 'return' => Fog::Boolean } @vpc_network = '10.0.10.0/24' @vpc=Fog::Compute[:aws].vpcs.create('cidr_block' => @vpc_network) @vpc_id = @vpc.id tests('success') do @subnet_id = nil @subnet_network = '10.0.10.16/28' tests("#create_subnet('#{@vpc_id}', 
'#{@subnet_network}')").formats(@single_subnet_format) do data = Fog::Compute[:aws].create_subnet(@vpc_id, @subnet_network).body @subnet_id = data['subnet']['subnetId'] data end tests("modify_subnet('#{@subnet_id}'").formats(@modify_subnet_format) do Fog::Compute[:aws].modify_subnet_attribute(@subnet_id, 'MapPublicIpOnLaunch' => true).body end @vpc2=Fog::Compute[:aws].vpcs.create('cidr_block' => @vpc_network) @vpc2_id = @vpc2.id # Create a second subnet in a second VPC with the same netblock tests("#create_subnet('#{@vpc2_id}', '#{@subnet_network}')").formats(@single_subnet_format) do data = Fog::Compute[:aws].create_subnet(@vpc2_id, @subnet_network).body @subnet2_id = data['subnet']['subnetId'] data end Fog::Compute[:aws].delete_subnet(@subnet2_id) tests('#describe_subnets').formats(@subnets_format) do Fog::Compute[:aws].describe_subnets.body end tests("#delete_subnet('#{@subnet_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_subnet(@subnet_id).body end end tests('failure') do tests("#create_subnet('vpc-00000000', '10.0.10.0/16')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].create_subnet('vpc-00000000', '10.0.10.0/16') end tests("#create_subnet('#{@vpc_id}', '10.0.9.16/28')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_subnet(@vpc_id, '10.0.9.16/28') end # Attempt to create two subnets with conflicting CIDRs in the same VPC tests("#create_subnet('#{@vpc_id}', '10.0.10.0/24'); " \ "#create_subnet('#{@vpc_id}', '10.0.10.64/26'); ").raises(::Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_subnet(@vpc_id, '10.0.10.0/24') Fog::Compute[:aws].create_subnet(@vpc_id, '10.0.10.64/26') end end @vpc.destroy end fog-aws-3.18.0/tests/requests/compute/tag_tests.rb000066400000000000000000000103331437344660100221710ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | tag requests', ['aws']) do Fog::AWS::Compute::Mock.reset if Fog.mocking? @tags_format = { 'tagSet' => [{ 'key' => String, 'resourceId' => String, 'resourceType' => String, 'value' => Fog::Nullable::String }], 'requestId' => String } @volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1a', :size => 1) @volume.wait_for { ready? } @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => '10.0.10.0/24') @network_acl = Fog::Compute[:aws].network_acls.all('vpc-id' => @vpc.id, 'default' => true).first tests('success') do if Fog.mocking? @other_account = Fog::AWS::Compute.new(:aws_access_key_id => 'other', :aws_secret_access_key => 'account') @image_id = Fog::Compute[:aws].register_image('image', 'image', '/dev/sda1').body['imageId'] end tests("#create_tags('#{@volume.identity}', 'foo' => 'bar')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_tags(@volume.identity, 'foo' => 'bar').body end if Fog.mocking? tests("#create_tags('#{@image_id}', 'foo' => 'baz')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_tags(@image_id, 'foo' => 'baz').body end tests("#create_tags('#{@vpc.id}', 'type' => 'vpc')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_tags(@vpc.id, 'type' => 'vpc').body end tests("#create_tags('#{@network_acl.network_acl_id}', 'type' => 'network_acl')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].create_tags(@network_acl.network_acl_id, 'type' => 'network_acl').body end end tests('#describe_tags').formats(@tags_format) do Fog::Compute[:aws].describe_tags.body end expected_identities = Fog.mocking? ? 
[@volume.identity, @image_id] : [@volume.identity] tests('#describe_tags').succeeds do (expected_identities - Fog::Compute[:aws].describe_tags.body['tagSet'].map {|t| t['resourceId'] }).empty? end tests("#describe_tags('key' => 'foo', 'value' => 'bar')").returns([@volume.identity]) do Fog::Compute[:aws].describe_tags('key' => 'foo', 'value' => 'bar').body['tagSet'].map {|t| t['resourceId'] } end if Fog.mocking? tests("#describe_tags('key' => 'foo', 'value' => 'baz')").returns([@image_id]) do Fog::Compute[:aws].describe_tags('key' => 'foo', 'value' => 'baz').body['tagSet'].map {|t| t['resourceId'] } end Fog::Compute[:aws].modify_image_attribute(@image_id, 'Add.UserId' => [@other_account.data[:owner_id]]) tests("other_account#describe_tags('key' => 'foo', 'value' => 'baz')").returns([]) do @other_account.describe_tags('key' => 'foo', 'value' => 'baz').body['tagSet'].map {|t| t['resourceId'] } end tests("other_account#create_tags('#{@image_id}', 'foo' => 'quux')").formats(AWS::Compute::Formats::BASIC) do @other_account.create_tags(@image_id, 'foo' => 'quux').body end tests("other_account#describe_tags('key' => 'foo', 'value' => 'quux')").returns([@image_id]) do @other_account.describe_tags('key' => 'foo', 'value' => 'quux').body['tagSet'].map {|t| t['resourceId'] } end end @volume.destroy tests("#delete_tags('#{@volume.identity}', 'foo' => 'bar')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_tags(@volume.identity, 'foo' => 'bar').body end end tests('failure') do tests("#create_tags('vol-00000000', 'baz' => 'qux')").raises(Fog::Service::NotFound) do Fog::Compute[:aws].create_tags('vol-00000000', 'baz' => 'qux') end tests("#create_tags('abc-12345678', 'type' => 'fake_type')").raises(Fog::Service::NotFound) do Fog::Compute[:aws].create_tags('abc-12345678', 'type' => 'fake_type') end tests("#create_tags('vpc-12345678', 'type' => 'non-existent_vpc)").raises(Fog::Service::NotFound) do Fog::Compute[:aws].create_tags('vpc-12345678', 'type' => 'non-existent_vpc') end tests("#create_tags('vpc-123', 'type' => 'bad_resource_id)").raises(Fog::Service::NotFound) do Fog::Compute[:aws].create_tags('vpc-123', 'type' => 'bad_resource_id') end end Fog::AWS::Compute::Mock.reset if Fog.mocking? 
end fog-aws-3.18.0/tests/requests/compute/volume_tests.rb000066400000000000000000000235021437344660100227270ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | volume requests', ['aws']) do @volume_format = { 'availabilityZone' => String, 'createTime' => Time, 'encrypted' => Fog::Boolean, 'iops' => Fog::Nullable::Integer, 'requestId' => String, 'size' => Integer, 'snapshotId' => Fog::Nullable::String, 'status' => String, 'volumeId' => String, 'volumeType' => String } @volume_attachment_format = { 'attachTime' => Time, 'device' => String, 'instanceId' => String, 'requestId' => String, 'status' => String, 'volumeId' => String } @volume_status_format = { 'volumeStatusSet' => [{ 'availabilityZone' => String, 'volumeId' => String, 'volumeStatus' => { 'status' => String, 'details' => [{ 'name' => String, 'status' => String }] }, 'actionsSet' => [{ 'code' => String, 'description' => String, 'eventId' => String, 'eventType' => String }], 'eventsSet' => [{ 'description' => String, 'eventId' => String, 'eventType' => String, 'notBefore' => Time, 'notAfter' => Time }] }], 'requestId' => String } @volumes_format = { 'volumeSet' => [{ 'availabilityZone' => String, 'attachmentSet' => Array, 'createTime' => Time, 'encrypted' => Fog::Boolean, 'iops' => Fog::Nullable::Integer, 'size' => Integer, 'snapshotId' => Fog::Nullable::String, 'kmsKeyId' => Fog::Nullable::String, 'status' => String, 'tagSet' => Hash, 'volumeId' => String, 'volumeType' => String }], 'requestId' => String } @volume_modification_format = { 'endTime' => Fog::Nullable::Time, 'modificationState' => String, 'originalIops' => Fog::Nullable::Integer, 'originalSize' => Fog::Nullable::Integer, 'originalVolumeType' => Fog::Nullable::String, 'startTime' => Time, 'targetIops' => Fog::Nullable::Integer, 'targetSize' => Fog::Nullable::Integer, 'targetVolumeType' => Fog::Nullable::String, 'volumeId' => String, } @modify_volume_format = { 'requestId' => String, 'volumeModification' => @volume_modification_format } @describe_volume_modifications_format = { 'requestId' => String, 'volumeModificationSet' => [@volume_modification_format] } @server = Fog::Compute[:aws].servers.create @server.wait_for { ready? } tests('success') do @volume_id = nil tests('#create_volume').formats(@volume_format) do data = Fog::Compute[:aws].create_volume(@server.availability_zone, 1).body @volume_id = data['volumeId'] data end Fog::Compute[:aws].delete_volume(@volume_id) tests('#create_volume from snapshot').formats(@volume_format) do volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1d', :size => 1) volume.wait_for { ready? } snapshot = Fog::Compute[:aws].create_snapshot(volume.identity).body Fog::Compute[:aws].snapshots.new(snapshot).wait_for { ready? 
} data = Fog::Compute[:aws].create_volume(@server.availability_zone, nil, 'SnapshotId' => snapshot['snapshotId']).body @volume_id = data['volumeId'] data end Fog::Compute[:aws].delete_volume(@volume_id) tests('#create_volume with type and iops').formats(@volume_format) do data = Fog::Compute[:aws].create_volume(@server.availability_zone, 10, 'VolumeType' => 'io1', 'Iops' => 100).body @volume_id = data['volumeId'] data end Fog::Compute[:aws].delete_volume(@volume_id) tests('#create_volume with encryption').returns(true) do volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1d', :size => 1, :encrypted => true) @volume_id = volume.id volume.reload.encrypted end Fog::Compute[:aws].delete_volume(@volume_id) tests('#create_volume from snapshot with size').formats(@volume_format) do volume = Fog::Compute[:aws].volumes.create(:availability_zone => 'us-east-1d', :size => 1, :type => 'gp2') volume.wait_for { ready? } snapshot = Fog::Compute[:aws].create_snapshot(volume.identity).body Fog::Compute[:aws].snapshots.new(snapshot).wait_for { ready? } data = Fog::Compute[:aws].create_volume(@server.availability_zone, 1, 'SnapshotId' => snapshot['snapshotId'], 'VolumeType' => 'gp2').body @volume_id = data['volumeId'] data end Fog::Compute[:aws].volumes.get(@volume_id).wait_for { ready? } tests('#describe_volumes').formats(@volumes_format) do Fog::Compute[:aws].describe_volumes.body end tests("#describe_volumes('volume-id' => #{@volume_id})").formats(@volumes_format) do Fog::Compute[:aws].describe_volumes('volume-id' => @volume_id).body end tests("#attach_volume(#{@server.identity}, #{@volume_id}, '/dev/sdh')").formats(@volume_attachment_format) do Fog::Compute[:aws].attach_volume(@server.identity, @volume_id, '/dev/sdh').body end Fog::Compute[:aws].volumes.get(@volume_id).wait_for { state == 'in-use' } tests("#describe_volume('attachment.device' => '/dev/sdh')").formats(@volumes_format) do Fog::Compute[:aws].describe_volumes('attachment.device' => '/dev/sdh').body end tests("#describe_volume_status('volume-id' => #{@volume_id})").formats(@volume_status_format) do pending if Fog.mocking? Fog::Compute[:aws].describe_volume_status('volume-id' => @volume_id).body end tests("#detach_volume('#{@volume_id}')").formats(@volume_attachment_format) do Fog::Compute[:aws].detach_volume(@volume_id).body end Fog::Compute[:aws].volumes.get(@volume_id).wait_for { ready? 
} tests("#modify_volume('#{@volume_id}', 'Size' => 100, 'VolumeType' => 'io1', 'Iops' => 5000").formats(@modify_volume_format) do Fog::Compute[:aws].modify_volume(@volume_id, 'Size' => 100, 'VolumeType' => 'io1', 'Iops' => 5000).body end tests("#describe_volumes_modifications('volume-id' => '#{@volume_id}')").formats(@describe_volume_modifications_format) do Fog.wait_for do Fog::Compute[:aws].describe_volumes_modifications('volume-id' => @volume_id).body['volumeModificationSet'].first['modificationState'] == 'completed' end volume = Fog::Compute[:aws].describe_volumes('volume-id' => @volume_id).body['volumeSet'].first returns(100) { volume['size'] } returns('io1') { volume['volumeType'] } returns(5000) { volume['iops'] } Fog::Compute[:aws].describe_volumes_modifications('volume-id' => @volume_id).body end tests("#modify_volume_attribute('#{@volume_id}', true)").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].modify_volume_attribute(@volume_id, true).body end tests("#delete_volume('#{@volume_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_volume(@volume_id).body end end tests('failure') do @volume = Fog::Compute[:aws].volumes.create(:availability_zone => @server.availability_zone, :size => 1) tests("#attach_volume('i-00000000', '#{@volume.identity}', '/dev/sdh')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].attach_volume('i-00000000', @volume.identity, '/dev/sdh') end tests("#attach_volume('#{@server.identity}', 'vol-00000000', '/dev/sdh')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].attach_volume(@server.identity, 'vol-00000000', '/dev/sdh') end tests("#detach_volume('vol-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].detach_volume('vol-00000000') end tests("#modify_volume_attribute('vol-00000000', true)").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].modify_volume_attribute('vol-00000000', true) end tests("#detach_volume('#{@volume.identity}')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].detach_volume(@volume.identity) end tests("#delete_volume('vol-00000000')").raises(Fog::AWS::Compute::NotFound) do Fog::Compute[:aws].delete_volume('vol-00000000') end # Iops required tests("#create_volume('#{@server.availability_zone}', 10, 'VolumeType' => 'io1')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_volume(@server.availability_zone, 10, 'VolumeType' => 'io1') end # size too small for iops tests("#create_volume('#{@server.availability_zone}', 9, 'VolumeType' => 'io1', 'Iops' => 100)").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_volume(@server.availability_zone, 9, 'VolumeType' => 'io1', 'Iops' => 100) end # iops:size ratio too big tests("#create_volume('#{@server.availability_zone}', 10, 'VolumeType' => 'io1', 'Iops' => 301)").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_volume(@server.availability_zone, 10, 'VolumeType' => 'io1', 'Iops' => 301) end # iops invalid value (lower than 100) tests("#create_volume('#{@server.availability_zone}', 10, 'VolumeType' => 'io1', 'Iops' => 99)").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_volume(@server.availability_zone, 10, 'VolumeType' => 'io1', 'Iops' => 99) end # iops invalid value (greater than 4000) tests("#create_volume('#{@server.availability_zone}', 1024, 'VolumeType' => 'io1', 'Iops' => 4001)").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].create_volume(@server.availability_zone, 1024, 'VolumeType' => 'io1', 'Iops' => 4001) end @volume.destroy end 
@server.destroy end fog-aws-3.18.0/tests/requests/compute/vpc_tests.rb000066400000000000000000000217431437344660100222150ustar00rootroot00000000000000Shindo.tests('Fog::Compute[:aws] | vpc requests', ['aws']) do @create_vpcs_format = { 'vpcSet' => [{ 'vpcId' => String, 'state' => String, 'cidrBlock' => String, 'dhcpOptionsId' => String, 'tagSet' => Hash }], 'requestId' => String } @describe_vpcs_classic_link_format = { 'vpcSet' => [{ 'vpcId' => String, 'tagSet' => Hash, 'classicLinkEnabled' => Fog::Boolean }], 'requestId' => String } @describe_classic_link_instances = { 'instancesSet' => [{ 'vpcId' => String, 'tagSet' => Hash, 'instanceId' => String, 'groups' => [{'groupId' => String, 'groupName' => String}] }], 'requestId' => String, 'NextToken' => Fog::Nullable::String } @describe_vpcs_format = { 'vpcSet' => [{ 'vpcId' => String, 'state' => String, 'cidrBlock' => String, 'dhcpOptionsId' => String, 'tagSet' => Hash, 'instanceTenancy' => Fog::Nullable::String, 'cidrBlockAssociationSet' => [{'cidrBlock' => String, 'associationId' => String, 'state' => String}], 'ipv6CidrBlockAssociationSet' => [{'ipv6CidrBlock' => String, 'associationId' => String, 'state' => String}] }], 'requestId' => String } @describe_vpc_classic_link_dns_support_format = { "vpcs" => [{ "vpcId" => String, "classicLinkDnsSupported" => Fog::Boolean }] } tests('success') do @vpc_id = nil tests('#create_vpc').formats(@create_vpcs_format) do data = Fog::Compute[:aws].create_vpc('10.255.254.0/28').body @vpc_id = data['vpcSet'].first['vpcId'] data end tests('#create_vpc').formats(@create_vpcs_format) do data = Fog::Compute[:aws].create_vpc('10.255.254.0/28', {'InstanceTenancy' => 'default'}).body @vpc_id = data['vpcSet'].first['vpcId'] data end tests("#create_vpc('10.255.254.0/28', {'InstanceTenancy' => 'dedicated'})").returns('dedicated') do data = Fog::Compute[:aws].create_vpc('10.255.254.0/28', {'InstanceTenancy' => 'dedicated'}).body data['vpcSet'].first['instanceTenancy'] end tests('#describe_vpcs').formats(@describe_vpcs_format) do Fog::Compute[:aws].describe_vpcs.body end [ 'enableDnsSupport', 'enableDnsHostnames'].each do |attrib| tests("#describe_vpc_attribute('#{@vpc_id}', #{attrib})").returns(@vpc_id) do Fog::Compute[:aws].describe_vpc_attribute(@vpc_id, attrib).body['vpcId'] end end tests("#modify_vpc_attribute('#{@vpc_id}', {'EnableDnsSupport.Value' => false})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id, {'EnableDnsSupport.Value' => false}).body end tests("#describe_vpc_attribute(#{@vpc_id}, 'enableDnsSupport')").returns(false) do Fog::Compute[:aws].describe_vpc_attribute(@vpc_id, 'enableDnsSupport').body["enableDnsSupport"] end tests("#modify_vpc_attribute('#{@vpc_id}', {'EnableDnsSupport.Value' => true})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id, {'EnableDnsSupport.Value' => true}).body end tests("#describe_vpc_attribute(#{@vpc_id}, 'enableDnsSupport')").returns(true) do Fog::Compute[:aws].describe_vpc_attribute(@vpc_id, 'enableDnsSupport').body["enableDnsSupport"] end tests("#modify_vpc_attribute('#{@vpc_id}', {'EnableDnsHostnames.Value' => true})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id, {'EnableDnsHostnames.Value' => true}).body end tests("#describe_vpc_attribute(#{@vpc_id}, 'enableDnsHostnames')").returns(true) do Fog::Compute[:aws].describe_vpc_attribute(@vpc_id, 'enableDnsHostnames').body["enableDnsHostnames"] end tests("#modify_vpc_attribute('#{@vpc_id}', 
{'EnableDnsHostnames.Value' => false})").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id, {'EnableDnsHostnames.Value' => false}).body end tests("#describe_vpc_attribute(#{@vpc_id}, 'enableDnsHostnames')").returns(false) do Fog::Compute[:aws].describe_vpc_attribute(@vpc_id, 'enableDnsHostnames').body["enableDnsHostnames"] end tests("#modify_vpc_attribute('#{@vpc_id}')").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id).body end tests("#modify_vpc_attribute('#{@vpc_id}', {'EnableDnsSupport.Value' => true, 'EnableDnsHostnames.Value' => true})").raises(Fog::AWS::Compute::Error) do Fog::Compute[:aws].modify_vpc_attribute(@vpc_id, {'EnableDnsSupport.Value' => true, 'EnableDnsHostnames.Value' => true}).body end # Create another vpc to test tag filters test_tags = {'foo' => 'bar'} @another_vpc = Fog::Compute[:aws].vpcs.create :cidr_block => '1.2.3.4/24', :tags => test_tags tests("#describe_vpcs('tag-key' => 'foo')").formats(@describe_vpcs_format)do body = Fog::Compute[:aws].describe_vpcs('tag-key' => 'foo').body tests("returns 1 vpc").returns(1) { body['vpcSet'].size } body end tests("#describe_vpcs('tag-value' => 'bar')").formats(@describe_vpcs_format)do body = Fog::Compute[:aws].describe_vpcs('tag-value' => 'bar').body tests("returns 1 vpc").returns(1) { body['vpcSet'].size } body end tests("#describe_vpcs('tag:foo' => 'bar')").formats(@describe_vpcs_format)do body = Fog::Compute[:aws].describe_vpcs('tag:foo' => 'bar').body tests("returns 1 vpc").returns(1) { body['vpcSet'].size } body end tests("describe_vpc_classic_link(:filters => {'tag-key' => 'foo'}").formats(@describe_vpcs_classic_link_format) do body = Fog::Compute[:aws].describe_vpc_classic_link(:filters => {'tag-key' => 'foo'}).body tests("returns 1 vpc").returns(1) { body['vpcSet'].size } body end tests("enable_vpc_classic_link").returns(true) do Fog::Compute[:aws].enable_vpc_classic_link @vpc_id body = Fog::Compute[:aws].describe_vpc_classic_link(:vpc_ids => [@vpc_id]).body body['vpcSet'].first['classicLinkEnabled'] end @server = Fog::Compute[:aws].servers.create @server.wait_for {ready?} @group = Fog::Compute[:aws].security_groups.create :name => 'test-group', :description => 'vpc security group' tests("attach_classic_link_vpc") do Fog::Compute[:aws].attach_classic_link_vpc(@server.id, @vpc_id, [@group.group_id]) end tests('describe_classic_link_instances').formats(@describe_classic_link_instances) do Fog::Compute[:aws].describe_classic_link_instances().body end tests("detach_classic_link_vpc").returns([]) do Fog::Compute[:aws].detach_classic_link_vpc(@server.id, @vpc_id) Fog::Compute[:aws].describe_classic_link_instances().body['instancesSet'] end tests("enable_vpc_classic_link_dns_support('#{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do body = Fog::Compute[:aws].enable_vpc_classic_link_dns_support(@vpc_id).body body end tests("#describe_vpc_classic_link_dns_support").formats(@describe_vpc_classic_link_dns_support_format) do Fog::Compute[:aws].describe_vpc_classic_link_dns_support.body end tests("#describe_vpc_classic_link_dns_support(:vpc_ids => ['#{@vpc_id}'])").formats(@describe_vpc_classic_link_dns_support_format) do body = Fog::Compute[:aws].describe_vpc_classic_link_dns_support(:vpc_ids => [@vpc_id]).body returns(1) { body['vpcs'].count } returns(@vpc_id) { body['vpcs'].first['vpcId'] } returns(true) { body['vpcs'].first['classicLinkDnsSupported'] } body end 
tests("disable_vpc_classic_link_dns_support('#{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].disable_vpc_classic_link_dns_support(@vpc_id).body end tests("#describe_vpc_classic_link_dns_support(:vpc_ids => ['#{@vpc_id}'])").formats(@describe_vpc_classic_link_dns_support_format) do body = Fog::Compute[:aws].describe_vpc_classic_link_dns_support(:vpc_ids => [@vpc_id]).body returns(1) { body['vpcs'].count } returns(@vpc_id) { body['vpcs'].first['vpcId'] } returns(false) { body['vpcs'].first['classicLinkDnsSupported'] } body end if !Fog.mocking? @server.destroy @server.wait_for {state == 'terminated'} end tests("disable_vpc_classic_link").returns(false) do Fog::Compute[:aws].disable_vpc_classic_link @vpc_id body = Fog::Compute[:aws].describe_vpc_classic_link(:vpc_ids => [@vpc_id]).body body['vpcSet'].first['classicLinkEnabled'] end tests("#delete_vpc('#{@vpc_id}')").formats(AWS::Compute::Formats::BASIC) do Fog::Compute[:aws].delete_vpc(@vpc_id).body end # Clean up Fog::Compute[:aws].delete_tags(@another_vpc.id, test_tags) @another_vpc.destroy Fog::AWS::Compute::Mock.reset if Fog.mocking? end end fog-aws-3.18.0/tests/requests/data_pipeline/000077500000000000000000000000001437344660100207715ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/data_pipeline/helper.rb000066400000000000000000000033601437344660100225770ustar00rootroot00000000000000class AWS module DataPipeline module Formats BASIC = { 'pipelineId' => String, } FIELDS = [ { "key" => String, "refValue" => Fog::Nullable::String, "stringValue" => Fog::Nullable::String, } ] LIST_PIPELINES = { "hasMoreResults" => Fog::Nullable::Boolean, "marker" => Fog::Nullable::String, "pipelineIdList" => [ { "id" => String, "name" => String, } ] } QUERY_OBJECTS = { "hasMoreResults" => Fog::Nullable::Boolean, "marker" => Fog::Nullable::String, "ids" => Fog::Nullable::Array, } DESCRIBE_OBJECTS = { "hasMoreResults" => Fog::Nullable::Boolean, "marker" => Fog::Nullable::String, "pipelineObjects" => [ { "fields" => [ { 'id' => String, 'name' => String, 'fields' => FIELDS, } ] } ] } DESCRIBE_PIPELINES = { "pipelineDescriptionList" => [ { "description" => Fog::Nullable::String, "name" => String, "pipelineId" => String, "fields" => FIELDS, } ] } PUT_PIPELINE_DEFINITION = { "errored" => Fog::Boolean, "validationErrors" => Fog::Nullable::Array, } GET_PIPELINE_DEFINITION = { "pipelineObjects" => [ { "id" => String, "name" => String, "fields" => FIELDS, } ], "parameterObjects" => Fog::Nullable::Array, "parameterValues" => Fog::Nullable::Array, } end end end fog-aws-3.18.0/tests/requests/data_pipeline/pipeline_tests.rb000066400000000000000000000053451437344660100243540ustar00rootroot00000000000000Shindo.tests('AWS::DataPipeline | pipeline_tests', ['aws', 'data_pipeline']) do @pipeline_id = nil tests('success') do tests("#create_pipeline").formats(AWS::DataPipeline::Formats::BASIC) do unique_id = 'fog-test-pipeline-unique-id' name = 'fog-test-pipeline-name' description = 'Fog test pipeline' result = Fog::AWS[:data_pipeline].create_pipeline(unique_id, name, description, {}).body @pipeline_id = result['pipelineId'] result end tests("#list_pipelines").formats(AWS::DataPipeline::Formats::LIST_PIPELINES) do Fog::AWS[:data_pipeline].list_pipelines.body end tests("#describe_pipelines").formats(AWS::DataPipeline::Formats::DESCRIBE_PIPELINES) do ids = [@pipeline_id] Fog::AWS[:data_pipeline].describe_pipelines(ids).body end tests("#put_pipeline_definition").formats(AWS::DataPipeline::Formats::PUT_PIPELINE_DEFINITION) do objects = [ { "id" => 
"Nightly", "type" => "Schedule", "startDateTime" => Time.now.strftime("%Y-%m-%dT%H:%M:%S"), "period" => "24 hours", }, { "id" => "Default", "role" => "role-dumps", "resourceRole" => "role-dumps-inst", "schedule" => { "ref" => "Nightly" }, }, ] Fog::AWS[:data_pipeline].put_pipeline_definition(@pipeline_id, objects).body end tests("#activate_pipeline") do Fog::AWS[:data_pipeline].activate_pipeline(@pipeline_id) end tests("#deactivate_pipeline") do Fog::AWS[:data_pipeline].activate_pipeline(@pipeline_id) end tests("#get_pipeline_definition").formats(AWS::DataPipeline::Formats::GET_PIPELINE_DEFINITION) do Fog::AWS[:data_pipeline].get_pipeline_definition(@pipeline_id).body end tests("#query_objects") do tests("for COMPONENTs").formats(AWS::DataPipeline::Formats::QUERY_OBJECTS) do Fog::AWS[:data_pipeline].query_objects(@pipeline_id, 'COMPONENT').body end tests("for INSTANCEs").formats(AWS::DataPipeline::Formats::QUERY_OBJECTS) do Fog::AWS[:data_pipeline].query_objects(@pipeline_id, 'INSTANCE').body end tests("for ATTEMPTs").formats(AWS::DataPipeline::Formats::QUERY_OBJECTS) do Fog::AWS[:data_pipeline].query_objects(@pipeline_id, 'ATTEMPT').body end end tests('#describe_objects').formats(AWS::DataPipeline::Formats::DESCRIBE_OBJECTS) do attempts = Fog::AWS[:data_pipeline].query_objects(@pipeline_id, 'ATTEMPT').body object_ids = attempts['ids'][0..5] Fog::AWS[:data_pipeline].describe_objects(@pipeline_id, object_ids).body end tests("#delete_pipeline").returns(true) do Fog::AWS[:data_pipeline].delete_pipeline(@pipeline_id) end end end fog-aws-3.18.0/tests/requests/dns/000077500000000000000000000000001437344660100167575ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/dns/change_resource_record_sets_tests.rb000066400000000000000000000021451437344660100262600ustar00rootroot00000000000000Shindo.tests('Fog::DNS[:aws] | change_resource_record_sets', ['aws', 'dns']) do tests('success') do test('#elb_hosted_zone_mapping from DNS name') do zone_id = Fog::AWS::DNS.hosted_zone_for_alias_target('arbitrary-sub-domain.eu-west-1.elb.amazonaws.com') zone_id == Fog::AWS::DNS.elb_hosted_zone_mapping['eu-west-1'] end end tests("#change_resource_record_sets_data formats geolocation properly") do change_batch = [{ :action=>"CREATE", :name=>"ark.m.example.net.", :resource_records=>["1.1.1.1"], :ttl=>"300", :type=>"A", :set_identifier=>"ark", :geo_location=>{"CountryCode"=>"US", "SubdivisionCode"=>"AR"}, }] version = '2013-04-01' result = Fog::AWS::DNS.change_resource_record_sets_data('zone_id123', change_batch, version) doc = Nokogiri::XML(result) returns("https://route53.amazonaws.com/doc/#{version}/") { doc.namespaces['xmlns'] } returns(%w[US AR]) { [ doc.css("GeoLocation CountryCode").text, doc.css("GeoLocation SubdivisionCode").text ] } result end end fog-aws-3.18.0/tests/requests/dns/dns_tests.rb000066400000000000000000000173041437344660100213170ustar00rootroot00000000000000Shindo.tests('Fog::DNS[:aws] | DNS requests', ['aws', 'dns']) do @org_zone_count = 0 @zone_id = '' @change_id = '' @new_records = [] @domain_name = generate_unique_domain @elb_connection = Fog::AWS::ELB.new @r53_connection = Fog::DNS[:aws] tests('success') do test('get current zone count') do @org_zone_count= 0 response = @r53_connection.list_hosted_zones if response.status == 200 @hosted_zones = response.body['HostedZones'] @org_zone_count = @hosted_zones.count end response.status == 200 end test('create simple zone') { result = false response = @r53_connection.create_hosted_zone(@domain_name) if response.status == 201 zone = 
response.body['HostedZone'] change_info = response.body['ChangeInfo'] ns_servers = response.body['NameServers'] if (zone and change_info and ns_servers) @zone_id = zone['Id'] caller_ref = zone['CallerReference'] @change_id = change_info['Id'] status = change_info['Status'] ns_srv_count = ns_servers.count if (@zone_id.length > 0) and (caller_ref.length > 0) and (@change_id.length > 0) and (status.length > 0) and (ns_srv_count > 0) result = true end end end result } test("get status of change #{@change_id}") { result = false response = @r53_connection.get_change(@change_id) if response.status == 200 status = response.body['Status'] if (status == 'PENDING') or (status == 'INSYNC') result = true end end result } test("get info on hosted zone #{@zone_id}") { result = false response = @r53_connection.get_hosted_zone(@zone_id) if response.status == 200 zone = response.body['HostedZone'] zone_id = zone['Id'] name = zone['Name'] caller_ref = zone['CallerReference'] ns_servers = response.body['NameServers'] # AWS returns domain with a dot at end - so when compare, remove dot if (zone_id == @zone_id) and (name.chop == @domain_name) and (caller_ref.length > 0) and (ns_servers.count > 0) result = true end end result } test('list zones') do result = false response = @r53_connection.list_hosted_zones if response.status == 200 zones= response.body['HostedZones'] if (zones.count > 0) zone = zones[0] zone_id = zone['Id'] zone_name= zone['Name'] caller_ref = zone['CallerReference'] end max_items = response.body['MaxItems'] if (zone_id.length > 0) and (zone_name.length > 0) and (caller_ref.length > 0) and (max_items > 0) result = true end end result end test("add a A resource record") { # create an A resource record host = 'www.' + @domain_name ip_addrs = ['1.2.3.4'] resource_record = { :name => host, :type => 'A', :ttl => 3600, :resource_records => ip_addrs } resource_record_set = resource_record.merge(:action => 'CREATE') change_batch = [] change_batch << resource_record_set options = { :comment => 'add A record to domain'} response = @r53_connection.change_resource_record_sets(@zone_id, change_batch, options) Fog.wait_for { @r53_connection.get_change(response.body["Id"]).body["Status"] != "PENDING" } @new_records << resource_record @r53_connection.get_change(response.body["Id"]).body["Status"] == "INSYNC" } test("add a CNAME resource record") { # create a CNAME resource record host = 'mail.' + @domain_name value = ['www.' + @domain_name] resource_record = { :name => host, :type => 'CNAME', :ttl => 3600, :resource_records => value } resource_record_set = resource_record.merge(:action => 'CREATE') change_batch = [] change_batch << resource_record_set options = { :comment => 'add CNAME record to domain'} response = @r53_connection.change_resource_record_sets( @zone_id, change_batch, options) Fog.wait_for { @r53_connection.get_change(response.body["Id"]).body["Status"] != "PENDING" } @new_records << resource_record @r53_connection.get_change(response.body["Id"]).body["Status"] == "INSYNC" } test("add a MX resource record") { # create a MX resource record host = @domain_name value = ['7 mail.' 
+ @domain_name] resource_record = { :name => host, :type => 'MX', :ttl => 3600, :resource_records => value } resource_record_set = resource_record.merge( :action => 'CREATE') change_batch = [] change_batch << resource_record_set options = { :comment => 'add MX record to domain'} response = @r53_connection.change_resource_record_sets( @zone_id, change_batch, options) Fog.wait_for { @r53_connection.get_change(response.body["Id"]).body["Status"] != "PENDING" } @new_records << resource_record @r53_connection.get_change(response.body["Id"]).body["Status"] == "INSYNC" } test("add an ALIAS resource record") { # create a load balancer @elb_connection.create_load_balancer(["us-east-1a"], "fog", [{"Protocol" => "HTTP", "LoadBalancerPort" => "80", "InstancePort" => "80"}]) elb_response = @elb_connection.describe_load_balancers("LoadBalancerNames" => "fog") elb = elb_response.body["DescribeLoadBalancersResult"]["LoadBalancerDescriptions"].first hosted_zone_id = elb["CanonicalHostedZoneNameID"] dns_name = elb["DNSName"] # create an ALIAS record host = @domain_name alias_target = { :hosted_zone_id => hosted_zone_id, :dns_name => dns_name, :evaluate_target_health => false } resource_record = { :name => host, :type => 'A', :alias_target => alias_target } resource_record_set = resource_record.merge(:action => 'CREATE') change_batch = [] change_batch << resource_record_set options = { :comment => 'add ALIAS record to domain'} response = @r53_connection.change_resource_record_sets(@zone_id, change_batch, options) Fog.wait_for { @r53_connection.get_change(response.body["Id"]).body["Status"] != "PENDING" } @new_records << resource_record @r53_connection.get_change(response.body["Id"]).body["Status"] == "INSYNC" } tests("list resource records").formats(AWS::DNS::Formats::LIST_RESOURCE_RECORD_SETS) { # get resource records for zone @r53_connection.list_resource_record_sets(@zone_id).body } test("delete #{@new_records.count} resource records") { change_batch = @new_records.map { |record| record.merge(:action => 'DELETE') } options = { :comment => 'remove records from domain'} response = @r53_connection.change_resource_record_sets(@zone_id, change_batch, options) Fog.wait_for { @r53_connection.get_change(response.body["Id"]).body["Status"] != "PENDING" } @r53_connection.get_change(response.body["Id"]).body["Status"] == "INSYNC" } test("delete hosted zone #{@zone_id}") { # cleanup the ELB as well @elb_connection.delete_load_balancer("fog") @r53_connection.delete_hosted_zone(@zone_id).status == 200 } end tests('failure') do tests('create hosted zone using invalid domain name').raises(Excon::Errors::BadRequest) do pending if Fog.mocking? @r53_connection.create_hosted_zone('invalid-domain') end tests('get hosted zone using invalid ID').raises(Excon::Errors::NotFound) do pending if Fog.mocking? zone_id = 'dummy-id' @r53_connection.get_hosted_zone(zone_id) end end end fog-aws-3.18.0/tests/requests/dns/health_check_tests.rb000066400000000000000000000135711437344660100231370ustar00rootroot00000000000000Shindo.tests('Fog::DNS[:aws] | DNS requests', ['aws', 'dns']) do pending if Fog.mocking? 
@r53_connection = Fog::DNS[:aws] tests('success') do tests('create a health check') do after do @r53_connection.delete_health_check(@response.body['HealthCheck']['Id']) end test('create an IP TCP based health check') do @response = @r53_connection.create_health_check('8.8.8.8', '53', 'TCP') @response.status == 201 && @response.body['HealthCheck']['HealthCheckConfig']['IPAddress'] == '8.8.8.8' && @response.body['HealthCheck']['HealthCheckConfig']['Port'] == '53' end test('create a FQDN HTTP based health check') do @options = { :fqdn => "www.amazon.com", :resource_path => "/gp/cart/view.html/ref=nav_cart" } @response = @r53_connection.create_health_check(nil, '80', 'HTTP', @options) @response.status == 201 && @response.body['HealthCheck']['HealthCheckConfig']['IPAddress'].nil? && @response.body['HealthCheck']['HealthCheckConfig']['Port'] == '80' && @response.body['HealthCheck']['HealthCheckConfig']['FullyQualifiedDomainName'] == 'www.amazon.com' end end tests('get a health check') do @options = { :fqdn => "www.amazon.com", :resource_path => "/gp/cart/view.html/ref=nav_cart", :search_string => "Amazon", :request_interval => 10, :failure_threshold => "7" } create_response = @r53_connection.create_health_check('8.8.8.8', '443', 'HTTPS_STR_MATCH', @options) @health_check_id = create_response.body['HealthCheck']['Id'] @response = @r53_connection.get_health_check(@health_check_id) sleep 2 @r53_connection.delete_health_check(@health_check_id) test('id') do @response.body['HealthCheck']['Id'] == @health_check_id end { 'IPAddress' => '8.8.8.8', 'Port' => '443', 'Type' => 'HTTPS_STR_MATCH', 'FullyQualifiedDomainName' => @options[:fqdn], 'ResourcePath' => @options[:resource_path], 'RequestInterval' => @options[:request_interval], 'FailureThreshold' => @options[:failure_threshold] }.each do |key, value| test("and check property #{key}") do @response.body['HealthCheck']['HealthCheckConfig'][key] == value end end end tests('delete a health check') do before do response = @r53_connection.create_health_check('8.8.8.8', '53', 'TCP') @health_check_id = response.body['HealthCheck']['Id'] end test('setup as IP TCP') do response = @r53_connection.delete_health_check(@health_check_id) response.status == 200 end end tests('listing health checks') do test('succeeds') do response = @r53_connection.list_health_checks response.status == 200 end before do response_1 = @r53_connection.create_health_check('8.8.8.8', '53', 'TCP') @health_check_1_id = response_1.body['HealthCheck']['Id'] options = { :fqdn => "www.amazon.com", :resource_path => "/gp/cart/view.html/ref=nav_cart" } response_2 = @r53_connection.create_health_check(nil, '80', 'HTTP', options) @health_check_2_id = response_2.body['HealthCheck']['Id'] @health_check_ids = [@health_check_1_id, @health_check_2_id] end after do @health_check_ids.each { |id| @r53_connection.delete_health_check id } end test('contains 2 new health checks') do sleep 2 response = @r53_connection.list_health_checks health_checks_by_id = response.body['HealthChecks'].map do |health_check| health_check['Id'] end.to_a @health_check_ids.all? { |id| health_checks_by_id.include?(id) } end test('contains properties') do sleep 2 response = @r53_connection.list_health_checks list_response_2 = response.body['HealthChecks'].find { |health_check| health_check['Id'] == @health_check_2_id } list_response_2['HealthCheckConfig']['Type'] == 'HTTP' && list_response_2['HealthCheckConfig']['FullyQualifiedDomainName'] == 'www.amazon.com' && list_response_2['HealthCheckConfig']['IPAddress'].nil? 
end end tests('assign a health check to a DNS record') do after do @r53_connection.change_resource_record_sets(@zone_id, [@resource_record.merge(:action => 'DELETE')]) @r53_connection.delete_hosted_zone(@zone_id) @r53_connection.delete_health_check @health_check_id end health_check_response = @r53_connection.create_health_check('8.8.8.8', '53', 'TCP') raise "Health check was not created" unless health_check_response.status == 201 @health_check_id = health_check_response.body['HealthCheck']['Id'] @domain_name = generate_unique_domain zone_response = @r53_connection.create_hosted_zone(@domain_name) raise "Zone was not created for #{@domain_name}" unless zone_response.status == 201 @zone_id = zone_response.body['HostedZone']['Id'] @resource_record = { :name => "www.#{@domain_name}.", :type => 'A', :ttl => 3600, :resource_records => ['8.8.4.4'], :health_check_id => @health_check_id, :set_identifier => SecureRandom.hex(8), :weight => 50 } resource_record_set = [@resource_record.merge(:action => 'CREATE')] record_response = @r53_connection.change_resource_record_sets @zone_id, resource_record_set raise "A record was not created" unless record_response.status == 200 test('succeeds') do new_record = @r53_connection.list_resource_record_sets(@zone_id).body['ResourceRecordSets'].find do |record| record['Name'] == @resource_record[:name] end new_record['HealthCheckId'] == @health_check_id end end end end fog-aws-3.18.0/tests/requests/dns/helper.rb000066400000000000000000000010351437344660100205620ustar00rootroot00000000000000class AWS module DNS module Formats RESOURCE_RECORD_SET = { "ResourceRecords" => Array, "Name" => String, "Type" => String, "AliasTarget"=> Fog::Nullable::Hash, "TTL" => Fog::Nullable::String } LIST_RESOURCE_RECORD_SETS = { "ResourceRecordSets" => [RESOURCE_RECORD_SET], "IsTruncated" => Fog::Boolean, "MaxItems" => Integer, "NextRecordName" => Fog::Nullable::String, "NextRecordType" => Fog::Nullable::String } end end end fog-aws-3.18.0/tests/requests/dynamodb/000077500000000000000000000000001437344660100177705ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/dynamodb/item_tests.rb000066400000000000000000000116711437344660100225030ustar00rootroot00000000000000Shindo.tests('Fog::AWS[:dynamodb] | item requests', ['aws']) do @table_name = "fog_table_#{Time.now.to_f.to_s.gsub('.','')}" unless Fog.mocking? Fog::AWS[:dynamodb].create_table( @table_name, {'HashKeyElement' => {'AttributeName' => 'key', 'AttributeType' => 'S'}}, {'ReadCapacityUnits' => 5, 'WriteCapacityUnits' => 5} ) Fog.wait_for { Fog::AWS[:dynamodb].describe_table(@table_name).body['Table']['TableStatus'] == 'ACTIVE' } end tests('success') do tests("#put_item('#{@table_name}', {'key' => {'S' => 'key'}}, {'value' => {'S' => 'value'}})").formats('ConsumedCapacityUnits' => Float) do pending if Fog.mocking? Fog::AWS[:dynamodb].put_item(@table_name, {'key' => {'S' => 'key'}}, {'value' => {'S' => 'value'}}).body end tests("#update_item('#{@table_name}', {'HashKeyElement' => {'S' => 'key'}}, {'value' => {'Value' => {'S' => 'value'}}})").formats('ConsumedCapacityUnits' => Float) do pending if Fog.mocking? 
Fog::AWS[:dynamodb].update_item(@table_name, {'HashKeyElement' => {'S' => 'key'}}, {'value' => {'Value' => {'S' => 'value'}}}).body end @batch_get_item_format = { 'Responses' => { @table_name => { 'ConsumedCapacityUnits' => Float, 'Items' => [{ 'key' => { 'S' => String }, 'value' => { 'S' => String } }] } }, 'UnprocessedKeys' => {} } tests("#batch_get_item({'#{@table_name}' => {'Keys' => [{'HashKeyElement' => {'S' => 'key'}}]}})").formats(@batch_get_item_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].batch_get_item( {@table_name => {'Keys' => [{'HashKeyElement' => {'S' => 'key'}}]}} ).body end @batch_put_item_format = { 'Responses'=> { @table_name => { 'ConsumedCapacityUnits' => Float} }, 'UnprocessedItems'=> {} } tests("#batch_put_item({ '#{@table_name}' => [{ 'PutRequest' => { 'Item' => { 'HashKeyElement' => { 'S' => 'key' }, 'RangeKeyElement' => { 'S' => 'key' }}}}]})" ).formats(@batch_put_item_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].batch_put_item( {@table_name => [{'PutRequest'=> {'Item'=> {'HashKeyElement' => { 'S' => 'key' }, 'RangeKeyElement' => { 'S' => 'key' } }}}]} ).body end @get_item_format = { 'ConsumedCapacityUnits' => Float, 'Item' => { 'key' => { 'S' => String }, 'value' => { 'S' => String } } } tests("#get_item('#{@table_name}', {'HashKeyElement' => {'S' => 'key'}})").formats(@get_item_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].get_item(@table_name, {'HashKeyElement' => {'S' => 'key'}}).body end tests("#get_item('#{@table_name}', {'HashKeyElement' => {'S' => 'notakey'}})").formats('ConsumedCapacityUnits' => Float) do pending if Fog.mocking? Fog::AWS[:dynamodb].get_item(@table_name, {'HashKeyElement' => {'S' => 'notakey'}}).body end @query_format = { 'ConsumedCapacityUnits' => Float, 'Count' => Integer, 'Items' => [{ 'key' => { 'S' => String }, 'value' => { 'S' => String } }], 'LastEvaluatedKey' => NilClass } tests("#query('#{@table_name}')").formats(@query_format) do pending if Fog.mocking? pending # requires a table with range key Fog::AWS[:dynamodb].query(@table_name).body end @scan_format = @query_format.merge('ScannedCount' => Integer) tests("scan('#{@table_name}')").formats(@scan_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].scan(@table_name).body end tests("#delete_item('#{@table_name}', {'HashKeyElement' => {'S' => 'key'}})").formats('ConsumedCapacityUnits' => Float) do pending if Fog.mocking? Fog::AWS[:dynamodb].delete_item(@table_name, {'HashKeyElement' => {'S' => 'key'}}).body end tests("#delete_item('#{@table_name}, {'HashKeyElement' => {'S' => 'key'}})").formats('ConsumedCapacityUnits' => Float) do pending if Fog.mocking? Fog::AWS[:dynamodb].delete_item(@table_name, {'HashKeyElement' => {'S' => 'key'}}).body end end tests('failure') do tests("#put_item('notatable', {'key' => {'S' => 'key'}}, {'value' => {'S' => 'value'}})").raises(Excon::Errors::BadRequest) do pending if Fog.mocking? Fog::AWS[:dynamodb].put_item('notatable', {'key' => {'S' => 'key'}}, {'value' => {'S' => 'value'}}) end tests("#update_item('notatable', {'HashKeyElement' => {'S' => 'key'}}, {'value' => {'Value' => {'S' => 'value'}}})").raises(Excon::Errors::BadRequest) do pending if Fog.mocking? Fog::AWS[:dynamodb].update_item('notatable', {'HashKeyElement' => {'S' => 'key'}}, {'value' => {'Value' => {'S' => 'value'}}}) end end unless Fog.mocking? 
Fog::AWS[:dynamodb].delete_table(@table_name) end end fog-aws-3.18.0/tests/requests/dynamodb/table_tests.rb000066400000000000000000000065501437344660100226340ustar00rootroot00000000000000Shindo.tests('Fog::AWS[:dynamodb] | table requests', ['aws']) do @table_format = { 'CreationDateTime' => Float, 'KeySchema' => { 'HashKeyElement' => { 'AttributeName' => String, 'AttributeType' => String } }, 'ProvisionedThroughput' => { 'ReadCapacityUnits' => Integer, 'WriteCapacityUnits' => Integer }, 'TableName' => String, 'TableStatus' => String } @table_name = "fog_table_#{Time.now.to_f.to_s.gsub('.','')}" tests('success') do tests("#create_table(#{@table_name}, {'HashKeyElement' => {'AttributeName' => 'id', 'AttributeType' => 'S'}, {'ReadCapacityUnits' => 5, 'WriteCapacityUnits' => 5})").formats('TableDescription' => @table_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].create_table(@table_name, {'HashKeyElement' => {'AttributeName' => 'id', 'AttributeType' => 'S'}}, {'ReadCapacityUnits' => 5, 'WriteCapacityUnits' => 5}).body end tests("#describe_table(#{@table_name})").formats('Table' => @table_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].describe_table(@table_name).body end tests("#list_tables").formats({'LastEvaluatedTableName' => Fog::Nullable::String, 'TableNames' => [String]}) do pending if Fog.mocking? Fog::AWS[:dynamodb].list_tables.body end unless Fog.mocking? Fog.wait_for { Fog::AWS[:dynamodb].describe_table(@table_name).body['Table']['TableStatus'] == 'ACTIVE' } end @update_table_format = { 'TableDescription' => @table_format.merge({ 'ItemCount' => Integer, 'ProvisionedThroughput' => { 'LastIncreaseDateTime' => Float, 'ReadCapacityUnits' => Integer, 'WriteCapacityUnits' => Integer }, 'TableSizeBytes' => Integer }) } tests("#update_table(#{@table_name}, {'ReadCapacityUnits' => 10, 'WriteCapacityUnits' => 10})").formats(@update_table_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].update_table(@table_name, {'ReadCapacityUnits' => 10, 'WriteCapacityUnits' => 10}).body end unless Fog.mocking? Fog.wait_for { Fog::AWS[:dynamodb].describe_table(@table_name).body['Table']['TableStatus'] == 'ACTIVE' } end @delete_table_format = { 'TableDescription' => { 'ProvisionedThroughput' => { 'ReadCapacityUnits' => Integer, 'WriteCapacityUnits' => Integer }, 'TableName' => String, 'TableStatus' => String } } tests("#delete_table(#{@table_name}").formats(@delete_table_format) do pending if Fog.mocking? Fog::AWS[:dynamodb].delete_table(@table_name).body end end tests('failure') do tests("#delete_table('notatable')").raises(Excon::Errors::BadRequest) do pending if Fog.mocking? Fog::AWS[:dynamodb].delete_table('notatable') end tests("#describe_table('notatable')").raises(Excon::Errors::BadRequest) do pending if Fog.mocking? Fog::AWS[:dynamodb].describe_table('notatable') end tests("#update_table('notatable', {'ReadCapacityUnits' => 10, 'WriteCapacityUnits' => 10})").raises(Excon::Errors::BadRequest) do pending if Fog.mocking? 
Fog::AWS[:dynamodb].update_table('notatable', {'ReadCapacityUnits' => 10, 'WriteCapacityUnits' => 10}).body end end end fog-aws-3.18.0/tests/requests/ecs/000077500000000000000000000000001437344660100167455ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/ecs/cluster_tests.rb000066400000000000000000000105051437344660100221760ustar00rootroot00000000000000Shindo.tests('AWS::ECS | cluster requests', ['aws', 'ecs']) do Fog::AWS[:ecs].reset_data tests('success') do tests("#create_cluster").formats(AWS::ECS::Formats::CREATE_CLUSTER) do result = Fog::AWS[:ecs].create_cluster('clusterName' => 'cluster1').body cluster = result['CreateClusterResult']['cluster'] returns('cluster1') { cluster['clusterName'] } returns('ACTIVE') { cluster['status'] } result end tests("#create_cluster another").formats(AWS::ECS::Formats::CREATE_CLUSTER) do result = Fog::AWS[:ecs].create_cluster('clusterName' => 'foobar').body cluster = result['CreateClusterResult']['cluster'] returns('foobar') { cluster['clusterName'] } returns('ACTIVE') { cluster['status'] } result end tests("#create_cluster without params").formats(AWS::ECS::Formats::CREATE_CLUSTER) do result = Fog::AWS[:ecs].create_cluster.body cluster = result['CreateClusterResult']['cluster'] returns('default') { cluster['clusterName'] } result end tests("#list_clusters").formats(AWS::ECS::Formats::LIST_CLUSTERS) do result = Fog::AWS[:ecs].list_clusters.body clusters = result['ListClustersResult']['clusterArns'] returns(true) { clusters.size.eql?(3) } result end tests("#describe_clusters with name").formats(AWS::ECS::Formats::DESCRIBE_CLUSTERS) do result = Fog::AWS[:ecs].describe_clusters('clusters' => 'cluster1').body clusters = result['DescribeClustersResult']['clusters'] failures = result['DescribeClustersResult']['failures'] returns(true) { clusters.size.eql?(1) } returns('cluster1') { clusters.first['clusterName'] } returns(true) { failures.empty? 
} result end tests("#describe_clusters without params").formats(AWS::ECS::Formats::DESCRIBE_CLUSTERS) do result = Fog::AWS[:ecs].describe_clusters.body clusters = result['DescribeClustersResult']['clusters'] failures = result['DescribeClustersResult']['failures'] returns(true) { clusters.size.eql?(1) } returns('default') { clusters.first['clusterName'] } result end tests("#describe_clusters several with name").formats(AWS::ECS::Formats::DESCRIBE_CLUSTERS) do result = Fog::AWS[:ecs].describe_clusters('clusters' => %w(cluster1 foobar)).body clusters = result['DescribeClustersResult']['clusters'] cluster_names = clusters.map { |c| c['clusterName'] }.sort returns(true) { clusters.size.eql?(2) } returns('cluster1') { cluster_names.first } returns('foobar') { cluster_names[1] } result end tests("#describe_clusters with errors").formats(AWS::ECS::Formats::DESCRIBE_CLUSTERS) do result = Fog::AWS[:ecs].describe_clusters('clusters' => %w(foobar not_here wtf)).body clusters = result['DescribeClustersResult']['clusters'] failures = result['DescribeClustersResult']['failures'] returns(true) { failures.size.eql?(2) } returns('MISSING') { failures.first['reason'] } returns(true) { clusters.size.eql?(1) } result end tests("#delete_cluster").formats(AWS::ECS::Formats::DELETE_CLUSTER) do cluster_name = 'foobar' result = Fog::AWS[:ecs].delete_cluster('cluster' => cluster_name).body cluster = result['DeleteClusterResult']['cluster'] returns(true) { cluster['clusterName'].eql?(cluster_name) } returns('INACTIVE') { cluster['status'] } result end tests("#list_clusters after one delete").formats(AWS::ECS::Formats::LIST_CLUSTERS) do result = Fog::AWS[:ecs].list_clusters.body clusters = result['ListClustersResult']['clusterArns'] returns(true) { clusters.size.eql?(2) } result end tests("#delete_cluster by arn").formats(AWS::ECS::Formats::DELETE_CLUSTER) do result1 = Fog::AWS[:ecs].describe_clusters.body cluster1 = result1['DescribeClustersResult']['clusters'].first result2 = Fog::AWS[:ecs].delete_cluster('cluster' => cluster1['clusterArn']).body cluster2 = result2['DeleteClusterResult']['cluster'] returns('default') { cluster2['clusterName'] } returns('INACTIVE') { cluster2['status'] } result2 end end tests('failures') do tests('#delete_cluster without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].delete_cluster.body end end end fog-aws-3.18.0/tests/requests/ecs/container_instance_tests.rb000066400000000000000000000106251437344660100243660ustar00rootroot00000000000000Shindo.tests('AWS::ECS | container instance requests', ['aws', 'ecs']) do Fog::AWS[:ecs].reset_data container_instance_arn = 'arn:aws:ecs:us-west-2:738152598183:container-instance/eff1068d-5fcb-4804-89f0-7d18ffc6879c' ec2_instance_id = 'i-58f4b4ae' Fog::AWS[:ecs].data[:container_instances] << { 'remainingResources' => [ { 'longValue' => 0, 'name' => 'CPU', 'integerValue' => 1004, 'doubleValue' => 0.0, 'type' => 'INTEGER' }, { 'longValue' => 0, 'name' => 'MEMORY', 'integerValue' => 496, 'doubleValue' => 0.0, 'type' => 'INTEGER' }, { 'stringSetValue' => [2376, 22, 80, 51678, 2375], 'longValue' => 0, 'name' => 'PORTS', 'integerValue' => 0, 'doubleValue' => 0.0, 'type' => 'STRINGSET' } ], 'agentConnected' => true, 'runningTasksCount' => 1, 'status' => 'ACTIVE', 'registeredResources' => [ { 'longValue' => 0, 'name' => 'CPU', 'integerValue' => 1024, 'doubleValue' => 0.0, 'type' => 'INTEGER' }, { 'longValue' => 0, 'name' => 'MEMORY', 'integerValue' => 996, 'doubleValue' => 0.0, 'type' => 'INTEGER' }, { 'stringSetValue' => [2376, 22, 80, 
51678, 2375], 'longValue' => 0, 'name' => 'PORTS', 'integerValue' => 0, 'doubleValue' => 0.0, 'type' => 'STRINGSET' } ], 'containerInstanceArn' => container_instance_arn, 'pendingTasksCount' => 0, 'ec2InstanceId' => ec2_instance_id } tests('success') do tests("#list_container_instances").formats(AWS::ECS::Formats::LIST_CONTAINER_INSTANCES) do result = Fog::AWS[:ecs].list_container_instances.body list_instances_arns = result['ListContainerInstancesResult']['containerInstanceArns'] returns(false) { list_instances_arns.empty? } returns(true) { list_instances_arns.first.eql?(container_instance_arn) } result end tests("#describe_container_instances").formats(AWS::ECS::Formats::DESCRIBE_CONTAINER_INSTANCES) do result = Fog::AWS[:ecs].describe_container_instances('containerInstances' => container_instance_arn).body instance = result['DescribeContainerInstancesResult']['containerInstances'].first returns(true) { instance['containerInstanceArn'].eql?(container_instance_arn) } returns(true) { instance['ec2InstanceId'].eql?(ec2_instance_id) } returns(true) { instance['status'].eql?('ACTIVE') } result end tests("#deregister_container_instance").formats(AWS::ECS::Formats::DEREGISTER_CONTAINER_INSTANCE) do result = Fog::AWS[:ecs].deregister_container_instance('containerInstance' => container_instance_arn).body instance = result['DeregisterContainerInstanceResult']['containerInstance'] returns(true) { instance['containerInstanceArn'].eql?(container_instance_arn) } returns(true) { instance['ec2InstanceId'].eql?(ec2_instance_id) } returns(true) { instance['pendingTasksCount'].eql?(0) } result end tests("#list_container_instances again").formats(AWS::ECS::Formats::LIST_CONTAINER_INSTANCES) do result = Fog::AWS[:ecs].list_container_instances.body list_instances_arns = result['ListContainerInstancesResult']['containerInstanceArns'] returns(true) { list_instances_arns.empty? 
} result end end tests('failures') do tests('#describe_container_instances without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].describe_container_instances.body end tests('#deregister_container_instance without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].deregister_container_instance.body end tests('#deregister_container_instance nonexistent').raises(Fog::AWS::ECS::Error) do instance_uuid = 'ffffffff-ffff-0000-ffff-deadbeefff' response = Fog::AWS[:ecs].deregister_container_instance('containerInstance' => instance_uuid).body end end end fog-aws-3.18.0/tests/requests/ecs/helper.rb000066400000000000000000000231451437344660100205560ustar00rootroot00000000000000class AWS module ECS module Formats BASIC = { 'ResponseMetadata' => { 'RequestId' => String } } CREATE_CLUSTER = BASIC.merge({ 'CreateClusterResult' => { 'cluster' => { 'clusterName' => String, 'clusterArn' => String, 'status' => String, 'registeredContainerInstancesCount' => Integer, 'runningTasksCount' => Integer, 'pendingTasksCount' => Integer } } }) LIST_CLUSTERS = BASIC.merge({ 'ListClustersResult' => { 'clusterArns' => [String], 'nextToken' => Fog::Nullable::String } }) DELETE_CLUSTER = BASIC.merge({ 'DeleteClusterResult' => { 'cluster' => { 'clusterName' => String, 'clusterArn' => String, 'status' => String, 'registeredContainerInstancesCount' => Integer, 'runningTasksCount' => Integer, 'pendingTasksCount' => Integer } } }) DESCRIBE_CLUSTERS = BASIC.merge({ 'DescribeClustersResult' => { 'failures' => [Fog::Nullable::Hash], 'clusters' => [Fog::Nullable::Hash] } }) REGISTER_TASK_DEFINITION = BASIC.merge({ 'RegisterTaskDefinitionResult' => { 'taskDefinition' => { 'revision' => Integer, 'taskDefinitionArn' => String, 'family' => String, 'containerDefinitions' => [Hash], 'volumes' => Fog::Nullable::Array } } }) LIST_TASK_DEFINITIONS = BASIC.merge({ 'ListTaskDefinitionsResult' => { 'taskDefinitionArns' => [String] } }) DESCRIBE_TASK_DEFINITION = BASIC.merge({ 'DescribeTaskDefinitionResult' => { 'taskDefinition' => { 'revision' => Integer, 'taskDefinitionArn' => String, 'family' => String, 'containerDefinitions' => [Hash], 'volumes' => Fog::Nullable::Array } } }) DEREGISTER_TASK_DEFINITION = BASIC.merge({ 'DeregisterTaskDefinitionResult' => { 'taskDefinition' => { 'revision' => Integer, 'taskDefinitionArn' => String, 'family' => String, 'containerDefinitions' => [Hash], 'volumes' => Fog::Nullable::Array } } }) LIST_TASK_DEFINITION_FAMILIES = BASIC.merge({ 'ListTaskDefinitionFamiliesResult' => { 'families' => [String] } }) CREATE_SERVICE = BASIC.merge({ 'CreateServiceResult' => { 'service' => { 'events' => [Fog::Nullable::Hash], 'serviceName' => String, 'serviceArn' => String, 'taskDefinition' => String, 'clusterArn' => String, 'status' => String, 'roleArn' => Fog::Nullable::String, 'loadBalancers' => [Fog::Nullable::Hash], 'deployments' => [Fog::Nullable::Hash], 'desiredCount' => Integer, 'pendingCount' => Integer, 'runningCount' => Integer } } }) DELETE_SERVICE = BASIC.merge({ 'DeleteServiceResult' => { 'service' => { 'events' => [Fog::Nullable::Hash], 'serviceName' => String, 'serviceArn' => String, 'taskDefinition' => String, 'clusterArn' => String, 'status' => String, 'roleArn' => Fog::Nullable::String, 'loadBalancers' => [Fog::Nullable::Hash], 'deployments' => [Fog::Nullable::Hash], 'desiredCount' => Integer, 'pendingCount' => Integer, 'runningCount' => Integer } } }) DESCRIBE_SERVICES = BASIC.merge({ 'DescribeServicesResult' => { 'failures' => [Fog::Nullable::Hash], 'services' => [{ 
'events' => [Fog::Nullable::Hash], 'serviceName' => String, 'serviceArn' => String, 'taskDefinition' => String, 'clusterArn' => String, 'status' => String, 'roleArn' => Fog::Nullable::String, 'loadBalancers' => [Fog::Nullable::Hash], 'deployments' => [Fog::Nullable::Hash], 'desiredCount' => Integer, 'pendingCount' => Integer, 'runningCount' => Integer }] } }) LIST_SERVICES = BASIC.merge({ 'ListServicesResult' => { 'serviceArns' => [Fog::Nullable::String], 'nextToken' => Fog::Nullable::String } }) UPDATE_SERVICE = BASIC.merge({ 'UpdateServiceResult' => { 'service' => { 'events' => [Fog::Nullable::Hash], 'serviceName' => String, 'serviceArn' => String, 'taskDefinition' => String, 'clusterArn' => String, 'status' => String, 'roleArn' => Fog::Nullable::String, 'loadBalancers' => [Fog::Nullable::Hash], 'deployments' => [Fog::Nullable::Hash], 'desiredCount' => Integer, 'pendingCount' => Integer, 'runningCount' => Integer } } }) LIST_CONTAINER_INSTANCES = BASIC.merge({ 'ListContainerInstancesResult' => { 'containerInstanceArns' => [Fog::Nullable::String] } }) DESCRIBE_CONTAINER_INSTANCES = BASIC.merge({ 'DescribeContainerInstancesResult' => { 'containerInstances' => [{ 'remainingResources' => [Hash], 'agentConnected' => Fog::Boolean, 'runningTasksCount' => Integer, 'status' => String, 'registeredResources' => [Hash], 'containerInstanceArn' => String, 'pendingTasksCount' => Integer, 'ec2InstanceId' => String }], 'failures' => [Fog::Nullable::Hash], } }) DEREGISTER_CONTAINER_INSTANCE = BASIC.merge({ 'DeregisterContainerInstanceResult' => { 'containerInstance' => { 'remainingResources' => [Hash], 'agentConnected' => Fog::Boolean, 'runningTasksCount' => Integer, 'status' => String, 'registeredResources' => [Hash], 'containerInstanceArn' => String, 'pendingTasksCount' => Integer, 'ec2InstanceId' => String } } }) LIST_TASKS = BASIC.merge({ 'ListTasksResult' => { 'taskArns' => [Fog::Nullable::String] } }) DESCRIBE_TASKS = BASIC.merge({ 'DescribeTasksResult' => { 'failures' => [Fog::Nullable::Hash], 'tasks' => [ { 'clusterArn' => String, 'containers' => Array, 'overrides' => Fog::Nullable::Hash, 'startedBy' => Fog::Nullable::String, 'desiredStatus' => String, 'taskArn' => String, 'containerInstanceArn' => String, 'lastStatus' => String, 'taskDefinitionArn' => String } ] } }) RUN_TASK = BASIC.merge({ 'RunTaskResult' => { 'failures' => [Fog::Nullable::Hash], 'tasks' => [ { 'clusterArn' => String, 'containers' => [Hash], 'overrides' => Fog::Nullable::Hash, 'desiredStatus' => String, 'taskArn' => String, 'containerInstanceArn' => String, 'lastStatus' => String, 'taskDefinitionArn' => String } ] } }) STOP_TASK = BASIC.merge({ 'StopTaskResult' => { 'task' => { 'clusterArn' => String, 'containers' => [Hash], 'overrides' => Fog::Nullable::Hash, 'desiredStatus' => String, 'taskArn' => String, 'startedBy' => Fog::Nullable::String, 'containerInstanceArn' => String, 'lastStatus' => String, 'taskDefinitionArn' => String } } }) START_TASK = BASIC.merge({ 'StartTaskResult' => { 'failures' => [Fog::Nullable::Hash], 'tasks' => [ { 'clusterArn' => String, 'containers' => [Hash], 'overrides' => Fog::Nullable::Hash, 'desiredStatus' => String, 'taskArn' => String, 'containerInstanceArn' => String, 'lastStatus' => String, 'taskDefinitionArn' => String } ] } }) end module Samples TASK_DEFINITION_1 = File.dirname(__FILE__) + '/sample_task_definition1.json' end end end fog-aws-3.18.0/tests/requests/ecs/sample_task_definition1.json000066400000000000000000000027171437344660100244430ustar00rootroot00000000000000{ "family": 
"console-sample-app-static", "containerDefinitions": [ { "name": "simple-app", "image": "httpd:2.4", "cpu": 10, "memory": 300, "environment": [], "portMappings": [ { "hostPort": 80, "containerPort": 80 } ], "volumesFrom": [], "links": [], "mountPoints": [ { "sourceVolume": "my-vol", "containerPath": "/usr/local/apache2/htdocs" } ], "essential": true }, { "name": "busybox", "image": "busybox", "cpu": 10, "memory": 200, "entryPoint": [ "sh", "-c" ], "environment": [], "command": [ "/bin/sh -c \"while true; do echo ' Amazon ECS Sample App

Amazon ECS Sample App

Congratulations!

Your application is now running on a container in Amazon ECS.

' > top; /bin/date > date ; echo '
' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done\"" ], "portMappings": [], "volumesFrom": [ { "sourceContainer": "simple-app" } ], "links": [], "mountPoints": [], "essential": false } ], "volumes": [ { "name": "my-vol", "host": {} } ] } fog-aws-3.18.0/tests/requests/ecs/service_tests.rb000066400000000000000000000115531437344660100221610ustar00rootroot00000000000000Shindo.tests('AWS::ECS | service requests', ['aws', 'ecs']) do Fog::AWS[:ecs].reset_data cluster = 'arn:aws:ecs:us-east-1:994922842243:cluster/default' desired_count = 1 role = 'arn:aws:iam::806753142346:role/ecsServiceRole' service_name = 'sample-webapp' task_definition = 'console-sample-app-static:18' load_balancers = [{ 'containerName' => 'simple-app', 'containerPort' => 80, 'loadBalancerName' => 'ecsunittests-EcsElastic-OI09IAP3PVIP' }] tests('success') do tests("#list_services").formats(AWS::ECS::Formats::LIST_SERVICES) do result = Fog::AWS[:ecs].list_services('cluster' => cluster).body list_services_arns = result['ListServicesResult']['serviceArns'] returns(true) { list_services_arns.empty? } result end tests("#create_service").formats(AWS::ECS::Formats::CREATE_SERVICE) do params = { 'cluster' => cluster, 'desiredCount' => desired_count, 'loadBalancers' => load_balancers, 'role' => role, 'serviceName' => service_name, 'taskDefinition' => task_definition } result = Fog::AWS[:ecs].create_service(params).body service = result['CreateServiceResult']['service'] returns('sample-webapp') { service['serviceName'] } returns(false) { service['serviceArn'].match(/^arn:aws:ecs:.+:.+:service\/.+$/).nil? } result end tests("#list_services again").formats(AWS::ECS::Formats::LIST_SERVICES) do result = Fog::AWS[:ecs].list_services('cluster' => cluster).body list_services_arns = result['ListServicesResult']['serviceArns'] returns(false) { list_services_arns.empty? } returns(true) { !list_services_arns.first.match(/#{service_name}/).nil? } result end tests("#describe_services").formats(AWS::ECS::Formats::DESCRIBE_SERVICES) do result1 = Fog::AWS[:ecs].list_services('cluster' => cluster).body service_arn = result1['ListServicesResult']['serviceArns'].first result2 = Fog::AWS[:ecs].describe_services( 'services' => service_arn, 'cluster' => cluster ).body returns(true) { result2['DescribeServicesResult']['services'].size.eql?(1) } service = result2['DescribeServicesResult']['services'].first returns(true) { service['serviceName'].eql?(service_name) } returns(true) { service['status'].eql?('ACTIVE') } returns(false) { service['deployments'].empty? 
} returns(true) { service['desiredCount'].eql?(desired_count) } result2 end tests("#update_service").formats(AWS::ECS::Formats::UPDATE_SERVICE) do new_task_def = 'arn:aws:ecs:us-east-1:994922842243:task-definitions/foobar-app:32' result1 = Fog::AWS[:ecs].list_services('cluster' => cluster).body service_arn = result1['ListServicesResult']['serviceArns'].first result2 = Fog::AWS[:ecs].update_service( 'service' => service_arn, 'cluster' => cluster, 'taskDefinition' => new_task_def ).body service = result2['UpdateServiceResult']['service'] returns(true) { service['serviceName'].eql?(service_name) } returns(true) { service['taskDefinition'].eql?(new_task_def) } result2 end tests("#delete_service").formats(AWS::ECS::Formats::DELETE_SERVICE) do result1 = Fog::AWS[:ecs].list_services('cluster' => cluster).body service_arn = result1['ListServicesResult']['serviceArns'].first result2 = Fog::AWS[:ecs].delete_service( 'service' => service_arn, 'cluster' => cluster ).body service = result2['DeleteServiceResult']['service'] returns(true) { service['serviceName'].eql?(service_name) } result2 end tests("#list_services yet again").formats(AWS::ECS::Formats::LIST_SERVICES) do result = Fog::AWS[:ecs].list_services('cluster' => cluster).body list_services_arns = result['ListServicesResult']['serviceArns'] returns(true) { list_services_arns.empty? } result end end tests('failures') do tests('#describe_services without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].describe_services.body end tests('#create_service without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].create_service.body end tests('#update_service without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].update_service.body end tests('#update_service nonexistent').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].update_service('service' => 'whatever2329').body end tests('#delete_service without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].delete_service.body end tests('#delete_service nonexistent').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].delete_service('service' => 'foobar787383').body end end end fog-aws-3.18.0/tests/requests/ecs/task_definitions_tests.rb000066400000000000000000000102321437344660100240470ustar00rootroot00000000000000require 'fog/json' Shindo.tests('AWS::ECS | task definitions requests', ['aws', 'ecs']) do Fog::AWS[:ecs].reset_data tests('success') do tests("#list_task_definitions").formats(AWS::ECS::Formats::LIST_TASK_DEFINITIONS) do result = Fog::AWS[:ecs].list_task_definitions.body list_task_def_arns = result['ListTaskDefinitionsResult']['taskDefinitionArns'] returns(true) { list_task_def_arns.empty? } result end tests("#register_task_definition").formats(AWS::ECS::Formats::REGISTER_TASK_DEFINITION) do task_def_params = Fog::JSON.decode(IO.read(AWS::ECS::Samples::TASK_DEFINITION_1)) result = Fog::AWS[:ecs].register_task_definition(task_def_params).body task_def = result['RegisterTaskDefinitionResult']['taskDefinition'] returns('console-sample-app-static') { task_def['family'] } returns(true) { task_def['revision'] > 0 } returns(false) { task_def['taskDefinitionArn'].match(/^arn:aws:ecs:.+:.+:task-definition\/.+:\d+$/).nil? } result end tests("#list_task_definition_families").formats(AWS::ECS::Formats::LIST_TASK_DEFINITION_FAMILIES) do result = Fog::AWS[:ecs].list_task_definition_families.body families = result['ListTaskDefinitionFamiliesResult']['families'] returns(false) { families.empty? 
} returns(true) { families.include?('console-sample-app-static') } result end tests("#list_task_definitions again").formats(AWS::ECS::Formats::LIST_TASK_DEFINITIONS) do result = Fog::AWS[:ecs].list_task_definitions.body list_task_def_arns = result['ListTaskDefinitionsResult']['taskDefinitionArns'] returns(true) { list_task_def_arns.size.eql?(1) } result end tests("#describe_task_definition").formats(AWS::ECS::Formats::DESCRIBE_TASK_DEFINITION) do result1 = Fog::AWS[:ecs].list_task_definitions.body task_def_arn = result1['ListTaskDefinitionsResult']['taskDefinitionArns'].first result2 = Fog::AWS[:ecs].describe_task_definition('taskDefinition' => task_def_arn).body task_def = result2['DescribeTaskDefinitionResult']['taskDefinition'] returns(true) { task_def['taskDefinitionArn'].eql?(task_def_arn) } returns(true) { task_def['containerDefinitions'].size > 0 } result2 end tests("#deregister_task_definition").formats(AWS::ECS::Formats::DEREGISTER_TASK_DEFINITION) do result1 = Fog::AWS[:ecs].list_task_definitions.body task_def_arn = result1['ListTaskDefinitionsResult']['taskDefinitionArns'].first result2 = Fog::AWS[:ecs].deregister_task_definition('taskDefinition' => task_def_arn).body task_def = result2['DeregisterTaskDefinitionResult']['taskDefinition'] returns(true) { task_def['taskDefinitionArn'].eql?(task_def_arn) } result2 end tests("#list_task_definitions yet again").formats(AWS::ECS::Formats::LIST_TASK_DEFINITIONS) do result = Fog::AWS[:ecs].list_task_definitions.body list_task_def_arns = result['ListTaskDefinitionsResult']['taskDefinitionArns'] returns(true) { list_task_def_arns.empty? } result end tests("#list_task_definition_families again").formats(AWS::ECS::Formats::LIST_TASK_DEFINITION_FAMILIES) do result = Fog::AWS[:ecs].list_task_definition_families.body families = result['ListTaskDefinitionFamiliesResult']['families'] returns(true) { families.empty? } returns(false) { families.include?('console-sample-app-static') } result end end tests('failures') do tests('#describe_task_definition without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].describe_task_definition.body end tests('#describe_task_definition nonexistent').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].describe_task_definition('taskDefinition' => 'foobar').body end tests('#deregister_task_definition without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].deregister_task_definition.body end tests('#deregister_task_definition nonexistent').raises(Fog::AWS::ECS::NotFound) do response = Fog::AWS[:ecs].deregister_task_definition('taskDefinition' => 'foobar:7873287283').body end end end fog-aws-3.18.0/tests/requests/ecs/task_tests.rb000066400000000000000000000131701437344660100214600ustar00rootroot00000000000000require 'fog/json' Shindo.tests('AWS::ECS | task requests', ['aws', 'ecs']) do Fog::AWS[:ecs].reset_data tests('success') do tests("#list_tasks").formats(AWS::ECS::Formats::LIST_TASKS) do result = Fog::AWS[:ecs].list_tasks.body list_instances_arns = result['ListTasksResult']['taskArns'] returns(true) { list_instances_arns.empty? 
} result end tests("#run_task").formats(AWS::ECS::Formats::RUN_TASK) do task_def_params = Fog::JSON.decode(IO.read(AWS::ECS::Samples::TASK_DEFINITION_1)) result1 = Fog::AWS[:ecs].register_task_definition(task_def_params).body task_def = result1['RegisterTaskDefinitionResult']['taskDefinition'] task_def_arn = task_def['taskDefinitionArn'] result2 = Fog::AWS[:ecs].run_task('taskDefinition' => task_def_arn).body task = result2['RunTaskResult']['tasks'].first returns(true) { task.has_key?('containerInstanceArn') } returns(true) { task['containers'].size.eql?(2) } returns(true) { task['desiredStatus'].eql?('RUNNING') } returns(true) { task['taskDefinitionArn'].eql?(task_def_arn) } result2 end tests("#describe_tasks").formats(AWS::ECS::Formats::DESCRIBE_TASKS) do result1 = Fog::AWS[:ecs].list_tasks.body task_arn = result1['ListTasksResult']['taskArns'].first result2 = Fog::AWS[:ecs].describe_tasks('tasks' => task_arn).body task = result2['DescribeTasksResult']['tasks'].first returns(true) { task['taskArn'].eql?(task_arn) } returns(true) { task['containers'].size.eql?(2) } returns(true) { task['desiredStatus'].eql?('RUNNING') } result2 end tests("#list_tasks").formats(AWS::ECS::Formats::LIST_TASKS) do result = Fog::AWS[:ecs].list_tasks.body list_instances_arns = result['ListTasksResult']['taskArns'] returns(false) { list_instances_arns.empty? } result end tests("#stop_task").formats(AWS::ECS::Formats::STOP_TASK) do result1 = Fog::AWS[:ecs].list_tasks.body task_arn = result1['ListTasksResult']['taskArns'].first result2 = Fog::AWS[:ecs].stop_task('task' => task_arn).body task = result2['StopTaskResult']['task'] returns(true) { task['taskArn'].eql?(task_arn) } returns(true) { task['containers'].size.eql?(2) } returns(true) { task['desiredStatus'].eql?('STOPPED') } result2 end tests("#start_task").formats(AWS::ECS::Formats::START_TASK) do owner_id = Fog::AWS::Mock.owner_id container_instance_path = "container-instance/#{Fog::UUID.uuid}" region = "us-east-1" container_instance_arn = Fog::AWS::Mock.arn('ecs', owner_id, container_instance_path, region) task_def_params = Fog::JSON.decode(IO.read(AWS::ECS::Samples::TASK_DEFINITION_1)) result1 = Fog::AWS[:ecs].register_task_definition(task_def_params).body task_def = result1['RegisterTaskDefinitionResult']['taskDefinition'] task_def_arn = task_def['taskDefinitionArn'] result2 = Fog::AWS[:ecs].start_task( 'taskDefinition' => task_def_arn, 'containerInstances' => container_instance_arn ).body task = result2['StartTaskResult']['tasks'].first returns(true) { task['containerInstanceArn'].eql?(container_instance_arn) } returns(true) { task['containers'].size.eql?(2) } returns(true) { task['desiredStatus'].eql?('RUNNING') } returns(true) { task['taskDefinitionArn'].eql?(task_def_arn) } result2 end tests("#list_tasks").formats(AWS::ECS::Formats::LIST_TASKS) do result = Fog::AWS[:ecs].list_tasks.body list_instances_arns = result['ListTasksResult']['taskArns'] returns(false) { list_instances_arns.empty? 
} result end end tests('failures') do tests("#describe_tasks nonexistent") do task_arn = "arn:aws:ecs:us-west-2:938269302734:task/6893440f-2165-47aa-8cfa-b2f413a26f00" result = Fog::AWS[:ecs].describe_tasks('tasks' => task_arn).body end tests('describe_tasks without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].describe_tasks.body end tests('#run_task without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].run_task.body end tests('#run_task nonexistent').raises(Fog::AWS::ECS::Error) do task_def_arn = "arn:aws:ecs:us-west-2:539573770077:task-definition/foo-xanadu-app-static:33" response = Fog::AWS[:ecs].run_task('taskDefinition' => task_def_arn).body end tests('#start_task without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].start_task.body end tests('#start_task with missing params').raises(Fog::AWS::ECS::Error) do task_def_arn = "arn:aws:ecs:us-west-2:539573770077:task-definition/foo-xanadu-app-static:33" response = Fog::AWS[:ecs].start_task('taskDefinition' => task_def_arn).body end tests('#start_task nonexistent').raises(Fog::AWS::ECS::Error) do task_def_arn = "arn:aws:ecs:us-west-2:539573770077:task-definition/foo-xanadu-app-static:33" container_instance_arn = "arn:aws:ecs:us-west-2:938269302734:container-instance/6893440f-2165-47aa-8cfa-b2f413a26f00" response = Fog::AWS[:ecs].start_task( 'taskDefinition' => task_def_arn, 'containerInstances' => container_instance_arn ).body end tests('#stop_task without params').raises(Fog::AWS::ECS::Error) do response = Fog::AWS[:ecs].stop_task.body end tests('#stop_task nonexistent params').raises(Fog::AWS::ECS::Error) do task_arn = "arn:aws:ecs:us-west-2:938269302734:task/6893440f-2165-47aa-8cfa-b2f413a26f00" response = Fog::AWS[:ecs].stop_task('task' => task_arn).body end end end fog-aws-3.18.0/tests/requests/efs/000077500000000000000000000000001437344660100167505ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/efs/file_system_tests.rb000066400000000000000000000143711437344660100230500ustar00rootroot00000000000000Shindo.tests('AWS::EFS | file systems', ['aws', 'efs']) do suffix = rand(65535).to_s(16) @creation_token = "fogtest#{suffix}" tests('success') do tests("#create_file_system('#{@creation_token}')").formats(AWS::EFS::Formats::FILE_SYSTEM_FORMAT) do result = Fog::AWS[:efs].create_file_system(@creation_token).body returns('creating') { result['LifeCycleState'] } result end tests("#describe_file_systems").formats(AWS::EFS::Formats::DESCRIBE_FILE_SYSTEMS_RESULT) do Fog::AWS[:efs].describe_file_systems.body end tests("#describe_file_systems(creation_token: #{@creation_token})").formats(AWS::EFS::Formats::DESCRIBE_FILE_SYSTEMS_RESULT) do result = Fog::AWS[:efs].describe_file_systems(:creation_token => @creation_token).body returns(@creation_token) { result["FileSystems"].first["CreationToken"] } result end file_system_id = Fog::AWS[:efs].describe_file_systems(:creation_token => @creation_token).body["FileSystems"].first["FileSystemId"] file_system = Fog::AWS[:efs].file_systems.get(file_system_id) tests("#describe_file_systems(id: #{file_system_id})").formats(AWS::EFS::Formats::DESCRIBE_FILE_SYSTEMS_RESULT) do Fog::AWS[:efs].describe_file_systems(:id => file_system_id).body end if Fog.mocking? 
vpc = Fog::Compute[:aws].vpcs.create(:cidr_block => "10.0.0.0/16") subnet = Fog::Compute[:aws].subnets.create( :vpc_id => vpc.id, :cidr_block => "10.0.1.0/24" ) default_security_group = Fog::Compute[:aws].security_groups.detect { |sg| sg.description == 'default_elb security group' } else vpc = Fog::Compute[:aws].vpcs.first subnet = vpc.subnets.first default_security_group = Fog::Compute[:aws].security_groups.detect { |sg| sg.description == 'default VPC security group' } end security_group = Fog::Compute[:aws].security_groups.create( :vpc_id => vpc.id, :name => "fog#{suffix}", :description => "fog#{suffix}" ) raises(Fog::AWS::EFS::InvalidSubnet, "invalid subnet ID: foobar#{suffix}") do Fog::AWS[:efs].create_mount_target(file_system_id, "foobar#{suffix}") end raises(Fog::AWS::EFS::NotFound, "invalid file system ID: foobar#{suffix}") do Fog::AWS[:efs].create_mount_target("foobar#{suffix}", subnet.identity) end if Fog.mocking? tests("#create_mount_target") do Fog::AWS[:efs].data[:file_systems][file_system_id]["LifeCycleState"] = 'creating' raises(Fog::AWS::EFS::IncorrectFileSystemLifeCycleState) do Fog::AWS[:efs].create_mount_target(file_system_id, subnet.identity) end Fog::AWS[:efs].data[:file_systems][file_system_id]["LifeCycleState"] = 'available' end end raises(Fog::AWS::EFS::NotFound, "invalid security group ID: foobar#{suffix}") do Fog::AWS[:efs].create_mount_target(file_system_id, subnet.identity, 'SecurityGroups' => ["foobar#{suffix}"]) end tests("#create_mount_target(#{file_system_id}, #{subnet.identity})").formats(AWS::EFS::Formats::MOUNT_TARGET_FORMAT) do Fog::AWS[:efs].create_mount_target(file_system_id, subnet.identity).body end tests("#describe_mount_targets(file_system_id: #{file_system_id})").formats(AWS::EFS::Formats::DESCRIBE_MOUNT_TARGETS_RESULT) do Fog::AWS[:efs].describe_mount_targets(:file_system_id => file_system_id).body end mount_target_id = Fog::AWS[:efs].describe_mount_targets(:file_system_id => file_system_id).body["MountTargets"].first["MountTargetId"] tests("#describe_mount_target_security_groups(#{mount_target_id})").formats(AWS::EFS::Formats::DESCRIBE_MOUNT_TARGET_SECURITY_GROUPS_FORMAT) do result = Fog::AWS[:efs].describe_mount_target_security_groups(mount_target_id).body returns([default_security_group.group_id]) { result["SecurityGroups"] } result end raises(Fog::AWS::EFS::Error, 'Must provide at least one security group.') do Fog::AWS[:efs].modify_mount_target_security_groups(mount_target_id, []) end tests("#modify_mount_target_security_groups(#{mount_target_id}, [#{security_group.group_id}])") do returns(204) do Fog::AWS[:efs].modify_mount_target_security_groups(mount_target_id, [security_group.group_id]).status end end Fog.wait_for { Fog::AWS[:efs].describe_mount_target_security_groups(mount_target_id).body["SecurityGroups"] != [default_security_group.group_id] } tests("#describe_mount_target_security_groups(#{mount_target_id})").formats(AWS::EFS::Formats::DESCRIBE_MOUNT_TARGET_SECURITY_GROUPS_FORMAT) do result = Fog::AWS[:efs].describe_mount_target_security_groups(mount_target_id).body returns([security_group.group_id]) { result["SecurityGroups"] } result end tests("#describe_mount_targets(id: #{mount_target_id})").formats(AWS::EFS::Formats::DESCRIBE_MOUNT_TARGETS_RESULT) do Fog::AWS[:efs].describe_mount_targets(:id => mount_target_id).body end raises(Fog::AWS::EFS::NotFound, 'Mount target does not exist.') do Fog::AWS[:efs].describe_mount_targets(:id => "foobar") end raises(Fog::AWS::EFS::Error, 'file system ID or mount target ID must be specified') do 
Fog::AWS[:efs].describe_mount_targets end raises(Fog::AWS::EFS::NotFound, "invalid mount target id: foobar#{suffix}") do Fog::AWS[:efs].delete_mount_target("foobar#{suffix}") end tests("#delete_mount_target(id: #{mount_target_id})") do returns(true) do result = Fog::AWS[:efs].delete_mount_target(mount_target_id) result.body.empty? end end file_system.wait_for { number_of_mount_targets == 0 } if Fog.mocking? Fog::AWS[:efs].data[:file_systems][file_system_id]["NumberOfMountTargets"] = 1 raises(Fog::AWS::EFS::FileSystemInUse) do Fog::AWS[:efs].delete_file_system(file_system_id) end Fog::AWS[:efs].data[:file_systems][file_system_id]["NumberOfMountTargets"] = 0 end raises(Fog::AWS::EFS::NotFound, "invalid file system ID: foobar#{suffix}") do Fog::AWS[:efs].delete_file_system("foobar#{suffix}") end tests("#delete_file_system") do returns(true) do result = Fog::AWS[:efs].delete_file_system(file_system_id) result.body.empty? end end security_group.destroy end end fog-aws-3.18.0/tests/requests/efs/helper.rb000066400000000000000000000024201437344660100205520ustar00rootroot00000000000000class AWS module EFS module Formats FILE_SYSTEM_FORMAT = { "CreationTime" => Float, "CreationToken" => String, "FileSystemId" => String, "LifeCycleState" => String, "Name" => Fog::Nullable::String, "NumberOfMountTargets" => Integer, "OwnerId" => String, "PerformanceMode" => String, "Encrypted" => Fog::Nullable::Boolean, "KmsKeyId" => Fog::Nullable::String, "SizeInBytes" => { "Timestamp" => Fog::Nullable::Float, "Value" => Integer } } MOUNT_TARGET_FORMAT = { "FileSystemId" => String, "IpAddress" => String, "LifeCycleState" => String, "MountTargetId" => String, "NetworkInterfaceId" => String, "OwnerId" => String, "SubnetId" => String } DESCRIBE_FILE_SYSTEMS_RESULT = { "FileSystems" => [FILE_SYSTEM_FORMAT] } DESCRIBE_MOUNT_TARGETS_RESULT = { "MountTargets" => [MOUNT_TARGET_FORMAT] } DESCRIBE_MOUNT_TARGET_SECURITY_GROUPS_FORMAT = { "SecurityGroups" => Array } end end end fog-aws-3.18.0/tests/requests/elasticache/000077500000000000000000000000001437344660100204405ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/elasticache/cache_cluster_tests.rb000066400000000000000000000115741437344660100250230ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | cache cluster requests', ['aws', 'elasticache']) do tests('success') do # Randomize the cluster ID so tests can be fequently re-run CLUSTER_ID = "fog-test-cluster-#{rand(999).to_s}" # 20 chars max! NUM_NODES = 2 # Must be > 1, because one of the tests reomves a node! tests( '#create_cache_cluster' ).formats(AWS::Elasticache::Formats::SINGLE_CACHE_CLUSTER) do body = Fog::AWS[:elasticache].create_cache_cluster(CLUSTER_ID, :num_nodes => NUM_NODES ).body cluster = body['CacheCluster'] returns(CLUSTER_ID) { cluster['CacheClusterId'] } returns('creating') { cluster['CacheClusterStatus'] } body end tests( '#describe_cache_clusters without options' ).formats(AWS::Elasticache::Formats::DESCRIBE_CACHE_CLUSTERS) do body = Fog::AWS[:elasticache].describe_cache_clusters.body returns(true, "has #{CLUSTER_ID}") do body['CacheClusters'].any? 
do |cluster| cluster['CacheClusterId'] == CLUSTER_ID end end # The DESCRIBE_CACHE_CLUSTERS format must include only one cluster # So remove all but the relevant cluster from the response body test_cluster = body['CacheClusters'].delete_if do |cluster| cluster['CacheClusterId'] != CLUSTER_ID end body end tests( '#describe_cache_clusters with cluster ID' ).formats(AWS::Elasticache::Formats::DESCRIBE_CACHE_CLUSTERS) do body = Fog::AWS[:elasticache].describe_cache_clusters(CLUSTER_ID).body returns(1, "size of 1") { body['CacheClusters'].size } returns(CLUSTER_ID, "has #{CLUSTER_ID}") do body['CacheClusters'].first['CacheClusterId'] end body end Fog::Formatador.display_line "Waiting for cluster #{CLUSTER_ID}..." Fog::AWS[:elasticache].clusters.get(CLUSTER_ID).wait_for {ready?} tests( '#describe_cache_clusters with node info' ).formats(AWS::Elasticache::Formats::CACHE_CLUSTER_RUNNING) do cluster = Fog::AWS[:elasticache].describe_cache_clusters(CLUSTER_ID, :show_node_info => true ).body['CacheClusters'].first returns(NUM_NODES, "has #{NUM_NODES} nodes") do cluster['CacheNodes'].count end cluster end tests( '#modify_cache_cluster - change a non-pending cluster attribute' ).formats(AWS::Elasticache::Formats::CACHE_CLUSTER_RUNNING) do body = Fog::AWS[:elasticache].modify_cache_cluster(CLUSTER_ID, :auto_minor_version_upgrade => false ).body # now check that parameter change is in place returns('false') { body['CacheCluster']['AutoMinorVersionUpgrade'] } body['CacheCluster'] end tests( '#reboot_cache_cluster - reboot a node' ).formats(AWS::Elasticache::Formats::CACHE_CLUSTER_RUNNING) do c = Fog::AWS[:elasticache].clusters.get(CLUSTER_ID) node_id = c.nodes.last['CacheNodeId'] Fog::Formatador.display_line "Rebooting node #{node_id}..." body = Fog::AWS[:elasticache].reboot_cache_cluster(c.id, [ node_id ]).body returns('rebooting cache cluster nodes') do body['CacheCluster']['CacheClusterStatus'] end body['CacheCluster'] end Fog::Formatador.display_line "Waiting for cluster #{CLUSTER_ID}..." Fog::AWS[:elasticache].clusters.get(CLUSTER_ID).wait_for {ready?} tests( '#modify_cache_cluster - remove a node' ).formats(AWS::Elasticache::Formats::CACHE_CLUSTER_RUNNING) do c = Fog::AWS[:elasticache].clusters.get(CLUSTER_ID) node_id = c.nodes.last['CacheNodeId'] Fog::Formatador.display_line "Removing node #{node_id}..." body = Fog::AWS[:elasticache].modify_cache_cluster(c.id, { :num_nodes => NUM_NODES - 1, :nodes_to_remove => [node_id], :apply_immediately => true, }).body returns(node_id) { body['CacheCluster']['PendingModifiedValues']['CacheNodeId'] } body['CacheCluster'] end Fog::Formatador.display_line "Waiting for cluster #{CLUSTER_ID}..." Fog::AWS[:elasticache].clusters.get(CLUSTER_ID).wait_for {ready?} tests( '#delete_cache_clusters' ).formats(AWS::Elasticache::Formats::CACHE_CLUSTER_RUNNING) do body = Fog::AWS[:elasticache].delete_cache_cluster(CLUSTER_ID).body # make sure this particular cluster is in the returned list returns(true, "has #{CLUSTER_ID}") do body['CacheClusters'].any? 
do |cluster| cluster['CacheClusterId'] == CLUSTER_ID end end # now check that it reports itself as 'deleting' cluster = body['CacheClusters'].find do |cluster| cluster['CacheClusterId'] == CLUSTER_ID end returns('deleting') { cluster['CacheClusterStatus'] } cluster end end tests('failure') do # TODO: # Create a duplicate cluster ID # List a missing cache cluster # Delete a missing cache cluster end end fog-aws-3.18.0/tests/requests/elasticache/describe_events.rb000066400000000000000000000005451437344660100241350ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | describe cache cluster events', ['aws', 'elasticache']) do tests('success') do pending if Fog.mocking? tests( '#describe_events' ).formats(AWS::Elasticache::Formats::EVENT_LIST) do Fog::AWS[:elasticache].describe_events().body['Events'] end end tests('failure') do # TODO: end end fog-aws-3.18.0/tests/requests/elasticache/describe_reserved_cache_nodes.rb000066400000000000000000000006271437344660100267640ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | describe reserved cache nodes', ['aws', 'elasticache']) do tests('success') do pending if Fog.mocking? tests( '#describe_reserved_cache_nodes' ).formats(AWS::Elasticache::Formats::RESERVED_CACHE_NODES) do Fog::AWS[:elasticache].describe_reserved_cache_nodes().body['ReservedCacheNodes'] end end tests('failure') do # TODO: end end fog-aws-3.18.0/tests/requests/elasticache/helper.rb000066400000000000000000000075261437344660100222560ustar00rootroot00000000000000class AWS module Elasticache module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } # Cache Security Groups SECURITY_GROUP = { 'EC2SecurityGroups' => Array, 'CacheSecurityGroupName' => String, 'Description' => String, 'OwnerId' => String, } SINGLE_SECURITY_GROUP = BASIC.merge('CacheSecurityGroup' => SECURITY_GROUP) DESCRIBE_SECURITY_GROUPS = {'CacheSecurityGroups' => [SECURITY_GROUP]} CACHE_SUBNET_GROUP = { 'CacheSubnetGroupName' => String, 'CacheSubnetGroupDescription' => String, 'VpcId' => String, 'Subnets' => [String] } CREATE_CACHE_SUBNET_GROUP = BASIC.merge({ 'CreateCacheSubnetGroupResult' => { 'CacheSubnetGroup' => CACHE_SUBNET_GROUP } }) DESCRIBE_CACHE_SUBNET_GROUPS = BASIC.merge({ 'DescribeCacheSubnetGroupsResult' => { 'CacheSubnetGroups' => [CACHE_SUBNET_GROUP] } }) # Cache Parameter Groups PARAMETER_GROUP = { 'CacheParameterGroupFamily' => String, 'CacheParameterGroupName' => String, 'Description' => String, } SINGLE_PARAMETER_GROUP = BASIC.merge('CacheParameterGroup' => PARAMETER_GROUP) DESCRIBE_PARAMETER_GROUPS = BASIC.merge('CacheParameterGroups' => [PARAMETER_GROUP]) MODIFY_PARAMETER_GROUP = {'CacheParameterGroupName' => String } PARAMETER_SET = { 'Parameters' => Array, 'CacheNodeTypeSpecificParameters' => Array, } ENGINE_DEFAULTS = PARAMETER_SET.merge('CacheParameterGroupFamily' => String) # Cache Clusters - more parameters get added as the lifecycle progresses CACHE_CLUSTER = { 'AutoMinorVersionUpgrade' => String, # actually TrueClass or FalseClass 'CacheSecurityGroups' => Array, 'CacheClusterId' => String, 'CacheClusterStatus' => String, 'CacheNodeType' => String, 'Engine' => String, 'EngineVersion' => String, 'CacheParameterGroup' => Hash, 'NumCacheNodes' => Integer, 'PreferredMaintenanceWindow' => String, 'CacheNodes' => Array, 'PendingModifiedValues' => Hash, } CACHE_CLUSTER_RUNNING = CACHE_CLUSTER.merge({ 'CacheClusterCreateTime' => DateTime, 'PreferredAvailabilityZone' => String, }) CACHE_CLUSTER_MODIFIED = CACHE_CLUSTER_RUNNING.merge({ 'NotificationConfiguration' => 
Hash, 'PendingModifiedValues' => Hash, }) SINGLE_CACHE_CLUSTER = BASIC.merge('CacheCluster' => CACHE_CLUSTER) DESCRIBE_CACHE_CLUSTERS = BASIC.merge('CacheClusters' => [CACHE_CLUSTER]) EVENT = { 'Date' => DateTime, 'Message' => String, 'SourceIdentifier' => String, 'SourceType' => String, } EVENT_LIST = [EVENT] RESERVED_CACHE_CLUSTER = { 'CacheNodeCount' => Integer, 'CacheNodeType' => String, 'Duration' => Integer, 'FixedPrice' => Float, 'OfferingType' => String, 'ProductDescription' => String, 'RecurringCharges' => Array, 'ReservedCacheNodeId' => String, 'ReservedCacheNodesOfferingId' => String, 'StartTime' => DateTime, 'State' => String, 'UsagePrice' => Float } RESERVED_CACHE_CLUSTER_LIST = [RESERVED_CACHE_CLUSTER] end end end fog-aws-3.18.0/tests/requests/elasticache/parameter_group_tests.rb000066400000000000000000000067111437344660100254100ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | parameter group requests', ['aws', 'elasticache']) do tests('success') do pending if Fog.mocking? name = 'fog-test' description = 'Fog Test Parameter Group' tests( '#describe_engine_default_parameters' ).formats(AWS::Elasticache::Formats::ENGINE_DEFAULTS) do response = Fog::AWS[:elasticache].describe_engine_default_parameters engine_defaults = response.body['EngineDefaults'] returns('memcached1.4') { engine_defaults['CacheParameterGroupFamily'] } engine_defaults end tests( '#create_cache_parameter_group' ).formats(AWS::Elasticache::Formats::SINGLE_PARAMETER_GROUP) do body = Fog::AWS[:elasticache].create_cache_parameter_group(name, description).body group = body['CacheParameterGroup'] returns(name) { group['CacheParameterGroupName'] } returns(description) { group['Description'] } returns('memcached1.4') { group['CacheParameterGroupFamily'] } body end tests( '#describe_cache_parameters' ).formats(AWS::Elasticache::Formats::PARAMETER_SET) do response = Fog::AWS[:elasticache].describe_cache_parameters(name) parameter_set = response.body['DescribeCacheParametersResult'] parameter_set end tests( '#describe_cache_parameter_groups without options' ).formats(AWS::Elasticache::Formats::DESCRIBE_PARAMETER_GROUPS) do body = Fog::AWS[:elasticache].describe_cache_parameter_groups.body returns(true, "has #{name}") do body['CacheParameterGroups'].any? 
do |group| group['CacheParameterGroupName'] == name end end body end tests( '#reset_cache_parameter_group completely' ).formats('CacheParameterGroupName' => String) do result = Fog::AWS[:elasticache].reset_cache_parameter_group( name ).body['ResetCacheParameterGroupResult'] returns(name) {result['CacheParameterGroupName']} result end tests( '#modify_cache_parameter_group' ).formats('CacheParameterGroupName' => String) do result = Fog::AWS[:elasticache].modify_cache_parameter_group( name, {"chunk_size" => 32} ).body['ModifyCacheParameterGroupResult'] returns(name) {result['CacheParameterGroupName']} result end # BUG: returns "MalformedInput - Unexpected complex element termination" tests( '#reset_cache_parameter_group with one parameter' ).formats('CacheParameterGroupName' => String) do pending result = Fog::AWS[:elasticache].reset_cache_parameter_group( name, ["chunk_size"] ).body['ResetCacheParameterGroupResult'] returns(name) {result['CacheParameterGroupName']} result end tests( '#describe_cache_parameter_groups with name' ).formats(AWS::Elasticache::Formats::DESCRIBE_PARAMETER_GROUPS) do body = Fog::AWS[:elasticache].describe_cache_parameter_groups(name).body returns(1, "size of 1") { body['CacheParameterGroups'].size } returns(name, "has #{name}") do body['CacheParameterGroups'].first['CacheParameterGroupName'] end body end tests( '#delete_cache_parameter_group' ).formats(AWS::Elasticache::Formats::BASIC) do body = Fog::AWS[:elasticache].delete_cache_parameter_group(name).body end end tests('failure') do # TODO: # Create a duplicate parameter group # List a missing parameter group # Delete a missing parameter group end end fog-aws-3.18.0/tests/requests/elasticache/security_group_tests.rb000066400000000000000000000071071437344660100252770ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | security group requests', ['aws', 'elasticache']) do tests('success') do name = 'fog-test' description = 'Fog Test Security Group' tests( '#create_cache_security_group' ).formats(AWS::Elasticache::Formats::SINGLE_SECURITY_GROUP) do body = Fog::AWS[:elasticache].create_cache_security_group(name, description).body group = body['CacheSecurityGroup'] returns(name) { group['CacheSecurityGroupName'] } returns(description) { group['Description'] } returns([], "no authorized security group") { group['EC2SecurityGroups'] } body end tests( '#describe_cache_security_groups without options' ).formats(AWS::Elasticache::Formats::DESCRIBE_SECURITY_GROUPS) do body = Fog::AWS[:elasticache].describe_cache_security_groups.body returns(true, "has #{name}") do body['CacheSecurityGroups'].any? 
do |group| group['CacheSecurityGroupName'] == name end end body end tests( '#describe_cache_security_groups with name' ).formats(AWS::Elasticache::Formats::DESCRIBE_SECURITY_GROUPS) do body = Fog::AWS[:elasticache].describe_cache_security_groups(name).body returns(1, "size of 1") { body['CacheSecurityGroups'].size } returns(name, "has #{name}") do body['CacheSecurityGroups'].first['CacheSecurityGroupName'] end body end tests('authorization') do ec2_group = Fog::Compute.new(:provider => 'AWS').security_groups.create( :name => 'fog-test-elasticache', :description => 'Fog Test Elasticache' ) # Reload to get the owner_id ec2_group.reload tests( '#authorize_cache_security_group_ingress' ).formats(AWS::Elasticache::Formats::SINGLE_SECURITY_GROUP) do body = Fog::AWS[:elasticache].authorize_cache_security_group_ingress( name, ec2_group.name, ec2_group.owner_id ).body group = body['CacheSecurityGroup'] expected_ec2_groups = [{ 'Status' => 'authorizing', 'EC2SecurityGroupName' => ec2_group.name, 'EC2SecurityGroupOwnerId' => ec2_group.owner_id }] returns(expected_ec2_groups, 'has correct EC2 groups') do group['EC2SecurityGroups'] end body end # Wait for the state to be active Fog.wait_for do response = Fog::AWS[:elasticache].describe_cache_security_groups(name) group = response.body['CacheSecurityGroups'].first group['EC2SecurityGroups'].all? {|ec2| ec2['Status'] == 'authorized'} end tests( '#revoke_cache_security_group_ingress' ).formats(AWS::Elasticache::Formats::SINGLE_SECURITY_GROUP) do pending if Fog.mocking? body = Fog::AWS[:elasticache].revoke_cache_security_group_ingress( name, ec2_group.name, ec2_group.owner_id ).body group = body['CacheSecurityGroup'] expected_ec2_groups = [{ 'Status' => 'revoking', 'EC2SecurityGroupName' => ec2_group.name, 'EC2SecurityGroupOwnerId' => ec2_group.owner_id }] returns(expected_ec2_groups, 'has correct EC2 groups') do group['EC2SecurityGroups'] end body end ec2_group.destroy end tests( '#delete_cache_security_group' ).formats(AWS::Elasticache::Formats::BASIC) do body = Fog::AWS[:elasticache].delete_cache_security_group(name).body end end tests('failure') do # TODO: # Create a duplicate security group # List a missing security group # Delete a missing security group end end fog-aws-3.18.0/tests/requests/elasticache/subnet_group_tests.rb000066400000000000000000000037551437344660100247350ustar00rootroot00000000000000Shindo.tests('AWS::Elasticache | subnet group requests', ['aws', 'elasticache']) do # random_differentiator # Useful when rapidly re-running tests, so we don't have to wait # serveral minutes for deleted VPCs/subnets to disappear suffix = rand(65536).to_s(16) @subnet_group_name = "fog-test-#{suffix}" vpc_range = rand(245) + 10 @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => "10.#{vpc_range}.0.0/16") # Create 4 subnets in this VPC, each one in a different AZ subnet_az = 'us-east-1a' subnet_range = 8 @subnets = (1..4).map do subnet = Fog::Compute[:aws].create_subnet(@vpc.id, "10.#{vpc_range}.#{subnet_range}.0/24", 'AvailabilityZone' => subnet_az).body['subnet'] subnet_az = subnet_az.succ subnet_range *= 2 subnet end tests('success') do subnet_ids = @subnets.map { |sn| sn['subnetId'] }.to_a tests("#create_cache_subnet_group").formats(AWS::Elasticache::Formats::CREATE_CACHE_SUBNET_GROUP) do result = Fog::AWS[:elasticache].create_cache_subnet_group(@subnet_group_name, subnet_ids, 'A subnet group').body returns(@subnet_group_name) { result['CreateCacheSubnetGroupResult']['CacheSubnetGroup']['CacheSubnetGroupName'] } returns('A subnet group') { 
result['CreateCacheSubnetGroupResult']['CacheSubnetGroup']['CacheSubnetGroupDescription'] } returns(@vpc.id) { result['CreateCacheSubnetGroupResult']['CacheSubnetGroup']['VpcId'] } returns(subnet_ids.sort) { result['CreateCacheSubnetGroupResult']['CacheSubnetGroup']['Subnets'].sort } result end tests("#describe_cache_subnet_groups").formats(AWS::Elasticache::Formats::DESCRIBE_CACHE_SUBNET_GROUPS) do Fog::AWS[:elasticache].describe_cache_subnet_groups.body end tests("#delete_cache_subnet_group").formats(AWS::Elasticache::Formats::BASIC) do Fog::AWS[:elasticache].delete_cache_subnet_group(@subnet_group_name).body end end @subnets.each do |sn| Fog::Compute[:aws].delete_subnet(sn['subnetId']) end @vpc.destroy end fog-aws-3.18.0/tests/requests/elb/000077500000000000000000000000001437344660100167355ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/elb/helper.rb000066400000000000000000000056631437344660100205530ustar00rootroot00000000000000class AWS module ELB module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } LOAD_BALANCER = { "AvailabilityZones" => Array, "BackendServerDescriptions" => Array, "CanonicalHostedZoneName" => String, "CanonicalHostedZoneNameID" => String, "CreatedTime" => Time, "DNSName" => String, "HealthCheck" => {"HealthyThreshold" => Integer, "Timeout" => Integer, "UnhealthyThreshold" => Integer, "Interval" => Integer, "Target" => String}, "Instances" => Array, "ListenerDescriptions" => [{ 'PolicyNames' => Array, 'Listener' => { 'InstancePort' => Integer, 'InstanceProtocol' => String, 'LoadBalancerPort' => Integer, 'Protocol' => String, 'SSLCertificateId' => Fog::Nullable::String } }], "LoadBalancerName" => String, "Policies" => {"LBCookieStickinessPolicies" => Array, "AppCookieStickinessPolicies" => Array, "OtherPolicies" => Array}, "Scheme" => String, "SecurityGroups" => [Fog::Nullable::String], "SourceSecurityGroup" => {"GroupName" => String, "OwnerAlias" => String}, "Subnets" => [Fog::Nullable::String] } CREATE_LOAD_BALANCER = BASIC.merge({ 'CreateLoadBalancerResult' => { 'DNSName' => String } }) DESCRIBE_LOAD_BALANCERS = BASIC.merge({ 'DescribeLoadBalancersResult' => {'LoadBalancerDescriptions' => [LOAD_BALANCER], 'NextMarker' => Fog::Nullable::String} }) POLICY_ATTRIBUTE_DESCRIPTION = { "AttributeName" => String, "AttributeValue" => String } POLICY = { "PolicyAttributeDescriptions" => [POLICY_ATTRIBUTE_DESCRIPTION], "PolicyName" => String, "PolicyTypeName" => String } DESCRIBE_LOAD_BALANCER_POLICIES = BASIC.merge({ 'DescribeLoadBalancerPoliciesResult' => { 'PolicyDescriptions' => [POLICY] } }) POLICY_ATTRIBUTE_TYPE_DESCRIPTION = { "AttributeName" => String, "AttributeType" => String, "Cardinality" => String, "DefaultValue" => String, "Description" => String } POLICY_TYPE = { "Description" => String, "PolicyAttributeTypeDescriptions" => [POLICY_ATTRIBUTE_TYPE_DESCRIPTION], "PolicyTypeName" => String } DESCRIBE_LOAD_BALANCER_POLICY_TYPES = BASIC.merge({ 'DescribeLoadBalancerPolicyTypesResult' => {'PolicyTypeDescriptions' => [POLICY_TYPE] } }) CONFIGURE_HEALTH_CHECK = BASIC.merge({ 'ConfigureHealthCheckResult' => {'HealthCheck' => { 'Target' => String, 'Interval' => Integer, 'Timeout' => Integer, 'UnhealthyThreshold' => Integer, 'HealthyThreshold' => Integer }} }) DELETE_LOAD_BALANCER = BASIC.merge({ 'DeleteLoadBalancerResult' => NilClass }) end end end fog-aws-3.18.0/tests/requests/elb/listener_tests.rb000066400000000000000000000067311437344660100223400ustar00rootroot00000000000000Shindo.tests('AWS::ELB | listener_tests', ['aws', 'elb']) do 
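# These listener tests build a throwaway ELB with a plain HTTP listener, upload the fixture IAM server certificate, and then exercise create_load_balancer_listeners / delete_load_balancer_listeners and set_load_balancer_listener_ssl_certificate, including the failure modes for missing or invalid SSL certificate ARNs and for mismatched Protocol / InstanceProtocol pairs.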
@load_balancer_id = 'fog-test-listener' @key_name = 'fog-test' tests('success') do Fog::AWS[:elb].create_load_balancer(['us-east-1a'], @load_balancer_id, [{'LoadBalancerPort' => 80, 'InstancePort' => 80, 'Protocol' => 'HTTP'}]) @certificate = Fog::AWS[:iam].upload_server_certificate(AWS::IAM::SERVER_CERT, AWS::IAM::SERVER_CERT_PRIVATE_KEY, @key_name).body['Certificate'] tests("#create_load_balancer_listeners").formats(AWS::ELB::Formats::BASIC) do listeners = [ {'Protocol' => 'TCP', 'InstanceProtocol' => 'TCP', 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'SSLCertificateId' => @certificate['Arn']}, {'Protocol' => 'HTTP', 'InstanceProtocol' => 'HTTP', 'LoadBalancerPort' => 80, 'InstancePort' => 80} ] response = Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners).body response end tests("#delete_load_balancer_listeners").formats(AWS::ELB::Formats::BASIC) do ports = [80, 443] Fog::AWS[:elb].delete_load_balancer_listeners(@load_balancer_id, ports).body end tests("#create_load_balancer_listeners with non-existant SSL certificate") do listeners = [ {'Protocol' => 'HTTPS', 'InstanceProtocol' => 'HTTPS', 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'SSLCertificateId' => 'non-existant'}, ] raises(Fog::AWS::IAM::NotFound) { Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners) } end tests("#create_load_balancer_listeners with invalid SSL certificate").raises(Fog::AWS::IAM::NotFound) do sleep 8 unless Fog.mocking? listeners = [ {'Protocol' => 'HTTPS', 'InstanceProtocol' => 'HTTPS', 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'SSLCertificateId' => "#{@certificate['Arn']}fake"}, ] Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners).body end # This is sort of fucked up, but it may or may not fail, thanks AWS tests("#create_load_balancer_listeners with SSL certificate").formats(AWS::ELB::Formats::BASIC) do sleep 8 unless Fog.mocking? 
listeners = [ {'Protocol' => 'HTTPS', 'InstanceProtocol' => 'HTTPS', 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'SSLCertificateId' => @certificate['Arn']}, ] Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners).body end tests("#set_load_balancer_listener_ssl_certificate").formats(AWS::ELB::Formats::BASIC) do Fog::AWS[:elb].set_load_balancer_listener_ssl_certificate(@load_balancer_id, 443, @certificate['Arn']).body end tests("#create_load_balancer_listeners with invalid Protocol and InstanceProtocol configuration").raises(Fog::AWS::ELB::ValidationError) do listeners = [ {'Protocol' => 'HTTP', 'InstanceProtocol' => 'TCP', 'LoadBalancerPort' => 80, 'InstancePort' => 80}, ] Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners).body end tests("#create_load_balancer_listeners with valid Protocol and InstanceProtocol configuration").formats(AWS::ELB::Formats::BASIC) do listeners = [ {'Protocol' => 'HTTP', 'InstanceProtocol' => 'HTTPS', 'LoadBalancerPort' => 80, 'InstancePort' => 80}, ] Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners).body end Fog::AWS[:iam].delete_server_certificate(@key_name) Fog::AWS[:elb].delete_load_balancer(@load_balancer_id) end end fog-aws-3.18.0/tests/requests/elb/load_balancer_tests.rb000066400000000000000000000074031437344660100232560ustar00rootroot00000000000000Shindo.tests('AWS::ELB | load_balancer_tests', ['aws', 'elb']) do @load_balancer_id = 'fog-test-elb' @key_name = 'fog-test' tests('success') do if (Fog::AWS[:iam].get_server_certificate(@key_name) rescue nil) Fog::AWS[:iam].delete_server_certificate(@key_name) end @certificate = Fog::AWS[:iam].upload_server_certificate(AWS::IAM::SERVER_CERT, AWS::IAM::SERVER_CERT_PRIVATE_KEY, @key_name).body['Certificate'] tests("#create_load_balancer").formats(AWS::ELB::Formats::CREATE_LOAD_BALANCER) do zones = ['us-east-1a'] listeners = [{'LoadBalancerPort' => 80, 'InstancePort' => 80, 'InstanceProtocol' => 'HTTP', 'Protocol' => 'HTTP'}] Fog::AWS[:elb].create_load_balancer(zones, @load_balancer_id, listeners).body end tests("#describe_load_balancers").formats(AWS::ELB::Formats::DESCRIBE_LOAD_BALANCERS) do Fog::AWS[:elb].describe_load_balancers.body end tests('#describe_load_balancers with bad lb') do raises(Fog::AWS::ELB::NotFound) { Fog::AWS[:elb].describe_load_balancers('LoadBalancerNames' => 'none-such-lb') } end tests("#describe_load_balancers with SSL listener") do sleep 5 unless Fog.mocking? listeners = [ {'Protocol' => 'HTTPS', 'LoadBalancerPort' => 443, 'InstancePort' => 443, 'SSLCertificateId' => @certificate['Arn']}, ] Fog::AWS[:elb].create_load_balancer_listeners(@load_balancer_id, listeners) response = Fog::AWS[:elb].describe_load_balancers('LoadBalancerNames' => @load_balancer_id).body tests("SSLCertificateId is set").returns(@certificate['Arn']) do listeners = response["DescribeLoadBalancersResult"]["LoadBalancerDescriptions"].first["ListenerDescriptions"] listeners.find {|l| l["Listener"]["Protocol"] == 'HTTPS' }["Listener"]["SSLCertificateId"] end end tests("modify_load_balancer_attributes") do attributes = { 'ConnectionDraining' => {'Enabled' => true, 'Timeout' => 600}, 'CrossZoneLoadBalancing' => {'Enabled' => true}, 'ConnectionSettings' => {'IdleTimeout' => 180} } Fog::AWS[:elb].modify_load_balancer_attributes(@load_balancer_id, attributes).body response = Fog::AWS[:elb].describe_load_balancer_attributes(@load_balancer_id). 
body['DescribeLoadBalancerAttributesResult']['LoadBalancerAttributes'] tests("ConnectionDraining is enabled") do response['ConnectionDraining']['Enabled'] == true end tests("ConnectionDraining has a 600 second Timeout").returns(600) do response['ConnectionDraining']['Timeout'] end tests("ConnectionSettings has a 180 second IdleTimeout").returns(180) do response['ConnectionSettings']['IdleTimeout'] end tests("CrossZoneLoadBalancing is enabled") do response['CrossZoneLoadBalancing']['Enabled'] == true end end tests("#configure_health_check").formats(AWS::ELB::Formats::CONFIGURE_HEALTH_CHECK) do health_check = { 'Target' => 'HTTP:80/index.html', 'Interval' => 10, 'Timeout' => 5, 'UnhealthyThreshold' => 2, 'HealthyThreshold' => 3 } Fog::AWS[:elb].configure_health_check(@load_balancer_id, health_check).body end tests("#delete_load_balancer").formats(AWS::ELB::Formats::DELETE_LOAD_BALANCER) do Fog::AWS[:elb].delete_load_balancer(@load_balancer_id).body end tests("#delete_load_balancer when non existant").formats(AWS::ELB::Formats::DELETE_LOAD_BALANCER) do Fog::AWS[:elb].delete_load_balancer('non-existant').body end tests("#delete_load_balancer when already deleted").formats(AWS::ELB::Formats::DELETE_LOAD_BALANCER) do Fog::AWS[:elb].delete_load_balancer(@load_balancer_id).body end Fog::AWS[:iam].delete_server_certificate(@key_name) end end fog-aws-3.18.0/tests/requests/elb/policy_tests.rb000066400000000000000000000145121437344660100220060ustar00rootroot00000000000000Shindo.tests('AWS::ELB | policy_tests', ['aws', 'elb']) do @load_balancer_id = 'fog-test-policies' tests('success') do listeners = [{'LoadBalancerPort' => 80, 'InstancePort' => 80, 'Protocol' => 'HTTP'}] Fog::AWS[:elb].create_load_balancer(['us-east-1a'], @load_balancer_id, listeners) tests("#describe_load_balancer_policy_types").formats(AWS::ELB::Formats::DESCRIBE_LOAD_BALANCER_POLICY_TYPES) do @policy_types = Fog::AWS[:elb].describe_load_balancer_policy_types.body end tests("#create_app_cookie_stickiness_policy").formats(AWS::ELB::Formats::BASIC) do cookie, policy = 'fog-app-cookie', 'fog-app-policy' Fog::AWS[:elb].create_app_cookie_stickiness_policy(@load_balancer_id, policy, cookie).body end tests("#create_lb_cookie_stickiness_policy with expiry").formats(AWS::ELB::Formats::BASIC) do policy = 'fog-lb-expiry' expiry = 300 Fog::AWS[:elb].create_lb_cookie_stickiness_policy(@load_balancer_id, policy, expiry).body end tests("#create_lb_cookie_stickiness_policy without expiry").formats(AWS::ELB::Formats::BASIC) do policy = 'fog-lb-no-expiry' Fog::AWS[:elb].create_lb_cookie_stickiness_policy(@load_balancer_id, policy).body end tests("#create_load_balancer_policy").formats(AWS::ELB::Formats::BASIC) do policy = 'fog-policy' Fog::AWS[:elb].create_load_balancer_policy(@load_balancer_id, policy, 'PublicKeyPolicyType', {'PublicKey' => AWS::IAM::SERVER_CERT_PUBLIC_KEY}).body end tests("#describe_load_balancer_policies") do body = Fog::AWS[:elb].describe_load_balancer_policies(@load_balancer_id).body formats(AWS::ELB::Formats::DESCRIBE_LOAD_BALANCER_POLICIES) { body } # Check the result of each policy by name returns({ "PolicyAttributeDescriptions"=>[{ "AttributeName"=>"CookieName", "AttributeValue"=>"fog-app-cookie" }], "PolicyName"=>"fog-app-policy", "PolicyTypeName"=>"AppCookieStickinessPolicyType" }) do body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"].find{|e| e['PolicyName'] == 'fog-app-policy' } end returns({ "PolicyAttributeDescriptions"=>[{ "AttributeName"=>"CookieExpirationPeriod", "AttributeValue"=>"300" }], 
"PolicyName"=>"fog-lb-expiry", "PolicyTypeName"=>"LBCookieStickinessPolicyType" }) do body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"].find{|e| e['PolicyName'] == 'fog-lb-expiry' } end returns({ "PolicyAttributeDescriptions"=>[{ "AttributeName"=>"CookieExpirationPeriod", "AttributeValue"=>"0" }], "PolicyName"=>"fog-lb-no-expiry", "PolicyTypeName"=>"LBCookieStickinessPolicyType" }) do body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"].find{|e| e['PolicyName'] == 'fog-lb-no-expiry' } end returns({ "PolicyAttributeDescriptions"=>[{ "AttributeName"=>"PublicKey", "AttributeValue"=> AWS::IAM::SERVER_CERT_PUBLIC_KEY }], "PolicyName"=>"fog-policy", "PolicyTypeName"=>"PublicKeyPolicyType" }) do body["DescribeLoadBalancerPoliciesResult"]["PolicyDescriptions"].find{|e| e['PolicyName'] == 'fog-policy' } end end tests("#describe_load_balancer includes all policies") do lb = Fog::AWS[:elb].describe_load_balancers("LoadBalancerNames" => [@load_balancer_id]).body["DescribeLoadBalancersResult"]["LoadBalancerDescriptions"].first returns([ {"PolicyName"=>"fog-app-policy", "CookieName"=>"fog-app-cookie"} ]) { lb["Policies"]["AppCookieStickinessPolicies"] } returns([ {"PolicyName"=>"fog-lb-expiry", "CookieExpirationPeriod"=> 300} ]) { lb["Policies"]["LBCookieStickinessPolicies"].select{|e| e["PolicyName"] == "fog-lb-expiry"} } returns([ {"PolicyName" => "fog-lb-no-expiry"} ]) { lb["Policies"]["LBCookieStickinessPolicies"].select{|e| e["PolicyName"] == "fog-lb-no-expiry"} } returns([ "fog-policy" ]) { lb["Policies"]["OtherPolicies"] } end tests("#delete_load_balancer_policy").formats(AWS::ELB::Formats::BASIC) do policy = 'fog-lb-no-expiry' Fog::AWS[:elb].delete_load_balancer_policy(@load_balancer_id, policy).body end tests("#set_load_balancer_policies_of_listener adds policy").formats(AWS::ELB::Formats::BASIC) do port, policies = 80, ['fog-lb-expiry'] Fog::AWS[:elb].set_load_balancer_policies_of_listener(@load_balancer_id, port, policies).body end tests("#set_load_balancer_policies_of_listener removes policy").formats(AWS::ELB::Formats::BASIC) do port = 80 Fog::AWS[:elb].set_load_balancer_policies_of_listener(@load_balancer_id, port, []).body end proxy_policy = "EnableProxyProtocol" Fog::AWS[:elb].create_load_balancer_policy(@load_balancer_id, proxy_policy, 'ProxyProtocolPolicyType', { "ProxyProtocol" => true }) tests("#set_load_balancer_policies_for_backend_server replaces policies on port").formats(AWS::ELB::Formats::BASIC) do Fog::AWS[:elb].set_load_balancer_policies_for_backend_server(@load_balancer_id, 80, [proxy_policy]).body end tests("#describe_load_balancers has other policies") do Fog::AWS[:elb].set_load_balancer_policies_for_backend_server(@load_balancer_id, 80, [proxy_policy]).body description = Fog::AWS[:elb].describe_load_balancers("LoadBalancerNames" => [@load_balancer_id]).body["DescribeLoadBalancersResult"]["LoadBalancerDescriptions"].first returns(true) { description["Policies"]["OtherPolicies"].include?(proxy_policy) } end Fog::AWS[:elb].delete_load_balancer(@load_balancer_id) end end fog-aws-3.18.0/tests/requests/elbv2/000077500000000000000000000000001437344660100172055ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/elbv2/helper.rb000066400000000000000000000036131437344660100210140ustar00rootroot00000000000000class AWS module ELBV2 module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } LOAD_BALANCER = { "AvailabilityZones" => [{ "SubnetId" => String, "ZoneName" => String, "LoadBalancerAddresses" => [Fog::Nullable::Hash] }], 
"LoadBalancerArn" => String, "DNSName" => String, "CreatedTime" => Time, "LoadBalancerName" => String, "VpcId" => String, "CanonicalHostedZoneId" => String, "Scheme" => String, "Type" => String, "State" => {"Code" => String}, "SecurityGroups" => [Fog::Nullable::String] } DESCRIBE_LOAD_BALANCERS = BASIC.merge({ 'DescribeLoadBalancersResult' => {'LoadBalancers' => [LOAD_BALANCER], 'NextMarker' => Fog::Nullable::String} }) CREATE_LOAD_BALANCER = BASIC.merge({ 'CreateLoadBalancerResult' => {'LoadBalancers' => [LOAD_BALANCER]} }) LISTENER_DEFAULT_ACTIONS = [{ "Type" => String, "Order" => String, "TargetGroupArn" => String, "RedirectConfig" => Fog::Nullable::Hash, "ForwardConfig" => Fog::Nullable::Hash, "FixedResponseConfig" => Fog::Nullable::Hash }] LISTENER = { "LoadBalancerArn" => String, "Protocol" => String, "Port" => String, "ListenerArn" => String, "SslPolicy" => String, "DefaultActions" => LISTENER_DEFAULT_ACTIONS, "Certificates" => [{"CertificateArn" => String}] } DESCRIBE_LISTENERS = BASIC.merge({ 'DescribeListenersResult' => {'Listeners' => [LISTENER], 'NextMarker' => Fog::Nullable::String} }) TAG_DESCRIPTIONS = [{ "Tags" => Hash, "ResourceArn" => String }] DESCRIBE_TAGS = BASIC.merge({ 'DescribeTagsResult' => {'TagDescriptions' => TAG_DESCRIPTIONS} }) end end end fog-aws-3.18.0/tests/requests/elbv2/load_balancer_tests.rb000066400000000000000000000042171437344660100235260ustar00rootroot00000000000000Shindo.tests('AWS::ELBV2 | load_balancer_tests', ['aws', 'elb']) do @load_balancer_id = 'fog-test-elb' @key_name = 'fog-test' vpc = Fog::Compute[:aws].create_vpc('10.255.254.64/28').body['vpcSet'].first @subnet_id = Fog::Compute[:aws].create_subnet(vpc['vpcId'], vpc['cidrBlock']).body['subnet']['subnetId'] @tags = { 'test1' => 'Value1', 'test2' => 'Value2' } tests('success') do tests('#create_load_balancer').formats(AWS::ELBV2::Formats::CREATE_LOAD_BALANCER) do options = { subnets: [@subnet_id] } load_balancer = Fog::AWS[:elbv2].create_load_balancer(@load_balancer_id, options).body @load_balancer_arn = load_balancer['CreateLoadBalancerResult']['LoadBalancers'].first['LoadBalancerArn'] load_balancer end tests('#describe_load_balancers').formats(AWS::ELBV2::Formats::DESCRIBE_LOAD_BALANCERS) do Fog::AWS[:elbv2].describe_load_balancers.body end tests('#describe_load_balancers with bad name') do raises(Fog::AWS::ELBV2::NotFound) { Fog::AWS[:elbv2].describe_load_balancers('LoadBalancerNames' => 'none-such-lb') } end tests("#add_tags('#{@load_balancer_arn}', #{@tags})").formats(AWS::ELBV2::Formats::BASIC) do Fog::AWS[:elbv2].add_tags(@load_balancer_arn, @tags).body end tests('#describe_tags').formats(AWS::ELBV2::Formats::DESCRIBE_TAGS) do Fog::AWS[:elbv2].describe_tags(@load_balancer_arn).body end tests('#describe_tags with at least one wrong arn') do raises(Fog::AWS::ELBV2::NotFound) { Fog::AWS[:elbv2].describe_tags([@load_balancer_arn, 'wrong_arn']) } end tests("#describe_tags(#{@load_balancer_arn})").returns(@tags) do Fog::AWS[:elbv2].describe_tags(@load_balancer_arn).body['DescribeTagsResult']['TagDescriptions'].first['Tags'] end tests("#remove_tags('#{@load_balancer_arn}', #{@tags.keys})").formats(AWS::ELBV2::Formats::BASIC) do Fog::AWS[:elbv2].remove_tags(@load_balancer_arn, @tags.keys).body end tests("#describe_tags(#{@load_balancer_arn})").returns({}) do Fog::AWS[:elbv2].describe_tags(@load_balancer_arn).body['DescribeTagsResult']['TagDescriptions'].first['Tags'] end end end 
fog-aws-3.18.0/tests/requests/emr/000077500000000000000000000000001437344660100167565ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/emr/helper.rb000066400000000000000000000115471437344660100205720ustar00rootroot00000000000000class AWS module EMR module Formats BASIC = { 'RequestId' => String } RUN_JOB_FLOW = BASIC.merge({ 'JobFlowId' => String }) ADD_INSTANCE_GROUPS = { 'JobFlowId' => String, 'InstanceGroupIds' => Array } SIMPLE_DESCRIBE_JOB_FLOW = { 'JobFlows' => [{ 'Name' => String, 'BootstrapActions' => { 'ScriptBootstrapActionConfig' => { 'Args' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String, 'LastStateChangeReason' => String }, 'Steps' => [{ 'ActionOnFailure' => String, 'Name' => String, 'StepConfig' => { 'HadoopJarStepConfig' => { 'MainClass' => String, 'Jar' => String, 'Args' => Array, 'Properties' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String } }], 'JobFlowId' => String, 'Instances' => { 'InstanceCount' => String, 'NormalizedInstanceHours' => String, 'KeepJobFlowAliveWhenNoSteps' => String, 'Placement' => { 'AvailabilityZone' => String }, 'MasterInstanceType' => String, 'SlaveInstanceType' => String, 'InstanceGroups' => Array, 'TerminationProtected' => String, 'HadoopVersion' => String } }] } JOB_FLOW_WITHOUT_CHANGE = { 'JobFlows' => [{ 'Name' => String, 'BootstrapActions' => { 'ScriptBootstrapActionConfig' => { 'Args' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String, 'LastStateChangeReason' => NilClass }, 'Steps' => [{ 'ActionOnFailure' => String, 'Name' => String, 'StepConfig' => { 'HadoopJarStepConfig' => { 'MainClass' => String, 'Jar' => String, 'Args' => Array, 'Properties' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String } }], 'JobFlowId' => String, 'Instances' => { 'InstanceCount' => String, 'NormalizedInstanceHours' => String, 'KeepJobFlowAliveWhenNoSteps' => String, 'Placement' => { 'AvailabilityZone' => String }, 'MasterInstanceType' => String, 'SlaveInstanceType' => String, 'InstanceGroups' => Array, 'TerminationProtected' => String, 'HadoopVersion' => String } }] } DESCRIBE_JOB_FLOW_WITH_INSTANCE_GROUPS = { 'JobFlows' => [{ 'Name' => String, 'BootstrapActions' => { 'ScriptBootstrapActionConfig' => { 'Args' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String, 'LastStateChangeReason' => NilClass }, 'Steps' => [{ 'ActionOnFailure' => String, 'Name' => String, 'StepConfig' => { 'HadoopJarStepConfig' => { 'MainClass' => String, 'Jar' => String, 'Args' => Array, 'Properties' => Array } }, 'ExecutionStatusDetail' => { 'CreationDateTime' => String, 'State' => String } }], 'JobFlowId' => String, 'Instances' => { 'InstanceCount' => String, 'NormalizedInstanceHours' => String, 'KeepJobFlowAliveWhenNoSteps' => String, 'Placement' => { 'AvailabilityZone' => String }, 'InstanceGroups' => [{ 'Name' => String, 'InstanceRole' => String, 'CreationDateTime' => String, 'LastStateChangeReason' => nil, 'InstanceGroupId' => String, 'Market' => String, 'InstanceType' => String, 'State' => String, 'InstanceRunningCount' => String, 'InstanceRequestCount' => String }], 'MasterInstanceType' => String, 'SlaveInstanceType' => String, 'TerminationProtected' => String, 'HadoopVersion' => String } }] } end end end fog-aws-3.18.0/tests/requests/emr/instance_group_tests.rb000066400000000000000000000063641437344660100235560ustar00rootroot00000000000000Shindo.tests('AWS::EMR | instance 
groups', ['aws', 'emr']) do pending if Fog.mocking? @job_flow_name = "fog_job_flow_#{Time.now.to_f.to_s.gsub('.','')}" @job_flow_options = { 'Instances' => { 'MasterInstanceType' => 'm1.small', 'SlaveInstanceType' => 'm1.small', 'InstanceCount' => 2, 'Placement' => { 'AvailabilityZone' => 'us-east-1a' }, 'KeepJobFlowAliveWhenNoSteps' => false, 'TerminationProtected' => false, 'HadoopVersion' => '0.20' } } @job_flow_steps = { 'Steps' => [{ 'Name' => 'Dummy streaming job', 'ActionOnFailure' => 'CONTINUE', 'HadoopJarStep' => { 'Jar' => '/home/hadoop/contrib/streaming/hadoop-streaming.jar', 'MainClass' => nil, 'Args' => %w(-input s3n://elasticmapreduce/samples/wordcount/input -output hdfs:///examples/output/2011-11-03T090856 -mapper s3n://elasticmapreduce/samples/wordcount/wordSplitter.py -reducer aggregate) } }] } @instance_group_name = "fog_instance_group_#{Time.now.to_f.to_s.gsub('.','')}" @instance_groups = { 'InstanceGroups' => [{ 'Name' => @instance_group_name, 'InstanceRole' => 'TASK', 'InstanceType' => 'm1.small', 'InstanceCount' => 2 }] } result = Fog::AWS[:emr].run_job_flow(@job_flow_name, @job_flow_options).body @job_flow_id = result['JobFlowId'] tests('success') do tests("#add_instance_groups").formats(AWS::EMR::Formats::ADD_INSTANCE_GROUPS) do pending if Fog.mocking? result = Fog::AWS[:emr].add_instance_groups(@job_flow_id, @instance_groups).body @instance_group_id = result['InstanceGroupIds'].first result end tests("#describe_job_flows_with_instance_groups").formats(AWS::EMR::Formats::DESCRIBE_JOB_FLOW_WITH_INSTANCE_GROUPS) do pending if Fog.mocking? result = Fog::AWS[:emr].describe_job_flows('JobFlowIds' => [@job_flow_id]).body result end tests("#modify_instance_groups").formats(AWS::EMR::Formats::BASIC) do pending if Fog.mocking? # Add a step so the state doesn't go directly from STARTING to SHUTTING_DOWN Fog::AWS[:emr].add_job_flow_steps(@job_flow_id, @job_flow_steps) # Wait until job has started before modifying the instance group begin sleep 10 result = Fog::AWS[:emr].describe_job_flows('JobFlowIds' => [@job_flow_id]).body job_flow = result['JobFlows'].first state = job_flow['ExecutionStatusDetail']['State'] print "." end while(state == 'STARTING') # Check results result = Fog::AWS[:emr].modify_instance_groups('InstanceGroups' => [{'InstanceGroupId' => @instance_group_id, 'InstanceCount' => 4}]).body # Check the it actually modified the instance count tests("modify worked?") do ig_res = Fog::AWS[:emr].describe_job_flows('JobFlowIds' => [@job_flow_id]).body matched = false jf = ig_res['JobFlows'].first jf['Instances']['InstanceGroups'].each do | ig | if ig['InstanceGroupId'] == @instance_group_id matched = true if ig['InstanceRequestCount'].to_i == 4 end end matched end result end end Fog::AWS[:emr].terminate_job_flows('JobFlowIds' => [@job_flow_id]) end fog-aws-3.18.0/tests/requests/emr/job_flow_tests.rb000066400000000000000000000043751437344660100223370ustar00rootroot00000000000000Shindo.tests('AWS::EMR | job flows', ['aws', 'emr']) do pending if Fog.mocking? 
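# The job flow options below describe a minimal two-node cluster (m1.small master and slave) in us-east-1a running Hadoop 0.20 that is not termination protected and shuts itself down once it has no more steps to run.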
@job_flow_name = "fog_job_flow_#{Time.now.to_f.to_s.gsub('.','')}" @job_flow_options = { 'Instances' => { 'MasterInstanceType' => 'm1.small', 'SlaveInstanceType' => 'm1.small', 'InstanceCount' => 2, 'Placement' => { 'AvailabilityZone' => 'us-east-1a' }, 'KeepJobFlowAliveWhenNoSteps' => false, 'TerminationProtected' => false, 'HadoopVersion' => '0.20' } } @step_name = "fog_job_flow_step_#{Time.now.to_f.to_s.gsub('.','')}" @job_flow_steps = { 'Steps' => [{ 'Name' => @step_name, 'ActionOnFailure' => 'CONTINUE', 'HadoopJarStep' => { 'Jar' => 'FakeJar', 'MainClass' => 'FakeMainClass', 'Args' => ['arg1', 'arg2'] } }] } @job_flow_id = nil tests('success') do tests("#run_job_flow").formats(AWS::EMR::Formats::RUN_JOB_FLOW) do pending if Fog.mocking? result = Fog::AWS[:emr].run_job_flow(@job_flow_name, @job_flow_options).body @job_flow_id = result['JobFlowId'] result end tests("#add_job_flow_steps").formats(AWS::EMR::Formats::BASIC) do pending if Fog.mocking? result = Fog::AWS[:emr].add_job_flow_steps(@job_flow_id, @job_flow_steps).body result end tests("#set_termination_protection").formats(AWS::EMR::Formats::BASIC) do result = Fog::AWS[:emr].set_termination_protection(true, 'JobFlowIds' => [@job_flow_id]).body test("protected?") do res = Fog::AWS[:emr].describe_job_flows('JobFlowIds' => [@job_flow_id]).body jf = res['JobFlows'].first jf['Instances']['TerminationProtected'] == 'true' end result end tests("#terminate_job_flow").formats(AWS::EMR::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:emr].set_termination_protection(false, 'JobFlowIds' => [@job_flow_id]) result = Fog::AWS[:emr].terminate_job_flows('JobFlowIds' => [@job_flow_id]).body result end tests("#describe_job_flows").formats(AWS::EMR::Formats::SIMPLE_DESCRIBE_JOB_FLOW) do pending if Fog.mocking? result = Fog::AWS[:emr].describe_job_flows('JobFlowIds' => [@job_flow_id]).body result end end end fog-aws-3.18.0/tests/requests/federation/000077500000000000000000000000001437344660100203135ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/federation/get_signin_token_tests.rb000066400000000000000000000004331437344660100254100ustar00rootroot00000000000000Shindo.tests('AWS::Federation | signin tokens', ['aws']) do @signin_token_format = { 'SigninToken' => String } tests("#get_signin_token").formats(@signin_token_format) do pending unless Fog.mocking? Fog::AWS[:federation].get_signin_token("test_policy") end end fog-aws-3.18.0/tests/requests/glacier/000077500000000000000000000000001437344660100176015ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/glacier/archive_tests.rb000066400000000000000000000007141437344660100227730ustar00rootroot00000000000000Shindo.tests('AWS::Glacier | glacier archive tests', ['aws']) do pending if Fog.mocking? Fog::AWS[:glacier].create_vault('Fog-Test-Vault-upload') tests('single part upload') do id = Fog::AWS[:glacier].create_archive('Fog-Test-Vault-upload', 'data body').headers['x-amz-archive-id'] Fog::AWS[:glacier].delete_archive('Fog-Test-Vault-upload', id) end #amazon won't let us delete the vault because it has been written to in the past day end fog-aws-3.18.0/tests/requests/glacier/multipart_upload_tests.rb000066400000000000000000000030261437344660100247360ustar00rootroot00000000000000Shindo.tests('AWS::Glacier | glacier archive tests', ['aws']) do pending if Fog.mocking? 
Fog::AWS[:glacier].create_vault('Fog-Test-Vault-upload') tests('initiate and abort') do id = Fog::AWS[:glacier].initiate_multipart_upload('Fog-Test-Vault-upload', 1024*1024).headers['x-amz-multipart-upload-id'] returns(true){ Fog::AWS[:glacier].list_multipart_uploads('Fog-Test-Vault-upload').body['UploadsList'].map {|item| item['MultipartUploadId']}.include?(id)} Fog::AWS[:glacier].abort_multipart_upload('Fog-Test-Vault-upload', id) returns(false){ Fog::AWS[:glacier].list_multipart_uploads('Fog-Test-Vault-upload').body['UploadsList'].map {|item| item['MultipartUploadId']}.include?(id)} end tests('do multipart upload') do hash = Fog::AWS::Glacier::TreeHash.new id = Fog::AWS[:glacier].initiate_multipart_upload('Fog-Test-Vault-upload', 1024*1024).headers['x-amz-multipart-upload-id'] part = 't'*1024*1024 hash_for_part = hash.add_part(part) Fog::AWS[:glacier].upload_part('Fog-Test-Vault-upload', id, part, 0, hash_for_part) part_2 = 'u'*1024*1024 hash_for_part_2 = hash.add_part(part_2) Fog::AWS[:glacier].upload_part('Fog-Test-Vault-upload', id, part_2, 1024*1024, hash_for_part_2) archive = Fog::AWS[:glacier].complete_multipart_upload('Fog-Test-Vault-upload', id, 2*1024*1024, hash.hexdigest).headers['x-amz-archive-id'] Fog::AWS[:glacier].delete_archive('Fog-Test-Vault-upload', archive) #amazon won't let us delete the vault because it has been written to in the past day end end fog-aws-3.18.0/tests/requests/glacier/tree_hash_tests.rb000066400000000000000000000100611437344660100233100ustar00rootroot00000000000000Shindo.tests('AWS::Glacier | glacier tree hash calculation', ['aws']) do tests('tree_hash(single part < 1MB)') do returns(OpenSSL::Digest::SHA256.hexdigest('')) { Fog::AWS::Glacier::TreeHash.digest('')} end tests('tree_hash(multibyte characters)') do body = ("\xC2\xA1" * 1024*1024) body.force_encoding('UTF-8') if body.respond_to?
:encoding expected = OpenSSL::Digest::SHA256.hexdigest( OpenSSL::Digest::SHA256.digest("\xC2\xA1" * 1024*512) + OpenSSL::Digest::SHA256.digest("\xC2\xA1" * 1024*512) ) returns(expected) { Fog::AWS::Glacier::TreeHash.digest(body)} end tests('tree_hash(power of 2 number of parts)') do body = ('x' * 1024*1024) + ('y'*1024*1024) + ('z'*1024*1024) + ('t'*1024*1024) expected = OpenSSL::Digest::SHA256.hexdigest( OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('x' * 1024*1024) + OpenSSL::Digest::SHA256.digest('y' * 1024*1024) ) + OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('z' * 1024*1024) + OpenSSL::Digest::SHA256.digest('t' * 1024*1024) ) ) returns(expected) { Fog::AWS::Glacier::TreeHash.digest(body)} end tests('tree_hash(non power of 2 number of parts)') do body = ('x' * 1024*1024) + ('y'*1024*1024) + ('z'*1024*1024) expected = OpenSSL::Digest::SHA256.hexdigest( OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('x' * 1024*1024) + OpenSSL::Digest::SHA256.digest('y' * 1024*1024) ) + OpenSSL::Digest::SHA256.digest('z' * 1024*1024) ) returns(expected) { Fog::AWS::Glacier::TreeHash.digest(body)} end tests('multipart') do tree_hash = Fog::AWS::Glacier::TreeHash.new part = ('x' * 1024*1024) + ('y'*1024*1024) returns(Fog::AWS::Glacier::TreeHash.digest(part)) { tree_hash.add_part part } tree_hash.add_part('z'* 1024*1024 + 't'*1024*1024) expected = OpenSSL::Digest::SHA256.hexdigest( OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('x' * 1024*1024) + OpenSSL::Digest::SHA256.digest('y' * 1024*1024) ) + OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('z' * 1024*1024) + OpenSSL::Digest::SHA256.digest('t' * 1024*1024) ) ) returns(expected) { tree_hash.hexdigest} end # Aligned is used in general sense of https://en.wikipedia.org/wiki/Data_structure_alignment # except we are not dealing with data in memory, but with parts in "virtual" space of whole file. # Tests for https://github.com/fog/fog-aws/issues/520 and https://github.com/fog/fog-aws/issues/521 tests('multipart with unaligned parts') do tree_hash = Fog::AWS::Glacier::TreeHash.new part = ('x' * 512*1024) returns(Fog::AWS::Glacier::TreeHash.digest(part)) { tree_hash.add_part part } # At this point, we have 0.5MB in tree_hash. That means that the next part we add will not be aligned, # because it will start on 0.5MB which is not 1MB boundary. part2 = ('x' * 512*1024) + ('y'*1024*1024) + ('z'* 512*1024) returns(Fog::AWS::Glacier::TreeHash.digest(part + part2)) { tree_hash.add_part part2 ; tree_hash.hexdigest } # Here we are adding another 1.5MB to tree_hash which has size of 3.5MB. Again, 3.5MB is not on 1MB boundary, # so this is another unaligned part. It does test different part of code, though. tree_hash.add_part('z'* 512*1024 + 't'*1024*1024) expected = OpenSSL::Digest::SHA256.hexdigest( OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('x' * 1024*1024) + OpenSSL::Digest::SHA256.digest('y' * 1024*1024) ) + OpenSSL::Digest::SHA256.digest( OpenSSL::Digest::SHA256.digest('z' * 1024*1024) + OpenSSL::Digest::SHA256.digest('t' * 1024*1024) ) ) returns(expected) { tree_hash.hexdigest} end end fog-aws-3.18.0/tests/requests/glacier/vault_tests.rb000066400000000000000000000025741437344660100225130ustar00rootroot00000000000000Shindo.tests('AWS::Glacier | glacier vault requests', ['aws']) do pending if Fog.mocking? 
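# -- Editorial sketch, not part of the original fog-aws test suite. --
# The tree hash tests above assemble their expected values by hand. As a rough
# reference (an illustration of the scheme those expectations encode, not the
# library's implementation), Glacier's tree hash digests each 1MB slice of the
# payload with SHA-256 and combines digests pairwise until one root remains.
require 'openssl'
def reference_tree_hash(body)
  chunk_size = 1024 * 1024
  digests = []
  offset = 0
  while offset < body.bytesize
    digests << OpenSSL::Digest::SHA256.digest(body.byteslice(offset, chunk_size))
    offset += chunk_size
  end
  digests << OpenSSL::Digest::SHA256.digest('') if digests.empty?
  while digests.size > 1
    digests = digests.each_slice(2).map do |pair|
      pair.size == 2 ? OpenSSL::Digest::SHA256.digest(pair.join) : pair.first
    end
  end
  digests.first.unpack1('H*')
end
# e.g. reference_tree_hash('x' * 1024 * 1024) should match
# Fog::AWS::Glacier::TreeHash.digest('x' * 1024 * 1024).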
topic_arn = Fog::AWS[:sns].create_topic( 'fog_test_glacier_topic').body['TopicArn'] Fog::AWS[:glacier].create_vault('Fog-Test-Vault') tests('list_vaults') do returns(true){Fog::AWS[:glacier].list_vaults.body['VaultList'].map {|data| data['VaultName']}.include?('Fog-Test-Vault')} end tests('describe_vault') do returns('Fog-Test-Vault'){Fog::AWS[:glacier].describe_vault('Fog-Test-Vault').body['VaultName']} end tests('set_vault_notification_configuration') do Fog::AWS[:glacier].set_vault_notification_configuration 'Fog-Test-Vault', topic_arn, ['ArchiveRetrievalCompleted'] end tests('get_vault_notification_configuration') do returns('SNSTopic' => topic_arn, 'Events' => ['ArchiveRetrievalCompleted']){ Fog::AWS[:glacier].get_vault_notification_configuration( 'Fog-Test-Vault').body} end tests('delete_vault_notification_configuration') do Fog::AWS[:glacier].delete_vault_notification_configuration( 'Fog-Test-Vault') raises(Excon::Errors::NotFound){Fog::AWS[:glacier].get_vault_notification_configuration( 'Fog-Test-Vault')} end tests('delete_vault') do Fog::AWS[:glacier].delete_vault( 'Fog-Test-Vault') raises(Excon::Errors::NotFound){Fog::AWS[:glacier].describe_vault( 'Fog-Test-Vault')} end Fog::AWS[:sns].delete_topic topic_arn end fog-aws-3.18.0/tests/requests/iam/000077500000000000000000000000001437344660100167415ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/iam/access_key_tests.rb000066400000000000000000000032251437344660100226230ustar00rootroot00000000000000Shindo.tests('AWS::IAM | access key requests', ['aws']) do Fog::AWS[:iam].create_user('fog_access_key_tests') tests('success') do @access_key_format = { 'AccessKey' => { 'AccessKeyId' => String, 'UserName' => String, 'SecretAccessKey' => String, 'Status' => String }, 'RequestId' => String } tests("#create_access_key('UserName' => 'fog_access_key_tests')").formats(@access_key_format) do data = Fog::AWS[:iam].create_access_key('UserName' => 'fog_access_key_tests').body @access_key_id = data['AccessKey']['AccessKeyId'] data end @access_keys_format = { 'AccessKeys' => [{ 'AccessKeyId' => String, 'Status' => String }], 'IsTruncated' => Fog::Boolean, 'RequestId' => String } tests("#list_access_keys('Username' => 'fog_access_key_tests')").formats(@access_keys_format) do Fog::AWS[:iam].list_access_keys('UserName' => 'fog_access_key_tests').body end tests("#update_access_key('#{@access_key_id}', 'Inactive', 'UserName' => 'fog_access_key_tests')").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? 
Fog::AWS[:iam].update_access_key(@access_key_id, 'Inactive', 'UserName' => 'fog_access_key_tests').body end tests("#delete_access_key('#{@access_key_id}', 'UserName' => 'fog_access_key_tests)").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_access_key(@access_key_id, 'UserName' => 'fog_access_key_tests').body end end tests('failure') do test('failing conditions') end Fog::AWS[:iam].delete_user('fog_access_key_tests') end fog-aws-3.18.0/tests/requests/iam/account_policy_tests.rb000066400000000000000000000023261437344660100235260ustar00rootroot00000000000000Shindo.tests('AWS::IAM | account policy requests', ['aws']) do tests('success') do tests("#update_account_password_policy(minimum_password_length, max_password_age, password_reuse_prevention,require_symbols,require_numbers,require_uppercase_characters, require_lowercase_characters,allow_users_to_change_password, hard_expiry, expire_passwords)").formats(AWS::IAM::Formats::BASIC) do minimum_password_length, password_reuse_prevention, max_password_age = 5 require_symbols, require_numbers, require_uppercase_characters, require_lowercase_characters, allow_users_to_change_password, hard_expiry, expire_passwords = false Fog::AWS[:iam].update_account_password_policy(minimum_password_length, max_password_age, password_reuse_prevention,require_symbols,require_numbers,require_uppercase_characters, require_lowercase_characters,allow_users_to_change_password, hard_expiry, expire_passwords).body end tests("#get_account_password_policy()") do Fog::AWS[:iam].get_account_password_policy().body['AccountPasswordPolicy'] end tests("#delete_account_password_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_account_password_policy().body end end end fog-aws-3.18.0/tests/requests/iam/account_tests.rb000066400000000000000000000023241437344660100221450ustar00rootroot00000000000000Shindo.tests('AWS::IAM | account requests', ['aws']) do tests('success') do @get_account_summary_format = { 'Summary' => { 'AccessKeysPerUserQuota' => Integer, 'AccountMFAEnabled' => Integer, 'AssumeRolePolicySizeQuota' => Fog::Nullable::Integer, 'GroupPolicySizeQuota' => Integer, 'Groups' => Integer, 'GroupsPerUserQuota' => Integer, 'GroupsQuota' => Integer, 'InstanceProfiles' => Fog::Nullable::Integer, 'InstanceProfilesQuota' => Fog::Nullable::Integer, 'MFADevices' => Integer, 'MFADevicesInUse' => Integer, 'Providers' => Fog::Nullable::Integer, 'RolePolicySizeQuota' => Fog::Nullable::Integer, 'Roles' => Fog::Nullable::Integer, 'RolesQuota' => Fog::Nullable::Integer, 'ServerCertificates' => Integer, 'ServerCertificatesQuota' => Integer, 'SigningCertificatesPerUserQuota' => Integer, 'UserPolicySizeQuota' => Integer, 'Users' => Integer, 'UsersQuota' => Integer, }, 'RequestId' => String, } tests('#get_account_summary').formats(@get_account_summary_format) do Fog::AWS[:iam].get_account_summary.body end end end fog-aws-3.18.0/tests/requests/iam/group_policy_tests.rb000066400000000000000000000027731437344660100232340ustar00rootroot00000000000000Shindo.tests('AWS::IAM | group policy requests', ['aws']) do Fog::AWS[:iam].create_group('fog_group_policy_tests') tests('success') do @policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} tests("#put_group_policy('fog_group_policy_tests', 'fog_policy', #{@policy.inspect})").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].put_group_policy('fog_group_policy_tests', 'fog_policy', @policy).body end @group_policies_format = { 'IsTruncated' => Fog::Boolean, 'PolicyNames' => [String], 
'RequestId' => String } tests("list_group_policies('fog_group_policy_tests')").formats(@group_policies_format) do pending if Fog.mocking? Fog::AWS[:iam].list_group_policies('fog_group_policy_tests').body end @group_policy_format = { 'GroupName' => String, 'PolicyName' => String, 'PolicyDocument' => Hash, } tests("#get_group_policy('fog_group_policy_tests', 'fog_policy'").formats(@group_policy_format) do Fog::AWS[:iam].get_group_policy('fog_policy', 'fog_group_policy_tests').body['Policy'] end tests("#delete_group_policy('fog_group_policy_tests', 'fog_policy')").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:iam].delete_group_policy('fog_group_policy_tests', 'fog_policy').body end end tests('failure') do test('failing conditions') end unless Fog.mocking? Fog::AWS[:iam].delete_group('fog_group_policy_tests') end end fog-aws-3.18.0/tests/requests/iam/group_tests.rb000066400000000000000000000017151437344660100216500ustar00rootroot00000000000000Shindo.tests('AWS::IAM | group requests', ['aws']) do tests('success') do @group_format = { 'Group' => { 'Arn' => String, 'GroupId' => String, 'GroupName' => String, 'Path' => String }, 'RequestId' => String } tests("#create_group('fog_group')").formats(@group_format) do Fog::AWS[:iam].create_group('fog_group').body end @groups_format = { 'Groups' => [{ 'Arn' => String, 'GroupId' => String, 'GroupName' => String, 'Path' => String }], 'IsTruncated' => Fog::Boolean, 'RequestId' => String } tests("#list_groups").formats(@groups_format) do Fog::AWS[:iam].list_groups.body end tests("#delete_group('fog_group')").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_group('fog_group').body end end tests('failure') do test('failing conditions') end end fog-aws-3.18.0/tests/requests/iam/helper.rb000066400000000000000000000155521437344660100205550ustar00rootroot00000000000000class AWS module IAM # A self-signed test keypair. Generated using the command: # openssl req -new -newkey rsa:1024 -days 3650 -nodes -x509 -keyout server-private.key -out server-public.crt # NB: Amazon returns an error on extraneous linebreaks SERVER_CERT = %{-----BEGIN CERTIFICATE----- MIIDQzCCAqygAwIBAgIJAJaZ8wH+19AtMA0GCSqGSIb3DQEBBQUAMHUxCzAJBgNV BAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazERMA8GA1UEBxMITmV3IFlvcmsxHzAd BgNVBAoTFkZvZyBUZXN0IFNuYWtlb2lsIENlcnQxHzAdBgNVBAsTFkZvZyBUZXN0 IFNuYWtlb2lsIENlcnQwHhcNMTEwNTA3MTc0MDU5WhcNMjEwNTA0MTc0MDU5WjB1 MQswCQYDVQQGEwJVUzERMA8GA1UECBMITmV3IFlvcmsxETAPBgNVBAcTCE5ldyBZ b3JrMR8wHQYDVQQKExZGb2cgVGVzdCBTbmFrZW9pbCBDZXJ0MR8wHQYDVQQLExZG b2cgVGVzdCBTbmFrZW9pbCBDZXJ0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB gQC0CR76sovjdmpWRmEaf8XaG+nGe7czhpdLKkau2b16VtSjkPctxPL5U4vaMxQU boLPr+9oL+9fSYN31VzDD4hyaeGoeI5fhnGeqk71kq5uHONBOQUMbZbBQ8PVd9Sd k+y9JJ6E5fC+GhLL5I+y2DK7syBzyymq1Wi6rPp1XXF7AQIDAQABo4HaMIHXMB0G A1UdDgQWBBRfqBkpU/jEV324748fq6GJM80iVTCBpwYDVR0jBIGfMIGcgBRfqBkp U/jEV324748fq6GJM80iVaF5pHcwdTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE5l dyBZb3JrMREwDwYDVQQHEwhOZXcgWW9yazEfMB0GA1UEChMWRm9nIFRlc3QgU25h a2VvaWwgQ2VydDEfMB0GA1UECxMWRm9nIFRlc3QgU25ha2VvaWwgQ2VydIIJAJaZ 8wH+19AtMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAUV6NDdLHKNhl ACtzLycIhlMTmDr0xBeIBx3lpgw2K0+4oefMS8Z17eeZPeNodxnz56juJm81BZwt DF3qnnPyArLFx0HLB7wQdm9xYVIqQuLO+V6GRuOd+uSX//aDLDZhwbERf35hoyto Jfk4gX/qwuRFNy0vjQeTzdvhB1igG/w= -----END CERTIFICATE----- } # The public key for SERVER_CERT. 
Generated using the command: # openssl x509 -inform pem -in server-public.crt -pubkey -noout > server.pubkey SERVER_CERT_PUBLIC_KEY = "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC0CR76sovjdmpWRmEaf8XaG+nGe7czhpdLKkau2b16VtSjkPctxPL5U4vaMxQUboLPr+9oL+9fSYN31VzDD4hyaeGoeI5fhnGeqk71kq5uHONBOQUMbZbBQ8PVd9Sdk+y9JJ6E5fC+GhLL5I+y2DK7syBzyymq1Wi6rPp1XXF7AQIDAQAB" SERVER_CERT_PRIVATE_KEY = %{-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQC0CR76sovjdmpWRmEaf8XaG+nGe7czhpdLKkau2b16VtSjkPct xPL5U4vaMxQUboLPr+9oL+9fSYN31VzDD4hyaeGoeI5fhnGeqk71kq5uHONBOQUM bZbBQ8PVd9Sdk+y9JJ6E5fC+GhLL5I+y2DK7syBzyymq1Wi6rPp1XXF7AQIDAQAB AoGANjjRBbwkeXs+h4Fm2W5GDmx9ufOkt3X/tvmilCKr+F6SaDjO2RAKBaFt62ea 0pR9/UMFnaFiPJaNa9fsuirBcwId+RizruEp+7FGziM9mC5kcE7WKZrXgGGnLtqg 4x5twVLArgp0ji7TA18q/74uTrI4az8H5iTY4n29ORlLmmkCQQDsGMuLEgGHgN5Y 1c9ax1DT/rUXKxnqsIrijRkgbiTncHAArFJ88c3yykWqGvYnSFwMS8DSWiPyPaAI nNNlb/fPAkEAwzZ4CfvJ+OlE++rTPH9jemC89dnxC7EFGuWJmwdadnev8EYguvve cdGdGttD7QsZKpcz5mDngOUghbVm8vBELwJAMHfOoVgq9DRicP5DuTEdyMeLSZxR j7p6aJPqypuR++k7NQgrTvcc/nDD6G3shpf2PZf3l7dllb9M8TewtixMRQJBAIdX c0AQtoYBTJePxiYyd8i32ypkkK83ar+sFoxKO9jYwD1IkZax2xZ0aoTdMindQPR7 Yjs+QiLmOHcbPqX+GHcCQERsSn0RjzKmKirDntseMB59BB/cEN32+gMDVsZuCfb+ fOy2ZavFl13afnhbh2/AjKeDhnb19x/uXjF7JCUtwpA= -----END RSA PRIVATE KEY----- } # openssl pkcs8 -nocrypt -topk8 -in SERVER_CERT_PRIVATE_KEY.key -outform pem SERVER_CERT_PRIVATE_KEY_PKCS8 = %{-----BEGIN PRIVATE KEY----- MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALQJHvqyi+N2alZG YRp/xdob6cZ7tzOGl0sqRq7ZvXpW1KOQ9y3E8vlTi9ozFBRugs+v72gv719Jg3fV XMMPiHJp4ah4jl+GcZ6qTvWSrm4c40E5BQxtlsFDw9V31J2T7L0knoTl8L4aEsvk j7LYMruzIHPLKarVaLqs+nVdcXsBAgMBAAECgYA2ONEFvCR5ez6HgWbZbkYObH25 86S3df+2+aKUIqv4XpJoOM7ZEAoFoW3rZ5rSlH39QwWdoWI8lo1r1+y6KsFzAh35 GLOu4Sn7sUbOIz2YLmRwTtYpmteAYacu2qDjHm3BUsCuCnSOLtMDXyr/vi5Osjhr PwfmJNjifb05GUuaaQJBAOwYy4sSAYeA3ljVz1rHUNP+tRcrGeqwiuKNGSBuJOdw cACsUnzxzfLKRaoa9idIXAxLwNJaI/I9oAic02Vv988CQQDDNngJ+8n46UT76tM8 f2N6YLz12fELsQUa5YmbB1p2d6/wRiC6+95x0Z0a20PtCxkqlzPmYOeA5SCFtWby 8EQvAkAwd86hWCr0NGJw/kO5MR3Ix4tJnFGPunpok+rKm5H76Ts1CCtO9xz+cMPo beyGl/Y9l/eXt2WVv0zxN7C2LExFAkEAh1dzQBC2hgFMl4/GJjJ3yLfbKmSQrzdq v6wWjEo72NjAPUiRlrHbFnRqhN0yKd1A9HtiOz5CIuY4dxs+pf4YdwJARGxKfRGP MqYqKsOe2x4wHn0EH9wQ3fb6AwNWxm4J9v587LZlq8WXXdp+eFuHb8CMp4OGdvX3 H+5eMXskJS3CkA== -----END PRIVATE KEY----- } SERVER_CERT_PRIVATE_KEY_MISMATCHED = %{-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyITMqYJMzkPMcaC+x0W2hnZVW99RXzLR8RYyD3xo2AotdJKx 1DXR4ryegAjsnAhJVwVtxqzPcBMq/XS0hNtWFfKzf+vMZl7uAqotGjURUV8SRQPA 8tT07MemD929xRSV2vTnVATiPn87vcu5igsZ01+Ewd6rGythmvcZD13vtZ4rx0c8 kQJV3ok/CkFaIgDR6Or1NZBCtcIVK9nvqAmYMp6S5mWUMIsl/1qYPerpefrSJjlk J2+jyLp0LHarbzjkzzAdOkBRX1hPkk6cisBeQIpx35shLzfCe8U25XNqquP+ftcu JZ0Wjw+C4pTIzfgdGXmGGtBFY13BwiJvd4/i2wIDAQABAoIBABk8XWWX+IKdFcXX LSt3IpmZmvSNDniktLday8IXLjrCTSY2sBq9C0U159zFQsIAaPqCvGYcqZ65StfL MEzoLdVlTiHzUy4vFFVRhYue0icjh/EXn9jv5ENIfSXSCmgbRyDfYZ25X5/t817X nOo6q21mwBaGJ5KrywTtxEGi2OBKZrIbBrpJLhCXJc5xfuKT6DRa9X/OBSBiGKJP V9wHcZJkPG1HnC8izvQ37kNN/NyYE+8AGdYXQVNbTHq/emNLbEbdcR3tpGZamM9Q TwG5WsDPAnXnRsEEYvlVTOBI6DqdvkyBxM35iqd5aAc6i/Iu04Unfhhc5pAXmmIB a22GHcECgYEA7OheVHDDP8quO2qZjqaTlMbMnXnrFXJ41llFMoivTW9EmlTl9dOC fnkHEBcFCTPV0m6S2AQjt9QOgPqCFAq1r3J/xvEGBtl/UKnPRmjqXFgl0ENtGn5t w9wj/CsOPD05KkXXtXP+MyLPRD6gAxiQCTnXjvsLuVfP+E9BO2EQXScCgYEA2K2x QtcAAalrk3c0KzNVESzyFlf3ddEXThShVblSa7r6Ka9q9sxN/Xe2B+1oemPJm26G PfqKgxdKX0R0jl4f5pRBWKoarzWtUge/su8rx/xzbY/1hFKVuimtc6oTeU5xsOTS PVuCz4bxDTVhrbmKqbmMgqy17jfPA4BrF1FMRS0CgYBdMA4i4vQ6fIxKfOUIMsfs hsJn01RAbHXRwu2wMgnayMDQgEKwjtFO1GaN0rA9bXFXQ/1pET/HiJdn7qIKJihP 
aheO9rHrMdSdsx4AUTaWummtYUhiWobsuwRApeMEmQSKd0yhaI3+KVwkOQoSDbBi oKkE6gUzk7IPt4UuSUD5kwKBgQCjo/IGr8dieegz08gDhF4PfalLdJ4ATaxTHMOH sVFs6SY7Sy72Ou//qGRCcmsAW9KL35nkvw3S2Ukiz9lTGATxqC/93WIPxvMhy5Zc dcLT43XtXdanW5OWqBlGDEFu0O6OERIyoqUVRC1Ss2kUwdbWPbq/id5Qjbd7RoYa cxyt9QKBgF4bFLw1Iw2RBngQxIzoDbElEqme20FUyGGzyFQtxVwmwNr4OY5UzJzX 7G6diyzGrvRX81Yw616ppKJUJVr/zRc13K+eRXXKtNpGkf35B+1NDDjjWZpIHqgx Xb9WSr07saxZQbxBPQyTlb0Q9Tu2djAq2/o/nYD1/50/fXUTuWMB -----END RSA PRIVATE KEY----- } module Formats BASIC = { 'RequestId' => String } USER = { 'Arn' => String, 'Path' => String, 'UserId' => String, 'UserName' => String, } CREATE_USER = BASIC.merge('User' => USER) GET_USER = BASIC.merge('User' => USER.merge('CreateDate' => Time)) GET_CURRENT_USER = BASIC.merge( 'User' => { 'Arn' => String, 'UserId' => String, 'CreateDate' => Time } ) LIST_USER = BASIC.merge( 'Users' => [USER.merge('CreateDate' => Time)], 'IsTruncated' => Fog::Boolean ) GROUPS = BASIC.merge( 'GroupsForUser' => [{ 'Arn' => String, 'GroupId' => String, 'GroupName' => String, 'Path' => String }], 'IsTruncated' => Fog::Boolean ) INSTANCE_PROFILE = { 'Arn' => String, 'CreateDate' => Time, 'InstanceProfileId' => String, 'InstanceProfileName' => String, 'Path' => String, 'Roles' => Array } INSTANCE_PROFILE_RESULT = BASIC.merge( 'InstanceProfile' => INSTANCE_PROFILE ) LIST_INSTANCE_PROFILE_RESULT = BASIC.merge( "IsTruncated" => Fog::Boolean, "InstanceProfiles" => [INSTANCE_PROFILE] ) end end end fog-aws-3.18.0/tests/requests/iam/instance_profile_tests.rb000066400000000000000000000033241437344660100240360ustar00rootroot00000000000000include AWS::IAM::Formats Shindo.tests("AWS::IAM | instance profile requests", ['aws']) do tests('success') do profile_name = uniq_id('fog-instance-profile') @instance_profile_count = Fog::AWS[:iam].list_instance_profiles.body["InstanceProfiles"].count tests("#create_instance_profile('#{profile_name}')").formats(INSTANCE_PROFILE_RESULT) do Fog::AWS[:iam].create_instance_profile(profile_name).body end tests("#list_instance_profiles").formats(LIST_INSTANCE_PROFILE_RESULT) do body = Fog::AWS[:iam].list_instance_profiles.body returns(@instance_profile_count + 1) { body["InstanceProfiles"].count } body end tests("#get_instance_profile('#{profile_name}')").formats(INSTANCE_PROFILE_RESULT) do Fog::AWS[:iam].get_instance_profile(profile_name).body end @role = Fog::AWS[:iam].roles.create(:rolename => uniq_id('instance-profile-role')) tests("#add_role_to_instance_profile('#{@role.rolename}', '#{profile_name}')").formats(BASIC) do Fog::AWS[:iam].add_role_to_instance_profile(@role.rolename, profile_name).body end tests("#list_instance_profiles_for_role('#{@role.rolename}')").formats(LIST_INSTANCE_PROFILE_RESULT) do body = Fog::AWS[:iam].list_instance_profiles_for_role(@role.rolename).body returns(1) { body["InstanceProfiles"].count } body end tests("#remove_role_from_instance_profile('#{@role.rolename}', '#{profile_name}')").formats(BASIC) do Fog::AWS[:iam].remove_role_from_instance_profile(@role.rolename, profile_name).body end @role.destroy tests("#delete_instance_profile('#{profile_name}')").formats(BASIC) do Fog::AWS[:iam].delete_instance_profile(profile_name).body end end end fog-aws-3.18.0/tests/requests/iam/login_profile_tests.rb000066400000000000000000000036031437344660100233420ustar00rootroot00000000000000Shindo.tests('AWS::IAM | user requests', ['aws']) do unless Fog.mocking? 
Fog::AWS[:iam].create_user('fog_user') end tests('success') do @login_profile_format = { 'LoginProfile' => { 'UserName' => String, 'CreateDate' => Time }, 'RequestId' => String } tests("#create_login_profile('fog_user')").formats(@login_profile_format) do pending if Fog.mocking? Fog::AWS[:iam].create_login_profile('fog_user', 'somepassword').body end tests("#get_login_profile('fog_user')").formats(@login_profile_format) do pending if Fog.mocking? result = Fog::AWS[:iam].get_login_profile('fog_user').body returns('fog_user') {result['LoginProfile']['UserName']} result end tests("#update_login_profile('fog_user')").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? begin Fog::AWS[:iam].update_login_profile('fog_user', 'otherpassword').body rescue Excon::Errors::Conflict #profile cannot be updated or deleted until it has finished creating; api provides no way of telling whether creation process complete sleep 5 retry end end tests("#delete_login_profile('fog_user')").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:iam].delete_login_profile('fog_user').body end tests("#get_login_profile('fog_user')") do pending if Fog.mocking? raises(Excon::Errors::NotFound) {Fog::AWS[:iam].get_login_profile('fog_user')} end end tests('failure') do tests('get login profile for non existing user') do pending if Fog.mocking? raises(Fog::AWS::IAM::NotFound) { Fog::AWS[:iam].get_login_profile('idontexist')} raises(Fog::AWS::IAM::NotFound) { Fog::AWS[:iam].delete_login_profile('fog_user')} end end unless Fog.mocking? Fog::AWS[:iam].delete_user('fog_user') end end fog-aws-3.18.0/tests/requests/iam/managed_policy_tests.rb000066400000000000000000000070601437344660100234660ustar00rootroot00000000000000Shindo.tests('AWS::IAM | managed policy requests', ['aws']) do Fog::AWS[:iam].create_group('fog_policy_test_group') Fog::AWS[:iam].create_user('fog_policy_test_user') Fog::AWS[:iam].create_role('fog_policy_test_role', Fog::AWS::IAM::EC2_ASSUME_ROLE_POLICY) tests('success') do @policy = {'Version' => '2012-10-17', "Statement" => [{"Effect" => "Deny", "Action" => "*", "Resource" => "*"}]} @policy_format = { 'Arn' => String, 'AttachmentCount' => Integer, 'Description' => Fog::Nullable::String, 'DefaultVersionId' => String, 'IsAttachable' => Fog::Boolean, 'Path' => String, 'PolicyId' => String, 'PolicyName' => String, 'CreateDate' => Time, 'UpdateDate' => Time } create_policy_format = { 'RequestId' => String, 'Policy' => @policy_format } list_policies_format = { 'RequestId' => String, 'Policies' => [@policy_format], 'Marker' => Fog::Nullable::String, 'IsTruncated' => Fog::Boolean } attached_policy_format = { 'PolicyArn' => String, 'PolicyName' => String } list_managed_policies_format = { 'RequestId' => String, 'Policies' => [attached_policy_format] } tests("#create_policy('fog_policy')").formats(create_policy_format) do body = Fog::AWS[:iam].create_policy('fog_policy', @policy, '/fog/').body @policy_arn = body['Policy']['Arn'] body end tests("#list_policies()").formats(list_policies_format) do body = Fog::AWS[:iam].list_policies('PathPrefix' => '/fog/').body tests('length 1').returns(1) do body['Policies'].length end body end tests("#attach_user_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].attach_user_policy('fog_policy_test_user', @policy_arn).body end tests("#list_attach_user_policies()").formats(list_managed_policies_format) do Fog::AWS[:iam].list_attached_user_policies('fog_policy_test_user').body end tests("#detach_user_policy()").formats(AWS::IAM::Formats::BASIC) do 
Fog::AWS[:iam].detach_user_policy('fog_policy_test_user', @policy_arn).body end tests("#attach_group_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].attach_group_policy('fog_policy_test_group', @policy_arn).body end tests("#list_attach_group_policies()").formats(list_managed_policies_format) do Fog::AWS[:iam].list_attached_group_policies('fog_policy_test_group').body end tests("#detach_group_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].detach_group_policy('fog_policy_test_group', @policy_arn).body end tests("#attach_role_policy()").formats(AWS::IAM::Formats::BASIC) do body = Fog::AWS[:iam].attach_role_policy('fog_policy_test_role', @policy_arn).body end tests("#list_attached_role_policies()").formats(list_managed_policies_format) do Fog::AWS[:iam].list_attached_role_policies('fog_policy_test_role').body end tests("#detach_role_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].detach_role_policy('fog_policy_test_role', @policy_arn).body end tests("#delete_policy()").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_policy(@policy_arn).body end end tests('failure') do test('failing conditions') end Fog::AWS[:iam].delete_group('fog_policy_test_group') Fog::AWS[:iam].delete_user('fog_policy_test_user') Fog::AWS[:iam].delete_role('fog_policy_test_role') end fog-aws-3.18.0/tests/requests/iam/mfa_tests.rb000066400000000000000000000007631437344660100212610ustar00rootroot00000000000000Shindo.tests('AWS::IAM | mfa requests', ['aws']) do tests('success') do @mfa_devices_format = { 'MFADevices' => [{ 'EnableDate' => Time, 'SerialNumber' => String, 'UserName' => String }], 'IsTruncated' => Fog::Boolean, 'RequestId' => String } tests('#list_mfa_devices').formats(@mfa_devices_format) do Fog::AWS[:iam].list_mfa_devices.body end end tests('failure') do test('failing conditions') end end fog-aws-3.18.0/tests/requests/iam/role_tests.rb000066400000000000000000000114421437344660100214530ustar00rootroot00000000000000Shindo.tests('AWS::IAM | role requests', ['aws']) do tests('success') do @role = { 'Arn' => String, 'AssumeRolePolicyDocument' => String, 'CreateDate' => Time, 'Path' => String, 'RoleId' => String, 'RoleName' => String } @role_format = { 'Role' => @role, 'RequestId' => String } tests("#create_role('fogrole')").formats(@role_format) do Fog::AWS[:iam].create_role('fogrole', Fog::AWS::IAM::EC2_ASSUME_ROLE_POLICY).body end tests("#get_role('fogrole')").formats(@role_format) do Fog::AWS[:iam].get_role('fogrole').body end @list_roles_format = { 'Roles' => [@role], 'RequestId' => String, 'IsTruncated' => Fog::Boolean, } tests("#list_roles").formats(@list_roles_format) do body = Fog::AWS[:iam].list_roles.body returns(true){!! body['Roles'].find {|role| role['RoleName'] == 'fogrole'}} body end @profile_format = { 'InstanceProfile' => { 'Arn' => String, 'CreateDate' => Time, 'Path' => String, 'InstanceProfileId' => String, 'InstanceProfileName' => String, 'Roles' => [@role] }, 'RequestId' => String } tests("#create_instance_profile('fogprofile')").formats(@profile_format) do pending if Fog.mocking? Fog::AWS[:iam].create_instance_profile('fogprofile').body end tests("#get_instance_profile('fogprofile')").formats(@profile_format) do pending if Fog.mocking? Fog::AWS[:iam].get_instance_profile('fogprofile').body end tests("#add_role_to_instance_profile('fogprofile','fogrole')").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? 
Fog::AWS[:iam].add_role_to_instance_profile('fogrole', 'fogprofile').body end @profiles_format = { 'InstanceProfiles' => [{ 'Arn' => String, 'CreateDate' => Time, 'Path' => String, 'InstanceProfileId' => String, 'InstanceProfileName' => String, 'Roles' => [@role] }], 'IsTruncated' => Fog::Boolean, 'RequestId' => String } tests("list_instance_profiles_for_role('fogrole')").formats(@profiles_format) do pending if Fog.mocking? body = Fog::AWS[:iam].list_instance_profiles_for_role('fogrole').body returns(['fogprofile']) { body['InstanceProfiles'].map {|hash| hash['InstanceProfileName']}} body end tests("list_instance_profiles").formats(@profiles_format) do pending if Fog.mocking? Fog::AWS[:iam].list_instance_profiles.body end sample_policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} tests("put_role_policy").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:iam].put_role_policy('fogrole', 'fogpolicy', sample_policy).body end @get_role_policy_format = { 'Policy' => { 'RoleName' => String, 'PolicyName' => String, 'PolicyDocument' => Hash, }, 'RequestId' => String } tests("get_role_policy").formats(@get_role_policy_format) do pending if Fog.mocking? body = Fog::AWS[:iam].get_role_policy('fogrole','fogpolicy').body returns('fogpolicy') {body['Policy']['PolicyName']} returns(sample_policy){body['Policy']['PolicyDocument']} body end @list_role_policies_format = { 'PolicyNames' => [String], 'IsTruncated' => Fog::Boolean, 'RequestId' => String } tests("list_role_policies").formats(@list_role_policies_format) do pending if Fog.mocking? body = Fog::AWS[:iam].list_role_policies('fogrole').body returns(['fogpolicy']) {body['PolicyNames']} body end tests("delete_role_policy").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:iam].delete_role_policy('fogrole', 'fogpolicy').body end returns([]) do pending if Fog.mocking? Fog::AWS[:iam].list_role_policies('fogrole').body['PolicyNames'] end tests("remove_role_from_instance_profile").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:iam].remove_role_from_instance_profile('fogrole', 'fogprofile').body end returns([]) do pending if Fog.mocking? Fog::AWS[:iam].list_instance_profiles_for_role('fogrole').body['InstanceProfiles'] end tests("#delete_instance_profile('fogprofile'").formats(AWS::IAM::Formats::BASIC) do pending if Fog.mocking? 
Fog::AWS[:iam].delete_instance_profile('fogprofile').body end tests("#delete_role('fogrole'").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_role('fogrole').body end end end fog-aws-3.18.0/tests/requests/iam/server_certificate_tests.rb000066400000000000000000000114721437344660100243650ustar00rootroot00000000000000Shindo.tests('AWS::IAM | server certificate requests', ['aws']) do @key_name = 'fog-test' @key_name_chained = 'fog-test-chained' @certificate_format = { 'Arn' => String, 'Path' => String, 'ServerCertificateId' => String, 'ServerCertificateName' => String, 'UploadDate' => Time } @upload_format = { 'Certificate' => @certificate_format, 'RequestId' => String } @update_format = { 'RequestId' => String } @get_server_certificate_format = { 'Certificate' => @certificate_format, 'RequestId' => String } @list_format = { 'Certificates' => [@certificate_format] } tests('#upload_server_certificate') do public_key = AWS::IAM::SERVER_CERT private_key = AWS::IAM::SERVER_CERT_PRIVATE_KEY private_key_pkcs8 = AWS::IAM::SERVER_CERT_PRIVATE_KEY_PKCS8 private_key_mismatch = AWS::IAM::SERVER_CERT_PRIVATE_KEY_MISMATCHED tests('empty public key').raises(Fog::AWS::IAM::ValidationError) do Fog::AWS::IAM.new.upload_server_certificate('', private_key, @key_name) end tests('empty private key').raises(Fog::AWS::IAM::ValidationError) do Fog::AWS::IAM.new.upload_server_certificate(public_key, '', @key_name) end tests('invalid public key').raises(Fog::AWS::IAM::MalformedCertificate) do Fog::AWS::IAM.new.upload_server_certificate('abcde', private_key, @key_name) end tests('invalid private key').raises(Fog::AWS::IAM::MalformedCertificate) do Fog::AWS::IAM.new.upload_server_certificate(public_key, 'abcde', @key_name) end tests('non-RSA private key').raises(Fog::AWS::IAM::MalformedCertificate) do Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key_pkcs8, @key_name) end tests('mismatched private key').raises(Fog::AWS::IAM::KeyPairMismatch) do Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key_mismatch, @key_name) end tests('format').formats(@upload_format) do Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key, @key_name).body end tests('format with chain').formats(@upload_format) do Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key, @key_name_chained, { 'CertificateChain' => public_key }).body end tests('duplicate name').raises(Fog::AWS::IAM::EntityAlreadyExists) do Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key, @key_name) end end tests('#update_server_certificate') do public_key = AWS::IAM::SERVER_CERT private_key = AWS::IAM::SERVER_CERT_PRIVATE_KEY key_name = "update-key" Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key, key_name) tests('duplicate name').raises(Fog::AWS::IAM::EntityAlreadyExists) do other_key_name = "other-key-name" Fog::AWS::IAM.new.upload_server_certificate(public_key, private_key, other_key_name) Fog::AWS::IAM.new.update_server_certificate(key_name, {'NewServerCertificateName' => other_key_name}) end tests('unknown name').raises(Fog::AWS::IAM::NotFound) do Fog::AWS::IAM.new.update_server_certificate("unknown-key-name", {'NewServerCertificateName' => "other-keyname"}) end tests('format').formats(@update_format) do Fog::AWS::IAM.new.update_server_certificate(key_name).body end tests('updates name') do other_key_name = "successful-update-key-name" Fog::AWS::IAM.new.update_server_certificate(key_name, {'NewServerCertificateName' => other_key_name}) returns(true) { 
Fog::AWS::IAM.new.get_server_certificate(other_key_name).body['Certificate']['ServerCertificateName'] == other_key_name } end end tests('#get_server_certificate').formats(@get_server_certificate_format) do tests('raises NotFound').raises(Fog::AWS::IAM::NotFound) do Fog::AWS::IAM.new.get_server_certificate("#{@key_name}fake") end Fog::AWS::IAM.new.get_server_certificate(@key_name).body end tests('#list_server_certificates').formats(@list_format) do result = Fog::AWS::IAM.new.list_server_certificates.body tests('includes key name') do returns(true) { result['Certificates'].any?{|c| c['ServerCertificateName'] == @key_name} } end result end tests("#list_server_certificates('path-prefix' => '/'").formats(@list_format) do result = Fog::AWS::IAM.new.list_server_certificates('PathPrefix' => '/').body tests('includes key name') do returns(true) { result['Certificates'].any?{|c| c['ServerCertificateName'] == @key_name} } end result end tests('#delete_server_certificate').formats(AWS::IAM::Formats::BASIC) do tests('raises NotFound').raises(Fog::AWS::IAM::NotFound) do Fog::AWS::IAM.new.delete_server_certificate("#{@key_name}fake") end Fog::AWS::IAM.new.delete_server_certificate(@key_name).body end Fog::AWS::IAM.new.delete_server_certificate(@key_name_chained) end fog-aws-3.18.0/tests/requests/iam/user_policy_tests.rb000066400000000000000000000026111437344660100230450ustar00rootroot00000000000000Shindo.tests('AWS::IAM | user policy requests', ['aws']) do Fog::AWS[:iam].create_user('fog_user_policy_tests') tests('success') do @policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} tests("#put_user_policy('fog_user_policy_tests', 'fog_policy', #{@policy.inspect})").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].put_user_policy('fog_user_policy_tests', 'fog_policy', @policy).body end @user_policies_format = { 'IsTruncated' => Fog::Boolean, 'PolicyNames' => [String], 'RequestId' => String } tests("#list_user_policies('fog_user_policy_tests')").formats(@user_policies_format) do Fog::AWS[:iam].list_user_policies('fog_user_policy_tests').body end @user_policy_format = { 'UserName' => String, 'PolicyName' => String, 'PolicyDocument' => Hash, } tests("#get_user_policy('fog_user_policy_tests', 'fog_policy'").formats(@user_policy_format) do Fog::AWS[:iam].get_user_policy('fog_policy', 'fog_user_policy_tests').body['Policy'] end tests("#delete_user_policy('fog_user_policy_tests', 'fog_policy')").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_user_policy('fog_user_policy_tests', 'fog_policy').body end end tests('failure') do test('failing conditions') end Fog::AWS[:iam].delete_user('fog_user_policy_tests') end fog-aws-3.18.0/tests/requests/iam/user_tests.rb000066400000000000000000000043251437344660100214720ustar00rootroot00000000000000Shindo.tests('AWS::IAM | user requests', ['aws']) do service = Fog::AWS[:iam] begin service.delete_group('fog_user_tests') rescue Fog::AWS::IAM::NotFound end begin service.delete_user('fog_user').body rescue Fog::AWS::IAM::NotFound end username = 'fog_user' service.create_group('fog_user_tests') tests("#create_user('#{username}')").data_matches_schema(AWS::IAM::Formats::CREATE_USER) do service.create_user(username).body end tests("#list_users").data_matches_schema(AWS::IAM::Formats::LIST_USER) do service.list_users.body end tests("#get_user('#{username}')").data_matches_schema(AWS::IAM::Formats::GET_USER) do service.get_user(username).body end tests("#get_user").data_matches_schema(AWS::IAM::Formats::GET_CURRENT_USER) do body = 
Fog::AWS[:iam].get_user.body if Fog.mocking? tests("correct root arn").returns(true) { body["User"]["Arn"].end_with?(":root") } end body end tests("#create_login_profile") do service.create_login_profile(username, SecureRandom.base64(10)) end tests("#get_login_profile") do service.get_login_profile(username) end tests("#update_login_profile") do # avoids Fog::AWS::IAM::Error: EntityTemporarilyUnmodifiable => Login Profile for User instance cannot be modified while login profile is being created. if Fog.mocking? service.update_login_profile(username, SecureRandom.base64(10)) end end tests("#delete_login_profile") do service.delete_login_profile(username) end tests("#add_user_to_group('fog_user_tests', '#{username}')").data_matches_schema(AWS::IAM::Formats::BASIC) do service.add_user_to_group('fog_user_tests', username).body end tests("#list_groups_for_user('#{username}')").data_matches_schema(AWS::IAM::Formats::GROUPS) do service.list_groups_for_user(username).body end tests("#remove_user_from_group('fog_user_tests', '#{username}')").data_matches_schema(AWS::IAM::Formats::BASIC) do service.remove_user_from_group('fog_user_tests', username).body end tests("#delete_user('#{username}')").data_matches_schema(AWS::IAM::Formats::BASIC) do service.delete_user(username).body end service.delete_group('fog_user_tests') end fog-aws-3.18.0/tests/requests/iam/versioned_managed_policy_tests.rb000066400000000000000000000061401437344660100255420ustar00rootroot00000000000000Shindo.tests('AWS::IAM | versioned managed policy requests', ['aws']) do pending if Fog.mocking? tests('success') do @policy = {'Version' => '2012-10-17', "Statement" => [{"Effect" => "Deny", "Action" => "*", "Resource" => "*"}]} @policy_v2 = {'Version' => '2012-10-17', "Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} @policy_format = { 'Arn' => String, 'AttachmentCount' => Integer, 'Description' => String, 'DefaultVersionId' => String, 'IsAttachable' => Fog::Boolean, 'Path' => String, 'PolicyId' => String, 'PolicyName' => String, 'CreateDate' => Time, 'UpdateDate' => Time } create_policy_format = { 'RequestId' => String, 'Policy' => @policy_format } list_policies_format = { 'RequestId' => String, 'Policies' => [@policy_format], 'Marker' => String, 'IsTruncated' => Fog::Boolean } versioned_policy_format = { 'CreateDate' => Time, 'Document' => Hash, 'IsDefaultVersion' => Fog::Boolean, 'Description' => String } create_versioned_policy_format = { 'RequestId' => String, 'PolicyVersion' => [versioned_policy_format] } policy_verions_format = { 'CreateDate' => Time, 'IsDefaultVersion' => Fog::Boolean, 'VersionId' => String } list_policy_versions_format = { 'RequestId' => String, 'Versions' => [policy_verions_format], 'Marker' => String, 'IsTruncated' => Fog::Boolean } tests("#create_policy('fog_policy')").formats(create_policy_format) do Fog::AWS[:iam].create_policy('fog_policy', @policy, '/fog/').body['Policy']['Arn'] end tests("#list_policies('fog_policy')").formats(list_policies_format) do body = Fog::AWS[:iam].list_policies('PathPrefix' => '/fog/').body tests('length 1').returns(1) do body['Policies'].length end body end tests("#create_versioned_policy('fog_policy')").formats(create_versioned_policy_format) do Fog::AWS[:iam].create_versioned_policy(@policy_arn, @policy_v2, true).body['PolicyVersion']['Document'] end tests("#list_policy_versions('fog_policy')").formats(list_policy_versions_format) do body = Fog::AWS[:iam].list_policy_versions(@policy_arn).body tests('length 2').returns(2) do body['Versions'].length 
end body end tests("#set_default_policy_version('fog_policy')").formats(AWS::IAM::Formats::BASIC) do body = Fog::AWS[:iam].set_default_policy_version(@policy_arn, 'v1').body tests('length 2').returns(2) do body['Versions'].length end body end tests("#delete_versioned_policy('fog_policy')").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_policy(@policy_arn, 'v2').body['PolicyVersion']['Document'] end tests("#delete_policy('fog_policy')").formats(AWS::IAM::Formats::BASIC) do Fog::AWS[:iam].delete_policy(@policy_arn).body end end tests('failure') do test('failing conditions') end end fog-aws-3.18.0/tests/requests/kinesis/000077500000000000000000000000001437344660100176405ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/kinesis/helper.rb000066400000000000000000000052301437344660100214440ustar00rootroot00000000000000class AWS module Kinesis def wait_for(&block) Fog.wait_for do block.call.tap do print '.' end end end def wait_for_status(stream_name, status) wait_for do Fog::AWS[:kinesis].describe_stream("StreamName" => stream_name).body["StreamDescription"]["StreamStatus"] == status end end def delete_if_exists(stream_name) if Fog::AWS[:kinesis].list_streams.body["StreamNames"].include?(stream_name) wait_for_status(stream_name, "ACTIVE") Fog::AWS[:kinesis].delete_stream("StreamName" => @stream_id) wait_for do begin Fog::AWS[:kinesis].describe_stream("StreamName" => stream_name) false rescue Fog::AWS::Kinesis::ResourceNotFound true end end end end module Formats # optional keys are commented out LIST_STREAMS_FORMAT = { "HasMoreStreams" => Fog::Boolean, "StreamNames" => [ String ] } DESCRIBE_STREAM_FORMAT = { "StreamDescription" => { "HasMoreShards" => Fog::Boolean, "Shards" => [ { #"AdjacentParentShardId" => String, "HashKeyRange" => { "EndingHashKey" => String, "StartingHashKey" => String }, #"ParentShardId" => String, "SequenceNumberRange" => { # "EndingSequenceNumber" => String, "StartingSequenceNumber" => String }, "ShardId" => String } ], "StreamARN" => String, "StreamName" => String, "StreamStatus" => String } } GET_SHARD_ITERATOR_FORMAT = { "ShardIterator" => String } PUT_RECORDS_FORMAT = { "FailedRecordCount" => Integer, "Records" => [ { # "ErrorCode" => String, # "ErrorMessage" => String, "SequenceNumber" => String, "ShardId" => String } ] } PUT_RECORD_FORMAT = { "SequenceNumber" => String, "ShardId" => String } GET_RECORDS_FORMAT = { "MillisBehindLatest" => Integer, "NextShardIterator" => String, "Records" => [ { "Data" => String, "PartitionKey" => String, "SequenceNumber" => String } ] } LIST_TAGS_FOR_STREAM_FORMAT = { "HasMoreTags" => Fog::Boolean, "Tags" => [ { "Key" => String, "Value" => String } ] } end end end fog-aws-3.18.0/tests/requests/kinesis/stream_tests.rb000066400000000000000000000155361437344660100227140ustar00rootroot00000000000000include AWS::Kinesis Shindo.tests('AWS::Kinesis | stream requests', ['aws', 'kinesis']) do Fog::AWS[:kinesis].reset_data if Fog.mocking? 
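# -- Editorial sketch, not part of the original fog-aws test suite. --
# The stream tests below read two records by following "NextShardIterator" by
# hand. The same pattern, generalized to drain a shard using only the calls
# exercised here (describe_stream, get_shard_iterator, get_records); it assumes
# an existing ACTIVE stream named 'fog-test-stream'.
require 'base64'
kinesis = Fog::AWS[:kinesis]
shard_id = kinesis.describe_stream("StreamName" => 'fog-test-stream').body["StreamDescription"]["Shards"].first["ShardId"]
iterator = kinesis.get_shard_iterator(
  "StreamName"        => 'fog-test-stream',
  "ShardId"           => shard_id,
  "ShardIteratorType" => "TRIM_HORIZON"
).body["ShardIterator"]
records = []
while iterator
  page = kinesis.get_records("ShardIterator" => iterator, "Limit" => 25).body
  records.concat(page["Records"].map { |record| Base64.decode64(record["Data"]) })
  break if page["Records"].empty?
  iterator = page["NextShardIterator"]
end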
@stream_id = 'fog-test-stream' delete_if_exists(@stream_id) # ensure we start from a clean slate tests("#create_stream").returns("") do Fog::AWS[:kinesis].create_stream("StreamName" => @stream_id).body.tap do wait_for_status(@stream_id, "ACTIVE") end end tests("#list_streams").formats(Formats::LIST_STREAMS_FORMAT, false) do Fog::AWS[:kinesis].list_streams.body.tap do returns(true) { Fog::AWS[:kinesis].list_streams.body["StreamNames"].include?(@stream_id) } end end tests("#describe_stream") do tests("success").formats(AWS::Kinesis::Formats::DESCRIBE_STREAM_FORMAT) do Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body end tests("ResourceNotFound").raises(Fog::AWS::Kinesis::ResourceNotFound) do Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id + "-foo").body end end tests("#put_records") do records = [ { "Data" => Base64.encode64("foo").chomp!, "PartitionKey" => "1" }, { "Data" => Base64.encode64("bar").chomp!, "PartitionKey" => "1" } ] tests("success").formats(AWS::Kinesis::Formats::PUT_RECORDS_FORMAT, false) do Fog::AWS[:kinesis].put_records("StreamName" => @stream_id, "Records" => records).body end tests("ResourceNotFound").raises(Fog::AWS::Kinesis::ResourceNotFound) do Fog::AWS[:kinesis].put_records("StreamName" => @stream_id + "-foo", "Records" => records).body end end tests("#put_record").formats(AWS::Kinesis::Formats::PUT_RECORD_FORMAT) do Fog::AWS[:kinesis].put_record("StreamName" => @stream_id, "Data" => Base64.encode64("baz").chomp!, "PartitionKey" => "1").body end tests("#get_shard_iterator").formats(AWS::Kinesis::Formats::GET_SHARD_ITERATOR_FORMAT) do first_shard_id = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"].first["ShardId"] Fog::AWS[:kinesis].get_shard_iterator("StreamName" => @stream_id, "ShardId" => first_shard_id, "ShardIteratorType" => "TRIM_HORIZON").body end tests("#get_records").formats(AWS::Kinesis::Formats::GET_RECORDS_FORMAT) do first_shard_id = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"].first["ShardId"] shard_iterator = Fog::AWS[:kinesis].get_shard_iterator("StreamName" => @stream_id, "ShardId" => first_shard_id, "ShardIteratorType" => "TRIM_HORIZON").body["ShardIterator"] Fog::AWS[:kinesis].get_records("ShardIterator" => shard_iterator, "Limit" => 1).body end tests("#get_records").returns(["foo", "bar"]) do first_shard_id = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"].first["ShardId"] shard_iterator = Fog::AWS[:kinesis].get_shard_iterator("StreamName" => @stream_id, "ShardId" => first_shard_id, "ShardIteratorType" => "TRIM_HORIZON").body["ShardIterator"] data = [] 2.times do response = Fog::AWS[:kinesis].get_records("ShardIterator" => shard_iterator, "Limit" => 1).body response["Records"].each do |record| data << Base64.decode64(record["Data"]) end shard_iterator = response["NextShardIterator"] end data end tests("#split_shard").returns("") do shard = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"].first shard_id = shard["ShardId"] ending_hash_key = shard["HashKeyRange"]["EndingHashKey"] new_starting_hash_key = (ending_hash_key.to_i / 2).to_s result = Fog::AWS[:kinesis].split_shard("StreamName" => @stream_id, "ShardToSplit" => shard_id, "NewStartingHashKey" => new_starting_hash_key).body wait_for_status(@stream_id, "ACTIVE") shards = Fog::AWS[:kinesis].describe_stream("StreamName" => 
@stream_id).body["StreamDescription"]["Shards"] parent_shard = shards.detect{ |shard| shard["ShardId"] == shard_id } child_shards = shards.select{ |shard| shard["ParentShardId"] == shard_id }.sort_by{ |shard| shard["ShardId"] } returns(3) { shards.size } returns(2) { child_shards.size } # parent is closed returns(false) { parent_shard["SequenceNumberRange"]["EndingSequenceNumber"].nil? } # ensure new ranges are what we expect (mostly for testing the mock) returns([ { "StartingHashKey" => "0", "EndingHashKey" => (new_starting_hash_key.to_i - 1).to_s }, { "StartingHashKey" => new_starting_hash_key, "EndingHashKey" => ending_hash_key } ]) { child_shards.map{ |shard| shard["HashKeyRange"] } } result end tests("#merge_shards").returns("") do shards = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"] child_shard_ids = shards.reject{ |shard| shard["SequenceNumberRange"].has_key?("EndingSequenceNumber") }.map{ |shard| shard["ShardId"] }.sort result = Fog::AWS[:kinesis].merge_shards("StreamName" => @stream_id, "ShardToMerge" => child_shard_ids[0], "AdjacentShardToMerge" => child_shard_ids[1]).body wait_for_status(@stream_id, "ACTIVE") shards = Fog::AWS[:kinesis].describe_stream("StreamName" => @stream_id).body["StreamDescription"]["Shards"] parent_shards = shards.select{ |shard| child_shard_ids.include?(shard["ShardId"]) } child_shard = shards.detect{ |shard| shard["ParentShardId"] == child_shard_ids[0] && shard["AdjacentParentShardId"] == child_shard_ids[1] } returns(2) { parent_shards.size } returns(false) { child_shard.nil? } returns({ "EndingHashKey" => "340282366920938463463374607431768211455", "StartingHashKey" => "0" }) { child_shard["HashKeyRange"] } result end tests("#add_tags_to_stream").returns("") do Fog::AWS[:kinesis].add_tags_to_stream("StreamName" => @stream_id, "Tags" => {"a" => "1", "b" => "2"}).body end tests("#list_tags_for_stream").formats(AWS::Kinesis::Formats::LIST_TAGS_FOR_STREAM_FORMAT) do Fog::AWS[:kinesis].list_tags_for_stream("StreamName" => @stream_id).body.tap do |body| returns({"a" => "1", "b" => "2"}) { body["Tags"].inject({}){ |m, tag| m.merge(tag["Key"] => tag["Value"]) } } end end tests("#remove_tags_from_stream").returns("") do Fog::AWS[:kinesis].remove_tags_from_stream("StreamName" => @stream_id, "TagKeys" => %w[b]).body.tap do returns({"a" => "1"}) { body = Fog::AWS[:kinesis].list_tags_for_stream("StreamName" => @stream_id).body body["Tags"].inject({}){ |m, tag| m.merge(tag["Key"] => tag["Value"]) } } end end tests("#delete_stream").returns("") do Fog::AWS[:kinesis].delete_stream("StreamName" => @stream_id).body end end fog-aws-3.18.0/tests/requests/kms/000077500000000000000000000000001437344660100167655ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/kms/helper.rb000066400000000000000000000012221437344660100205660ustar00rootroot00000000000000class AWS module KMS module Formats BASIC = { 'ResponseMetadata' => { 'RequestId' => String } } DESCRIBE_KEY = { "KeyMetadata" => { "KeyUsage" => String, "AWSAccountId" => String, "KeyId" => String, "Description" => Fog::Nullable::String, "CreationDate" => Time, "Arn" => String, "Enabled" => Fog::Boolean } } LIST_KEYS = { "Keys" => [{ "KeyId" => String, "KeyArn" => String }], "Truncated" => Fog::Boolean, "Marker" => Fog::Nullable::String } end end end fog-aws-3.18.0/tests/requests/kms/key_tests.rb000066400000000000000000000011631437344660100213250ustar00rootroot00000000000000Shindo.tests('AWS::KMS | key requests', ['aws', 'kms']) do key_id = nil tests('success') do 
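# -- Editorial sketch, not part of the original fog-aws test suite. --
# The LIST_KEYS format above includes "Truncated" and "Marker", so a complete
# key listing can be paged. This assumes list_keys forwards 'Limit' and
# 'Marker' options to the ListKeys API, which these tests do not exercise.
kms = Fog::AWS[:kms]
all_keys = []
marker = nil
loop do
  options = marker ? { 'Limit' => 100, 'Marker' => marker } : { 'Limit' => 100 }
  page = kms.list_keys(options).body
  all_keys.concat(page['Keys'])
  break unless page['Truncated'] && page['Marker']
  marker = page['Marker']
end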
tests("#create_key").data_matches_schema(AWS::KMS::Formats::DESCRIBE_KEY) do result = Fog::AWS[:kms].create_key.body key_id = result["KeyMetadata"]["KeyId"] result end end tests("#describe_key").data_matches_schema(AWS::KMS::Formats::DESCRIBE_KEY) do result = Fog::AWS[:kms].describe_key(key_id).body returns(key_id) { result['KeyMetadata']['KeyId'] } result end tests("#list_keys").data_matches_schema(AWS::KMS::Formats::LIST_KEYS) do Fog::AWS[:kms].list_keys.body end end fog-aws-3.18.0/tests/requests/lambda/000077500000000000000000000000001437344660100174135ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/lambda/function_sample_1.js000066400000000000000000000004751437344660100233650ustar00rootroot00000000000000console.log('Loading function'); exports.handler = function(event, context) { console.log('value1 =', event.key1); console.log('value2 =', event.key2); console.log('value3 =', event.key3); context.succeed(event.key1); // Echo back the first key value // context.fail('Something went wrong'); }; fog-aws-3.18.0/tests/requests/lambda/function_sample_2.js000066400000000000000000000004761437344660100233670ustar00rootroot00000000000000console.log('Loading function'); exports.handler = function(event, context) { console.log('value1 =', event.key1); console.log('value2 =', event.key2); console.log('value3 =', event.key3); context.succeed(event.key2); // Echo back the second key value // context.fail('Something went wrong'); }; fog-aws-3.18.0/tests/requests/lambda/function_tests.rb000066400000000000000000000441241437344660100230140ustar00rootroot00000000000000Shindo.tests('AWS::Lambda | function requests', ['aws', 'lambda']) do _lambda = Fog::AWS[:lambda] account_id = _lambda.account_id region = _lambda.region function1 = IO.read(AWS::Lambda::Samples::FUNCTION_1) function2 = IO.read(AWS::Lambda::Samples::FUNCTION_2) zipped_function1 = Base64::encode64(AWS::Lambda::Formats.zip(function1)) zipped_function2 = Base64::encode64(AWS::Lambda::Formats.zip(function2)) function1_arn = nil function1_name = 'function1' function2_name = 'function2' function1_handler = 'index.handler' function_role = 'arn:aws:iam::647975416665:role/lambda_basic_execution' sns_principal = 'sns.amazonaws.com' sns_topic_sid = Fog::Mock.random_letters_and_numbers(32) sns_allowed_action = 'lambda:invoke' sns_topic_arn = Fog::AWS::Mock.arn('sns', account_id, 'mock_topic', region) kinesis_stream_arn = Fog::AWS::Mock.arn('kinesis', account_id, 'mock_stream', region) event_source_mapping1_id = nil tests('success') do tests('#list_functions').formats(AWS::Lambda::Formats::LIST_FUNCTIONS) do result = _lambda.list_functions.body functions = result['Functions'] returns(true) { functions.empty? } result end tests('#create_function').formats(AWS::Lambda::Formats::CREATE_FUNCTION) do description = 'a copy of my first function' result = _lambda.create_function( 'FunctionName' => function1_name, 'Handler' => function1_handler, 'Role' => function_role, 'Description' => description, 'Code' => { 'ZipFile' => zipped_function1 } ).body returns(true) { result.has_key?('FunctionArn') } returns(true) { result['CodeSize'] > 0 } returns(true) { result['MemorySize'] >= 128 } returns(true) { result['FunctionName'].eql?(function1_name) } returns(true) { result['Handler'].eql?(function1_handler) } function1_arn = result['FunctionArn'] result end tests('#invoke') do payload = { 'value1' => 2, 'value2' => 42 } result = _lambda.invoke( 'FunctionName' => function1_name, 'Payload' => payload ).body returns(false) { result.length.zero? 
} returns(false) { result.match(/function:#{function1_name} was invoked/).nil? } result end tests('#get_function').formats(AWS::Lambda::Formats::GET_FUNCTION) do result = _lambda.get_function('FunctionName' => function1_name).body func_config = result['Configuration'] returns(false) { result['Code']['Location'].match(/^https:\/\/awslambda-/).nil? } returns(true) { func_config.has_key?('FunctionArn') } returns(true) { func_config['CodeSize'] > 0 } returns(true) { func_config['MemorySize'] >= 128 } returns(true) { func_config['FunctionName'].eql?(function1_name) } returns(true) { func_config['Handler'].eql?(function1_handler) } returns(true) { func_config['FunctionArn'].eql?(function1_arn) } result end tests('#get_function_configuration').formats(AWS::Lambda::Formats::GET_FUNCTION_CONFIGURATION) do result = _lambda.get_function_configuration( 'FunctionName' => function1_name).body returns(true) { result.has_key?('FunctionArn') } returns(true) { result['CodeSize'] > 0 } returns(true) { result['MemorySize'] >= 128 } returns(true) { result['FunctionName'].eql?(function1_name) } returns(true) { result['Handler'].eql?(function1_handler) } returns(true) { result['FunctionArn'].eql?(function1_arn) } result end tests('#update_function_configuration').formats(AWS::Lambda::Formats::UPDATE_FUNCTION_CONFIGURATION) do new_memory_size = 256 new_description = "this function does nothing, just let's call it foobar" new_timeout = 10 result = _lambda.update_function_configuration( 'FunctionName' => function1_name, 'MemorySize' => new_memory_size, 'Description' => new_description, 'Timeout' => new_timeout ).body returns(true) { result['CodeSize'] > 0 } returns(true) { result['MemorySize'].eql?(new_memory_size) } returns(true) { result['FunctionArn'].eql?(function1_arn) } returns(true) { result['Description'].eql?(new_description) } returns(true) { result['Timeout'].eql?(new_timeout) } result end tests('#update_function_code').formats(AWS::Lambda::Formats::UPDATE_FUNCTION_CODE) do result = _lambda.update_function_code( 'FunctionName' => function1_name, 'ZipFile' => zipped_function2 ).body returns(true) { result.has_key?('FunctionArn') } returns(true) { result['CodeSize'] > 0 } returns(true) { result['MemorySize'] >= 128 } returns(true) { result['FunctionName'].eql?(function1_name) } returns(true) { result['Handler'].eql?(function1_handler) } result end tests('#add_permission').formats(AWS::Lambda::Formats::ADD_PERMISSION) do params = { 'FunctionName' => function1_name, 'Principal' => sns_principal, 'StatementId' => sns_topic_sid, 'Action' => sns_allowed_action, 'SourceArn' => sns_topic_arn } result = _lambda.add_permission(params).body statement = result['Statement'] returns(true) { statement['Action'].include?(sns_allowed_action) } returns(true) { statement['Principal']['Service'].eql?(sns_principal) } returns(true) { statement['Sid'].eql?(sns_topic_sid) } returns(true) { statement['Resource'].eql?(function1_arn) } returns(true) { statement['Effect'].eql?('Allow') } returns(false) { statement['Condition'].empty? } result end tests('#get_policy').formats(AWS::Lambda::Formats::GET_POLICY) do result = _lambda.get_policy('FunctionName' => function1_name).body policy = result['Policy'] returns(false) { policy['Statement'].empty? 
} statement = policy['Statement'].first returns(true) { statement['Action'].include?(sns_allowed_action) } returns(true) { statement['Principal']['Service'].eql?(sns_principal) } returns(true) { statement['Sid'].eql?(sns_topic_sid) } returns(true) { statement['Resource'].eql?(function1_arn) } returns(true) { statement['Effect'].eql?('Allow') } returns(false) { statement['Condition'].empty? } result end tests('#remove_permission') do params = { 'FunctionName' => function1_name, 'StatementId' => sns_topic_sid } result = _lambda.remove_permission(params).body returns(true) { result.empty? } raises(Fog::AWS::Lambda::Error) do _lambda.get_policy('FunctionName' => function1_name) end result end tests('#create_event_source_mapping').formats(AWS::Lambda::Formats::CREATE_EVENT_SOURCE_MAPPING) do params = { 'FunctionName' => function1_name, 'EventSourceArn' => kinesis_stream_arn, 'Enabled' => true, 'StartingPosition' => 'TRIM_HORIZON' } result = _lambda.create_event_source_mapping(params).body returns(true) { result['BatchSize'] > 0 } returns(true) { result['EventSourceArn'].eql?(kinesis_stream_arn) } returns(true) { result['FunctionArn'].eql?(function1_arn) } returns(true) { result['LastProcessingResult'].eql?('No records processed') } returns(true) { result['State'].eql?('Creating') } returns(true) { result['StateTransitionReason'].eql?('User action') } event_source_mapping1_id = result['UUID'] result end tests('#list_event_source_mappings').formats(AWS::Lambda::Formats::LIST_EVENT_SOURCE_MAPPINGS) do params = { 'FunctionName' => function1_name } result = _lambda.list_event_source_mappings(params).body event_source_mappings = result['EventSourceMappings'] returns(false) { event_source_mappings.empty? } mapping = event_source_mappings.first returns(true) { mapping['UUID'].eql?(event_source_mapping1_id) } result end tests('#get_event_source_mapping').formats(AWS::Lambda::Formats::GET_EVENT_SOURCE_MAPPING) do params = { 'UUID' => event_source_mapping1_id } result = _lambda.get_event_source_mapping(params).body returns(true) { result['BatchSize'] > 0 } returns(true) { result['EventSourceArn'].eql?(kinesis_stream_arn) } returns(true) { result['FunctionArn'].eql?(function1_arn) } returns(true) { result['LastProcessingResult'].eql?('OK') } returns(true) { result['State'].eql?('Enabled') } returns(true) { result['StateTransitionReason'].eql?('User action') } returns(true) { result['UUID'].eql?(event_source_mapping1_id) } result end tests('#update_event_source_mapping').formats(AWS::Lambda::Formats::UPDATE_EVENT_SOURCE_MAPPING) do new_batch_size = 500 enabled_mapping = false params = { 'UUID' => event_source_mapping1_id, 'BatchSize' => new_batch_size, 'Enabled' => enabled_mapping } result = _lambda.update_event_source_mapping(params).body returns(true) { result['BatchSize'].eql?(new_batch_size) } returns(true) { result['EventSourceArn'].eql?(kinesis_stream_arn) } returns(true) { result['FunctionArn'].eql?(function1_arn) } returns(true) { result['LastProcessingResult'].eql?('OK') } returns(true) { result['State'].eql?('Disabling') } returns(true) { result['StateTransitionReason'].eql?('User action') } returns(true) { result['UUID'].eql?(event_source_mapping1_id) } result end tests('#delete_event_source_mapping').formats(AWS::Lambda::Formats::DELETE_EVENT_SOURCE_MAPPING) do params = { 'UUID' => event_source_mapping1_id } result = _lambda.delete_event_source_mapping(params).body returns(true) { result['BatchSize'] > 0 } returns(true) { result['EventSourceArn'].eql?(kinesis_stream_arn) } returns(true) { 
result['FunctionArn'].eql?(function1_arn) } returns(false) { result['LastProcessingResult'].empty? } returns(true) { result['State'].eql?('Deleting') } returns(true) { result['StateTransitionReason'].eql?('User action') } returns(true) { result['UUID'].eql?(event_source_mapping1_id) } result end tests('#list_event_source_mappings again').formats(AWS::Lambda::Formats::LIST_EVENT_SOURCE_MAPPINGS) do params = { 'FunctionName' => function1_name } result = _lambda.list_event_source_mappings(params).body event_source_mappings = result['EventSourceMappings'] returns(true) { event_source_mappings.empty? } result end tests('#delete_function') do result = _lambda.delete_function('FunctionName' => function1_name).body returns(true) { result.empty? } raises(Fog::AWS::Lambda::Error) do _lambda.get_function('FunctionName' => function1_name) end result end tests('#list_functions again').formats(AWS::Lambda::Formats::LIST_FUNCTIONS) do result = _lambda.list_functions.body functions = result['Functions'] returns(true) { functions.empty? } result end tests('#create_function for failures tests').formats(AWS::Lambda::Formats::CREATE_FUNCTION) do description = 'failure tests function' result = _lambda.create_function( 'FunctionName' => function2_name, 'Handler' => function1_handler, 'Role' => function_role, 'Description' => description, 'Code' => { 'ZipFile' => zipped_function1 } ).body returns(true) { result.has_key?('FunctionArn') } returns(true) { result['CodeSize'] > 0 } returns(true) { result['MemorySize'] >= 128 } returns(true) { result['FunctionName'].eql?(function2_name) } returns(true) { result['Handler'].eql?(function1_handler) } result end end tests('failures') do tests("#invoke without function name").raises(Fog::AWS::Lambda::Error) do response = _lambda.invoke.body end tests("#invoke nonexistent function").raises(Fog::AWS::Lambda::Error) do response = Fog::AWS[:lambda].invoke('FunctionName' => 'nonexistent').body end tests("#get_function without function name").raises(Fog::AWS::Lambda::Error) do response = _lambda.get_function.body end tests("#get_function on nonexistent function").raises(Fog::AWS::Lambda::Error) do response = _lambda.get_function('FunctionName' => 'nonexistent').body end tests("#get_function_configuration without function name").raises(Fog::AWS::Lambda::Error) do response = _lambda.get_function_configuration.body end tests("#get_function_configuration on nonexistent function").raises(Fog::AWS::Lambda::Error) do response = _lambda.get_function_configuration('FunctionName' => 'nonexistent').body end tests("update_function_configuration without function name").raises(Fog::AWS::Lambda::Error) do response = _lambda.update_function_configuration.body end tests("#update_function_configuration on nonexistent function").raises(Fog::AWS::Lambda::Error) do response = _lambda.update_function_configuration('FunctionName' => 'nonexistent').body end tests("update_function_code without function name").raises(Fog::AWS::Lambda::Error) do response = _lambda.update_function_code.body end tests("#update_function_code on nonexistent function").raises(Fog::AWS::Lambda::Error) do response = _lambda.update_function_code( 'FunctionName' => 'nonexistent', 'ZipFile' => zipped_function2 ).body end tests("#update_function_code on valid function without source").raises(Fog::AWS::Lambda::Error) do response = _lambda.update_function_code('FunctionName' => 'foobar').body end tests("#delete_function without params").raises(Fog::AWS::Lambda::Error) do response = _lambda.delete_function.body end 
tests("#delete_function on nonexistent function").raises(Fog::AWS::Lambda::Error) do response = _lambda.delete_function('FunctionName' => 'nonexistent').body end tests('#get_policy without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_policy.body end tests('#get_policy on nonexistent function').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_policy('FunctionName' => 'nonexistent').body end tests('#get_policy on function without permissions').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_policy('FunctionName' => function2_name).body end tests('#add_permission without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.add_permission.body end tests('#add_permission on nonexistent function').raises(Fog::AWS::Lambda::Error) do response = _lambda.add_permission('FunctionName' => 'nonexistent').body end tests('#add_permission with missing params').raises(Fog::AWS::Lambda::Error) do response = _lambda.add_permission('FunctionName' => function2_name).body end tests('#remove_permission without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.remove_permission.body end tests('#remove_permission on nonexistent function').raises(Fog::AWS::Lambda::Error) do response = _lambda.remove_permission('FunctionName' => 'nonexistent').body end tests('#remove_permission on function with missing sid param').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_policy('FunctionName' => function2_name).body end tests('#remove_permission on function with missing sid param').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_policy( 'FunctionName' => function2_name, 'StatementId' => 'nonexistent_statement_id' ).body end tests('#create_event_source_mapping without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.create_event_source_mapping.body end tests('#create_event_source_mapping on nonexistent function').raises(Fog::AWS::Lambda::Error) do response = _lambda.create_event_source_mapping('FunctionName' => 'nonexistent').body end tests('#create_event_source_mapping with missing params').raises(Fog::AWS::Lambda::Error) do response = _lambda.create_event_source_mapping('FunctionName' => function2_name).body end tests('#get_event_source_mapping without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.get_event_source_mapping.body end tests('#get_event_source_mapping nonexistent').raises(Fog::AWS::Lambda::Error) do mapping_id = "deadbeef-caca-cafe-cafa-ffffdeadbeef" response = _lambda.get_event_source_mapping('UUID' => mapping_id).body end tests('#update_event_source_mapping without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.update_event_source_mapping.body end tests('#update_event_source_mapping nonexistent').raises(Fog::AWS::Lambda::Error) do mapping_id = "deadbeef-caca-cafe-cafa-ffffdeadbeef" response = _lambda.update_event_source_mapping('UUID' => mapping_id).body end tests('#delete_event_source_mapping without params').raises(Fog::AWS::Lambda::Error) do response = _lambda.delete_event_source_mapping.body end tests('#delete_event_source_mapping nonexistent').raises(Fog::AWS::Lambda::Error) do mapping_id = "deadbeef-caca-cafe-cafa-ffffdeadbeef" response = _lambda.create_event_source_mapping('UUID' => mapping_id).body end end end fog-aws-3.18.0/tests/requests/lambda/helper.rb000066400000000000000000000050371437344660100212240ustar00rootroot00000000000000class AWS class Lambda module Formats require 'zip' GET_FUNCTION_CONFIGURATION = { 'CodeSize' => Integer, 'Description' => 
Fog::Nullable::String, 'FunctionArn' => String, 'FunctionName' => String, 'Handler' => String, 'LastModified' => Time, 'MemorySize' => Integer, 'Role' => String, 'Runtime' => String, 'Timeout' => Integer } LIST_FUNCTIONS = { 'Functions' => [GET_FUNCTION_CONFIGURATION], 'NextMarker' => Fog::Nullable::String } GET_FUNCTION = { 'Code' => { 'Location' => String, 'RepositoryType' => String }, 'Configuration' => GET_FUNCTION_CONFIGURATION } UPDATE_FUNCTION_CONFIGURATION = GET_FUNCTION_CONFIGURATION UPDATE_FUNCTION_CODE = GET_FUNCTION_CONFIGURATION CREATE_FUNCTION = GET_FUNCTION_CONFIGURATION ADD_PERMISSION = { 'Statement' => { 'Condition' => Fog::Nullable::Hash, 'Action' => Array, 'Resource' => String, 'Effect' => String, 'Principal' => Hash, 'Sid' => String } } GET_POLICY = { 'Policy' => { 'Version' => String, 'Id' => String, 'Statement' => [ADD_PERMISSION['Statement']] } } GET_EVENT_SOURCE_MAPPING = { 'BatchSize' => Integer, 'EventSourceArn' => String, 'FunctionArn' => String, 'LastModified' => Float, 'LastProcessingResult' => String, 'State' => String, 'StateTransitionReason' => String, 'UUID' => String } LIST_EVENT_SOURCE_MAPPINGS = { 'EventSourceMappings' => [GET_EVENT_SOURCE_MAPPING], 'NextMarker' => Fog::Nullable::String } CREATE_EVENT_SOURCE_MAPPING = GET_EVENT_SOURCE_MAPPING UPDATE_EVENT_SOURCE_MAPPING = GET_EVENT_SOURCE_MAPPING DELETE_EVENT_SOURCE_MAPPING = GET_EVENT_SOURCE_MAPPING def self.zip(data, filename='index.js') data_io = Zip::OutputStream.write_buffer do |zio| zio.put_next_entry(filename) zio.write(data) end data_io.rewind data_io.sysread end end module Samples FUNCTION_1 = File.dirname(__FILE__) + '/function_sample_1.js' FUNCTION_2 = File.dirname(__FILE__) + '/function_sample_2.js' end end end fog-aws-3.18.0/tests/requests/rds/000077500000000000000000000000001437344660100167635ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/rds/cluster_snapshot_tests.rb000066400000000000000000000044121437344660100241330ustar00rootroot00000000000000Shindo.tests('AWS::RDS | cluster snapshot requests', ['aws', 'rds']) do @cluster_id = uniq_id("fog-test") @snapshot_id = uniq_id("cluster-db-snapshot") @cluster = Fog::AWS[:rds].clusters.create(rds_default_cluster_params.merge(:id => @cluster_id)) @snapshot_count = Fog::AWS[:rds].describe_db_cluster_snapshots.body['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'].count tests("success") do tests("#create_db_cluster_snapshot").formats(AWS::RDS::Formats::CREATE_DB_CLUSTER_SNAPSHOT) do result = Fog::AWS[:rds].create_db_cluster_snapshot(@cluster_id, @snapshot_id).body snapshot = result['CreateDBClusterSnapshotResult']['DBClusterSnapshot'] returns(@snapshot_id) { snapshot["DBClusterSnapshotIdentifier"] } returns(@cluster.engine) { snapshot["Engine"] } returns(@cluster.id) { snapshot["DBClusterIdentifier"] } returns(@cluster.engine_version) { snapshot["EngineVersion"] } returns(@cluster.allocated_storage) { snapshot["AllocatedStorage"].to_i } returns(@cluster.master_username) { snapshot["MasterUsername"] } result end second_snapshot = Fog::AWS[:rds].create_db_cluster_snapshot(@cluster_id, uniq_id("second-snapshot")).body['CreateDBClusterSnapshotResult']['DBClusterSnapshot'] tests("#describe_db_cluster_snapshots").formats(AWS::RDS::Formats::DESCRIBE_DB_CLUSTER_SNAPSHOTS) do result = Fog::AWS[:rds].describe_db_cluster_snapshots.body snapshots = result['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'] returns(@snapshot_count + 2) { snapshots.count } single_result = Fog::AWS[:rds].describe_db_cluster_snapshots(:snapshot_id => 
second_snapshot['DBClusterSnapshotIdentifier']).body['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'] returns([second_snapshot['DBClusterSnapshotIdentifier']]) { single_result.map { |s| s['DBClusterSnapshotIdentifier'] } } result end tests("delete_db_cluster_snapshot").formats(AWS::RDS::Formats::DELETE_DB_CLUSTER_SNAPSHOT) do result = Fog::AWS[:rds].delete_db_cluster_snapshot(@snapshot_id).body raises(Fog::AWS::RDS::NotFound) { Fog::AWS[:rds].describe_db_cluster_snapshots(:snapshot_id => @snapshot_id) } result end end end fog-aws-3.18.0/tests/requests/rds/cluster_tests.rb000066400000000000000000000030131437344660100222100ustar00rootroot00000000000000Shindo.tests('AWS::RDS | cluster requests', ['aws', 'rds']) do suffix = rand(65535).to_s(16) @cluster_id = "fog-test-#{suffix}" @master_id = "fog-master-#{suffix}" @final_snapshot_id = "fog-snapshot-#{suffix}" tests("success") do tests("#create_db_cluster").formats(AWS::RDS::Formats::CREATE_DB_CLUSTER) do result = Fog::AWS[:rds].create_db_cluster(@cluster_id, 'Engine' => 'aurora', 'MasterUsername' => "fog-#{suffix}", 'MasterUserPassword' => "fog-#{suffix}" ).body cluster = result['CreateDBClusterResult']['DBCluster'] returns("1") { cluster['AllocatedStorage'] } returns('aurora') { cluster['Engine'] } returns("fog-#{suffix}") { cluster['MasterUsername'] } result end tests("#describe_db_clusters").formats(AWS::RDS::Formats::DESCRIBE_DB_CLUSTERS) do Fog::AWS[:rds].describe_db_clusters.body end tests("#delete_db_cluster").formats(AWS::RDS::Formats::DELETE_DB_CLUSTER) do body = Fog::AWS[:rds].delete_db_cluster(@cluster_id, @final_snapshot_id).body tests('final snapshot') do returns('creating') { Fog::AWS[:rds].describe_db_cluster_snapshots(:snapshot_id => @final_snapshot_id).body['DescribeDBClusterSnapshotsResult']['DBClusterSnapshots'].first['Status'] } end body end end end fog-aws-3.18.0/tests/requests/rds/db_engine_versions.rb000066400000000000000000000004111437344660100231460ustar00rootroot00000000000000Shindo.tests('AWS::RDS | describe db engine versions', ['aws', 'rds']) do tests('success') do tests("#describe_db_engine_versions").formats(AWS::RDS::Formats::DB_ENGINE_VERSIONS_LIST) do Fog::AWS[:rds].describe_db_engine_versions.body end end end fog-aws-3.18.0/tests/requests/rds/db_snapshot_tests.rb000066400000000000000000000040021437344660100230320ustar00rootroot00000000000000Shindo.tests('Fog::Rds[:aws] | db snapshot requests', ['aws']) do @snapshot_format = { 'AllocatedStorage' => Integer, 'AvailabilityZone' => Fog::Nullable::String, 'Engine' => String, 'EngineVersion' => String, 'InstanceCreateTime' => Time, 'DBInstanceIdentifier' => String, 'DBSnapshotIdentifier' => String, 'Iops' => Fog::Nullable::Integer, 'MasterUsername' => String, 'Port' => Fog::Nullable::Integer, 'Status' => String, 'StorageType' => String, 'SnapshotType' => String, 'SnapshotCreateTime' => Fog::Nullable::Time, } @snapshots_format = { 'requestId' => String } @rds_identity = "test_rds" Fog::AWS[:rds].create_db_instance(@rds_identity,{ "DBInstanceClass"=>"db.m3.xlarge", "Engine"=>"PostgreSQL", "AllocatedStorage"=>100, "MasterUserPassword"=>"password", "MasterUsername"=>"username" }) @rds = Fog::AWS[:rds].servers.get(@rds_identity) tests('success') do @snapshot_id = "testRdsSnapshot" tests("#create_snapshot(#{@rds.identity})").formats(@snapshot_format) do Fog::AWS[:rds].create_db_snapshot(@rds.identity,@snapshot_id).body["CreateDBSnapshotResult"]["DBSnapshot"] end Fog.wait_for { Fog::AWS[:rds].snapshots.get(@snapshot_id) } 
Fog::AWS[:rds].snapshots.get(@snapshot_id).wait_for { ready? } tests("#modify_db_snapshot_attribute").formats(@snapshots_format) do Fog::AWS[:rds].modify_db_snapshot_attribute(@snapshot_id, {"Add.MemberId"=>["389480430104"]}).body end tests("#copy_db_snapshot (#{@snapshot_id}, target_snapshot_id)").formats(@snapshot_format) do Fog::AWS[:rds].copy_db_snapshot(@snapshot_id, "target_snapshot_id").body["CopyDBSnapshotResult"]["DBSnapshot"] end end tests('failure') do tests("#delete_snapshot('snap-00000000')").raises(Fog::AWS::RDS::NotFound) do Fog::AWS[:rds].delete_db_snapshot(@rds.identity) end end @rds.destroy end fog-aws-3.18.0/tests/requests/rds/describe_events.rb000066400000000000000000000005401437344660100224530ustar00rootroot00000000000000Shindo.tests('AWS::RDS | describe DB events requests',['aws', 'rds']) do tests('success') do pending if Fog.mocking? tests( '#describe_events' ).formats(AWS::RDS::Formats::EVENT_LIST) do Fog::AWS[:rds].describe_events().body['Events'] end end tests('failure') do #TODO: What constitutes a failure here? end end fog-aws-3.18.0/tests/requests/rds/event_subscription_tests.rb000066400000000000000000000026211437344660100244600ustar00rootroot00000000000000Shindo.tests('AWS::RDS | event subscription requests', ['aws', 'rds']) do pending unless Fog.mocking? @name = 'fog' @arn = 'arn:aws:sns:us-east-1:12345678910:fog' tests('success') do tests('#create_event_subscription').formats(AWS::RDS::Formats::CREATE_EVENT_SUBSCRIPTION) do body = Fog::AWS[:rds].create_event_subscription('SubscriptionName' => @name, 'SnsTopicArn' => @arn).body returns(@name) { body['CreateEventSubscriptionResult']['EventSubscription']['CustSubscriptionId'] } returns('creating') { body['CreateEventSubscriptionResult']['EventSubscription']['Status'] } body end tests("#describe_event_subscriptions").formats(AWS::RDS::Formats::DESCRIBE_EVENT_SUBSCRIPTIONS) do returns('active') { Fog::AWS[:rds].describe_event_subscriptions.body['DescribeEventSubscriptionsResult']['EventSubscriptionsList'].first['Status'] } Fog::AWS[:rds].describe_event_subscriptions.body end tests("#delete_event_subscription").formats(AWS::RDS::Formats::BASIC) do body = Fog::AWS[:rds].delete_event_subscription(@name).body returns('deleting') { Fog::AWS[:rds].describe_event_subscriptions('SubscriptionName' => @name).body['DescribeEventSubscriptionsResult']['EventSubscriptionsList'].first['Status'] } raises(Fog::AWS::RDS::NotFound) { Fog::AWS[:rds].describe_event_subscriptions('SubscriptionName' => @name) } body end end end fog-aws-3.18.0/tests/requests/rds/helper.rb000066400000000000000000000310571437344660100205750ustar00rootroot00000000000000class AWS module RDS module Formats BASIC = { 'ResponseMetadata' => { 'RequestId' => String } } DB_AVAILABILITY_ZONE_OPTION = { 'Name' => String } DB_PARAMETER_GROUP = { 'DBParameterGroupFamily' => String, 'DBParameterGroupName' => String, 'Description' => String } CREATE_DB_PARAMETER_GROUP = { 'ResponseMetadata' => { 'RequestId' => String }, 'CreateDBParameterGroupResult' => { 'DBParameterGroup' => DB_PARAMETER_GROUP } } DB_SECURITY_GROUP = { 'DBSecurityGroupDescription' => String, 'DBSecurityGroupName' => String, 'EC2SecurityGroups' => [Fog::Nullable::Hash], 'IPRanges' => [Fog::Nullable::Hash], 'OwnerId' => Fog::Nullable::String } CREATE_DB_SECURITY_GROUP = BASIC.merge({ 'CreateDBSecurityGroupResult' => { 'DBSecurityGroup' => DB_SECURITY_GROUP } }) AUTHORIZE_DB_SECURITY_GROUP = BASIC.merge({ 'AuthorizeDBSecurityGroupIngressResult' => { 'DBSecurityGroup' => DB_SECURITY_GROUP } }) 
REVOKE_DB_SECURITY_GROUP = BASIC.merge({ 'RevokeDBSecurityGroupIngressResult' => { 'DBSecurityGroup' => DB_SECURITY_GROUP } }) DESCRIBE_DB_SECURITY_GROUP = BASIC.merge({ 'DescribeDBSecurityGroupsResult' => { 'DBSecurityGroups' => [DB_SECURITY_GROUP] } }) DB_SUBNET_GROUP = { 'DBSubnetGroupName' => String, 'DBSubnetGroupDescription' => String, 'SubnetGroupStatus' => String, 'VpcId' => String, 'Subnets' => [String] } CREATE_DB_SUBNET_GROUP = BASIC.merge({ 'CreateDBSubnetGroupResult' => { 'DBSubnetGroup' => DB_SUBNET_GROUP } }) DESCRIBE_DB_SUBNET_GROUPS = BASIC.merge({ 'DescribeDBSubnetGroupsResult' => { 'DBSubnetGroups' => [DB_SUBNET_GROUP] } }) DESCRIBE_DB_PARAMETER_GROUP = { 'ResponseMetadata' => { 'RequestId' => String }, 'DescribeDBParameterGroupsResult' => { 'DBParameterGroups' => [DB_PARAMETER_GROUP] } } ORDERABLE_DB_INSTANCE_OPTION = { 'MultiAZCapable' => Fog::Boolean, 'Engine' => String, 'LicenseModel' => String, 'ReadReplicaCapable' => Fog::Boolean, 'EngineVersion' => String, 'AvailabilityZones' => [DB_AVAILABILITY_ZONE_OPTION], 'DBInstanceClass' => String, 'SupportsStorageEncryption' => Fog::Boolean, 'SupportsPerformanceInsights' => Fog::Boolean, 'StorageType' => String, 'SupportsIops' => Fog::Boolean, 'SupportsIAMDatabaseAuthentication' => Fog::Boolean, 'SupportsEnhancedMonitoring' => Fog::Boolean, 'Vpc' => Fog::Boolean } DESCRIBE_ORDERABLE_DB_INSTANCE_OPTION = BASIC.merge({ 'DescribeOrderableDBInstanceOptionsResult' => { 'OrderableDBInstanceOptions' => [ORDERABLE_DB_INSTANCE_OPTION] } }) MODIFY_PARAMETER_GROUP = BASIC.merge({ 'ModifyDBParameterGroupResult' => { 'DBParameterGroupName' => String } }) DB_PARAMETER = { 'ParameterValue' => Fog::Nullable::String, 'DataType' => String, 'AllowedValues' => Fog::Nullable::String, 'Source' => String, 'IsModifiable' => Fog::Boolean, 'Description' => String, 'ParameterName' => String, 'ApplyType' => String } DESCRIBE_DB_PARAMETERS = BASIC.merge({ 'DescribeDBParametersResult' => { 'Marker' => Fog::Nullable::String, 'Parameters' => [DB_PARAMETER] } }) DB_LOG_FILE = { 'LastWritten' => Time, 'Size' => Integer, 'LogFileName' => String } DESCRIBE_DB_LOG_FILES = BASIC.merge({ 'DescribeDBLogFilesResult' => { 'Marker' => Fog::Nullable::String, 'DBLogFiles' => [DB_LOG_FILE] } }) SNAPSHOT = { 'AllocatedStorage' => Integer, 'AvailabilityZone' => Fog::Nullable::String, 'DBInstanceIdentifier' => String, 'DBSnapshotIdentifier' => String, 'EngineVersion' => String, 'Engine' => String, 'InstanceCreateTime' => Time, 'Iops' => Fog::Nullable::Integer, 'MasterUsername' => String, 'Port' => Fog::Nullable::Integer, 'SnapshotCreateTime' => Fog::Nullable::Time, 'Status' => String, 'SnapshotType' => String, 'StorageType' => String, } INSTANCE = { 'AllocatedStorage' => Integer, 'AutoMinorVersionUpgrade' => Fog::Boolean, 'AvailabilityZone' => Fog::Nullable::String, 'BackupRetentionPeriod' => Integer, 'CACertificateIdentifier' => String, 'CharacterSetName' => Fog::Nullable::String, 'DBClusterIndentifier' => Fog::Nullable::String, 'DbiResourceId' => Fog::Nullable::String, 'DBInstanceClass' => String, 'DBInstanceIdentifier' => String, 'DBInstanceStatus' => String, 'DBName' => Fog::Nullable::String, 'DBParameterGroups' => [{ 'ParameterApplyStatus' => String, 'DBParameterGroupName' => String }], 'DBSecurityGroups' => [{ 'Status' => String, 'DBSecurityGroupName' => String }], 'DBSubnetGroupName' => Fog::Nullable::String, 'PubliclyAccessible' => Fog::Boolean, 'Endpoint' => { 'Address' => Fog::Nullable::String, 'Port' => Fog::Nullable::Integer }, 'Engine' => String, 
'EngineVersion' => String, 'InstanceCreateTime' => Fog::Nullable::Time, 'Iops' => Fog::Nullable::Integer, 'KmsKeyId' => Fog::Nullable::String, 'LatestRestorableTime' => Fog::Nullable::Time, 'LicenseModel' => String, 'MasterUsername' => String, 'MultiAZ' => Fog::Boolean, 'PendingModifiedValues' => { 'BackupRetentionPeriod' => Fog::Nullable::Integer, 'DBInstanceClass' => Fog::Nullable::String, 'EngineVersion' => Fog::Nullable::String, 'MasterUserPassword' => Fog::Nullable::String, 'MultiAZ' => Fog::Nullable::Boolean, 'AllocatedStorage' => Fog::Nullable::Integer, 'Port' => Fog::Nullable::Integer }, 'PreferredBackupWindow' => String, 'PreferredMaintenanceWindow' => String, 'ReadReplicaDBInstanceIdentifiers' => [Fog::Nullable::String], 'StorageType' => String, 'StorageEncrypted' => Fog::Boolean, 'TdeCredentialArn' => Fog::Nullable::String } REPLICA_INSTANCE = INSTANCE.merge({ 'PreferredBackupWindow' => Fog::Nullable::String, 'ReadReplicaSourceDBInstanceIdentifier' => String }) CREATE_DB_INSTANCE = BASIC.merge({ 'CreateDBInstanceResult' => { 'DBInstance' => INSTANCE } }) DESCRIBE_DB_INSTANCES = BASIC.merge({ 'DescribeDBInstancesResult' => { 'Marker' => Fog::Nullable::String, 'DBInstances' => [INSTANCE] } }) MODIFY_DB_INSTANCE = BASIC.merge({ 'ModifyDBInstanceResult' => { 'DBInstance' => INSTANCE } }) DELETE_DB_INSTANCE = BASIC.merge({ 'DeleteDBInstanceResult' => { 'DBInstance' => INSTANCE } }) REBOOT_DB_INSTANCE = BASIC.merge({ 'RebootDBInstanceResult' => { 'DBInstance' => INSTANCE } }) CREATE_READ_REPLICA = BASIC.merge({ 'CreateDBInstanceReadReplicaResult' => { 'DBInstance' => REPLICA_INSTANCE } }) PROMOTE_READ_REPLICA = BASIC.merge({ 'PromoteReadReplicaResult' => { 'DBInstance' => INSTANCE } }) CREATE_DB_SNAPSHOT = BASIC.merge({ 'CreateDBSnapshotResult' => { 'DBSnapshot' => SNAPSHOT } }) DESCRIBE_DB_SNAPSHOTS = BASIC.merge({ 'DescribeDBSnapshotsResult' => { 'Marker' => Fog::Nullable::String, 'DBSnapshots' => [SNAPSHOT] } }) DELETE_DB_SNAPSHOT = BASIC.merge({ 'DeleteDBSnapshotResult' => { 'DBSnapshot' => SNAPSHOT } }) LIST_TAGS_FOR_RESOURCE = { 'ListTagsForResourceResult' => { 'TagList' => Fog::Nullable::Hash } } EVENT_SUBSCRIPTION = { 'CustSubscriptionId' => String, 'EventCategories' => Array, 'SourceType' => Fog::Nullable::String, 'Enabled' => String, 'Status' => String, 'CreationTime' => Time, 'SnsTopicArn' => String } CREATE_EVENT_SUBSCRIPTION = { 'CreateEventSubscriptionResult' => { 'EventSubscription' => EVENT_SUBSCRIPTION } } DESCRIBE_EVENT_SUBSCRIPTIONS = { 'DescribeEventSubscriptionsResult' => { 'EventSubscriptionsList' => [EVENT_SUBSCRIPTION] } } DB_ENGINE_VERSION = { 'Engine' => String, 'DBParameterGroupFamily' => String, 'DBEngineDescription' => String, 'EngineVersion' => String, 'DBEngineVersionDescription' => String } DB_ENGINE_VERSIONS_LIST = BASIC.merge( 'DescribeDBEngineVersionsResult' => { 'DBEngineVersions' => [DB_ENGINE_VERSION] } ) DB_CLUSTER = { 'AllocatedStorage' => String, 'BackupRetentionPeriod' => String, 'DBClusterIdentifier' => String, 'DBClusterMembers' => [{ "master" => Fog::Nullable::Boolean, "DBInstanceIdentifier" => Fog::Nullable::String, }], 'DBClusterParameterGroup' => String, 'DBSubnetGroup' => String, 'Endpoint' => String, 'Engine' => String, 'EngineVersion' => String, 'MasterUsername' => String, 'Port' => String, 'PreferredBackupWindow' => String, 'PreferredMaintenanceWindow' => String, 'Status' => String, 'VpcSecurityGroups' => [{ "VpcSecurityGroupId" => Fog::Nullable::String, }] } DESCRIBE_DB_CLUSTERS = BASIC.merge({ 'DescribeDBClustersResult' => { 
'Marker' => Fog::Nullable::String, 'DBClusters' => [DB_CLUSTER] } }) CREATE_DB_CLUSTER = BASIC.merge( 'CreateDBClusterResult' => { 'DBCluster' => DB_CLUSTER } ) DELETE_DB_CLUSTER = BASIC.merge( 'DeleteDBClusterResult' => { 'DBCluster' => DB_CLUSTER } ) DB_CLUSTER_SNAPSHOT = { 'AllocatedStorage' => Fog::Nullable::Integer, 'ClusterCreateTime' => Fog::Nullable::Time, 'DBClusterIdentifier' => String, 'DBClusterSnapshotIdentifier' => String, 'Engine' => String, 'LicenseModel' => String, 'MasterUsername' => String, 'PercentProgress' => Fog::Nullable::Integer, 'Port' => Fog::Nullable::Integer, 'SnapshotCreateTime' => Fog::Nullable::Time, 'SnapshotType' => String, 'Status' => String, 'VpcId' => Fog::Nullable::String } CREATE_DB_CLUSTER_SNAPSHOT = BASIC.merge( 'CreateDBClusterSnapshotResult' => { 'DBClusterSnapshot' => DB_CLUSTER_SNAPSHOT } ) DESCRIBE_DB_CLUSTER_SNAPSHOTS = BASIC.merge( 'DescribeDBClusterSnapshotsResult' => { 'Marker' => Fog::Nullable::String, 'DBClusterSnapshots' => [DB_CLUSTER_SNAPSHOT], } ) DELETE_DB_CLUSTER_SNAPSHOT = BASIC.merge( 'DeleteDBClusterSnapshotResult' => { 'DBClusterSnapshot' => DB_CLUSTER_SNAPSHOT, } ) RESTORE_DB_INSTANCE_FROM_DB_SNAPSHOT = BASIC.merge({ 'RestoreDBInstanceFromDBSnapshotResult' => { 'DBInstance' => INSTANCE } }) end end end fog-aws-3.18.0/tests/requests/rds/instance_option_tests.rb000066400000000000000000000022251437344660100237270ustar00rootroot00000000000000Shindo.tests('AWS::RDS | db instance option requests', ['aws', 'rds']) do tests('success') do tests("#describe_orderable_db_instance_options('mysql)").formats(AWS::RDS::Formats::DESCRIBE_ORDERABLE_DB_INSTANCE_OPTION) do body = Fog::AWS[:rds].describe_orderable_db_instance_options('mysql').body returns(2) {body['DescribeOrderableDBInstanceOptionsResult']['OrderableDBInstanceOptions'].length} group = body['DescribeOrderableDBInstanceOptionsResult']['OrderableDBInstanceOptions'].first returns( true ) { group['MultiAZCapable'] } returns( 'mysql' ) { group['Engine'] } returns( true ) { group['ReadReplicaCapable'] } returns( true ) { group['AvailabilityZones'].length >= 1 } returns( true ) { group['StorageType'].length > 2 } returns( false ) { group['SupportsIops'] } returns( true ) { group['SupportsStorageEncryption'] } returns( false ) { group['SupportsPerformanceInsights'] } returns( false ) { group['SupportsIops'] } returns( false ) { group['SupportsIAMDatabaseAuthentication'] } returns( true ) { group['SupportsEnhancedMonitoring'] } body end end end fog-aws-3.18.0/tests/requests/rds/instance_tests.rb000066400000000000000000000144241437344660100223430ustar00rootroot00000000000000Shindo.tests('AWS::RDS | instance requests', ['aws', 'rds']) do # random_differentiator # Useful when rapidly re-running tests, so we don't have to wait # serveral minutes for deleted servers to disappear suffix = rand(65536).to_s(16) @db_instance_id = "fog-test-#{suffix}" @db_replica_id = "fog-replica-#{suffix}" @db_snapshot_id = "fog-snapshot-#{suffix}" @db_final_snapshot_id = "fog-final-snapshot-#{suffix}" @db_instance_restore_id = "fog-test-#{suffix}" tests('success') do tests("#create_db_instance").formats(AWS::RDS::Formats::CREATE_DB_INSTANCE) do default_params = rds_default_server_params # creation of replicas requires a > 0 BackupRetentionPeriod value # InvalidDBInstanceState => Automated backups are not enabled for this database instance. To enable automated backups, use ModifyDBInstance to set the backup retention period to a non-zero value. 
(Fog::AWS::RDS::Error) backup_retention_period = 1 result = Fog::AWS[:rds].create_db_instance(@db_instance_id, 'AllocatedStorage' => default_params.fetch(:allocated_storage), 'DBInstanceClass' => default_params.fetch(:flavor_id), 'Engine' => default_params.fetch(:engine), 'EngineVersion' => default_params.fetch(:version), 'MasterUsername' => default_params.fetch(:master_username), 'BackupRetentionPeriod' => backup_retention_period, 'MasterUserPassword' => default_params.fetch(:password)).body instance = result['CreateDBInstanceResult']['DBInstance'] returns('creating') { instance['DBInstanceStatus'] } result end tests("#describe_db_instances").formats(AWS::RDS::Formats::DESCRIBE_DB_INSTANCES) do Fog::AWS[:rds].describe_db_instances.body end server = Fog::AWS[:rds].servers.get(@db_instance_id) server.wait_for { ready? } new_storage = 6 tests("#modify_db_instance with immediate apply").formats(AWS::RDS::Formats::MODIFY_DB_INSTANCE) do body = Fog::AWS[:rds].modify_db_instance(@db_instance_id, true, 'AllocatedStorage' => new_storage).body tests 'pending storage' do instance = body['ModifyDBInstanceResult']['DBInstance'] returns(new_storage) { instance['PendingModifiedValues']['AllocatedStorage'] } end body end server.wait_for { state == 'modifying' } server.wait_for { state == 'available' } tests 'new storage' do returns(new_storage) { server.allocated_storage } end tests("reboot db instance") do tests("#reboot").formats(AWS::RDS::Formats::REBOOT_DB_INSTANCE) do Fog::AWS[:rds].reboot_db_instance(@db_instance_id).body end end server.wait_for { state == 'rebooting' } server.wait_for { state == 'available' } tests("#create_db_snapshot").formats(AWS::RDS::Formats::CREATE_DB_SNAPSHOT) do body = Fog::AWS[:rds].create_db_snapshot(@db_instance_id, @db_snapshot_id).body returns('creating') { body['CreateDBSnapshotResult']['DBSnapshot']['Status'] } body end tests("#describe_db_snapshots").formats(AWS::RDS::Formats::DESCRIBE_DB_SNAPSHOTS) do Fog::AWS[:rds].describe_db_snapshots.body end server.wait_for { state == 'available' } tests("#create read replica").formats(AWS::RDS::Formats::CREATE_READ_REPLICA) do Fog::AWS[:rds].create_db_instance_read_replica(@db_replica_id, @db_instance_id).body end replica = Fog::AWS[:rds].servers.get(@db_replica_id) replica.wait_for { ready? 
} tests("replica source") do returns(@db_instance_id) { replica.read_replica_source } end server.reload tests("replica identifiers") do returns([@db_replica_id]) { server.read_replica_identifiers } end tests("#promote read replica").formats(AWS::RDS::Formats::PROMOTE_READ_REPLICA) do Fog::AWS[:rds].promote_read_replica(@db_replica_id).body end tests("#delete_db_instance").formats(AWS::RDS::Formats::DELETE_DB_INSTANCE) do #server.wait_for { state == 'available' } Fog::AWS[:rds].delete_db_instance(@db_replica_id, nil, true) body = Fog::AWS[:rds].delete_db_instance(@db_instance_id, @db_final_snapshot_id).body tests "final snapshot" do returns('creating') { Fog::AWS[:rds].describe_db_snapshots(:snapshot_id => @db_final_snapshot_id).body['DescribeDBSnapshotsResult']['DBSnapshots'].first['Status'] } end body end tests("#restore_db_instance_from_db_snapshot").formats(AWS::RDS::Formats::RESTORE_DB_INSTANCE_FROM_DB_SNAPSHOT) do snapshot = Fog::AWS[:rds].snapshots.get(@db_final_snapshot_id) snapshot.wait_for { state == 'available' } result = Fog::AWS[:rds].restore_db_instance_from_db_snapshot(@db_final_snapshot_id, @db_instance_restore_id).body instance = result['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] returns('creating') { instance['DBInstanceStatus'] } result end restore_server = Fog::AWS[:rds].servers.get(@db_instance_restore_id) restore_server.wait_for { state == 'available' } tests("#delete_db_snapshot").formats(AWS::RDS::Formats::DELETE_DB_SNAPSHOT) do Fog::AWS[:rds].snapshots.get(@db_snapshot_id).wait_for { ready? } Fog::AWS[:rds].delete_db_snapshot(@db_snapshot_id).body end tests("snapshot.destroy") do snapshot = Fog::AWS[:rds].snapshots.get(@db_final_snapshot_id) snapshot.wait_for { ready? } snapshot.destroy returns(nil) { Fog::AWS[:rds].snapshots.get(@db_final_snapshot_id) } end end tests('failure') do tests "deleting nonexisting instance" do raises(Fog::AWS::RDS::NotFound) { Fog::AWS[:rds].delete_db_instance('doesnexist', 'irrelevant') } end tests "deleting non existing snapshot" do raises(Fog::AWS::RDS::NotFound) { Fog::AWS[:rds].delete_db_snapshot('doesntexist') } end tests "modifying non existing instance" do raises(Fog::AWS::RDS::NotFound) { Fog::AWS[:rds].modify_db_instance 'doesntexit', true, 'AllocatedStorage' => 10 } end end end fog-aws-3.18.0/tests/requests/rds/log_file_tests.rb000066400000000000000000000011131437344660100223060ustar00rootroot00000000000000Shindo.tests('AWS::RDS | log file requests', %w[aws rds]) do tests('success') do pending if Fog.mocking? 
suffix = rand(65536).to_s(16) @db_instance_id = "fog-test-#{suffix}" tests('#describe_db_log_files').formats(AWS::RDS::Formats::DESCRIBE_DB_LOG_FILES) do result = Fog::AWS[:rds].describe_db_log_files(@db_instance_id).body['DescribeDBLogFilesResult'] returns(true) { result['DBLogFiles'].size > 0 } result end end tests('failures') do raises(Fog::AWS::RDS::NotFound) {Fog::AWS[:rds].describe_db_log_files('doesntexist')} end end fog-aws-3.18.0/tests/requests/rds/parameter_group_tests.rb000066400000000000000000000047001437344660100237270ustar00rootroot00000000000000Shindo.tests('AWS::RDS | parameter group requests', ['aws', 'rds']) do tests('success') do tests("#create_db_parameter_groups").formats(AWS::RDS::Formats::CREATE_DB_PARAMETER_GROUP) do body = Fog::AWS[:rds].create_db_parameter_group('fog-group', 'MySQL5.1', 'Some description').body returns( 'mysql5.1') { body['CreateDBParameterGroupResult']['DBParameterGroup']['DBParameterGroupFamily']} returns( 'fog-group') { body['CreateDBParameterGroupResult']['DBParameterGroup']['DBParameterGroupName']} returns( 'Some description') { body['CreateDBParameterGroupResult']['DBParameterGroup']['Description']} body end Fog::AWS[:rds].create_db_parameter_group('other-fog-group', 'MySQL5.1', 'Some description') tests("#describe_db_parameter_groups").formats(AWS::RDS::Formats::DESCRIBE_DB_PARAMETER_GROUP) do body = Fog::AWS[:rds].describe_db_parameter_groups().body returns(4) {body['DescribeDBParameterGroupsResult']['DBParameterGroups'].length} body end tests("#describe_db_parameter_groups('fog-group')").formats(AWS::RDS::Formats::DESCRIBE_DB_PARAMETER_GROUP) do body = Fog::AWS[:rds].describe_db_parameter_groups('fog-group').body returns(1) {body['DescribeDBParameterGroupsResult']['DBParameterGroups'].length} group = body['DescribeDBParameterGroupsResult']['DBParameterGroups'].first returns( 'mysql5.1') { group['DBParameterGroupFamily']} returns( 'fog-group') { group['DBParameterGroupName']} returns( 'Some description') { group['Description']} body end tests("delete_db_parameter_group").formats(AWS::RDS::Formats::BASIC) do body = Fog::AWS[:rds].delete_db_parameter_group('fog-group').body raises(Fog::AWS::RDS::NotFound) {Fog::AWS[:rds].describe_db_parameter_groups('fog-group')} body end Fog::AWS[:rds].delete_db_parameter_group('other-fog-group') end tests("failures") do raises(Fog::AWS::RDS::NotFound) {Fog::AWS[:rds].describe_db_parameter_groups('doesntexist')} raises(Fog::AWS::RDS::NotFound) {Fog::AWS[:rds].delete_db_parameter_group('doesntexist')} tests "creating second group with same id" do Fog::AWS[:rds].create_db_parameter_group('fog-group', 'MySQL5.1', 'Some description') raises(Fog::AWS::RDS::IdentifierTaken) {Fog::AWS[:rds].create_db_parameter_group('fog-group', 'MySQL5.1', 'Some description')} end Fog::AWS[:rds].delete_db_parameter_group('fog-group') end end fog-aws-3.18.0/tests/requests/rds/parameter_request_tests.rb000066400000000000000000000023461437344660100242670ustar00rootroot00000000000000Shindo.tests('AWS::RDS | parameter requests', ['aws', 'rds']) do tests('success') do Fog::AWS[:rds].create_db_parameter_group('fog-group', 'MySQL5.1', 'Some description') tests('#modify_db_parameter_group').formats(AWS::RDS::Formats::MODIFY_PARAMETER_GROUP) do body = Fog::AWS[:rds].modify_db_parameter_group('fog-group',[ {'ParameterName' => 'query_cache_size', 'ParameterValue' => '12345', 'ApplyMethod' => 'immediate'} ]).body body end tests('#describe_db_parameters').formats(AWS::RDS::Formats::DESCRIBE_DB_PARAMETERS) do 
Fog::AWS[:rds].describe_db_parameters('fog-group', :max_records => 20).body end tests("#describe_db_parameters :source => 'user'")do body = Fog::AWS[:rds].describe_db_parameters('fog-group', :source => 'user').body returns(1){ body['DescribeDBParametersResult']['Parameters'].length} param = body['DescribeDBParametersResult']['Parameters'].first returns('query_cache_size'){param['ParameterName']} returns('12345'){param['ParameterValue']} returns(true){param['IsModifiable']} returns('query_cache_size'){param['ParameterName']} end Fog::AWS[:rds].delete_db_parameter_group('fog-group') end end fog-aws-3.18.0/tests/requests/rds/security_group_tests.rb000066400000000000000000000112631437344660100236200ustar00rootroot00000000000000Shindo.tests('AWS::RDS | security group requests', ['aws', 'rds']) do suffix = rand(65536).to_s(16) @sec_group_name = "fog-sec-group-#{suffix}" if Fog.mocking? @owner_id = '123456780' else @owner_id = Fog::AWS[:rds].security_groups.get('default').owner_id end tests('success') do tests("#create_db_security_group").formats(AWS::RDS::Formats::CREATE_DB_SECURITY_GROUP) do body = Fog::AWS[:rds].create_db_security_group(@sec_group_name, 'Some description').body returns( @sec_group_name) { body['CreateDBSecurityGroupResult']['DBSecurityGroup']['DBSecurityGroupName']} returns( 'Some description') { body['CreateDBSecurityGroupResult']['DBSecurityGroup']['DBSecurityGroupDescription']} returns( []) { body['CreateDBSecurityGroupResult']['DBSecurityGroup']['EC2SecurityGroups']} returns( []) { body['CreateDBSecurityGroupResult']['DBSecurityGroup']['IPRanges']} body end tests("#describe_db_security_groups").formats(AWS::RDS::Formats::DESCRIBE_DB_SECURITY_GROUP) do Fog::AWS[:rds].describe_db_security_groups.body end tests("#authorize_db_security_group_ingress CIDR").formats(AWS::RDS::Formats::AUTHORIZE_DB_SECURITY_GROUP) do @cidr = '0.0.0.0/0' body = Fog::AWS[:rds].authorize_db_security_group_ingress(@sec_group_name,{'CIDRIP'=>@cidr}).body returns("authorizing") { body['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup']['IPRanges'].find{|h| h['CIDRIP'] == @cidr}['Status']} body end sec_group = Fog::AWS[:rds].security_groups.get(@sec_group_name) sec_group.wait_for {ready?} tests("#authorize_db_security_group_ingress another CIDR").formats(AWS::RDS::Formats::AUTHORIZE_DB_SECURITY_GROUP) do @cidr = "10.0.0.0/24" body = Fog::AWS[:rds].authorize_db_security_group_ingress(@sec_group_name,{'CIDRIP'=>@cidr}).body returns("authorizing") { body['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup']['IPRanges'].find{|h| h['CIDRIP'] == @cidr}['Status']} body end sec_group = Fog::AWS[:rds].security_groups.get(@sec_group_name) sec_group.wait_for {ready?} tests("#count CIDRIP").formats(AWS::RDS::Formats::DESCRIBE_DB_SECURITY_GROUP) do body = Fog::AWS[:rds].describe_db_security_groups(@sec_group_name).body returns(2) { body['DescribeDBSecurityGroupsResult']['DBSecurityGroups'][0]['IPRanges'].size } body end tests("#revoke_db_security_group_ingress CIDR").formats(AWS::RDS::Formats::REVOKE_DB_SECURITY_GROUP) do @cidr = '0.0.0.0/0' body = Fog::AWS[:rds].revoke_db_security_group_ingress(@sec_group_name,{'CIDRIP'=> @cidr}).body returns("revoking") { body['RevokeDBSecurityGroupIngressResult']['DBSecurityGroup']['IPRanges'].find{|h| h['CIDRIP'] == @cidr}['Status']} body end tests("#authorize_db_security_group_ingress EC2").formats(AWS::RDS::Formats::AUTHORIZE_DB_SECURITY_GROUP) do @ec2_sec_group = 'default' body = 
Fog::AWS[:rds].authorize_db_security_group_ingress(@sec_group_name,{'EC2SecurityGroupName' => @ec2_sec_group, 'EC2SecurityGroupOwnerId' => @owner_id}).body returns("authorizing") { body['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup']['EC2SecurityGroups'].find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group}['Status']} returns(@owner_id) { body['AuthorizeDBSecurityGroupIngressResult']['DBSecurityGroup']['EC2SecurityGroups'].find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group}['EC2SecurityGroupOwnerId']} body end tests("duplicate #authorize_db_security_group_ingress EC2").raises(Fog::AWS::RDS::AuthorizationAlreadyExists) do @ec2_sec_group = 'default' Fog::AWS[:rds].authorize_db_security_group_ingress(@sec_group_name,{'EC2SecurityGroupName' => @ec2_sec_group, 'EC2SecurityGroupOwnerId' => @owner_id}) end sec_group = Fog::AWS[:rds].security_groups.get(@sec_group_name) sec_group.wait_for {ready?} tests("#revoke_db_security_group_ingress EC2").formats(AWS::RDS::Formats::REVOKE_DB_SECURITY_GROUP) do @ec2_sec_group = 'default' body = Fog::AWS[:rds].revoke_db_security_group_ingress(@sec_group_name,{'EC2SecurityGroupName' => @ec2_sec_group, 'EC2SecurityGroupOwnerId' => @owner_id}).body returns("revoking") { body['RevokeDBSecurityGroupIngressResult']['DBSecurityGroup']['EC2SecurityGroups'].find{|h| h['EC2SecurityGroupName'] == @ec2_sec_group}['Status']} body end #TODO, authorize ec2 security groups tests("#delete_db_security_group").formats(AWS::RDS::Formats::BASIC) do body = Fog::AWS[:rds].delete_db_security_group(@sec_group_name).body raises(Fog::AWS::RDS::NotFound) {Fog::AWS[:rds].describe_db_security_groups(@sec_group_name)} body end end end fog-aws-3.18.0/tests/requests/rds/subnet_groups_tests.rb000066400000000000000000000035671437344660100234440ustar00rootroot00000000000000Shindo.tests('AWS::RDS | subnet group requests', ['aws', 'rds']) do # random_differentiator # Useful when rapidly re-running tests, so we don't have to wait # serveral minutes for deleted VPCs/subnets to disappear suffix = rand(65536).to_s(16) @subnet_group_name = "fog-test-#{suffix}" vpc_range = rand(245) + 10 @vpc = Fog::Compute[:aws].vpcs.create('cidr_block' => "10.#{vpc_range}.0.0/16") # Create 4 subnets in this VPC, each one in a different AZ subnet_az = 'us-east-1a' subnet_range = 8 @subnets = (1..4).map do subnet = Fog::Compute[:aws].create_subnet(@vpc.id, "10.#{vpc_range}.#{subnet_range}.0/24", 'AvailabilityZone' => subnet_az).body['subnet'] subnet_az = subnet_az.succ subnet_range *= 2 subnet end tests('success') do subnet_ids = @subnets.map { |sn| sn['subnetId'] }.to_a tests("#create_db_subnet_group").formats(AWS::RDS::Formats::CREATE_DB_SUBNET_GROUP) do result = Fog::AWS[:rds].create_db_subnet_group(@subnet_group_name, subnet_ids, 'A subnet group').body returns(@subnet_group_name) { result['CreateDBSubnetGroupResult']['DBSubnetGroup']['DBSubnetGroupName'] } returns('A subnet group') { result['CreateDBSubnetGroupResult']['DBSubnetGroup']['DBSubnetGroupDescription'] } returns(@vpc.id) { result['CreateDBSubnetGroupResult']['DBSubnetGroup']['VpcId'] } returns(subnet_ids.sort) { result['CreateDBSubnetGroupResult']['DBSubnetGroup']['Subnets'].sort } result end tests("#describe_db_subnet_groups").formats(AWS::RDS::Formats::DESCRIBE_DB_SUBNET_GROUPS) do Fog::AWS[:rds].describe_db_subnet_groups.body end tests("#delete_db_subnet_group").formats(AWS::RDS::Formats::BASIC) do Fog::AWS[:rds].delete_db_subnet_group(@subnet_group_name).body end end @subnets.each do |sn| Fog::Compute[:aws].delete_subnet(sn['subnetId']) 
end @vpc.destroy end fog-aws-3.18.0/tests/requests/rds/tagging_tests.rb000066400000000000000000000051541437344660100221570ustar00rootroot00000000000000Shindo.tests('AWS::RDS | tagging requests', ['aws', 'rds']) do @rds = Fog::AWS[:rds] @db_instance_id = "fog-test-#{rand(65536).to_s(16)}" Fog::Formatador.display_line "Creating RDS instance #{@db_instance_id}" @rds.create_db_instance(@db_instance_id, 'AllocatedStorage' => 5, 'DBInstanceClass' => 'db.t1.micro', 'Engine' => 'mysql', 'MasterUsername' => 'foguser', 'MasterUserPassword' => 'fogpassword') Fog::Formatador.display_line "Waiting for instance #{@db_instance_id} to be ready" @db = @rds.servers.get(@db_instance_id) @db.wait_for { ready? } tests('success') do single_tag = {'key1' => 'value1'} two_tags = {'key2' => 'value2', 'key3' => 'value3'} tests("#add_tags_to_resource with a single tag"). formats(AWS::RDS::Formats::BASIC) do result = @rds.add_tags_to_resource(@db_instance_id, single_tag).body returns(single_tag) do @rds.list_tags_for_resource(@db_instance_id). body['ListTagsForResourceResult']['TagList'] end result end tests("#add_tags_to_resource with a multiple tags"). formats(AWS::RDS::Formats::BASIC) do result = @rds.add_tags_to_resource(@db_instance_id, two_tags).body returns(single_tag.merge(two_tags)) do @rds.list_tags_for_resource(@db_instance_id). body['ListTagsForResourceResult']['TagList'] end result end tests("#remove_tags_from_resource").formats(AWS::RDS::Formats::BASIC) do result = @rds.remove_tags_from_resource( @db_instance_id, single_tag.keys).body returns(two_tags) do @rds.list_tags_for_resource(@db_instance_id). body['ListTagsForResourceResult']['TagList'] end result end tests("#list_tags_for_resource"). formats(AWS::RDS::Formats::LIST_TAGS_FOR_RESOURCE) do result = @rds.list_tags_for_resource(@db_instance_id).body returns(two_tags) do result['ListTagsForResourceResult']['TagList'] end result end end tests('failure') do tests "tagging a nonexisting instance" do raises(Fog::AWS::RDS::NotFound) do @rds.add_tags_to_resource('doesnexist', {'key1' => 'value1'}) end end tests "listing tags for a nonexisting instance" do raises(Fog::AWS::RDS::NotFound) do @rds.list_tags_for_resource('doesnexist') end end tests "removing tags for a nonexisting instance" do raises(Fog::AWS::RDS::NotFound) do @rds.remove_tags_from_resource('doesnexist', ['key1']) end end end Fog::Formatador.display_line "Destroying DB instance #{@db_instance_id}" @db.destroy end fog-aws-3.18.0/tests/requests/redshift/000077500000000000000000000000001437344660100200035ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/redshift/cluster_parameter_group_tests.rb000066400000000000000000000055241437344660100265150ustar00rootroot00000000000000Shindo.tests('Fog::Redshift[:aws] | cluster parameter group requests', ['aws']) do pending if Fog.mocking? 
suffix = rand(65536).to_s(16) parameter_group = "test-cluster-parameter-group-#{suffix}" @cluster_parameter_format = { 'Parameter' => { "ParameterValue" => String, "DataType" => String, "Source" => String, "IsModifiable" => Fog::Boolean, "Description" => String, "ParameterName" => String } } @cluster_parameters_format = { "Parameters"=> [@cluster_parameter_format] } @cluster_parameter_group_format = { 'ClusterParameterGroup' => { "ParameterGroupFamily" => String, "Description" => String, "ParameterGroupName" => String } } @cluster_parameter_groups_format = { "ParameterGroups"=> [@cluster_parameter_group_format] } @modify_cluster_parameter_group_format = { "ParameterGroupStatus" => String, "ParameterGroupName" => String } tests('success') do tests("create_cluster_parameter_group").formats(@cluster_parameter_group_format) do body = Fog::AWS[:redshift].create_cluster_parameter_group(:parameter_group_name=> parameter_group, :parameter_group_family=>"redshift-1.0", :description=>'testing').body body end tests("describe_cluster_parameter_groups").formats(@cluster_parameter_groups_format) do body = Fog::AWS[:redshift].describe_cluster_parameter_groups.body body end tests("describe_cluster_parameters").formats(@cluster_parameters_format) do body = Fog::AWS[:redshift].describe_cluster_parameters(:parameter_group_name=>parameter_group).body body end tests("modify_cluster_parameter_groups").formats(@modify_cluster_parameter_group_format) do body = Fog::AWS[:redshift].modify_cluster_parameter_group(:parameter_group_name=>parameter_group, :parameters=>{ :parameter_name=>'extra_float_digits', :parameter_value=>2}).body body end tests("delete_cluster_parameter_group") do present = !Fog::AWS[:redshift].describe_cluster_parameter_groups(:parameter_group_name=>parameter_group).body['ParameterGroups'].empty? tests("verify presence before deletion").returns(true) { present } Fog::AWS[:redshift].delete_cluster_parameter_group(:parameter_group_name=>parameter_group) not_present = Fog::AWS[:redshift].describe_cluster_parameter_groups(:parameter_group_name=>parameter_group).body['ParameterGroups'].empty? tests("verify deletion").returns(true) { not_present } end end end fog-aws-3.18.0/tests/requests/redshift/cluster_security_group_tests.rb000066400000000000000000000032001437344660100263710ustar00rootroot00000000000000Shindo.tests('Fog::Redshift[:aws] | cluster security group requests', ['aws']) do pending if Fog.mocking? suffix = rand(65536).to_s(16) identifier = "test-cluster-security-group-#{suffix}" @cluster_security_group_format = { "ClusterSecurityGroup" => { "EC2SecurityGroups" => Fog::Nullable::Array, "IPRanges" => Fog::Nullable::Array, "Description" => String, "ClusterSecurityGroupName" => String } } @describe_cluster_security_groups_format = { "ClusterSecurityGroups" => [@cluster_security_group_format] } tests('success') do tests("create_cluster_security_group").formats(@cluster_security_group_format) do body = Fog::AWS[:redshift].create_cluster_security_group(:cluster_security_group_name => identifier, :description => 'testing').body body end tests("describe_cluster_security_groups").formats(@describe_cluster_security_groups_format) do body = Fog::AWS[:redshift].describe_cluster_security_groups.body body end tests("delete_cluster_security_group") do present = !Fog::AWS[:redshift].describe_cluster_security_groups(:cluster_security_group_name => identifier).body['ClusterSecurityGroups'].empty? 
tests("verify presence before deletion").returns(true) { present } Fog::AWS[:redshift].delete_cluster_security_group(:cluster_security_group_name => identifier) not_present = Fog::AWS[:redshift].describe_cluster_security_groups(:cluster_security_group_name => identifier).body['ClusterSecurityGroups'].empty? tests("verify deletion").returns(true) { not_present } end end end fog-aws-3.18.0/tests/requests/redshift/cluster_snapshot_tests.rb000066400000000000000000000064741437344660100251650ustar00rootroot00000000000000Shindo.tests('Fog::Redshift[:aws] | cluster snapshot requests', ['aws']) do pending if Fog.mocking? suffix = rand(65536).to_s(16) identifier = "test-snapshot-#{suffix}" cluster = "test-cluster-#{suffix}" start_time = Fog::Time.now.to_iso8601_basic @cluster_snapshot_format = { 'Snapshot' => { "AccountsWithRestoreAccess" => Fog::Nullable::Array, "Port" => Integer, "SnapshotIdentifier" => String, "OwnerAccount" => String, "Status" => String, "SnapshotType" => String, "ClusterVersion" => String, "EstimatedSecondsToCompletion" => Integer, "SnapshotCreateTime" => Time, "Encrypted" => Fog::Boolean, "NumberOfNodes" => Integer, "DBName" => String, "CurrentBackupRateInMegaBytesPerSecond" => Float, "ClusterCreateTime" => Time, "AvailabilityZone" => String, "ActualIncrementalBackupSizeInMegaBytes" => Float, "TotalBackupSizeInMegaBytes" => Float, "ElapsedTimeInSeconds" => Integer, "BackupProgressInMegaBytes" => Float, "NodeType" => String, "ClusterIdentifier" => String, "MasterUsername" => String } } @describe_cluster_snapshots_format = { "Snapshots" => [@cluster_snapshot_format] } tests('success') do tests("create_cluster_snapshot").formats(@cluster_snapshot_format) do Fog::AWS[:redshift].create_cluster(:cluster_identifier => cluster, :master_user_password => 'Pass1234', :master_username => 'testuser', :node_type => 'dw.hs1.xlarge', :cluster_type => 'single-node') Fog.wait_for do "available" == Fog::AWS[:redshift].describe_clusters(:cluster_identifier=>cluster).body['ClusterSet'].first['Cluster']['ClusterStatus'] end body = Fog::AWS[:redshift].create_cluster_snapshot(:snapshot_identifier => identifier, :cluster_identifier => cluster).body body end tests('describe_cluster_snaphots').formats(@describe_cluster_snapshots_format) do sleep 30 unless Fog.mocking? body = Fog::AWS[:redshift].describe_cluster_snapshots(:start_time=>start_time).body body end tests('delete_cluster_snapshot').formats(@cluster_snapshot_format) do Fog.wait_for do "available" == Fog::AWS[:redshift].describe_cluster_snapshots(:snapshot_identifier=>identifier).body['Snapshots'].first['Snapshot']['Status'] end sleep 30 unless Fog.mocking? body = Fog::AWS[:redshift].delete_cluster_snapshot(:snapshot_identifier=>identifier).body body end Fog::AWS[:redshift].delete_cluster(:cluster_identifier => cluster, :skip_final_cluster_snapshot => true) end end fog-aws-3.18.0/tests/requests/redshift/cluster_tests.rb000066400000000000000000000062511437344660100232370ustar00rootroot00000000000000Shindo.tests('Fog::Redshift[:aws] | cluster requests', ['aws']) do pending if Fog.mocking? 
identifier = "test-cluster-#{rand(65536).to_s(16)}" @cluster_format = { 'Cluster' => { "ClusterParameterGroups" => [{ "ClusterParameterGroup" => { "ParameterApplyStatus" => String, "ParameterGroupName" => String } }], "ClusterSecurityGroups" => [{ 'ClusterSecurityGroup' => { "Status" => String, "ClusterSecurityGroupName" => String } }], "VpcSecurityGroups" => Fog::Nullable::Array, "EndPoint" => Fog::Nullable::Hash, "PendingModifiedValues" => Fog::Nullable::Hash, "RestoreStatus" => Fog::Nullable::Hash, "ClusterVersion" => String, "ClusterStatus" => String, "Encrypted" => Fog::Boolean, "NumberOfNodes" => Integer, "PubliclyAccessible" => Fog::Boolean, "AutomatedSnapshotRetentionPeriod" => Integer, "DBName" => String, "PreferredMaintenanceWindow" => String, "NodeType" => String, "ClusterIdentifier" => String, "AllowVersionUpgrade" => Fog::Boolean, "MasterUsername" => String } } @describe_clusters_format = { "ClusterSet" => [{ 'Cluster' => @cluster_format['Cluster'].merge({"ClusterCreateTime"=>Time, "AvailabilityZone"=>String, "EndPoint"=>{"Port"=>Integer, "Address"=>String}}) }] } tests('success') do tests('create_cluster').formats(@cluster_format) do body = Fog::AWS[:redshift].create_cluster(:cluster_identifier => identifier, :master_user_password => 'Password1234', :master_username => 'testuser', :node_type => 'dw.hs1.xlarge', :cluster_type => 'single-node').body Fog.wait_for do "available" == Fog::AWS[:redshift].describe_clusters(:cluster_identifier=>identifier).body['ClusterSet'].first['Cluster']['ClusterStatus'] end body end tests('describe_clusters').formats(@describe_clusters_format["ClusterSet"]) do sleep 30 unless Fog.mocking? body = Fog::AWS[:redshift].describe_clusters(:cluster_identifier=>identifier).body["ClusterSet"] body end tests('reboot_cluster') do sleep 30 unless Fog.mocking? body = Fog::AWS[:redshift].reboot_cluster(:cluster_identifier=>identifier).body tests("verify reboot").returns("rebooting") { body['Cluster']['ClusterStatus']} body end tests('delete_cluster') do Fog.wait_for do "available" == Fog::AWS[:redshift].describe_clusters(:cluster_identifier=>identifier).body['ClusterSet'].first['Cluster']['ClusterStatus'] end sleep 30 unless Fog.mocking? body = Fog::AWS[:redshift].delete_cluster(:cluster_identifier=>identifier, :skip_final_cluster_snapshot=>true).body tests("verify delete").returns("deleting") { body['Cluster']['ClusterStatus']} body end end end fog-aws-3.18.0/tests/requests/ses/000077500000000000000000000000001437344660100167655ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/ses/helper.rb000066400000000000000000000002121437344660100205640ustar00rootroot00000000000000class AWS module SES module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } end end end fog-aws-3.18.0/tests/requests/ses/verified_domain_identity_tests.rb000066400000000000000000000005211437344660100255670ustar00rootroot00000000000000Shindo.tests('AWS::SES | verified domain identity requests', ['aws', 'ses']) do tests('success') do tests("#verify_domain_identity('example.com')").formats(AWS::SES::Formats::BASIC) do pending if Fog.mocking? 
Fog::AWS[:ses].verify_domain_identity('example.com').body end end tests('failure') do end end fog-aws-3.18.0/tests/requests/ses/verified_email_address_tests.rb000066400000000000000000000015161437344660100252100ustar00rootroot00000000000000Shindo.tests('AWS::SES | verified email address requests', ['aws', 'ses']) do tests('success') do tests("#verify_email_address('test@example.com')").formats(AWS::SES::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:ses].verify_email_address('test@example.com').body end tests("#list_verified_email_addresses").formats(AWS::SES::Formats::BASIC.merge('VerifiedEmailAddresses' => [String])) do pending if Fog.mocking? Fog::AWS[:ses].list_verified_email_addresses.body end # email won't be there to delete, but succeeds regardless tests("#delete_verified_email_address('test@example.com')").formats(AWS::SES::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:ses].delete_verified_email_address('notaanemail@example.com').body end end tests('failure') do end end fog-aws-3.18.0/tests/requests/simpledb/000077500000000000000000000000001437344660100177725ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/simpledb/attributes_tests.rb000066400000000000000000000104271437344660100237330ustar00rootroot00000000000000Shindo.tests('AWS::SimpleDB | attributes requests', ['aws']) do @domain_name = "fog_domain_#{Time.now.to_f.to_s.gsub('.','')}" Fog::AWS[:simpledb].create_domain(@domain_name) tests('success') do tests("#batch_put_attributes('#{@domain_name}', { 'a' => { 'b' => 'c', 'd' => 'e' }, 'x' => { 'y' => 'z' } }).body").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].batch_put_attributes(@domain_name, { 'a' => { 'b' => 'c', 'd' => 'e' }, 'x' => { 'y' => 'z' } }).body end tests("#get_attributes('#{@domain_name}', 'a', {'ConsistentRead' => true}).body['Attributes']").returns({'b' => ['c'], 'd' => ['e']}) do Fog::AWS[:simpledb].get_attributes(@domain_name, 'a', {'ConsistentRead' => true}).body['Attributes'] end tests("#get_attributes('#{@domain_name}', 'AttributeName' => 'notanattribute')").succeeds do Fog::AWS[:simpledb].get_attributes(@domain_name, 'AttributeName' => 'notanattribute') end tests("#select('select * from #{@domain_name}', {'ConsistentRead' => true}).body['Items']").returns({'a' => { 'b' => ['c'], 'd' => ['e']}, 'x' => { 'y' => ['z'] } }) do pending if Fog.mocking? Fog::AWS[:simpledb].select("select * from #{@domain_name}", {'ConsistentRead' => true}).body['Items'] end tests("#put_attributes('#{@domain_name}', 'conditional', { 'version' => '1' }).body").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].put_attributes(@domain_name, 'conditional', { 'version' => '1' }).body end tests("#put_attributes('#{@domain_name}', 'conditional', { 'version' => '2' }, :expect => { 'version' => '1' }, :replace => ['version']).body").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].put_attributes(@domain_name, 'conditional', { 'version' => '2' }, :expect => { 'version' => '1' }, :replace => ['version']).body end # Verify that we can delete individual attributes. tests("#delete_attributes('#{@domain_name}', 'a', {'d' => []})").succeeds do Fog::AWS[:simpledb].delete_attributes(@domain_name, 'a', {'d' => []}).body end # Verify that individually deleted attributes are actually removed. 
tests("#get_attributes('#{@domain_name}', 'a', {'AttributeName' => ['d'], 'ConsistentRead' => true}).body['Attributes']").returns({}) do Fog::AWS[:simpledb].get_attributes(@domain_name, 'a', {'AttributeName' => ['d'], 'ConsistentRead' => true}).body['Attributes'] end tests("#delete_attributes('#{@domain_name}', 'a').body").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].delete_attributes(@domain_name, 'a').body end # Verify that we can delete entire domain, item combinations. tests("#delete_attributes('#{@domain_name}', 'a').body").succeeds do Fog::AWS[:simpledb].delete_attributes(@domain_name, 'a').body end # Verify that deleting a domain, item combination removes all related attributes. tests("#get_attributes('#{@domain_name}', 'a', {'ConsistentRead' => true}).body['Attributes']").returns({}) do Fog::AWS[:simpledb].get_attributes(@domain_name, 'a', {'ConsistentRead' => true}).body['Attributes'] end end tests('failure') do tests("#batch_put_attributes('notadomain', { 'a' => { 'b' => 'c' }, 'x' => { 'y' => 'z' } })").raises(Excon::Errors::BadRequest) do Fog::AWS[:simpledb].batch_put_attributes('notadomain', { 'a' => { 'b' => 'c' }, 'x' => { 'y' => 'z' } }) end tests("#get_attributes('notadomain', 'a')").raises(Excon::Errors::BadRequest) do Fog::AWS[:simpledb].get_attributes('notadomain', 'a') end tests("#put_attributes('notadomain', 'conditional', { 'version' => '1' })").raises(Excon::Errors::BadRequest) do Fog::AWS[:simpledb].put_attributes('notadomain', 'foo', { 'version' => '1' }) end tests("#put_attributes('#{@domain_name}', 'conditional', { 'version' => '2' }, :expect => { 'version' => '1' }, :replace => ['version'])").raises(Excon::Errors::Conflict) do Fog::AWS[:simpledb].put_attributes(@domain_name, 'conditional', { 'version' => '2' }, :expect => { 'version' => '1' }, :replace => ['version']) end tests("#delete_attributes('notadomain', 'a')").raises(Excon::Errors::BadRequest) do Fog::AWS[:simpledb].delete_attributes('notadomain', 'a') end end Fog::AWS[:simpledb].delete_domain(@domain_name) end fog-aws-3.18.0/tests/requests/simpledb/domain_tests.rb000066400000000000000000000030371437344660100230130ustar00rootroot00000000000000Shindo.tests('AWS::SimpleDB | domain requests', ['aws']) do @domain_metadata_format = AWS::SimpleDB::Formats::BASIC.merge({ 'AttributeNameCount' => Integer, 'AttributeNamesSizeBytes' => Integer, 'AttributeValueCount' => Integer, 'AttributeValuesSizeBytes' => Integer, 'ItemCount' => Integer, 'ItemNamesSizeBytes' => Integer, 'Timestamp' => Time }) @domain_name = "fog_domain_#{Time.now.to_f.to_s.gsub('.','')}" tests('success') do tests("#create_domain(#{@domain_name})").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].create_domain(@domain_name).body end tests("#create_domain(#{@domain_name})").succeeds do Fog::AWS[:simpledb].create_domain(@domain_name) end tests("#domain_metadata(#{@domain_name})").formats(@domain_metadata_format) do Fog::AWS[:simpledb].domain_metadata(@domain_name).body end tests("#list_domains").formats(AWS::SimpleDB::Formats::BASIC.merge('Domains' => [String])) do Fog::AWS[:simpledb].list_domains.body end tests("#delete_domain(#{@domain_name})").formats(AWS::SimpleDB::Formats::BASIC) do Fog::AWS[:simpledb].delete_domain(@domain_name).body end tests("#delete_domain(#{@domain_name})").succeeds do Fog::AWS[:simpledb].delete_domain(@domain_name) end end tests('failure') do tests("#domain_metadata('notadomain')").raises(Excon::Errors::BadRequest) do Fog::AWS[:simpledb].domain_metadata('notadomain') end end end 
fog-aws-3.18.0/tests/requests/simpledb/helper.rb000066400000000000000000000002251437344660100215750ustar00rootroot00000000000000class AWS module SimpleDB module Formats BASIC = { 'BoxUsage' => Float, 'RequestId' => String } end end end fog-aws-3.18.0/tests/requests/sns/000077500000000000000000000000001437344660100167765ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/sns/helper.rb000066400000000000000000000001621437344660100206010ustar00rootroot00000000000000class AWS module SNS module Formats BASIC = { 'RequestId' => String } end end end fog-aws-3.18.0/tests/requests/sns/subscription_tests.rb000066400000000000000000000053131437344660100232730ustar00rootroot00000000000000Shindo.tests('AWS::SES | topic lifecycle tests', ['aws', 'sns']) do unless Fog.mocking? @topic_arn = Fog::AWS[:sns].create_topic('fog_subscription_tests').body['TopicArn'] @queue_url = Fog::AWS[:sqs].create_queue('fog_subscription_tests').body['QueueUrl'] @queue_arn = Fog::AWS[:sqs].get_queue_attributes(@queue_url, 'QueueArn').body['Attributes']['QueueArn'] Fog::AWS[:sqs].set_queue_attributes( @queue_url, 'Policy', Fog::JSON.encode({ 'Id' => @topic_arn, 'Statement' => { 'Action' => 'sqs:SendMessage', 'Condition' => { 'StringEquals' => { 'aws:SourceArn' => @topic_arn } }, 'Effect' => 'Allow', 'Principal' => { 'AWS' => '*' }, 'Resource' => @queue_arn, 'Sid' => "#{@topic_arn}+sqs:SendMessage" }, 'Version' => '2008-10-17' }) ) end tests('success') do tests("#subscribe('#{@topic_arn}', '#{@queue_arn}', 'sqs')").formats(AWS::SNS::Formats::BASIC.merge('SubscriptionArn' => String)) do pending if Fog.mocking? body = Fog::AWS[:sns].subscribe(@topic_arn, @queue_arn, 'sqs').body @subscription_arn = body['SubscriptionArn'] body end list_subscriptions_format = AWS::SNS::Formats::BASIC.merge({ 'Subscriptions' => [{ 'Endpoint' => String, 'Owner' => String, 'Protocol' => String, 'SubscriptionArn' => String, 'TopicArn' => String }] }) tests("#list_subscriptions").formats(list_subscriptions_format) do pending if Fog.mocking? Fog::AWS[:sns].list_subscriptions.body end tests("#list_subscriptions_by_topic('#{@topic_arn}')").formats(list_subscriptions_format) do pending if Fog.mocking? body = Fog::AWS[:sns].list_subscriptions_by_topic(@topic_arn).body end tests("#publish('#{@topic_arn}', 'message')").formats(AWS::SNS::Formats::BASIC.merge('MessageId' => String)) do pending if Fog.mocking? body = Fog::AWS[:sns].publish(@topic_arn, 'message').body end tests("#receive_message('#{@queue_url}')...").returns('message') do pending if Fog.mocking? message = nil Fog.wait_for do message = Fog::AWS[:sqs].receive_message(@queue_url).body['Message'].first end Fog::JSON.decode(message['Body'])['Message'] end tests("#unsubscribe('#{@subscription_arn}')").formats(AWS::SNS::Formats::BASIC) do pending if Fog.mocking? Fog::AWS[:sns].unsubscribe(@subscription_arn).body end end tests('failure') do end unless Fog.mocking? 
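# Clean up the SNS topic and SQS queue that were created for these subscription tests.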
Fog::AWS[:sns].delete_topic(@topic_arn) Fog::AWS[:sqs].delete_queue(@queue_url) end end fog-aws-3.18.0/tests/requests/sns/topic_tests.rb000066400000000000000000000035611437344660100216700ustar00rootroot00000000000000Shindo.tests('AWS::SNS | topic lifecycle tests', ['aws', 'sns']) do tests('success') do tests("#create_topic('fog_topic_tests')").formats(AWS::SNS::Formats::BASIC.merge('TopicArn' => String)) do body = Fog::AWS[:sns].create_topic('fog_topic_tests').body @topic_arn = body["TopicArn"] body end tests("#list_topics").formats(AWS::SNS::Formats::BASIC.merge('Topics' => [String])) do Fog::AWS[:sns].list_topics.body end tests("#set_topic_attributes('#{@topic_arn}', 'DisplayName', 'other-fog_topic_tests')").formats(AWS::SNS::Formats::BASIC) do Fog::AWS[:sns].set_topic_attributes(@topic_arn, 'DisplayName', 'other-fog_topic_tests').body end get_topic_attributes_format = AWS::SNS::Formats::BASIC.merge({ 'Attributes' => { 'DisplayName' => String, 'Owner' => String, 'Policy' => String, 'SubscriptionsConfirmed' => Integer, 'SubscriptionsDeleted' => Integer, 'SubscriptionsPending' => Integer, 'TopicArn' => String } }) tests("#get_topic_attributes('#{@topic_arn})").formats(get_topic_attributes_format) do Fog::AWS[:sns].get_topic_attributes(@topic_arn).body end tests("#add_permission('#{@topic_arn}')").formats(AWS::SNS::Formats::BASIC) do Fog::AWS[:sns].add_permission('TopicArn' => @topic_arn, 'Label' => 'Test', 'ActionName.member.1' => 'Subscribe', 'AWSAccountId.member.1' => '1234567890').body end tests("#remove_permission('#{@topic_arn}')").formats(AWS::SNS::Formats::BASIC) do Fog::AWS[:sns].remove_permission('TopicArn' => @topic_arn, 'Label' => 'Test').body end tests("#delete_topic('#{@topic_arn}')").formats(AWS::SNS::Formats::BASIC) do Fog::AWS[:sns].delete_topic(@topic_arn).body end end tests('failure') do end end fog-aws-3.18.0/tests/requests/sqs/000077500000000000000000000000001437344660100170015ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/sqs/helper.rb000066400000000000000000000002121437344660100206000ustar00rootroot00000000000000class AWS module SQS module Formats BASIC = { 'ResponseMetadata' => {'RequestId' => String} } end end end fog-aws-3.18.0/tests/requests/sqs/message_tests.rb000066400000000000000000000031761437344660100222030ustar00rootroot00000000000000Shindo.tests('AWS::SQS | message requests', ['aws']) do tests('success') do @queue_url = Fog::AWS[:sqs].create_queue('fog_message_tests').body['QueueUrl'] send_message_format = AWS::SQS::Formats::BASIC.merge({ 'MessageId' => String, 'MD5OfMessageBody' => String }) tests("#send_message('#{@queue_url}', 'message')").formats(send_message_format) do Fog::AWS[:sqs].send_message(@queue_url, 'message').body end receive_message_format = AWS::SQS::Formats::BASIC.merge({ 'Message' => [{ 'Attributes' => { 'ApproximateFirstReceiveTimestamp' => Time, 'ApproximateReceiveCount' => Integer, 'SenderId' => String, 'SentTimestamp' => Time }, 'Body' => String, 'MD5OfBody' => String, 'MessageId' => String, 'ReceiptHandle' => String }] }) tests("#receive_message").formats(receive_message_format) do data = Fog::AWS[:sqs].receive_message(@queue_url).body @receipt_handle = data['Message'].first['ReceiptHandle'] data end tests("#change_message_visibility('#{@queue_url}, '#{@receipt_handle}', 60)").formats(AWS::SQS::Formats::BASIC) do Fog::AWS[:sqs].change_message_visibility(@queue_url, @receipt_handle, 60).body end tests("#delete_message('#{@queue_url}', '#{@receipt_handle}')").formats(AWS::SQS::Formats::BASIC) do 
Fog::AWS[:sqs].delete_message(@queue_url, @receipt_handle).body end unless Fog.mocking? Fog::AWS[:sqs].delete_queue(@queue_url) end end end fog-aws-3.18.0/tests/requests/sqs/queue_tests.rb000066400000000000000000000032231437344660100216740ustar00rootroot00000000000000Shindo.tests('AWS::SQS | queue requests', ['aws']) do tests('success') do create_queue_format = AWS::SQS::Formats::BASIC.merge({ 'QueueUrl' => String }) tests("#create_queue('fog_queue_tests')").formats(create_queue_format) do data = Fog::AWS[:sqs].create_queue('fog_queue_tests').body @queue_url = data['QueueUrl'] data end list_queues_format = AWS::SQS::Formats::BASIC.merge({ 'QueueUrls' => [String] }) tests("#list_queues").formats(list_queues_format) do Fog::AWS[:sqs].list_queues.body end tests("#set_queue_attributes('#{@queue_url}', 'VisibilityTimeout', 60)").formats(AWS::SQS::Formats::BASIC) do Fog::AWS[:sqs].set_queue_attributes(@queue_url, 'VisibilityTimeout', 60).body end get_queue_attributes_format = AWS::SQS::Formats::BASIC.merge({ 'Attributes' => { 'ApproximateNumberOfMessages' => Integer, 'ApproximateNumberOfMessagesNotVisible' => Integer, 'CreatedTimestamp' => Time, 'MaximumMessageSize' => Integer, 'LastModifiedTimestamp' => Time, 'MessageRetentionPeriod' => Integer, 'QueueArn' => String, 'VisibilityTimeout' => Integer } }) tests("#get_queue_attributes('#{@queue_url}', 'All')").formats(get_queue_attributes_format) do Fog::AWS[:sqs].get_queue_attributes(@queue_url, 'All').body end tests("#delete_queue('#{@queue_url}')").formats(AWS::SQS::Formats::BASIC) do Fog::AWS[:sqs].delete_queue(@queue_url).body end end end fog-aws-3.18.0/tests/requests/storage/000077500000000000000000000000001437344660100176375ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/storage/acl_utils_tests.rb000066400000000000000000000224621437344660100233730ustar00rootroot00000000000000require 'fog/aws/requests/storage/acl_utils' Shindo.tests('Fog::AWS::Storage | ACL utils', ["aws"]) do tests(".hash_to_acl") do tests(".hash_to_acl({}) at xpath //AccessControlPolicy").returns("", "has an empty AccessControlPolicy") do xml = Fog::AWS::Storage.hash_to_acl({}) Nokogiri::XML(xml).xpath("//AccessControlPolicy").first.content.chomp end tests(".hash_to_acl({}) at xpath //AccessControlPolicy/Owner").returns(nil, "does not have an Owner element") do xml = Fog::AWS::Storage.hash_to_acl({}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner").first end tests(".hash_to_acl('Owner' => {}) at xpath //AccessControlPolicy/Owner").returns(nil, "does not have an Owner element") do xml = Fog::AWS::Storage.hash_to_acl('Owner' => {}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner").first end tests(".hash_to_acl('Owner' => {'ID' => 'abcdef0123456789'}) at xpath //AccessControlPolicy/Owner/ID").returns("abcdef0123456789", "returns the Owner ID") do xml = Fog::AWS::Storage.hash_to_acl('Owner' => {'ID' => 'abcdef0123456789'}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner/ID").first.content end tests(".hash_to_acl('Owner' => {'DisplayName' => 'bob'}) at xpath //AccessControlPolicy/Owner/ID").returns(nil, "does not have an Owner ID element") do xml = Fog::AWS::Storage.hash_to_acl('Owner' => {'DisplayName' => 'bob'}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner/ID").first end tests(".hash_to_acl('Owner' => {'DisplayName' => 'bob'}) at xpath //AccessControlPolicy/Owner/DisplayName").returns("bob", "returns the Owner DisplayName") do xml = Fog::AWS::Storage.hash_to_acl('Owner' => {'DisplayName' => 'bob'}) 
Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner/DisplayName").first.content end tests(".hash_to_acl('Owner' => {'ID' => 'abcdef0123456789'}) at xpath //AccessControlPolicy/Owner/DisplayName").returns(nil, "does not have an Owner DisplayName element") do xml = Fog::AWS::Storage.hash_to_acl('Owner' => {'ID' => 'abcdef0123456789'}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/Owner/DisplayName").first end tests(".hash_to_acl({}) at xpath //AccessControlPolicy/AccessControlList").returns(nil, "has no AccessControlList") do xml = Fog::AWS::Storage.hash_to_acl({}) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlPolicy").first end acl = { 'AccessControlList' => [ { 'Grantee' => { 'ID' => 'abcdef0123456789', 'DisplayName' => 'bob' }, 'Permission' => 'READ' } ] } tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee").returns("CanonicalUser", "has an xsi:type of CanonicalUser") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee").first.attributes["type"].value end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/ID").returns("abcdef0123456789", "returns the Grantee ID") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/ID").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/DisplayName").returns("bob", "returns the Grantee DisplayName") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/DisplayName").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Permission").returns("READ", "returns the Grantee Permission") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Permission").first.content end acl = { 'AccessControlList' => [ { 'Grantee' => { 'EmailAddress' => 'user@example.com' }, 'Permission' => 'FULL_CONTROL' } ] } tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee").returns("AmazonCustomerByEmail", "has an xsi:type of AmazonCustomerByEmail") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee").first.attributes["type"].value end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/EmailAddress").returns("user@example.com", "returns the Grantee EmailAddress") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/EmailAddress").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Permission").returns("FULL_CONTROL", "returns the Grantee Permission") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Permission").first.content end acl = { 'AccessControlList' => [ { 'Grantee' => { 'URI' => 'http://acs.amazonaws.com/groups/global/AllUsers' }, 'Permission' => 'WRITE' } ] } tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee").returns("Group", "has an xsi:type of Group") do xml = Fog::AWS::Storage.hash_to_acl(acl) 
Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee").first.attributes["type"].value end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/URI").returns("http://acs.amazonaws.com/groups/global/AllUsers", "returns the Grantee URI") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/URI").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Permission").returns("WRITE", "returns the Grantee Permission") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Permission").first.content end acl = { 'AccessControlList' => [ { 'Grantee' => { 'ID' => 'abcdef0123456789', 'DisplayName' => 'bob' }, 'Permission' => 'READ' }, { 'Grantee' => { 'EmailAddress' => 'user@example.com' }, 'Permission' => 'FULL_CONTROL' }, { 'Grantee' => { 'URI' => 'http://acs.amazonaws.com/groups/global/AllUsers' }, 'Permission' => 'WRITE' } ] } tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant").returns(3, "has three elements") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant").size end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/ID").returns("abcdef0123456789", "returns the first Grant's Grantee ID") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/ID").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/EmailAddress").returns("user@example.com", "returns the second Grant's Grantee EmailAddress") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/EmailAddress").first.content end tests(".hash_to_acl(#{acl.inspect}) at xpath //AccessControlPolicy/AccessControlList/Grant/Grantee/URI").returns("http://acs.amazonaws.com/groups/global/AllUsers", "returns the third Grant's Grantee URI") do xml = Fog::AWS::Storage.hash_to_acl(acl) Nokogiri::XML(xml).xpath("//AccessControlPolicy/AccessControlList/Grant/Grantee/URI").first.content end end tests(".acl_to_hash") do acl_xml = <<-XML 2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0 me 2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0 me FULL_CONTROL XML tests(".acl_to_hash(#{acl_xml.inspect})").returns({ "Owner" => { "DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0" }, "AccessControlList" => [{ "Grantee" => { "DisplayName" => "me", "ID" => "2744ccd10c7533bd736ad890f9dd5cab2adb27b07d500b9493f29cdc420cb2e0" }, "Permission" => "FULL_CONTROL" }] }, 'returns hash of ACL XML') do Fog::AWS::Storage.acl_to_hash(acl_xml) end end end fog-aws-3.18.0/tests/requests/storage/bucket_tests.rb000066400000000000000000000374631437344660100227000ustar00rootroot00000000000000Shindo.tests('Fog::Storage[:aws] | bucket requests', ["aws"]) do @aws_bucket_name = 'fogbuckettests-' + Time.now.to_i.to_s(32) tests('success') do @bucket_format = { 'CommonPrefixes' => [], 'IsTruncated' => Fog::Boolean, 'Marker' => NilClass, 'MaxKeys' => Integer, 'Name' => String, 'Prefix' => NilClass, 'Contents' => [{ 'ETag' => String, 'Key' => String, 'LastModified' => Time, 'Owner' => { 'DisplayName' => String, 'ID' => String 
}, 'Size' => Integer, 'StorageClass' => String }] } @bucket_lifecycle_format = { 'Rules' => [{ 'ID' => String, 'Prefix' => Fog::Nullable::String, 'Enabled' => Fog::Boolean, 'Expiration' => Fog::Nullable::Hash, 'Transition' => Fog::Nullable::Hash }] } @service_format = { 'Buckets' => [{ 'CreationDate' => Time, 'Name' => String, }], 'Owner' => { 'DisplayName' => String, 'ID' => String } } tests("#put_bucket('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].put_bucket(@aws_bucket_name) @aws_owner = Fog::Storage[:aws].get_bucket_acl(Fog::Storage[:aws].directories.first.key).body['Owner'] end tests('put existing bucket - default region') do Fog::Storage[:aws].put_bucket(@aws_bucket_name) tests("#put_bucket('#{@aws_bucket_name}') existing").succeeds do Fog::Storage[:aws].put_bucket(@aws_bucket_name) end end tests('put existing bucket - default region - preserves files') do Fog::Storage[:aws].put_bucket(@aws_bucket_name) test_key = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'test', :key => 'test/key') Fog::Storage[:aws].put_bucket(@aws_bucket_name) tests(".body['Contents'].first['Key']").returns('test/key') do Fog::Storage[:aws].get_bucket(@aws_bucket_name).body['Contents'].first['Key'] end test_key.destroy end tests("#get_service").formats(@service_format) do Fog::Storage[:aws].get_service.body end file = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'y', :key => 'x') tests("#get_bucket('#{@aws_bucket_name}')").formats(@bucket_format) do Fog::Storage[:aws].get_bucket(@aws_bucket_name).body end tests("#head_bucket('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].head_bucket(@aws_bucket_name) end file.destroy file1 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'a', :key => 'a/a1/file1') file2 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'ab', :key => 'a/file2') file3 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'abc', :key => 'b/file3') file4 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'abcd', :key => 'file4') tests("#get_bucket('#{@aws_bucket_name}')") do before do @bucket = Fog::Storage[:aws].get_bucket(@aws_bucket_name) end tests(".body['Contents'].map{|n| n['Key']}").returns(["a/a1/file1", "a/file2", "b/file3", "file4"]) do @bucket.body['Contents'].map{|n| n['Key']} end tests(".body['Contents'].map{|n| n['Size']}").returns([1, 2, 3, 4]) do @bucket.body['Contents'].map{|n| n['Size']} end tests(".body['CommonPrefixes']").returns([]) do @bucket.body['CommonPrefixes'] end end tests("#get_bucket('#{@aws_bucket_name}', 'delimiter' => '/')") do before do @bucket = Fog::Storage[:aws].get_bucket(@aws_bucket_name, 'delimiter' => '/') end tests(".body['Contents'].map{|n| n['Key']}").returns(['file4']) do @bucket.body['Contents'].map{|n| n['Key']} end tests(".body['CommonPrefixes']").returns(['a/', 'b/']) do @bucket.body['CommonPrefixes'] end end tests("#get_bucket('#{@aws_bucket_name}', 'delimiter' => '/', 'prefix' => 'a/')") do before do @bucket = Fog::Storage[:aws].get_bucket(@aws_bucket_name, 'delimiter' => '/', 'prefix' => 'a/') end tests(".body['Contents'].map{|n| n['Key']}").returns(['a/file2']) do @bucket.body['Contents'].map{|n| n['Key']} end tests(".body['CommonPrefixes']").returns(['a/a1/']) do @bucket.body['CommonPrefixes'] end end file1.destroy; file2.destroy; file3.destroy; file4.destroy tests("#get_bucket_location('#{@aws_bucket_name}')").formats('LocationConstraint' => NilClass) do 
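# A bucket created without an explicit region constraint reports a nil LocationConstraint
# (the us-east-1 default), which is why the format above expects NilClass.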
Fog::Storage[:aws].get_bucket_location(@aws_bucket_name).body end tests("#get_request_payment('#{@aws_bucket_name}')").formats('Payer' => String) do Fog::Storage[:aws].get_request_payment(@aws_bucket_name).body end tests("#put_request_payment('#{@aws_bucket_name}', 'Requester')").succeeds do Fog::Storage[:aws].put_request_payment(@aws_bucket_name, 'Requester') end # This should show a warning, but work (second parameter is options hash for now) tests("#put_bucket_website('#{@aws_bucket_name}', 'index.html')").succeeds do Fog::Storage[:aws].put_bucket_website(@aws_bucket_name, 'index.html') end tests("#put_bucket_website('#{@aws_bucket_name}', :IndexDocument => 'index.html')").succeeds do Fog::Storage[:aws].put_bucket_website(@aws_bucket_name, :IndexDocument => 'index.html') end tests("#put_bucket_website('#{@aws_bucket_name}', :RedirectAllRequestsTo => 'redirect.example.com')").succeeds do Fog::Storage[:aws].put_bucket_website(@aws_bucket_name, :RedirectAllRequestsTo => 'redirect.example.com') end tests("#put_bucket_acl('#{@aws_bucket_name}', 'private')").succeeds do Fog::Storage[:aws].put_bucket_acl(@aws_bucket_name, 'private') end acl = { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => @aws_owner, 'Permission' => "FULL_CONTROL" } ] } tests("#put_bucket_acl('#{@aws_bucket_name}', hash with id)").returns(acl) do Fog::Storage[:aws].put_bucket_acl(@aws_bucket_name, acl) Fog::Storage[:aws].get_bucket_acl(@aws_bucket_name).body end tests("#put_bucket_acl('#{@aws_bucket_name}', hash with email)").returns({ 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'ID' => 'f62f0218873cfa5d56ae9429ae75a592fec4fd22a5f24a20b1038a7db9a8f150', 'DisplayName' => 'mtd' }, 'Permission' => "FULL_CONTROL" } ] }) do pending if Fog.mocking? Fog::Storage[:aws].put_bucket_acl(@aws_bucket_name, { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'EmailAddress' => 'mtd@amazon.com' }, 'Permission' => "FULL_CONTROL" } ] }) Fog::Storage[:aws].get_bucket_acl(@aws_bucket_name).body end acl = { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'URI' => 'http://acs.amazonaws.com/groups/global/AllUsers' }, 'Permission' => "FULL_CONTROL" } ] } tests("#put_bucket_acl('#{@aws_bucket_name}', hash with uri)").returns(acl) do Fog::Storage[:aws].put_bucket_acl(@aws_bucket_name, acl) Fog::Storage[:aws].get_bucket_acl(@aws_bucket_name).body end tests("#delete_bucket_website('#{@aws_bucket_name}')").succeeds do pending if Fog.mocking? Fog::Storage[:aws].delete_bucket_website(@aws_bucket_name) end tests('bucket lifecycle') do pending if Fog.mocking? 
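# Lifecycle configurations are passed as plain hashes, e.g. {'ID' => 'rule', 'Prefix' => '/p', 'Enabled' => true, 'Days' => 30};
# S3 echoes the expiry back wrapped as 'Expiration' => {'Days' => 30}, which the update test further down relies on.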
lifecycle = {'Rules' => [{'ID' => 'test rule', 'Prefix' => '/prefix', 'Enabled' => true, 'Days' => 42}]} tests('non-existant bucket') do tests('#put_bucket_lifecycle').returns([404, 'NoSuchBucket']) do begin Fog::Storage[:aws].put_bucket_lifecycle('fognonbucket', lifecycle) rescue Excon::Errors::NotFound => e [e.response.status, e.response.body.match(%r{(.*)})[1]] end end tests('#get_bucket_lifecycle').returns([404, 'NoSuchBucket']) do begin Fog::Storage[:aws].get_bucket_lifecycle('fognonbucket') rescue Excon::Errors::NotFound => e [e.response.status, e.response.body.match(%r{(.*)})[1]] end end tests('#delete_bucket_lifecycle').returns([404, 'NoSuchBucket']) do begin Fog::Storage[:aws].delete_bucket_lifecycle('fognonbucket') rescue Excon::Errors::NotFound => e [e.response.status, e.response.body.match(%r{(.*)})[1]] end end end tests('no lifecycle') do tests('#get_bucket_lifecycle').returns([404, 'NoSuchLifecycleConfiguration']) do begin Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name) rescue Excon::Errors::NotFound => e [e.response.status, e.response.body.match(%r{(.*)})[1]] end end tests('#delete_bucket_lifecycle').succeeds do Fog::Storage[:aws].delete_bucket_lifecycle(@aws_bucket_name) end end tests('create').succeeds do Fog::Storage[:aws].put_bucket_lifecycle(@aws_bucket_name, lifecycle) end tests('read').formats(@bucket_lifecycle_format) do Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name).body end lifecycle = { 'Rules' => 5.upto(6).map { |i| {'ID' => "rule\##{i}", 'Prefix' => i.to_s, 'Enabled' => true, 'Days' => i} } } lifecycle_return = { 'Rules' => 5.upto(6).map { |i| {'ID' => "rule\##{i}", 'Prefix' => i.to_s, 'Enabled' => true, 'Expiration' => {'Days' => i}} } } tests('update').returns(lifecycle_return) do Fog::Storage[:aws].put_bucket_lifecycle(@aws_bucket_name, lifecycle) Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name).body end lifecycle = {'Rules' => [{'ID' => 'test rule', 'Prefix' => '/prefix', 'Enabled' => true, 'Expiration' => {'Days' => 42}, 'Transition' => {'Days' => 6, 'StorageClass'=>'GLACIER'}}]} tests('transition').returns(lifecycle) do Fog::Storage[:aws].put_bucket_lifecycle(@aws_bucket_name, lifecycle) Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name).body end lifecycle = {'Rules' => [{'ID' => 'test rule', 'Prefix' => '/prefix', 'Enabled' => true, 'NoncurrentVersionExpiration' => {'NoncurrentDays' => 42}, 'NoncurrentVersionTransition' => {'NoncurrentDays' => 6, 'StorageClass'=>'GLACIER'}}]} tests('versioned transition').returns(lifecycle) do Fog::Storage[:aws].put_bucket_lifecycle(@aws_bucket_name, lifecycle) Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name).body end lifecycle = {'Rules' => [{'ID' => 'test rule', 'Prefix' => '/prefix', 'Enabled' => true, 'Expiration' => {'Date' => '2012-12-31T00:00:00.000Z'}}]} tests('date').returns(lifecycle) do Fog::Storage[:aws].put_bucket_lifecycle(@aws_bucket_name, lifecycle) Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name).body end tests('delete').succeeds do Fog::Storage[:aws].delete_bucket_lifecycle(@aws_bucket_name) end tests('read').returns([404, 'NoSuchLifecycleConfiguration']) do begin Fog::Storage[:aws].get_bucket_lifecycle(@aws_bucket_name) rescue Excon::Errors::NotFound => e [e.response.status, e.response.body.match(%r{(.*)})[1]] end end end tests("put_bucket_cors('#{@aws_bucket_name}', cors)").succeeds do cors = {'CORSConfiguration' => [ { 'AllowedOrigin' => 'http://localhost:3000', 'AllowedMethod' => ['POST', 'GET'], 'AllowedHeader' => '*', 'MaxAgeSeconds' => 3000 } 
] } Fog::Storage[:aws].put_bucket_cors(@aws_bucket_name, cors) end tests("bucket tagging") do tests("#put_bucket_tagging('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].put_bucket_tagging(@aws_bucket_name, {'Key1' => 'Value1', 'Key2' => 'Value2'}) end tests("#get_bucket_tagging('#{@aws_bucket_name}')"). returns('BucketTagging' => {'Key1' => 'Value1', 'Key2' => 'Value2'}) do Fog::Storage[:aws].get_bucket_tagging(@aws_bucket_name).body end tests("#delete_bucket_tagging('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].delete_bucket_tagging(@aws_bucket_name) end end tests("bucket notification") do @topic_arn = Fog::AWS[:sns].create_topic('fog_notifications_tests').body['TopicArn'] tests("#put_bucket_notification('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].put_bucket_notification(@aws_bucket_name, { 'Topics' => [{ 'Topic' => @topic_arn, 'Event' => 's3:ObjectCreated:CompleteMultipartUpload' }]}) end tests("#get_bucket_notification('#{@aws_bucket_name}')"). returns({'Topics' => [{ 'Topic' => @topic_arn, 'Event' => 's3:ObjectCreated:CompleteMultipartUpload' }]}) do Fog::Storage[:aws].get_bucket_notification(@aws_bucket_name).body end Fog::AWS[:sns].delete_topic(@topic_arn) end tests("#delete_bucket('#{@aws_bucket_name}')").succeeds do Fog::Storage[:aws].delete_bucket(@aws_bucket_name) end end tests('failure') do tests("#delete_bucket('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].delete_bucket('fognonbucket') end @bucket = Fog::Storage[:aws].directories.create(:key => 'fognonempty') @file = @bucket.files.create(:key => 'foo', :body => 'bar') tests("#delete_bucket('fognonempty')").raises(Excon::Errors::Conflict) do Fog::Storage[:aws].delete_bucket('fognonempty') end @file.destroy @bucket.destroy tests("#get_bucket('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_bucket('fognonbucket') end tests("#get_bucket_location('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_bucket_location('fognonbucket') end tests("#get_bucket_notification('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_bucket_notification('fognonbucket') end tests("#get_request_payment('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_request_payment('fognonbucket') end tests("#put_request_payment('fognonbucket', 'Requester')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].put_request_payment('fognonbucket', 'Requester') end tests("#put_bucket_acl('fognonbucket', 'invalid')").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].put_bucket_acl('fognonbucket', 'invalid') end tests("#put_bucket_website('fognonbucket', 'index.html')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].put_bucket_website('fognonbucket', 'index.html') end tests('put existing bucket - non-default region') do storage_eu_endpoint = Fog::Storage[:aws] storage_eu_endpoint.region = "eu-west-1" storage_eu_endpoint.put_bucket(@aws_bucket_name) tests("#put_bucket('#{@aws_bucket_name}') existing").raises(Excon::Errors::Conflict) do storage_eu_endpoint.put_bucket(@aws_bucket_name) end end tests("#put_bucket_website('fognonbucket', :RedirectAllRequestsTo => 'redirect.example.com')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].put_bucket_website('fognonbucket', :RedirectAllRequestsTo => 'redirect.example.com') end end # don't keep the bucket around Fog::Storage[:aws].delete_bucket(@aws_bucket_name) rescue nil end 
fog-aws-3.18.0/tests/requests/storage/cors_utils_tests.rb000066400000000000000000000116421437344660100236000ustar00rootroot00000000000000require 'fog/aws/requests/storage/cors_utils' Shindo.tests('Fog::AWS::Storage | CORS utils', ["aws"]) do tests(".hash_to_cors") do tests(".hash_to_cors({}) at xpath //CORSConfiguration").returns("", "has an empty CORSConfiguration") do xml = Fog::AWS::Storage.hash_to_cors({}) Nokogiri::XML(xml).xpath("//CORSConfiguration").first.content.chomp end tests(".hash_to_cors({}) at xpath //CORSConfiguration/CORSRule").returns(nil, "has no CORSRules") do xml = Fog::AWS::Storage.hash_to_cors({}) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule").first end cors = { 'CORSConfiguration' => [ { 'AllowedOrigin' => ['origin_123', 'origin_456'], 'AllowedMethod' => ['GET', 'POST'], 'AllowedHeader' => ['Accept', 'Content-Type'], 'ID' => 'blah-888', 'MaxAgeSeconds' => 2500, 'ExposeHeader' => ['x-some-header', 'x-other-header'] } ] } tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedOrigin").returns("origin_123", "returns the CORSRule AllowedOrigin") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedOrigin")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedOrigin").returns("origin_456", "returns the CORSRule AllowedOrigin") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedOrigin")[1].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedMethod").returns("GET", "returns the CORSRule AllowedMethod") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedMethod")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedMethod").returns("POST", "returns the CORSRule AllowedMethod") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedMethod")[1].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedHeader").returns("Accept", "returns the CORSRule AllowedHeader") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedHeader")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/AllowedHeader").returns("Content-Type", "returns the CORSRule AllowedHeader") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/AllowedHeader")[1].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/ID").returns("blah-888", "returns the CORSRule ID") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/ID")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/MaxAgeSeconds").returns("2500", "returns the CORSRule MaxAgeSeconds") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/MaxAgeSeconds")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath //CORSConfiguration/CORSRule/ExposeHeader").returns("x-some-header", "returns the CORSRule ExposeHeader") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/ExposeHeader")[0].content end tests(".hash_to_cors(#{cors.inspect}) at xpath 
//CORSConfiguration/CORSRule/ExposeHeader").returns("x-other-header", "returns the CORSRule ExposeHeader") do xml = Fog::AWS::Storage.hash_to_cors(cors) Nokogiri::XML(xml).xpath("//CORSConfiguration/CORSRule/ExposeHeader")[1].content end end tests(".cors_to_hash") do cors_xml = <<-XML http://www.example.com http://www.example2.com Content-Length X-Foobar PUT GET 3000 x-amz-server-side-encryption x-amz-balls XML tests(".cors_to_hash(#{cors_xml.inspect})").returns({ "CORSConfiguration" => [{ "AllowedOrigin" => ["http://www.example.com", "http://www.example2.com"], "AllowedHeader" => ["Content-Length", "X-Foobar"], "AllowedMethod" => ["PUT", "GET"], "MaxAgeSeconds" => 3000, "ExposeHeader" => ["x-amz-server-side-encryption", "x-amz-balls"] }] }, 'returns hash of CORS XML') do Fog::AWS::Storage.cors_to_hash(cors_xml) end end end fog-aws-3.18.0/tests/requests/storage/delete_multiple_objects_tests.rb000066400000000000000000000010401437344660100262670ustar00rootroot00000000000000Shindo.tests('AWS::Storage | delete_multiple_objects', ['aws']) do @directory = Fog::Storage[:aws].directories.create(:key => 'fogobjecttests-' + Time.now.to_i.to_s(32)) tests("doesn't alter options") do version_id = {'fog_object' => ['12345']} options = {:quiet => true, 'versionId' => version_id} Fog::Storage[:aws].delete_multiple_objects(@directory.identity, ['fog_object'], options) test(":quiet is unchanged") { options[:quiet] } test("'versionId' is unchanged") { options['versionId'] == version_id } end end fog-aws-3.18.0/tests/requests/storage/multipart_copy_tests.rb000066400000000000000000000077621437344660100244750ustar00rootroot00000000000000require 'securerandom' Shindo.tests('Fog::Storage[:aws] | copy requests', ["aws"]) do @directory = Fog::Storage[:aws].directories.create(:key => uniq_id('fogmultipartcopytests')) @large_data = SecureRandom.hex * 600000 @large_blob = Fog::Storage[:aws].put_object(@directory.identity, 'large_object', @large_data) tests('copies an empty object') do Fog::Storage[:aws].put_object(@directory.identity, 'empty_object', '') file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('empty_object') file.multipart_chunk_size = Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE tests("#copy_object('#{@directory.identity}', 'empty_copied_object'").succeeds do file.copy(@directory.identity, 'empty_copied_object') end copied = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('empty_copied_object') test("copied is the same") { copied.body == file.body } end tests('copies a small object') do Fog::Storage[:aws].put_object(@directory.identity, 'fog_object', lorem_file) file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('fog_object') tests("#copy_object('#{@directory.identity}', 'copied_object'").succeeds do file.copy(@directory.identity, 'copied_object') end copied = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('copied_object') test("copied is the same") { copied.body == file.body } end tests('copies a file needing a single part') do data = '*' * Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE Fog::Storage[:aws].put_object(@directory.identity, '1_part_object', data) file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('1_part_object') file.multipart_chunk_size = Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE tests("#copy_object('#{@directory.identity}', '1_part_copied_object'").succeeds do file.copy(@directory.identity, '1_part_copied_object') end copied = 
Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('1_part_copied_object') test("copied is the same") { copied.body == file.body } end tests('copies a file with many parts') do file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('large_object') file.multipart_chunk_size = Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE tests("#copy_object('#{@directory.identity}', 'large_copied_object'").succeeds do file.copy(@directory.identity, 'large_copied_object') end copied = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('large_copied_object') test("concurrency defaults to 1") { file.concurrency == 1 } test("copied is the same") { copied.body == file.body } end tests('copies a file with many parts with 10 threads') do file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('large_object') file.multipart_chunk_size = Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE file.concurrency = 10 test("concurrency is set to 10") { file.concurrency == 10 } tests("#copy_object('#{@directory.identity}', 'copied_object_with_10_threads'").succeeds do file.copy(@directory.identity, 'copied_object_with_10_threads') end copied = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('copied_object_with_10_threads') test("copied is the same") { copied.body == file.body } end tests('copies an object with unknown headers') do file = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('large_object') file.multipart_chunk_size = Fog::AWS::Storage::File::MIN_MULTIPART_CHUNK_SIZE file.concurrency = 10 tests("#copy_object('#{@directory.identity}', 'copied_object'").succeeds do file.copy(@directory.identity, 'copied_object', { unknown: 1 } ) end copied = Fog::Storage[:aws].directories.new(key: @directory.identity).files.get('copied_object') test("copied is the same") { copied.body == file.body } end end fog-aws-3.18.0/tests/requests/storage/multipart_upload_tests.rb000066400000000000000000000103031437344660100247700ustar00rootroot00000000000000Shindo.tests('Fog::Storage[:aws] | multipart upload requests', ["aws"]) do @directory = Fog::Storage[:aws].directories.create(:key => uniq_id('fogmultipartuploadtests')) tests('success') do @initiate_multipart_upload_format = { 'Bucket' => String, 'Key' => String, 'UploadId' => String } tests("#initiate_multipart_upload('#{@directory.identity}')", 'fog_multipart_upload').formats(@initiate_multipart_upload_format) do data = Fog::Storage[:aws].initiate_multipart_upload(@directory.identity, 'fog_multipart_upload').body @upload_id = data['UploadId'] data end @list_multipart_uploads_format = { 'Bucket' => String, 'IsTruncated' => Fog::Boolean, 'MaxUploads' => Integer, 'KeyMarker' => NilClass, 'NextKeyMarker' => String, 'NextUploadIdMarker' => Fog::Nullable::String, 'Upload' => [{ 'Initiated' => Time, 'Initiator' => { 'DisplayName' => String, 'ID' => String }, 'Key' => String, 'Owner' => { 'DisplayName' => String, 'ID' => String }, 'StorageClass' => String, 'UploadId' => String }], 'UploadIdMarker' => NilClass, } tests("#list_multipart_uploads('#{@directory.identity})").formats(@list_multipart_uploads_format) do pending if Fog.mocking? 
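# Skipped under mocking (pending if Fog.mocking?), so this response format is only verified against real S3.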
Fog::Storage[:aws].list_multipart_uploads(@directory.identity).body end @parts = [] tests("#upload_part('#{@directory.identity}', 'fog_multipart_upload', '#{@upload_id}', 1, ('x' * 6 * 1024 * 1024))").succeeds do data = Fog::Storage[:aws].upload_part(@directory.identity, 'fog_multipart_upload', @upload_id, 1, ('x' * 6 * 1024 * 1024)) @parts << data.headers['ETag'] end @list_parts_format = { 'Bucket' => String, 'Initiator' => { 'DisplayName' => String, 'ID' => String }, 'IsTruncated' => Fog::Boolean, 'Key' => String, 'MaxParts' => Integer, 'NextPartNumberMarker' => String, 'Part' => [{ 'ETag' => String, 'LastModified' => Time, 'PartNumber' => Integer, 'Size' => Integer }], 'PartNumberMarker' => String, 'StorageClass' => String, 'UploadId' => String } tests("#list_parts('#{@directory.identity}', 'fog_multipart_upload', '#{@upload_id}')").formats(@list_parts_format) do pending if Fog.mocking? Fog::Storage[:aws].list_parts(@directory.identity, 'fog_multipart_upload', @upload_id).body end @parts << Fog::Storage[:aws].upload_part(@directory.identity, 'fog_multipart_upload', @upload_id, 2, ('x' * 4 * 1024 * 1024)).headers['ETag'] @complete_multipart_upload_format = { 'Bucket' => String, 'ETag' => String, 'Key' => String, 'Location' => String } tests("#complete_multipart_upload('#{@directory.identity}', 'fog_multipart_upload', '#{@upload_id}', #{@parts.inspect})").formats(@complete_multipart_upload_format) do Fog::Storage[:aws].complete_multipart_upload(@directory.identity, 'fog_multipart_upload', @upload_id, @parts).body end tests("#get_object('#{@directory.identity}', 'fog_multipart_upload').body").succeeds do Fog::Storage[:aws].get_object(@directory.identity, 'fog_multipart_upload').body == ('x' * 10 * 1024 * 1024) end @directory.files.new(:key => 'fog_multipart_upload').destroy @upload_id = Fog::Storage[:aws].initiate_multipart_upload(@directory.identity, 'fog_multipart_abort').body['UploadId'] tests("#abort_multipart_upload('#{@directory.identity}', 'fog_multipart_abort', '#{@upload_id}')").succeeds do Fog::Storage[:aws].abort_multipart_upload(@directory.identity, 'fog_multipart_abort', @upload_id) end end tests('failure') do tests("initiate_multipart_upload") tests("list_multipart_uploads") tests("upload_part") tests("list_parts") tests("complete_multipart_upload") tests("abort_multipart_upload") end @directory.destroy end fog-aws-3.18.0/tests/requests/storage/object_tests.rb000066400000000000000000000320561437344660100226620ustar00rootroot00000000000000# encoding: utf-8 Shindo.tests('AWS::Storage | object requests', ['aws']) do @directory = Fog::Storage[:aws].directories.create(:key => 'fogobjecttests-' + Time.now.to_i.to_s(32)) @aws_owner = Fog::Storage[:aws].get_bucket_acl(@directory.key).body['Owner'] tests('success') do @multiple_delete_format = { 'DeleteResult' => [{ 'Deleted' => { 'Key' => String } }] } tests("#put_object('#{@directory.identity}', 'fog_object', lorem_file)").succeeds do Fog::Storage[:aws].put_object(@directory.identity, 'fog_object', lorem_file) end tests("#put_object('#{@directory.identity}', 'fog_object', lorem_file at EOF)").returns(lorem_file.read) do file = lorem_file file.read Fog::Storage[:aws].put_object(@directory.identity, 'fog_object', file) Fog::Storage[:aws].get_object(@directory.identity, 'fog_object').body end tests("#copy_object('#{@directory.identity}', 'fog_object', '#{@directory.identity}', 'fog_other_object')").succeeds do Fog::Storage[:aws].copy_object(@directory.identity, 'fog_object', @directory.identity, 'fog_other_object') end 
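# Drop the copy made above so only 'fog_object' remains for the assertions that follow.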
@directory.files.get('fog_other_object').destroy tests("#get_object('#{@directory.identity}', 'fog_object')").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object').body end tests("#get_object('#{@directory.identity}', 'fog_object', &block)").returns(lorem_file.read) do data = '' Fog::Storage[:aws].get_object(@directory.identity, 'fog_object') do |chunk, remaining_bytes, total_bytes| data << chunk end data end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file) })").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file.read) }).body end tests("#get_object('#{@directory.identity}', 'fog_object', {'Range' => 'bytes=0-20'})").returns(lorem_file.read[0..20]) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', {'Range' => 'bytes=0-20'}).body end tests("#get_object('#{@directory.identity}', 'fog_object', {'Range' => 'bytes=0-0'})").returns(lorem_file.read[0..0]) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', {'Range' => 'bytes=0-0'}).body end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file.read) })").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file.read) }).body end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Modified-Since' => Time.now - 60 })").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Modified-Since' => Time.now - 60 }).body end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-None-Match' => 'invalid_etag' })").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-None-Match' => 'invalid_etag' }).body end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Unmodified-Since' => Time.now + 60 })").returns(lorem_file.read) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Unmodified-Since' => Time.now + 60 }).body end tests("#head_object('#{@directory.identity}', 'fog_object')").succeeds do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object') end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file.read) })").succeeds do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Match' => Digest::MD5.hexdigest(lorem_file.read) }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Modified-Since' => Time.now - 60 })").succeeds do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Modified-Since' => Time.now - 60 }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-None-Match' => 'invalid_etag' })").succeeds do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-None-Match' => 'invalid_etag' }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Unmodified-Since' => Time.now + 60 })").succeeds do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Unmodified-Since' => Time.now + 60 }) end tests("#post_object_restore('#{@directory.identity}', 'fog_object')").succeeds do pending unless Fog.mocking? 
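# Note the inverted guard here: this test is pending *unless* mocking, so post_object_restore is only
# exercised against the in-memory mock (presumably because a real restore requires an archived object).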
Fog::Storage[:aws].post_object_restore(@directory.identity, 'fog_object') end tests("#put_object_acl('#{@directory.identity}', 'fog_object', 'private')").succeeds do Fog::Storage[:aws].put_object_acl(@directory.identity, 'fog_object', 'private') end acl = { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => @aws_owner, 'Permission' => "FULL_CONTROL" } ]} tests("#put_object_acl('#{@directory.identity}', 'fog_object', hash with id)").returns(acl) do Fog::Storage[:aws].put_object_acl(@directory.identity, 'fog_object', acl) Fog::Storage[:aws].get_object_acl(@directory.identity, 'fog_object').body end tests("#put_object_acl('#{@directory.identity}', 'fog_object', hash with email)").returns({ 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'ID' => 'f62f0218873cfa5d56ae9429ae75a592fec4fd22a5f24a20b1038a7db9a8f150', 'DisplayName' => 'mtd' }, 'Permission' => "FULL_CONTROL" } ]}) do pending if Fog.mocking? Fog::Storage[:aws].put_object_acl(@directory.identity, 'fog_object', { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'EmailAddress' => 'mtd@amazon.com' }, 'Permission' => "FULL_CONTROL" } ]}) Fog::Storage[:aws].get_object_acl(@directory.identity, 'fog_object').body end acl = { 'Owner' => @aws_owner, 'AccessControlList' => [ { 'Grantee' => { 'URI' => 'http://acs.amazonaws.com/groups/global/AllUsers' }, 'Permission' => "FULL_CONTROL" } ]} tests("#put_object_acl('#{@directory.identity}', 'fog_object', hash with uri)").returns(acl) do Fog::Storage[:aws].put_object_acl(@directory.identity, 'fog_object', acl) Fog::Storage[:aws].get_object_acl(@directory.identity, 'fog_object').body end tests("#delete_object('#{@directory.identity}', 'fog_object')").succeeds do Fog::Storage[:aws].delete_object(@directory.identity, 'fog_object') end tests("#get_object_http_url('#{@directory.identity}', 'fog_object', expiration timestamp)").returns(true) do object_url = Fog::Storage[:aws].get_object_http_url(@directory.identity, 'fog_object', (Time.now + 60)) (object_url =~ /http:\/\/#{Regexp.quote(@directory.identity)}\.s3\.amazonaws\.com\/fog_object/) != nil end tests("#head_object_url('#{@directory.identity}', 'fog_object', expiration timestamp)").returns(true) do object_url = Fog::Storage[:aws].head_object_url(@directory.identity, 'fog_object', (Time.now + 60)) (object_url =~ /https:\/\/#{Regexp.quote(@directory.identity)}\.s3\.amazonaws\.com\/fog_object/) != nil end tests("delete_multiple_objects('#{@directory.identity}', ['fog_object', 'fog_other_object'])").formats(@multiple_delete_format) do Fog::Storage[:aws].delete_multiple_objects(@directory.identity, ['fog_object', 'fog_other_object']).body end tests("#delete_multiple_objects('#{@directory.identity}', 'fØg_öbjèct', UTF-8)").succeeds do Fog::Storage[:aws].delete_multiple_objects(@directory.identity, ['fØg_öbjèct']) end end fognonbucket = uniq_id('fognonbucket') tests('failure') do tests("#put_object('#{fognonbucket}', 'fog_non_object', lorem_file)").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].put_object(fognonbucket, 'fog_non_object', lorem_file) end tests("#put_object('#{@directory.identity}', 'fog_object', lorem_file, {'x-amz-meta-json' => 'ä'}").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].put_object(@directory.identity, 'fog_object', lorem_file, {'x-amz-meta-json' => 'ä'}) end tests("#copy_object('#{fognonbucket}', 'fog_object', '#{@directory.identity}', 'fog_other_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].copy_object(fognonbucket, 'fog_object', @directory.identity, 
'fog_other_object') end tests("#copy_object('#{@directory.identity}', 'fog_non_object', '#{@directory.identity}', 'fog_other_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].copy_object(@directory.identity, 'fog_non_object', @directory.identity, 'fog_other_object') end tests("#copy_object('#{@directory.identity}', 'fog_object', 'fognonbucket', 'fog_other_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].copy_object(@directory.identity, 'fog_object', fognonbucket, 'fog_other_object') end tests("#get_object('#{fognonbucket}', 'fog_non_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_object(fognonbucket, 'fog_non_object') end tests("#get_object('#{@directory.identity}', 'fog_non_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_non_object') end Fog::Storage[:aws].put_object(@directory.identity, 'fog_object', lorem_file) tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Match' => 'invalid_etag' })").raises(Excon::Errors::PreconditionFailed) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Match' => 'invalid_etag' }) end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Modified-Since' => Time.now })").raises(Excon::Errors::NotModified) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Modified-Since' => Time.now }) end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-None-Match' => Digest::MD5.hexdigest(lorem_file.read) })").raises(Excon::Errors::NotModified) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-None-Match' => Digest::MD5.hexdigest(lorem_file.read) }) end tests("#get_object('#{@directory.identity}', 'fog_object', { 'If-Unmodified-Since' => Time.now - 60 })").raises(Excon::Errors::PreconditionFailed) do Fog::Storage[:aws].get_object(@directory.identity, 'fog_object', { 'If-Unmodified-Since' => Time.now - 60 }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Match' => 'invalid_etag' })").raises(Excon::Errors::PreconditionFailed) do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Match' => 'invalid_etag' }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Modified-Since' => Time.now })").raises(Excon::Errors::NotModified) do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Modified-Since' => Time.now }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-None-Match' => Digest::MD5.hexdigest(lorem_file.read) })").raises(Excon::Errors::NotModified) do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-None-Match' => Digest::MD5.hexdigest(lorem_file.read) }) end tests("#head_object('#{@directory.identity}', 'fog_object', { 'If-Unmodified-Since' => Time.now - 60 })").raises(Excon::Errors::PreconditionFailed) do Fog::Storage[:aws].head_object(@directory.identity, 'fog_object', { 'If-Unmodified-Since' => Time.now - 60 }) end Fog::Storage[:aws].delete_object(@directory.identity, 'fog_object') tests("#head_object(fognonbucket, 'fog_non_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].head_object(fognonbucket, 'fog_non_object') end tests("#head_object('#{@directory.identity}', 'fog_non_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].head_object(@directory.identity, 'fog_non_object') end tests("#delete_object('#{fognonbucket}', 'fog_non_object')").raises(Excon::Errors::NotFound) do 
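# S3 treats DELETE of a missing key in an *existing* bucket as a silent 204 no-op; it is the missing
# bucket here that makes the request raise NotFound. Illustrative contrast only (hypothetical bucket
# name, not run by this suite):
#
#   Fog::Storage[:aws].delete_object('some-existing-bucket', 'no_such_key')  # 204, no error raised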
Fog::Storage[:aws].delete_object(fognonbucket, 'fog_non_object') end tests("#delete_multiple_objects('#{fognonbucket}', ['fog_non_object'])").raises(Excon::Errors::NotFound) do pending if Fog.mocking? Fog::Storage[:aws].delete_multiple_objects(fognonbucket, ['fog_non_object']) end tests("#put_object_acl('#{@directory.identity}', 'fog_object', 'invalid')").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].put_object_acl(@directory.identity, 'fog_object', 'invalid') end tests("#post_object_restore('#{@directory.identity}', 'fog_non_object')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].post_object_restore(@directory.identity, 'fog_non_object') end end @directory.destroy end fog-aws-3.18.0/tests/requests/storage/versioning_tests.rb000066400000000000000000000274611437344660100236020ustar00rootroot00000000000000def clear_bucket Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name).body['Versions'].each do |version| object = version[version.keys.first] Fog::Storage[:aws].delete_object(@aws_bucket_name, object['Key'], 'versionId' => object['VersionId']) end end def create_versioned_bucket @aws_bucket_name = 'fogbuckettests-' + Fog::Mock.random_hex(16) Fog::Storage[:aws].put_bucket(@aws_bucket_name) Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'Enabled') end def delete_bucket Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name).body['Versions'].each do |version| object = version[version.keys.first] Fog::Storage[:aws].delete_object(@aws_bucket_name, object['Key'], 'versionId' => object['VersionId']) end Fog::Storage[:aws].delete_bucket(@aws_bucket_name) end Shindo.tests('Fog::Storage[:aws] | versioning', ["aws"]) do tests('success') do tests("#put_bucket_versioning") do @aws_bucket_name = 'fogbuckettests-' + Fog::Mock.random_hex(16) Fog::Storage[:aws].put_bucket(@aws_bucket_name) tests("#put_bucket_versioning('#{@aws_bucket_name}', 'Enabled')").succeeds do Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'Enabled') end tests("#put_bucket_versioning('#{@aws_bucket_name}', 'Suspended')").succeeds do Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'Suspended') end delete_bucket end tests("#get_bucket_versioning('#{@aws_bucket_name}')") do @aws_bucket_name = 'fogbuckettests-' + Fog::Mock.random_hex(16) Fog::Storage[:aws].put_bucket(@aws_bucket_name) tests("#get_bucket_versioning('#{@aws_bucket_name}') without versioning").returns({}) do Fog::Storage[:aws].get_bucket_versioning(@aws_bucket_name).body['VersioningConfiguration'] end tests("#get_bucket_versioning('#{@aws_bucket_name}') with versioning enabled").returns('Enabled') do Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'Enabled') Fog::Storage[:aws].get_bucket_versioning(@aws_bucket_name).body['VersioningConfiguration']['Status'] end tests("#get_bucket_versioning('#{@aws_bucket_name}') with versioning suspended").returns('Suspended') do Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'Suspended') Fog::Storage[:aws].get_bucket_versioning(@aws_bucket_name).body['VersioningConfiguration']['Status'] end delete_bucket end tests("#get_bucket_object_versions('#{@aws_bucket_name}')") do create_versioned_bucket before do @versions = Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name) end v1 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'a', :key => 'file') v2 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'ab', :key => v1.key) v3 =
Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'abc', :key => v1.key) v4 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'abcd', :key => v1.key) tests("versions").returns([v4.version, v3.version, v2.version, v1.version]) do @versions.body['Versions'].map {|v| v['Version']['VersionId']} end tests("version sizes").returns([4, 3, 2, 1]) do @versions.body['Versions'].map {|v| v['Version']['Size']} end tests("latest version").returns(v4.version) do latest = @versions.body['Versions'].find {|v| v['Version']['IsLatest']} latest['Version']['VersionId'] end end tests("get_object('#{@aws_bucket_name}', 'file')") do clear_bucket v1 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'a', :key => 'file') v2 = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'ab', :key => v1.key) tests("get_object('#{@aws_bucket_name}', '#{v2.key}') returns the latest version").returns(v2.version) do res = Fog::Storage[:aws].get_object(@aws_bucket_name, v2.key) res.headers['x-amz-version-id'] end tests("get_object('#{@aws_bucket_name}', '#{v1.key}', 'versionId' => '#{v1.version}') returns the specified version").returns(v1.version) do res = Fog::Storage[:aws].get_object(@aws_bucket_name, v1.key, 'versionId' => v1.version) res.headers['x-amz-version-id'] end v2.destroy tests("get_object('#{@aws_bucket_name}', '#{v2.key}') raises exception if delete marker is latest version").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_object(@aws_bucket_name, v2.key) end end tests("delete_object('#{@aws_bucket_name}', 'file')") do clear_bucket file = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'a', :key => 'file') tests("deleting an object just stores a delete marker").returns(true) do file.destroy versions = Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name) versions.body['Versions'].first.key?('DeleteMarker') end tests("there are two versions: the original and the delete marker").returns(2) do versions = Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name) versions.body['Versions'].size end tests("deleting the delete marker makes the object available again").returns(file.version) do versions = Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name) delete_marker = versions.body['Versions'].find { |v| v.key?('DeleteMarker') } Fog::Storage[:aws].delete_object(@aws_bucket_name, file.key, 'versionId' => delete_marker['DeleteMarker']['VersionId']) res = Fog::Storage[:aws].get_object(@aws_bucket_name, file.key) res.headers['x-amz-version-id'] end end tests("deleting_multiple_objects_versions('#{@aws_bucket_name}", 'file') do clear_bucket bucket = Fog::Storage[:aws].directories.get(@aws_bucket_name) file_count = 5 file_names = [] files = {} file_count.times do |id| file_names << "file_#{id}" file_version_count = rand(1..5) file_version_count.times do files[file_names.last] = bucket.files.create(:body => 'a', :key => file_names.last) end end tests("deleting an object with multiple versions").returns(true) do versions = Fog::Storage[:aws].get_bucket_object_versions( @aws_bucket_name) file_versions = {} versions.body['Versions'].each do |version| object = version[version.keys.first] if file_versions[object['Key']] file_versions[object['Key']] = file_versions[object['Key']] << object['VersionId'] else file_versions[object['Key']] = [object['VersionId']] end end Fog::Storage[:aws].delete_multiple_objects(@aws_bucket_name, file_names, 'versionId' => file_versions) 
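# The call above passes 'versionId' => { key => [version_id, ...] }, the shape built in file_versions,
# so every stored version of every key is removed outright rather than a delete marker being layered on
# top. A minimal sketch of that shape (hypothetical bucket, keys, and version ids, not executed here):
#
#   Fog::Storage[:aws].delete_multiple_objects('my_bucket', ['a', 'b'],
#     'versionId' => { 'a' => ['111', '222'], 'b' => ['333'] })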
versions = Fog::Storage[:aws].get_bucket_object_versions( @aws_bucket_name) versions.body['Versions'].empty? end end tests("deleting_multiple_objects('#{@aws_bucket_name}", 'file') do clear_bucket bucket = Fog::Storage[:aws].directories.get(@aws_bucket_name) file_count = 5 file_names = [] files = {} file_count.times do |id| file_names << "file_#{id}" files[file_names.last] = bucket.files.create(:body => 'a', :key => file_names.last) end tests("deleting an object just stores a delete marker").returns(true) do Fog::Storage[:aws].delete_multiple_objects(@aws_bucket_name, file_names) versions = Fog::Storage[:aws].get_bucket_object_versions( @aws_bucket_name) all_versions = {} versions.body['Versions'].each do |version| object = version[version.keys.first] next if file_names.index(object['Key']).nil? if !all_versions.key?(object['Key']) all_versions[object['Key']] = version.key?('DeleteMarker') else all_versions[object['Key']] |= version.key?('DeleteMarker') end end all_true = true all_versions.values.each do |marker| all_true = false if !marker end all_true end tests("there are two versions: the original and the delete marker"). returns(file_count*2) do versions = Fog::Storage[:aws].get_bucket_object_versions( @aws_bucket_name) versions.body['Versions'].size end tests("deleting the delete marker makes the object available again"). returns(true) do versions = Fog::Storage[:aws].get_bucket_object_versions( @aws_bucket_name) delete_markers = [] file_versions = {} versions.body['Versions'].each do |version| object = version[version.keys.first] next if object['VersionId'] == files[object['Key']].version file_versions[object['Key']] = object['VersionId'] end Fog::Storage[:aws].delete_multiple_objects(@aws_bucket_name, file_names, 'versionId' => file_versions) all_true = true file_names.each do |file| res = Fog::Storage[:aws].get_object(@aws_bucket_name, file) all_true = false if res.headers['x-amz-version-id'] != files[file].version end all_true end end tests("get_bucket('#{@aws_bucket_name}'") do clear_bucket file = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'a', :key => 'file') tests("includes a non-DeleteMarker object").returns(1) do Fog::Storage[:aws].get_bucket(@aws_bucket_name).body['Contents'].size end file.destroy tests("does not include a DeleteMarker object").returns(0) do Fog::Storage[:aws].get_bucket(@aws_bucket_name).body['Contents'].size end end delete_bucket end tests('failure') do create_versioned_bucket tests("#put_bucket_versioning('#{@aws_bucket_name}', 'bad_value')").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].put_bucket_versioning(@aws_bucket_name, 'bad_value') end tests("#get_bucket_object_versions('#{@aws_bucket_name}', { 'version-id-marker' => 'foo' })").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].get_bucket_object_versions(@aws_bucket_name, { 'version-id-marker' => 'foo' }) end tests("#put_bucket_versioning('fognonbucket', 'Enabled')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].put_bucket_versioning('fognonbucket', 'Enabled') end tests("#get_bucket_versioning('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_bucket_versioning('fognonbucket') end tests("#get_bucket_object_versions('fognonbucket')").raises(Excon::Errors::NotFound) do Fog::Storage[:aws].get_bucket_object_versions('fognonbucket') end file = Fog::Storage[:aws].directories.get(@aws_bucket_name).files.create(:body => 'y', :key => 'x') tests("#get_object('#{@aws_bucket_name}', '#{file.key}', 'versionId' => 
'bad_version'").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].get_object(@aws_bucket_name, file.key, 'versionId' => '-1') end tests("#delete_object('#{@aws_bucket_name}', '#{file.key}', 'versionId' => 'bad_version'").raises(Excon::Errors::BadRequest) do Fog::Storage[:aws].delete_object(@aws_bucket_name, file.key, 'versionId' => '-1') end end # don't keep the bucket around delete_bucket end fog-aws-3.18.0/tests/requests/sts/000077500000000000000000000000001437344660100170045ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/sts/assume_role_tests.rb000066400000000000000000000011341437344660100230700ustar00rootroot00000000000000Shindo.tests('AWS::STS | assume role', ['aws']) do @policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} @response_format = { 'SessionToken' => String, 'SecretAccessKey' => String, 'Expiration' => String, 'AccessKeyId' => String, 'Arn' => String, 'RequestId' => String } tests("#assume_role('rolename', 'assumed_role_session', 'external_id', #{@policy.inspect}, 900)").formats(@response_format) do pending if Fog.mocking? Fog::AWS[:sts].assume_role("rolename","assumed_role_session","external_id", @policy, 900).body end end fog-aws-3.18.0/tests/requests/sts/assume_role_with_saml_tests.rb000066400000000000000000000012171437344660100251410ustar00rootroot00000000000000Shindo.tests('AWS::STS | assume role with SAML', ['aws']) do @policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} @response_format = { 'SessionToken' => String, 'SecretAccessKey' => String, 'Expiration' => String, 'AccessKeyId' => String, 'Arn' => String, 'RequestId' => String } tests("#assume_role_with_saml('role_arn', 'principal_arn', 'saml_assertion', #{@policy.inspect}, 900)").formats(@response_format) do pending if Fog.mocking? 
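# Hedged sketch of a live call (placeholder ARNs and assertion; a real SAML identity provider is
# required, which is why the test above is skipped under mocking). The SAML variant exchanges a
# base64-encoded assertion for temporary credentials, optionally scoped by the policy and duration
# passed as the last two arguments:
#
#   Fog::AWS[:sts].assume_role_with_saml(
#     'arn:aws:iam::123456789012:role/demo-role',
#     'arn:aws:iam::123456789012:saml-provider/demo-idp',
#     Base64.encode64('<samlp:Response>...</samlp:Response>'),
#     @policy,
#     900
#   )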
Fog::AWS[:sts].assume_role_with_saml("role_arn","principal_arn","saml_assertion", @policy, 900).body end end fog-aws-3.18.0/tests/requests/sts/assume_role_with_web_identity_tests.rb000066400000000000000000000016051437344660100266740ustar00rootroot00000000000000Shindo.tests('AWS::STS | assume role with web identity', ['aws']) do @sts = Fog::AWS[:sts] @iam = Fog::AWS[:iam] @role = @iam.create_role('sts', Fog::AWS::IAM::EC2_ASSUME_ROLE_POLICY).body['Role'] @token = Fog::AWS::Mock.key_id @response_format = { 'AssumedRoleUser' => { 'Arn' => String, 'AssumedRoleId' => String, }, 'Audience' => String, 'Credentials' => { 'AccessKeyId' => String, 'Expiration' => Time, 'SecretAccessKey' => String, 'SessionToken' => String, }, 'Provider' => String, 'SubjectFromWebIdentityToken' => String, } tests("#assume_role_with_web_identity('#{@role['Arn']}', '#{@token}', 'fog')").formats(@response_format) do @sts.assume_role_with_web_identity(@role['Arn'], @token, 'fog', :iam => @iam).body end @iam.roles.get('sts').destroy end fog-aws-3.18.0/tests/requests/sts/get_federation_token_tests.rb000066400000000000000000000010651437344660100247340ustar00rootroot00000000000000Shindo.tests('AWS::STS | session tokens', ['aws']) do @policy = {"Statement" => [{"Effect" => "Allow", "Action" => "*", "Resource" => "*"}]} @federation_format = { 'SessionToken' => String, 'SecretAccessKey' => String, 'Expiration' => String, 'AccessKeyId' => String, 'Arn' => String, 'FederatedUserId' => String, 'PackedPolicySize' => String, 'RequestId' => String } tests("#get_federation_token('test@fog.io', #{@policy.inspect})").formats(@federation_format) do Fog::AWS[:sts].get_federation_token("test@fog.io", @policy).body end end fog-aws-3.18.0/tests/requests/sts/session_token_tests.rb000066400000000000000000000005351437344660100234410ustar00rootroot00000000000000Shindo.tests('AWS::STS | session tokens', ['aws']) do @session_format = { 'SessionToken' => String, 'SecretAccessKey' => String, 'Expiration' => String, 'AccessKeyId' => String, 'RequestId' => String } tests("#get_session_token").formats(@session_format) do pending if Fog.mocking? 
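# The temporary credentials returned here can be handed straight back to fog to build a short-lived
# connection. Sketch only (assumes the session token is still valid for the target service):
#
#   creds = Fog::AWS[:sts].get_session_token.body
#   Fog::Storage.new(
#     :provider              => 'AWS',
#     :aws_access_key_id     => creds['AccessKeyId'],
#     :aws_secret_access_key => creds['SecretAccessKey'],
#     :aws_session_token     => creds['SessionToken']
#   )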
Fog::AWS[:sts].get_session_token.body end end fog-aws-3.18.0/tests/requests/support/000077500000000000000000000000001437344660100177075ustar00rootroot00000000000000fog-aws-3.18.0/tests/requests/support/helper.rb000066400000000000000000000022761437344660100215220ustar00rootroot00000000000000class AWS module Support module Formats TRUSTED_ADVISOR_CHECK_FORMAT = { 'id' => String, 'name' => String, 'description' => String, 'metadata' => Array, 'category' => String, } FLAGGED_RESOURCE = { 'isSuppressed' => Fog::Boolean, 'metadata' => Array, 'region' => String, 'resourceId' => String, 'status' => String } TRUSTED_ADVISOR_CHECK_RESULT_FORMAT = { 'categorySpecificSummary' => Hash, 'checkId' => String, 'flaggedResources' => [FLAGGED_RESOURCE], 'resourcesSummary' => { 'resourcesFlagged' => Integer, 'resourcesIgnored' => Integer, 'resourcesProcessed' => Integer, 'resourcesSuppressed' => Integer }, 'status' => String, 'timestamp' => String } DESCRIBE_TRUSTED_ADVISOR_CHECKS = { 'checks' => [TRUSTED_ADVISOR_CHECK_FORMAT] } DESCRIBE_TRUSTED_ADVISOR_CHECK_RESULT = { 'result' => TRUSTED_ADVISOR_CHECK_RESULT_FORMAT } end end end fog-aws-3.18.0/tests/requests/support/trusted_advisor_check_tests.rb000066400000000000000000000013431437344660100260350ustar00rootroot00000000000000Shindo.tests("AWS::Support | describe_trusted_advisor_checks", ['aws', 'support']) do tests("#describe_trusted_advisor_checks").formats(AWS::Support::Formats::DESCRIBE_TRUSTED_ADVISOR_CHECKS) do Fog::AWS[:support].describe_trusted_advisor_checks.body end # things get weird in the mocked data depending on the order the model and requests run in if Fog.mocking? Fog::AWS[:support].reset end @check_id = Fog::AWS[:support].describe_trusted_advisor_checks.body['checks'].first['id'] tests("#describe_trusted_advisor_check_result(id: #{@check_id})").formats(AWS::Support::Formats::DESCRIBE_TRUSTED_ADVISOR_CHECK_RESULT) do Fog::AWS[:support].describe_trusted_advisor_check_result(:id => @check_id).body end end fog-aws-3.18.0/tests/signaturev4_tests.rb000066400000000000000000000147541437344660100203550ustar00rootroot00000000000000# encoding: utf-8 Shindo.tests('AWS | signaturev4', ['aws']) do @now = Fog::Time.utc(2011, 9, 9, 23, 36, 0) # These testcases are from http://docs.amazonwebservices.com/general/latest/gr/signature-v4-test-suite.html @signer = Fog::AWS::SignatureV4.new('AKIDEXAMPLE', 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY', 'us-east-1', 'host') tests('get-vanilla') do returns(@signer.sign({ headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470' end end tests('get-headers-mixed-case-headers') do returns(@signer.sign({ headers: { 'HOST' => 'host.foo.com', 'date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470' end end tests('get-vanilla-query-order-key with symbol keys') do returns(@signer.sign({ query: { a: 'foo', b: 'foo' }, headers: { Host: 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT'}, method: :get, path: '/' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, 
Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b' end end tests('get-vanilla-query-order-key') do returns(@signer.sign({ query: { a: 'foo', b: 'foo' }, headers: { Host: 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT'}, :method => :get, :path => '/'}, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=0dc122f3b28b831ab48ba65cb47300de53fbe91b577fe113edac383730254a3b' end end tests('get-unreserved') do returns(@signer.sign({ headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/-._~0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=830cc36d03f0f84e6ee4953fbe701c1c8b71a0372c63af9255aa364dd183281e' end end tests('post-x-www-form-urlencoded-parameter') do returns(@signer.sign({ headers: { 'Content-type' => 'application/x-www-form-urlencoded; charset=utf8', 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :post, path: '/', body: 'foo=bar' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=content-type;date;host, Signature=b105eb10c6d318d2294de9d49dd8b031b55e3c3fe139f2e637da70511e9e7b71' end end tests('get with relative path') do returns(@signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/foo/bar/../..' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470' end end tests('get with pointless .') do returns(@signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/./foo' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=910e4d6c9abafaf87898e1eb4c929135782ea25bb0279703146455745391e63a' end end tests('get with repeated / ') do returns(@signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '//' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b27ccfbfa7df52a200ff74193ca6e32d4b48b8856fab7ebf1c595d0670a7e470' end end tests('get with repeated // inside path') do returns(@signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/foo//bar//baz' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b250c85c72c5d7c33f67759c7a1ad79ea381cf62105290cecd530af2771575d4' end end tests('get with repeated trailing / ') do returns(@signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '//foo//' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=b00392262853cfe3201e47ccf945601079e9b8a7f51ee4c3d9ee4f187aa9bf19' end end tests('get signature as components') do returns(@signer.signature_parameters({ query: { 'a' => 'foo', 'b' => 'foo' }, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/'}, @now)) do 
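# signature_parameters returns the signature broken into the X-Amz-* components used for query-string
# ("pre-signed") authentication, instead of assembling an Authorization header as #sign does.
# Illustrative assembly of those components into a query string (sketch only, not part of the test):
#
#   params = @signer.signature_parameters({ query: {}, headers: { 'Host' => 'host.foo.com',
#                                           'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' },
#                                           method: :get, path: '/' }, @now)
#   params.map { |k, v| "#{k}=#{Fog::AWS.escape(v)}" }.join('&')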
{ 'X-Amz-Algorithm' => 'AWS4-HMAC-SHA256', 'X-Amz-Credential' => 'AKIDEXAMPLE/20110909/us-east-1/host/aws4_request', 'X-Amz-SignedHeaders' => 'date;host', 'X-Amz-Signature' => 'a6c6304682c74bcaebeeab2fdfb8041bbb39c6976300791a283057bccf333fb2' } end end tests('inject body sha') do returns(@signer.signature_parameters({ query: { 'a' => 'foo', 'b' => 'foo' }, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '/' }, @now, 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD')) do { 'X-Amz-Algorithm' => 'AWS4-HMAC-SHA256', 'X-Amz-Credential' => 'AKIDEXAMPLE/20110909/us-east-1/host/aws4_request', 'X-Amz-SignedHeaders' => 'date;host', 'X-Amz-Signature' => '22c32bb0d0b859b94839de4e9360bca1806e73d853f5f97ae0d849f0bdf42fb0' } end end tests('s3 signer does not normalize path') do signer = Fog::AWS::SignatureV4.new('AKIDEXAMPLE', 'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY', 'us-east-1', 's3') returns(signer.sign({ query: {}, headers: { 'Host' => 'host.foo.com', 'Date' => 'Mon, 09 Sep 2011 23:36:00 GMT' }, method: :get, path: '//foo/../bar/./' }, @now)) do 'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/s3/aws4_request, SignedHeaders=date;host, Signature=72407ad06b8e5750360f42e8aad9f33a0be363bcfeecdcae0aea58c99709fb4a' end end Fog::Time.now = ::Time.now end fog-aws-3.18.0/tests/signed_params_tests.rb000066400000000000000000000010561437344660100207050ustar00rootroot00000000000000# encoding: utf-8 Shindo.tests('AWS | signed_params', ['aws']) do returns(Fog::AWS.escape("'Stöp!' said Fred_-~./")) { '%27St%C3%B6p%21%27%20said%20Fred_-~.%2F' } tests('Unicode characters should be escaped') do unicode = ['00E9'.to_i(16)].pack('U*') escaped = '%C3%A9' returns(escaped) { Fog::AWS.escape(unicode) } end tests('Unicode characters with combining marks should be escaped') do unicode = ['0065'.to_i(16), '0301'.to_i(16)].pack('U*') escaped = 'e%CC%81' returns(escaped) { Fog::AWS.escape(unicode) } end end fog-aws-3.18.0/tests/storage_tests.rb000066400000000000000000000003711437344660100175340ustar00rootroot00000000000000# encoding: utf-8 Shindo.tests('AWS Storage | escape', ['aws']) do tests('Keys can contain a hierarchical prefix which should not be escaped') do returns(Fog::AWS::Storage.new.send(:escape, 'key/with/prefix')) { 'key/with/prefix' } end end
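# The escape helper exercised above leaves '/' separators intact so hierarchical key prefixes survive in
# request paths, while other unsafe characters are percent-encoded. Illustrative expectation only
# (hypothetical key; the space encoding mirrors the signed_params escaping shown earlier):
#
#   Fog::AWS::Storage.new.send(:escape, 'photos/2023/new year.jpg')
#   # => "photos/2023/new%20year.jpg"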