docker-api-2.2.0/.cane:
--abc-max 30
--style-measure 120
docker-api-2.2.0/.github/workflows/unit_test.yml:
name: Unit Tests
on:
push:
branches:
# A test branch for seeing if your tests will pass in your personal fork
- test_me_github
pull_request:
branches:
- main
- master
jobs:
docker-rspec:
runs-on:
- ubuntu-18.04
strategy:
matrix:
ruby:
- 2.7
- 2.6
- 2.5
- 2.4
docker_version:
- ':20.'
- ':19.'
- ':18.'
fail-fast: true
steps:
- uses: actions/checkout@v2
- uses: ruby/setup-ruby@v1
with:
ruby-version: ${{ matrix.ruby }}
- name: install bundler
run: |
gem install bundler -v '~> 1.17.3'
bundle update
- name: install docker
env:
DOCKER_VERSION: ${{ matrix.docker_version }}
run: |
set -x
sudo apt-get remove -y docker docker-engine docker.io containerd runc ||:
sudo apt-get update -y
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update -y
sudo apt-cache gencaches
sudo apt-get install -y docker-ce=$( apt-cache madison docker-ce | grep -e $DOCKER_VERSION | cut -f 2 -d '|' | head -1 | sed 's/\s//g' )
if [ $? -ne 0 ]; then
echo "Error: Could not install ${DOCKER_VERSION}"
echo "Available docker versions:"
apt-cache madison docker-ce
exit 1
fi
sudo systemctl start docker
- name: spec tests
run: bundle exec rake
podman-rspec:
runs-on:
- ubuntu-latest
strategy:
matrix:
ruby:
- 2.7
- 2.6
- 2.5
- 2.4
fail-fast: true
steps:
- uses: actions/checkout@v2
- uses: ruby/setup-ruby@v1
with:
ruby-version: ${{ matrix.ruby }}
- name: install bundler
run: |
gem install bundler -v '~> 1.17.3'
bundle update
- name: install podman
run: sudo ./script/install_podman.sh
- name: spec tests
run: bundle exec rake
docker-api-2.2.0/.gitignore:
.DS_Store
*.swp
*.gem
Gemfile.lock
.ruby-*
docker-api-2.2.0/.rspec:
--order rand
docker-api-2.2.0/.simplecov:
SimpleCov.start do
add_group 'Library', 'lib'
add_group 'Specs', 'spec'
end
docker-api-2.2.0/.travis.yml:
os: linux
dist: bionic
language: ruby
cache: bundler
rvm:
- 2.7
- 2.6
- 2.5
- 2.4
- 2.3
- 2.2
env:
- DOCKER_VERSION=5:19.03.8~3-0~ubuntu-bionic
- DOCKER_VERSION=5:18.09.9~3-0~ubuntu-bionic
- DOCKER_VERSION=18.06.3~ce~3-0~ubuntu
jobs:
fast_finish: true
before_install:
- docker --version
- gem install bundler -v '~> 1.17.3'
before_script:
- sudo ./script/install_docker.sh ${DOCKER_VERSION} ${DOCKER_CE}
- uname -a
- docker --version
- docker info
docker-api-2.2.0/Dockerfile:
FROM scratch
ADD Dockerfile /Dockerfile
docker-api-2.2.0/Gemfile:
source 'http://rubygems.org'
gemspec
docker-api-2.2.0/LICENSE:
The MIT License (MIT)
Copyright (c) 2014 Swipely, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
docker-api-2.2.0/README.md:
docker-api
==========
[Gem Version](https://badge.fury.io/rb/docker-api) [Build Status](https://travis-ci.org/swipely/docker-api) [Code Climate](https://codeclimate.com/github/swipely/docker-api)
This gem provides an object-oriented interface to the [Docker Engine API](https://docs.docker.com/develop/sdk/). Every method listed there is implemented. At the time of this writing, docker-api is meant to interface with Docker version 1.4.*.
If you're interested in using Docker to package your apps, we recommend the [dockly](https://github.com/swipely/dockly) gem. Dockly provides a simple DSL for describing Docker containers that install as Debian packages and are controlled by upstart scripts.
Installation
------------
Add this line to your application's Gemfile:
```ruby
gem 'docker-api'
```
And then run:
```shell
$ bundle install
```
Alternatively, if you wish to just use the gem in a script, you can run:
```shell
$ gem install docker-api
```
Finally, just add `require 'docker'` to the top of the file using this gem.
Usage
-----
docker-api is designed to be very lightweight. Almost no state is cached (aside from ids, which are immutable) to ensure that each method call's information is up to date. As such, just about every external method represents an API call.
At this time, basic `podman` support has been added via the podman docker-compatible API socket.
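For example, to talk to podman instead of Docker, point the gem at podman's docker-compatible socket. The socket path below is an assumption and depends on how podman is set up on your host (rootless podman typically exposes it under `$XDG_RUNTIME_DIR/podman/podman.sock`):

```ruby
require 'docker'

# Hypothetical rootful podman socket path; adjust for your installation.
Docker.url = 'unix:///run/podman/podman.sock'

Docker.podman?
# => true (the server reports a Podman component in its version info)
```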
## Starting up
Follow the [installation instructions](https://docs.docker.com/install/), and then run:
```shell
$ sudo docker -d
```
This will daemonize Docker so that it can be used for the remote API calls.
### Host
If you're running Docker locally as a socket, there is no setup to do in Ruby. If you're not using a socket or have changed the path of the socket, you'll have to point the gem to your socket or local/remote port. For example:
```ruby
Docker.url = 'tcp://example.com:5422'
```
Two things to note here. The first is that this gem uses [excon](https://github.com/excon/excon), so any of the options that are valid for `Excon.new` are also valid for `Docker.options`. Second, by default Docker runs on a socket. The gem will assume you want to connect to the socket unless you specify otherwise.
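For instance, Excon's timeout options can be passed straight through `Docker.options` (the values below are only illustrative):

```ruby
require 'docker'

Docker.url = 'tcp://example.com:2375'
# Any option accepted by Excon.new may be used here.
Docker.options = { read_timeout: 300, connect_timeout: 5 }
Docker.connection
# => Docker::Connection { :url => tcp://example.com:2375, :options => {:read_timeout=>300, :connect_timeout=>5} }
```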
Also, you may set the above variables via `ENV` variables. For example:
```shell
$ DOCKER_URL=unix:///var/docker.sock irb
irb(main):001:0> require 'docker'
=> true
irb(main):002:0> Docker.url
=> "unix:///var/docker.sock"
irb(main):003:0> Docker.options
=> {}
```
```shell
$ DOCKER_URL=tcp://example.com:1000 irb
irb(main):001:0> require 'docker'
=> true
irb(main):003:0> Docker.url
=> "tcp://example.com:1000"
irb(main):004:0> Docker.options
=> {}
```
### SSL
When running Docker with SSL, setting the `DOCKER_CERT_PATH` environment variable will configure docker-api to use SSL.
The cert path is a folder that contains the certificate, key, and CA certificate files.
docker-api expects the files to be named cert.pem, key.pem, and ca.pem.
If your files are named differently, you'll want to set your options explicitly:
```ruby
Docker.options = {
client_cert: File.join(cert_path, 'cert.pem'),
client_key: File.join(cert_path, 'key.pem'),
ssl_ca_file: File.join(cert_path, 'ca.pem'),
scheme: 'https'
}
```
If you want to load the cert files from a variable, e.g. you want to load them from ENV as needed on Heroku:
```ruby
cert_store = OpenSSL::X509::Store.new
certificate = OpenSSL::X509::Certificate.new ENV["DOCKER_CA"]
cert_store.add_cert certificate
Docker.options = {
client_cert_data: ENV["DOCKER_CERT"],
client_key_data: ENV["DOCKER_KEY"],
ssl_cert_store: cert_store,
scheme: 'https'
}
```
If you need to disable SSL verification, set the DOCKER_SSL_VERIFY variable to 'false'.
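For example, assuming your certificate files live in `~/.docker` (the host and paths below are only illustrative):

```shell
$ export DOCKER_URL=tcp://example.com:2376
$ export DOCKER_CERT_PATH=~/.docker
$ export DOCKER_SSL_VERIFY=false   # e.g. for self-signed certificates
$ irb -r docker
```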
## Global calls
All of the following examples require a connection to a Docker server. See the Starting up section above for more information.
```ruby
require 'docker'
# => true
# docker command for reference: docker version
Docker.version
# => { 'Version' => '0.5.2', 'GoVersion' => 'go1.1' }
# docker command for reference: docker info
Docker.info
# => { "Debug" => false, "Containers" => 187, "Images" => 196, "NFd" => 10, "NGoroutines" => 9, "MemoryLimit" => true }
# docker command for reference: docker login
Docker.authenticate!('username' => 'docker-fan-boi', 'password' => 'i<3docker', 'email' => 'dockerboy22@aol.com')
# => true
# docker command for reference: docker login registry.gitlab.com
Docker.authenticate!('username' => 'docker-fan-boi', 'password' => 'i<3docker', 'email' => 'dockerboy22@aol.com', 'serveraddress' => 'https://registry.gitlab.com/v1/')
# => true
```
## Images
Just about every method here has a one-to-one mapping with the [Images](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#2-2-images) section of the API. If an API call accepts query parameters, these can be passed as a Hash to its corresponding method. Also, note that `Docker::Image.new` is a private method, so you must use `.create`, `.build`, `.build_from_dir`, `.build_from_tar`, or `.import` to make an instance.
```ruby
require 'docker'
# => true
# Pull an Image.
# docker command for reference: docker pull ubuntu:14.04
image = Docker::Image.create('fromImage' => 'ubuntu:14.04')
# => Docker::Image { :id => ae7ffbcd1, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Insert a local file into an Image.
image.insert_local('localPath' => 'Gemfile', 'outputPath' => '/')
# => Docker::Image { :id => 682ea192f, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Insert multiple local files into an Image.
image.insert_local('localPath' => [ 'Gemfile', 'Rakefile' ], 'outputPath' => '/')
# => Docker::Image { :id => eb693ec80, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Add a repo name to Image.
# docker command for reference: docker tag base2
image.tag('repo' => 'base2', 'force' => true)
# => ["base2"]
# Add a repo name and tag an Image.
# docker command for reference: docker tag base2:latest
image.tag('repo' => 'base2', 'tag' => 'latest', force: true)
# => ["base2:latest"]
# Get more information about the Image.
# docker command for reference: docker inspect
image.json
# => {"id"=>"67859327bf22ef8b5b9b4a6781f72b2015acd894fa03ce07e0db7af170ba468c", "comment"=>"Imported from -", "created"=>"2013-06-19T18:42:58.287944526-04:00", "container_config"=>{"Hostname"=>"", "User"=>"", "Memory"=>0, "MemorySwap"=>0, "CpuShares"=>0, "AttachStdin"=>false, "AttachStdout"=>false, "AttachStderr"=>false, "PortSpecs"=>nil, "Tty"=>false, "OpenStdin"=>false, "StdinOnce"=>false, "Env"=>nil, "Cmd"=>nil, "Dns"=>nil, "Image"=>"", "Volumes"=>nil, "VolumesFrom"=>""}, "docker_version"=>"0.4.0", "architecture"=>"x86_64"}
# View the history of the Image.
image.history
# => [{"Id"=>"67859327bf22", "Created"=>1371681778}]
# Push the Image to the Docker registry. Note that you have to login using
# `Docker.authenticate!` and tag the Image first.
# docker command for reference: docker push
image.push
# => Docker::Image { @connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} }, @info = { "id" => eb693ec80, "RepoTags" => ["base2", "base2/latest"]} }
# Push individual tag to the Docker registry.
image.push(nil, tag: "tag_name")
image.push(nil, repo_tag: 'registry/repo_name:tag_name')
# Given a command, create a new Container to run that command in the Image.
# docker command for reference: docker run -ti ls -l
image.run('ls -l')
# => Docker::Container { id => aaef712eda, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Remove the Image from the server.
# docker command for reference: docker rmi -f
image.remove(:force => true)
# => true
# Export a single Docker Image to a file
# docker command for reference: docker save my_export.tar
image.save('my_export.tar')
# => Docker::Image { :id => 66b712aef, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Return the raw image binary data
image.save
# => "abiglongbinarystring"
# Stream the contents of the image to a block:
image.save_stream { |chunk| puts chunk }
# => nil
# Given a Container's export, creates a new Image.
# docker command for reference: docker import some-export.tar
Docker::Image.import('some-export.tar')
# => Docker::Image { :id => 66b712aef, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# `Docker::Image.import` can also import from a URI
Docker::Image.import('http://some-site.net/my-image.tar')
# => Docker::Image { :id => 6b462b2d2, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# For a lower-level interface for importing tars, `Docker::Image.import_stream` may be used.
# It accepts a block, and will call that block until it returns an empty `String`.
File.open('my-export.tar') do |file|
Docker::Image.import_stream { file.read(1000).to_s }
end
# Create an Image from a Dockerfile as a String.
Docker::Image.build("from base\nrun touch /test")
# => Docker::Image { :id => b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Create an Image from a Dockerfile.
# docker command for reference: docker build .
Docker::Image.build_from_dir('.')
# => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Create an Image from a file other than Dockerfile.
# docker command for reference: docker build -f Dockerfile.Centos .
Docker::Image.build_from_dir('.', { 'dockerfile' => 'Dockerfile.Centos' })
# => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Create an Image from a Dockerfile and stream the logs
Docker::Image.build_from_dir('.') do |v|
if (log = JSON.parse(v)) && log.has_key?("stream")
$stdout.puts log["stream"]
end
end
# Create an Image from a tar file.
# docker command for reference: docker build - < docker_image.tar
Docker::Image.build_from_tar(File.open('docker_image.tar', 'r'))
# => Docker::Image { :id => 1266dc19e, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Load all Images on your Docker server.
# docker command for reference: docker images
Docker::Image.all
# => [Docker::Image { :id => b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => 8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
# Get Image from the server, with id
# docker command for reference: docker images
Docker::Image.get('df4f1bdecf40')
# => Docker::Image { :id => eb693ec80, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Check if an image with a given id exists on the server.
Docker::Image.exist?('ef723dcdac09')
# => true
# Load an image from the file system
Docker::Image.load('./my-image.tar')
# => ""
# An IO object may also be specified for loading
File.open('./my-image.tar', 'rb') do |file|
Docker::Image.load(file)
end
# => ""
# Export multiple images to a single tarball
# docker command for reference: docker save my_image1 my_image2:not_latest > my_export.tar
names = %w( my_image1 my_image2:not_latest )
Docker::Image.save(names, 'my_export.tar')
# => nil
# Return the raw image binary data
names = %w( my_image1 my_image2:not_latest )
Docker::Image.save(names)
# => "abiglongbinarystring"
# Stream the raw binary data
names = %w( my_image1 my_image2:not_latest )
Docker::Image.save_stream(names) { |chunk| puts chunk }
# => nil
# Search the Docker registry.
# docker command for reference: docker search sshd
Docker::Image.search('term' => 'sshd')
# => [Docker::Image { :id => cespare/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => johnfuller/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => dhrp/mongodb-sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => rayang2004/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => dhrp/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd-nginx, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/daemontools-sshd-nginx-php-fpm, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => mbkan/lamp, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => toorop/golang, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => wma55/u1210sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => jdswinbank/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }, Docker::Image { :id => vgauthier/sshd, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
```
## Containers
Much like the Images, this object also has a one-to-one mapping with the [Containers](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#2-1-containers) section of the API. Also like Images, `.new` is a private method, so you must use `.create` to make an instance.
```ruby
require 'docker'
# Create a Container.
container = Docker::Container.create('Cmd' => ['ls'], 'Image' => 'base')
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Get more information about the Container.
container.json
# => {"ID"=>"492510dd38e4da7703f36dfccd013de672b8250f57f59d1555ced647766b5e82", "Created"=>"2013-06-20T10:46:02.897548-04:00", "Path"=>"ls", "Args"=>[], "Config"=>{"Hostname"=>"492510dd38e4", "User"=>"", "Memory"=>0, "MemorySwap"=>0, "CpuShares"=>0, "AttachStdin"=>false, "AttachStdout"=>false, "AttachStderr"=>false, "PortSpecs"=>nil, "Tty"=>false, "OpenStdin"=>false, "StdinOnce"=>false, "Env"=>nil, "Cmd"=>["ls"], "Dns"=>nil, "Image"=>"base", "Volumes"=>nil, "VolumesFrom"=>""}, "State"=>{"Running"=>false, "Pid"=>0, "ExitCode"=>0, "StartedAt"=>"0001-01-01T00:00:00Z", "Ghost"=>false}, "Image"=>"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", "NetworkSettings"=>{"IpAddress"=>"", "IpPrefixLen"=>0, "Gateway"=>"", "Bridge"=>"", "PortMapping"=>nil}, "SysInitPath"=>"/usr/bin/docker", "ResolvConfPath"=>"/etc/resolv.conf", "Volumes"=>nil}
# Start running the Container.
container.start
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Stop running the Container.
container.stop
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Restart the Container.
container.restart
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Pause the running Container processes.
container.pause
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Unpause the running Container processes.
container.unpause
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Kill the command running in the Container.
container.kill
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Kill the Container specifying the kill signal.
container.kill(:signal => "SIGHUP")
# => Docker::Container { :id => 492510dd38e4, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Return the currently executing processes in a Container.
container.top
# => [{"PID"=>"4851", "TTY"=>"pts/0", "TIME"=>"00:00:00", "CMD"=>"lxc-start"}]
# Same as above, but uses the original format
container.top(format: :hash)
# => {
# "Titles" => ["PID", "TTY", "TIME", "CMD"],
# "Processes" => [["4851", "pts/0", "00:00:00", "lxc-start"]]
# }
# To expose 1234 to bridge
# In Dockerfile: EXPOSE 1234/tcp
# docker run resulting-image-name
Docker::Container.create(
'Image' => 'image-name',
'HostConfig' => {
'PortBindings' => {
'1234/tcp' => [{}]
}
}
)
# To expose 1234 to host with any port
# docker run -p 1234 image-name
Docker::Container.create(
'Image' => 'image-name',
'ExposedPorts' => { '1234/tcp' => {} },
'HostConfig' => {
'PortBindings' => {
'1234/tcp' => [{}]
}
}
)
# To expose 1234 to host with a specified host port
# docker run -p 1234:1234 image-name
Docker::Container.create(
'Image' => 'image-name',
'ExposedPorts' => { '1234/tcp' => {} },
'HostConfig' => {
'PortBindings' => {
'1234/tcp' => [{ 'HostPort' => '1234' }]
}
}
)
# To expose 1234 to host with a specified host port and host IP
# docker run -p 192.168.99.100:1234:1234 image-name
Docker::Container.create(
'Image' => 'image-name',
'ExposedPorts' => { '1234/tcp' => {} },
'HostConfig' => {
'PortBindings' => {
'1234/tcp' => [{ 'HostPort' => '1234', 'HostIp' => '192.168.99.100' }]
}
}
)
# To set container name pass `name` key to options
Docker::Container.create(
'name' => 'my-new-container',
'Image' => 'image-name'
)
# Stores a file with the given content in the container
container.store_file("/test", "Hello world")
# Reads a file from the container
container.read_file("/test")
# => "Hello world"
# Export a Container. Since an export is typically at least 300M, chunks of the
# export are yielded instead of just returning the whole thing.
File.open('export.tar', 'w') do |file|
container.export { |chunk| file.write(chunk) }
end
# => nil
# Inspect a Container's changes to the file system.
container.changes
# => [{'Path'=>'/dev', 'Kind'=>0}, {'Path'=>'/dev/kmsg', 'Kind'=>1}]
# Copy files/directories from the Container. Note that these are exported as tars.
container.copy('/etc/hosts') { |chunk| puts chunk }
hosts0000644000000000000000000000023412100405636007023 0ustar
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
# => Docker::Container { :id => a1759f3e2873, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Wait for the current command to finish executing. If an argument is given,
# will time out after that number of seconds. The default is one minute.
container.wait(15)
# => {'StatusCode'=>0}
# Attach to the Container. Currently, the below options are the only valid ones.
# By default, :stream, :stdout, and :stderr are set.
container.attach(:stream => true, :stdin => nil, :stdout => true, :stderr => true, :logs => true, :tty => false)
# => [["bin\nboot\ndev\netc\nhome\nlib\nlib64\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nselinux\nsrv\nsys\ntmp\nusr\nvar", []]
# If you wish to stream the attach method, a block may be supplied.
container = Docker::Container.create('Image' => 'base', 'Cmd' => ['find / -name *'])
container.tap(&:start).attach { |stream, chunk| puts "#{stream}: #{chunk}" }
stderr: 2013/10/30 17:16:24 Unable to locate find / -name *
# => [[], ["2013/10/30 17:16:24 Unable to locate find / -name *\n"]]
# If you want to attach to stdin of the container, supply an IO-like object:
container = Docker::Container.create('Image' => 'base', 'Cmd' => ['cat'], 'OpenStdin' => true, 'StdinOnce' => true)
container.tap(&:start).attach(stdin: StringIO.new("foo\nbar\n"))
# => [["foo\nbar\n"], []]
# Similar to the stdout/stderr attach method, there are logs and streaming_logs.
# logs will only return after the container has exited. The output will be the raw output from the logs stream.
# streaming_logs will collect the messages out of the multiplexed form and also execute a block on each line that comes in (the block takes a stream and a chunk as arguments).
# Raw logs from a TTY-enabled container after exit
container.logs(stdout: true)
# => "\e]0;root@8866c76564e8: /\aroot@8866c76564e8:/# echo 'i\b \bdocker-api'\r\ndocker-api\r\n\e]0;root@8866c76564e8: /\aroot@8866c76564e8:/# exit\r\n"
# Logs from a non-TTY container with multiplex prefix
container.logs(stdout: true)
# => "\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00021\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00022\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00023\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00024\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00025\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00026\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00027\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00028\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00029\n\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u000310\n"
# Streaming logs from non-TTY container removing multiplex prefix with a block printing out each line (block not possible with Container#logs)
container.streaming_logs(stdout: true) { |stream, chunk| puts "#{stream}: #{chunk}" }
stdout: 1
stdout: 2
stdout: 3
stdout: 4
stdout: 5
stdout: 6
stdout: 7
stdout: 8
stdout: 9
stdout: 10
# => "1\n\n2\n\n3\n\n4\n\n5\n\n6\n\n7\n\n8\n\n9\n\n10\n"
# If the container has TTY enabled, set `tty => true` to get the raw stream:
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"I'm a TTY!\"; fi"]
container = Docker::Container.create('Image' => 'ubuntu', 'Cmd' => command, 'Tty' => true)
container.tap(&:start).attach(:tty => true)
# => [["I'm a TTY!"], []]
# Obtaining the current statistics of a container
container.stats
# => {"read"=>"2016-02-29T20:47:05.221608695Z", "precpu_stats"=>{"cpu_usage"=> ... }
# Create an Image from a Container's changes.
container.commit
# => Docker::Image { :id => eaeb8d00efdf, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Commit the Container and run a new command. The second argument is the number
# of seconds the Container should wait before stopping its current command.
container.run('pwd', 10)
# => Docker::Image { :id => 4427be4199ac, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Run an Exec instance inside the container and capture its output and exit status
container.exec(['date'])
# => [["Wed Nov 26 11:10:30 CST 2014\n"], [], 0]
# Launch an Exec instance without capturing its output or status
container.exec(['./my_service'], detach: true)
# => Docker::Exec { :id => be4eaeb8d28a, :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Parse the output of an Exec instance
container.exec(['find', '/', '-name *']) { |stream, chunk| puts "#{stream}: #{chunk}" }
stderr: 2013/10/30 17:16:24 Unable to locate find / -name *
# => [[], ["2013/10/30 17:16:24 Unable to locate find / -name *\n"], 1]
# Run an Exec instance, grabbing only the STDOUT output
container.exec(['date'], stderr: false)
# => [["Wed Nov 26 11:10:30 CST 2014\n"], [], 0]
# Pass input to an Exec instance command via Stdin
container.exec(['cat'], stdin: StringIO.new("foo\nbar\n"))
# => [["foo\nbar\n"], [], 0]
# Get the raw stream of data from an Exec instance
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"I'm a TTY!\"; fi"]
container.exec(command, tty: true)
# => [["I'm a TTY!"], [], 0]
# Wait for the current command to finish executing. If an argument is given,
# will time out after that number of seconds. The default is one minute.
command = ["bash", "-c", "if [ -t 1 ]; then echo -n \"Set max seconds for exec!!\"; fi"]
container.exec(command, wait: 120)
# => [["Set max seconds for exec!"], [], 0]
# Delete a Container.
container.delete(:force => true)
# => nil
# Update the container.
container.update("CpuShares" => 50000")
# Request a Container by ID or name.
Docker::Container.get('500f53b25e6e')
# => Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }
# Request all of the Containers. By default, will only return the running Containers.
Docker::Container.all(:all => true)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
```
## JSON encoded values
For JSON encoded values, nothing is done implicitly, meaning you need to explicitly call `to_json` on your parameter before the call. For example, to request all of the Containers using a filter:
```ruby
require 'docker'
# Request all of the Containers, filtering by status exited.
Docker::Container.all(all: true, filters: { status: ["exited"] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
# Request all of the Containers, filtering by label_name.
Docker::Container.all(all: true, filters: { label: [ "label_name" ] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
# Request all of the Containers, filtering by the label label_name having the value label_value.
Docker::Container.all(all: true, filters: { label: [ "label_name=label_value" ] }.to_json)
# => [Docker::Container { :id => , :connection => Docker::Connection { :url => tcp://localhost, :options => {:port=>2375} } }]
```
This applies to all parameters that the Docker API expects to be JSON encoded.
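The same pattern works for other endpoints that take a `filters` parameter. As a sketch (the `dangling` filter name comes from the Docker API itself, not from this gem):

```ruby
require 'docker'

# List only dangling images; the caller must JSON encode the filters hash.
Docker::Image.all(filters: { dangling: ["true"] }.to_json)
```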
## Events
```ruby
require 'docker'
# Action on a stream of events as they come in
Docker::Event.stream { |event| puts event; break }
Docker::Event { :status => create, :id => aeb8b55726df63bdd69d41e1b2650131d7ce32ca0d2fa5cbc75f24d0df34c7b0, :from => base:latest, :time => 1416958554 }
# => nil
# Action on all events after a given time (will execute the block for all events up till the current time, and wait to execute on any new events after)
Docker::Event.since(1416958763) { |event| puts event; puts Time.now.to_i; break }
Docker::Event { :status => die, :id => 663005cdeb56f50177c395a817dbc8bdcfbdfbdaef329043b409ecb97fb68d7e, :from => base:latest, :time => 1416958764 }
1416959041
# => nil
```
These methods are prone to read timeouts. `Docker.options[:read_timeout]` will need to be made higher than 60 seconds if expecting a long time between events.
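A minimal sketch of raising the read timeout before streaming (600 seconds is an arbitrary example value):

```ruby
require 'docker'

Docker.options = { read_timeout: 600 }
Docker::Event.stream { |event| puts event }
```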
## Connecting to Multiple Servers
By default, each object uses the connection specified by `Docker.connection`. If you need to connect to multiple servers, you can do so by specifying the connection on `#new` or by passing it to the corresponding class method. For example:
```ruby
require 'docker'
Docker::Container.all({}, Docker::Connection.new('tcp://example.com:2375', {}))
```
## Rake Task
To create images through `rake`, a DSL task is provided. For example:
```ruby
require 'rake'
require 'docker'
image 'repo:tag' do
image = Docker::Image.create('fromImage' => 'repo', 'tag' => 'old_tag')
image = Docker::Image.run('rm -rf /etc').commit
image.tag('repo' => 'repo', 'tag' => 'tag')
end
image 'repo:new_tag' => 'repo:tag' do
image = Docker::Image.create('fromImage' => 'repo', 'tag' => 'tag')
image = image.insert_local('localPath' => 'some-file.tar.gz', 'outputPath' => '/')
image.tag('repo' => 'repo', 'tag' => 'new_tag')
end
```
## Not supported (yet)
* Generating a tarball of images and metadata for a repository specified by a name: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#get-a-tarball-containing-all-images-and-tags-in-a-repository
* Load a tarball generated from docker that contains all the images and metadata of a repository: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.14/#load-a-tarball-with-a-set-of-images-and-tags-into-docker
License
-----
This program is licensed under the MIT license. See LICENSE for details.
docker-api-2.2.0/Rakefile:
require 'bundler/setup'
ENV['PATH'] = "/opt/docker/:#{ENV['PATH']}" if ENV['CI'] == 'true'
require 'docker'
require 'rspec/core/rake_task'
require 'cane/rake_task'
desc 'Run the full test suite from scratch'
task :default => [:unpack, :rspec, :quality]
RSpec::Core::RakeTask.new do |t|
t.pattern = 'spec/**/*_spec.rb'
end
Cane::RakeTask.new(:quality) do |cane|
cane.canefile = '.cane'
end
desc 'Download the necessary base images'
task :unpack do
%w( swipely/base registry busybox tianon/true debian:stable ).each do |image|
system "docker pull #{image}"
end
end
desc 'Run spec tests with a registry'
task :rspec do
begin
registry = Docker::Container.create(
'name' => 'registry',
'Image' => 'registry',
'Env' => ["GUNICORN_OPTS=[--preload]"],
'ExposedPorts' => {
'5000/tcp' => {}
},
'HostConfig' => {
'PortBindings' => { '5000/tcp' => [{ 'HostPort' => '5000' }] }
}
)
registry.start
Rake::Task["spec"].invoke
ensure
registry.kill!.remove unless registry.nil?
end
end
desc 'Pull an Ubuntu image'
image 'ubuntu:13.10' do
puts "Pulling ubuntu:13.10"
image = Docker::Image.create('fromImage' => 'ubuntu', 'tag' => '13.10')
puts "Pulled ubuntu:13.10, image id: #{image.id}"
end
docker-api-2.2.0/TESTING.md:
# Prerequisites
To develop on this gem, you must have the following installed:
* a sane Ruby 1.9+ environment with `bundler`
```shell
$ gem install bundler
```
* Docker v1.3.1 or greater
# Getting Started
1. Clone the git repository from GitHub:
```shell
$ git clone git@github.com:swipely/docker-api.git
```
2. Install the dependencies using Bundler
```shell
$ bundle install
```
3. Create a branch for your changes
```shell
$ git checkout -b my_bug_fix
```
4. Make any changes
5. Write tests to support those changes.
6. Run the tests:
* `bundle exec rake`
7. Assuming the tests pass, open a Pull Request on GitHub.
# Using Rakefile Commands
This repository comes with five Rake commands to assist in your testing of the code.
## `rake rspec`
This command will run Rspec tests normally on your local system. You must have all the required base images pulled.
## `rake quality`
This command runs a code quality threshold checker to guard against low-quality code.
## `rake unpack`
Pulls down all the required base images for testing.
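A typical local run, assuming Docker is already running, chains these tasks together:

```shell
$ bundle exec rake unpack    # pull the base images used by the specs
$ bundle exec rake rspec     # run the spec suite against your local Docker
$ bundle exec rake quality   # run the cane code-quality checks
```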
### Setting Up Environment Variables
Certain Rspec tests will require your credentials to the Docker Hub. If you do not have a Docker Hub account, you can sign up for one [here](https://hub.docker.com/account/signup/). To avoid hard-coding credentials into the code the test suite leverages three Environment Variables: `DOCKER_API_USER`, `DOCKER_API_PASS`, and `DOCKER_API_EMAIL`. You will need to configure your work environment (shell profile, IDE, etc) with these values in order to successfully run certain tests.
```shell
export DOCKER_API_USER='your_docker_hub_user'
export DOCKER_API_PASS='your_docker_hub_password'
export DOCKER_API_EMAIL='your_docker_hub_email_address'
```
docker-api-2.2.0/docker-api.gemspec:
# -*- encoding: utf-8 -*-
require File.expand_path('../lib/docker/version', __FILE__)
Gem::Specification.new do |gem|
gem.authors = ['Swipely, Inc.']
gem.email = 'tomhulihan@swipely.com bright@swipely.com toddlunter@swipely.com'
gem.description = gem.summary = 'A simple REST client for the Docker Remote API'
gem.homepage = 'https://github.com/swipely/docker-api'
gem.license = 'MIT'
gem.files = `git ls-files lib README.md LICENSE`.split($\)
gem.name = 'docker-api'
gem.version = Docker::VERSION
gem.add_dependency 'excon', '>= 0.47.0'
gem.add_dependency 'multi_json'
gem.add_development_dependency 'rake'
gem.add_development_dependency 'rspec', '~> 3.0'
gem.add_development_dependency 'rspec-its'
gem.add_development_dependency 'cane'
gem.add_development_dependency 'pry'
gem.add_development_dependency 'single_cov'
gem.add_development_dependency 'webmock'
gem.add_development_dependency 'parallel'
end
docker-api-2.2.0/lib/docker-api.rb:
require 'docker'
docker-api-2.2.0/lib/docker.rb:
require 'cgi'
require 'multi_json'
require 'excon'
require 'tempfile'
require 'base64'
require 'find'
require 'rubygems/package'
require 'uri'
require 'open-uri'
# Add the Hijack middleware at the top of the middleware stack so it can
# potentially hijack HTTP sockets (when attaching to stdin) before other
# middlewares try and parse the response.
require 'excon/middlewares/hijack'
Excon.defaults[:middlewares].unshift Excon::Middleware::Hijack
Excon.defaults[:middlewares] << Excon::Middleware::RedirectFollower
# The top-level module for this gem. Its purpose is to hold global
# configuration variables that are used as defaults in other classes.
module Docker
attr_accessor :creds, :logger
require 'docker/error'
require 'docker/connection'
require 'docker/base'
require 'docker/container'
require 'docker/network'
require 'docker/event'
require 'docker/exec'
require 'docker/image'
require 'docker/messages_stack'
require 'docker/messages'
require 'docker/util'
require 'docker/version'
require 'docker/volume'
require 'docker/rake_task' if defined?(Rake::Task)
def default_socket_url
'unix:///var/run/docker.sock'
end
def env_url
ENV['DOCKER_URL'] || ENV['DOCKER_HOST']
end
def env_options
if cert_path = ENV['DOCKER_CERT_PATH']
{
client_cert: File.join(cert_path, 'cert.pem'),
client_key: File.join(cert_path, 'key.pem'),
ssl_ca_file: File.join(cert_path, 'ca.pem'),
scheme: 'https'
}.merge(ssl_options)
else
{}
end
end
def ssl_options
if ENV['DOCKER_SSL_VERIFY'] == 'false'
{
ssl_verify_peer: false
}
else
{}
end
end
def url
@url ||= env_url || default_socket_url
# docker uses a default notation tcp:// which means tcp://localhost:2375
if @url == 'tcp://'
@url = 'tcp://localhost:2375'
end
@url
end
def options
@options ||= env_options
end
def url=(new_url)
@url = new_url
reset_connection!
end
def options=(new_options)
@options = env_options.merge(new_options || {})
reset_connection!
end
def connection
@connection ||= Connection.new(url, options)
end
def reset!
@url = nil
@options = nil
reset_connection!
end
def reset_connection!
@connection = nil
end
# Get the version of Go, Docker, and optionally the Git commit.
def version(connection = self.connection)
connection.version
end
# Get more information about the Docker server.
def info(connection = self.connection)
connection.info
end
# Ping the Docker server.
def ping(connection = self.connection)
connection.ping
end
# Determine if the server is podman or docker.
def podman?(connection = self.connection)
connection.podman?
end
# Determine if the session is rootless.
def rootless?(connection = self.connection)
connection.rootless?
end
# Login to the Docker registry.
def authenticate!(options = {}, connection = self.connection)
creds = MultiJson.dump(options)
connection.post('/auth', {}, body: creds)
@creds = creds
true
rescue Docker::Error::ServerError, Docker::Error::UnauthorizedError
raise Docker::Error::AuthenticationError
end
module_function :default_socket_url, :env_url, :url, :url=, :env_options,
:options, :options=, :creds, :creds=, :logger, :logger=,
:connection, :reset!, :reset_connection!, :version, :info,
:ping, :podman?, :rootless?, :authenticate!, :ssl_options
end
docker-api-2.2.0/lib/docker/base.rb:
# This class is a base class for Docker Container and Image.
# It implements accessor methods for the model's attributes.
module Docker::Base
include Docker::Error
attr_accessor :connection, :info
attr_reader :id
# The private new method accepts a connection and a hash of options that must include an id.
def initialize(connection, hash={})
unless connection.is_a?(Docker::Connection)
raise ArgumentError, "Expected a Docker::Connection, got: #{connection}."
end
normalize_hash(hash)
@connection, @info, @id = connection, hash, hash['id']
raise ArgumentError, "Must have id, got: #{hash}" unless @id
end
# The Docker API will sometimes return "ID", other times "Id",
# and other times "id". This method normalizes the key to "id".
# The volumes endpoint returns "Name" instead of an ID; that case is handled in its own normalize function.
def normalize_hash(hash)
hash["id"] ||= hash.delete("ID") || hash.delete("Id")
end
end
docker-api-2.2.0/lib/docker/connection.rb:
# This class represents a Connection to a Docker server. The Connection is
# immutable in that once the url and options are set they cannot be changed.
class Docker::Connection
require 'docker/util'
require 'docker/error'
include Docker::Error
attr_reader :url, :options
# Create a new Connection. This method takes a url (String) and options
# (Hash). These are passed to Excon, so any options valid for `Excon.new`
# can be passed here.
def initialize(url, opts)
case
when !url.is_a?(String)
raise ArgumentError, "Expected a String, got: '#{url}'"
when !opts.is_a?(Hash)
raise ArgumentError, "Expected a Hash, got: '#{opts}'"
else
uri = URI.parse(url)
if uri.scheme == "unix"
@url, @options = 'unix:///', {:socket => uri.path}.merge(opts)
elsif uri.scheme =~ /^(https?|tcp)$/
@url, @options = url, opts
else
@url, @options = "http://#{uri}", opts
end
end
end
# The actual client that sends HTTP methods to the Docker server. This value
# is not cached, since doing so may cause socket errors after bad requests.
def resource
Excon.new(url, options)
end
private :resource
# Send a request to the server with the given HTTP method, path, query, and options.
def request(*args, &block)
retries ||= 0
request = compile_request_params(*args, &block)
log_request(request)
begin
resource.request(request).body
rescue Excon::Errors::BadRequest => ex
if retries < 2
response_cause = ''
begin
response_cause = JSON.parse(ex.response.body)['cause']
rescue JSON::ParserError
#noop
end
if response_cause.is_a?(String)
# The error message will tell the application type given and then the
# application type that the message should be
#
# This is not perfect since it relies on processing a message that
# could change in the future. However, it should be a good stop-gap
# until all methods are updated to pass in the appropriate content
# type.
#
# A current example message is:
# * 'Content-Type: application/json is not supported. Should be "application/x-tar"'
matches = response_cause.delete('"\'').scan(%r{(application/\S+)})
unless matches.count < 2
Docker.logger.warn(
<<~RETRY_WARNING
Automatically retrying with content type '#{response_cause}'
Original Error: #{ex}
RETRY_WARNING
) if Docker.logger
request[:headers]['Content-Type'] = matches.last.first
retries += 1
retry
end
end
end
raise ClientError, ex.response.body
rescue Excon::Errors::Unauthorized => ex
raise UnauthorizedError, ex.response.body
rescue Excon::Errors::NotFound => ex
raise NotFoundError, ex.response.body
rescue Excon::Errors::Conflict => ex
raise ConflictError, ex.response.body
rescue Excon::Errors::InternalServerError => ex
raise ServerError, ex.response.body
rescue Excon::Errors::Timeout => ex
raise TimeoutError, ex.message
end
end
def log_request(request)
if Docker.logger
Docker.logger.debug(
[request[:method], request[:path], request[:query], request[:body]]
)
end
end
def to_s
"Docker::Connection { :url => #{url}, :options => #{options} }"
end
# Delegate all HTTP methods to the #request.
[:get, :put, :post, :delete].each do |method|
define_method(method) { |*args, &block| request(method, *args, &block) }
end
# Common attribute requests
def info
Docker::Util.parse_json(get('/info'))
end
def ping
get('/_ping')
end
def podman?
@podman ||= !(
Array(version['Components']).find do |component|
component['Name'].include?('Podman')
end
).nil?
end
def rootless?
@rootless ||= (info['Rootless'] == true)
end
def version
@version ||= Docker::Util.parse_json(get('/version'))
end
private
# Given an HTTP method, path, optional query, extra options, and block,
# compiles a request.
def compile_request_params(http_method, path, query = nil, opts = nil, &block)
query ||= {}
opts ||= {}
headers = opts.delete(:headers) || {}
content_type = opts[:body].nil? ? 'text/plain' : 'application/json'
user_agent = "Swipely/Docker-API #{Docker::VERSION}"
{
:method => http_method,
:path => path,
:query => query,
:headers => { 'Content-Type' => content_type,
'User-Agent' => user_agent,
}.merge(headers),
:expects => (200..204).to_a << 301 << 304,
:idempotent => http_method == :get,
:request_block => block,
}.merge(opts).reject { |_, v| v.nil? }
end
end
docker-api-2.2.0/lib/docker/container.rb:
# This class represents a Docker Container. It's important to note that nothing
# is cached so that the information is always up to date.
class Docker::Container
include Docker::Base
# Update the @info hash, which is the only mutable state in this object.
# e.g. if you would like a live status from the #info hash, call #refresh! first.
def refresh!
other = Docker::Container.all({all: true}, connection).find { |c|
c.id.start_with?(self.id) || self.id.start_with?(c.id)
}
info.merge!(self.json)
other && info.merge!(other.info) { |key, info_value, other_value| info_value }
self
end
# Return a List of Hashes that represents the top running processes.
def top(opts = {})
format = opts.delete(:format) { :array }
resp = Docker::Util.parse_json(connection.get(path_for(:top), opts))
if resp['Processes'].nil?
format == :array ? [] : {}
else
format == :array ? resp['Processes'].map { |ary| Hash[resp['Titles'].zip(ary)] } : resp
end
end
# Wait for the current command to finish executing. Default wait time is
# `Excon.options[:read_timeout]`.
def wait(time = nil)
excon_params = { :read_timeout => time }
resp = connection.post(path_for(:wait), nil, excon_params)
Docker::Util.parse_json(resp)
end
# Given a command and an optional number of seconds to wait for the currently
# executing command, creates a new Container to run the specified command. If
# the command that is currently executing does not return a 0 status code, an
# UnexpectedResponseError is raised.
def run(cmd, time = 1000)
if (code = tap(&:start).wait(time)['StatusCode']).zero?
commit.run(cmd)
else
raise UnexpectedResponseError, "Command returned status code #{code}."
end
end
# Create an Exec instance inside the container
#
# @param command [String, Array] The command to run inside the Exec instance
# @param options [Hash] The options to pass to Docker::Exec
#
# @return [Docker::Exec] The Exec instance
def exec(command, options = {}, &block)
# Establish values
tty = options.delete(:tty) || false
detach = options.delete(:detach) || false
user = options.delete(:user)
stdin = options.delete(:stdin)
stdout = options.delete(:stdout) || !detach
stderr = options.delete(:stderr) || !detach
wait = options.delete(:wait)
opts = {
'Container' => self.id,
'User' => user,
'AttachStdin' => !!stdin,
'AttachStdout' => stdout,
'AttachStderr' => stderr,
'Tty' => tty,
'Cmd' => command
}.merge(options)
# Create Exec Instance
instance = Docker::Exec.create(
opts,
self.connection
)
start_opts = {
:tty => tty,
:stdin => stdin,
:detach => detach,
:wait => wait
}
if detach
instance.start!(start_opts)
return instance
else
instance.start!(start_opts, &block)
end
end
# Export the Container as a tar.
def export(&block)
connection.get(path_for(:export), {}, :response_block => block)
self
end
# Attach to a container's standard streams / logs.
def attach(options = {}, excon_params = {}, &block)
stdin = options.delete(:stdin)
tty = options.delete(:tty)
opts = {
:stream => true, :stdout => true, :stderr => true
}.merge(options)
# Creates list to store stdout and stderr messages
msgs = Docker::Messages.new
if stdin
# If attaching to stdin, we must hijack the underlying TCP connection
# so we can stream stdin to the remote Docker process
opts[:stdin] = true
excon_params[:hijack_block] = Docker::Util.hijack_for(stdin, block,
msgs, tty)
else
excon_params[:response_block] = Docker::Util.attach_for(block, msgs, tty)
end
connection.post(
path_for(:attach),
opts,
excon_params
)
[msgs.stdout_messages, msgs.stderr_messages]
end
# Create an Image from a Container's changes.
def commit(options = {})
options.merge!('container' => self.id[0..7])
# [code](https://github.com/dotcloud/docker/blob/v0.6.3/commands.go#L1115)
# Based on the link, the config passed as run, needs to be passed as the
# body of the post so capture it, remove from the options, and pass it via
# the post body
config = MultiJson.dump(options.delete('run'))
hash = Docker::Util.parse_json(
connection.post('/commit', options, body: config)
)
Docker::Image.send(:new, self.connection, hash)
end
# Return a String representation of the Container.
def to_s
"Docker::Container { :id => #{self.id}, :connection => #{self.connection} }"
end
# #json returns information about the Container, #changes returns a list of
# the changes the Container has made to the filesystem.
[:json, :changes].each do |method|
define_method(method) do |opts = {}|
Docker::Util.parse_json(connection.get(path_for(method), opts))
end
end
# Fetch the container's logs as a single raw string (multiplexed unless the container has a TTY).
def logs(opts = {})
connection.get(path_for(:logs), opts)
end
# Return a point-in-time stats hash, or stream stats chunks to a block if one is given.
def stats(options = {})
if block_given?
options[:read_timeout] ||= 10
options[:idempotent] ||= false
parser = lambda do |chunk, remaining_bytes, total_bytes|
yield Docker::Util.parse_json(chunk)
end
begin
connection.get(path_for(:stats), nil, {response_block: parser}.merge(options))
rescue Docker::Error::TimeoutError
# If the container stops, the docker daemon will hold the connection
# open forever, but stop sending events.
# So this Timeout indicates the stream is over.
end
else
Docker::Util.parse_json(connection.get(path_for(:stats), {stream: 0}.merge(options)))
end
end
def rename(new_name)
query = {}
query['name'] = new_name
connection.post(path_for(:rename), query)
end
# Update the container's resource configuration (e.g. CPU shares, memory limits).
def update(opts)
connection.post(path_for(:update), {}, body: MultiJson.dump(opts))
end
# Stream the container's logs, demultiplexing stdout/stderr and optionally yielding each chunk to a block.
def streaming_logs(opts = {}, &block)
stack_size = opts.delete('stack_size') || opts.delete(:stack_size) || -1
tty = opts.delete('tty') || opts.delete(:tty) || false
msgs = Docker::MessagesStack.new(stack_size)
excon_params = {response_block: Docker::Util.attach_for(block, msgs, tty), idempotent: false}
connection.get(path_for(:logs), opts, excon_params)
msgs.messages.join
end
def start!(opts = {})
connection.post(path_for(:start), {}, body: MultiJson.dump(opts))
self
end
def kill!(opts = {})
connection.post(path_for(:kill), opts)
self
end
# #start! and #kill! both perform the associated action and
# return the Container. #start and #kill do the same,
# but rescue from ServerErrors.
[:start, :kill].each do |method|
define_method(method) do |*args|
begin; public_send(:"#{method}!", *args); rescue ServerError; self end
end
end
# #stop! and #restart! both perform the associated action and
# return the Container. #stop and #restart do the same,
# but rescue from ServerErrors.
[:stop, :restart].each do |method|
define_method(:"#{method}!") do |opts = {}|
timeout = opts.delete('timeout')
query = {}
request_options = {
:body => MultiJson.dump(opts)
}
if timeout
query['t'] = timeout
# Ensure request does not timeout before Docker timeout
request_options.merge!(
read_timeout: timeout.to_i + 5,
write_timeout: timeout.to_i + 5
)
end
connection.post(path_for(method), query, request_options)
self
end
define_method(method) do |*args|
begin; public_send(:"#{method}!", *args); rescue ServerError; self end
end
end
# remove container
def remove(options = {})
connection.delete("/containers/#{self.id}", options)
nil
end
alias_method :delete, :remove
# pause and unpause containers
# #pause! and #unpause! both perform the associated action and
# return the Container. #pause and #unpause do the same,
# but rescue from ServerErrors.
[:pause, :unpause].each do |method|
define_method(:"#{method}!") do
connection.post path_for(method)
self
end
define_method(method) do
begin; public_send(:"#{method}!"); rescue ServerError; self; end
end
end
def archive_out(path, &block)
connection.get(
path_for(:archive),
{ 'path' => path },
:response_block => block
)
self
end
def archive_in(inputs, output_path, opts = {})
file_hash = Docker::Util.file_hash_from_paths([*inputs])
tar = StringIO.new(Docker::Util.create_tar(file_hash))
archive_in_stream(output_path, opts) do
tar.read(Excon.defaults[:chunk_size]).to_s
end
end
def archive_in_stream(output_path, opts = {}, &block)
overwrite = opts[:overwrite] || opts['overwrite'] || false
connection.put(
path_for(:archive),
{ 'path' => output_path, 'noOverwriteDirNonDir' => !overwrite },
:headers => {
'Content-Type' => 'application/x-tar'
},
&block
)
self
end
def read_file(path)
content = StringIO.new
archive_out(path) do |chunk|
content.write chunk
end
content.rewind
Gem::Package::TarReader.new(content) do |tar|
tar.each do |tarfile|
return tarfile.read
end
end
end
def store_file(path, file_content)
output_io = StringIO.new(
Docker::Util.create_tar(
path => file_content
)
)
archive_in_stream("/", overwrite: true) { output_io.read }
end
# Create a new Container.
def self.create(opts = {}, conn = Docker.connection)
query = opts.select {|key| ['name', :name].include?(key) }
clean_opts = opts.reject {|key| ['name', :name].include?(key) }
resp = conn.post('/containers/create', query, :body => MultiJson.dump(clean_opts))
hash = Docker::Util.parse_json(resp) || {}
new(conn, hash)
end
# Return the container with specified ID
def self.get(id, opts = {}, conn = Docker.connection)
container_json = conn.get("/containers/#{id}/json", opts)
hash = Docker::Util.parse_json(container_json) || {}
new(conn, hash)
end
# Return all of the Containers.
def self.all(opts = {}, conn = Docker.connection)
hashes = Docker::Util.parse_json(conn.get('/containers/json', opts)) || []
hashes.map { |hash| new(conn, hash) }
end
# Prune (remove) stopped containers
def self.prune(conn = Docker.connection)
conn.post("/containers/prune", {})
nil
end
# Convenience method to return the path for a particular resource.
def path_for(resource)
"/containers/#{self.id}/#{resource}"
end
private :path_for
private_class_method :new
end
docker-api-2.2.0/lib/docker/error.rb:
# This module holds the Errors for the gem.
module Docker::Error
# The default error. It's never actually raised, but can be used to catch all
# gem-specific errors that are thrown as they all subclass from this.
class DockerError < StandardError; end
# Raised when invalid arguments are passed to a method.
class ArgumentError < DockerError; end
# Raised when a request returns a 400.
class ClientError < DockerError; end
# Raised when a request returns a 401.
class UnauthorizedError < DockerError; end
# Raised when a request returns a 404.
class NotFoundError < DockerError; end
# Raised when a request returns a 409.
class ConflictError < DockerError; end
# Raised when a request returns a 500.
class ServerError < DockerError; end
# Raised when there is an unexpected response code / body.
class UnexpectedResponseError < DockerError; end
# Raised when there is an incompatible version of Docker.
class VersionError < DockerError; end
# Raised when a request times out.
class TimeoutError < DockerError; end
# Raised when login fails.
class AuthenticationError < DockerError; end
# Raised when an IO action fails.
class IOError < DockerError; end
end
docker-api-2.2.0/lib/docker/event.rb 0000664 0000000 0000000 00000005412 14071411272 0017215 0 ustar 00root root 0000000 0000000 # This class represents a Docker Event.
class Docker::Event
include Docker::Error
# Represents the actor object nested within an event
class Actor
attr_accessor :ID, :Attributes
def initialize(actor_attributes = {})
[:ID, :Attributes].each do |sym|
value = actor_attributes[sym]
if value.nil?
value = actor_attributes[sym.to_s]
end
send("#{sym}=", value)
end
if self.Attributes.nil?
self.Attributes = {}
end
end
alias_method :id, :ID
alias_method :attributes, :Attributes
end
class << self
include Docker::Error
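# An illustrative sketch: stream daemon events to a block. The call blocks
# until the connection closes, yielding one Docker::Event per line received.
#
#   Docker::Event.stream { |event| puts event }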
def stream(opts = {}, conn = Docker.connection, &block)
conn.get('/events', opts, :response_block => lambda { |b, r, t|
b.each_line do |line|
block.call(new_event(line, r, t))
end
})
end
def since(since, opts = {}, conn = Docker.connection, &block)
stream(opts.merge(:since => since), conn, &block)
end
def new_event(body, remaining, total)
return if body.nil? || body.empty?
json = Docker::Util.parse_json(body)
Docker::Event.new(json)
end
end
attr_accessor :Type, :Action, :time, :timeNano
attr_reader :Actor
# Deprecated interface
attr_accessor :status, :from
def initialize(event_attributes = {})
[:Type, :Action, :Actor, :time, :timeNano, :status, :from].each do |sym|
value = event_attributes[sym]
if value.nil?
value = event_attributes[sym.to_s]
end
send("#{sym}=", value)
end
if @Actor.nil?
value = event_attributes[:id]
if value.nil?
value = event_attributes['id']
end
self.Actor = Actor.new(ID: value)
end
end
def ID
self.actor.ID
end
def Actor=(actor)
return if actor.nil?
if actor.is_a? Actor
@Actor = actor
else
@Actor = Actor.new(actor)
end
end
alias_method :type, :Type
alias_method :action, :Action
alias_method :actor, :Actor
alias_method :time_nano, :timeNano
alias_method :id, :ID
def to_s
if type.nil? && action.nil?
to_s_legacy
else
to_s_actor_style
end
end
private
def to_s_legacy
attributes = []
attributes << "from=#{from}" unless from.nil?
unless attributes.empty?
attribute_string = "(#{attributes.join(', ')}) "
end
"Docker::Event { #{time} #{status} #{id} #{attribute_string}}"
end
def to_s_actor_style
most_accurate_time = time_nano || time
attributes = []
actor.attributes.each do |attribute, value|
attributes << "#{attribute}=#{value}"
end
unless attributes.empty?
attribute_string = "(#{attributes.join(', ')}) "
end
"Docker::Event { #{most_accurate_time} #{type} #{action} #{actor.id} #{attribute_string}}"
end
end
docker-api-2.2.0/lib/docker/exec.rb 0000664 0000000 0000000 00000006746 14071411272 0017033 0 ustar 00root root 0000000 0000000 # This class represents a Docker Exec Instance.
class Docker::Exec
include Docker::Base
# Convert details about the object into a string
#
# @return [String] String representation of the Exec instance object
def to_s
"Docker::Exec { :id => #{self.id}, :connection => #{self.connection} }"
end
# Create a new Exec instance in a running container. Please note, this does
# NOT execute the instance - you must run #start. Also, each instance is
# one-time use only.
#
# @param options [Hash] Parameters to pass in to the API.
# @param conn [Docker::Connection] Connection to Docker Remote API
#
# @return [Docker::Exec] the new Exec instance
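# An illustrative sketch, assuming `container` is a started Docker::Container:
#
#   exec_instance = Docker::Exec.create(
#     'Container' => container.id,
#     'AttachStdout' => true,
#     'Cmd' => %w[date]
#   )
#   stdout, stderr, exit_code = exec_instance.start!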
def self.create(options = {}, conn = Docker.connection)
container = options.delete('Container')
# Podman does not attach these by default but does require them to be attached
if ::Docker.podman?(conn)
options['AttachStderr'] = true if options['AttachStderr'].nil?
options['AttachStdout'] = true if options['AttachStdout'].nil?
end
resp = conn.post("/containers/#{container}/exec", {},
body: MultiJson.dump(options))
hash = Docker::Util.parse_json(resp) || {}
new(conn, hash)
end
# Get info about the Exec instance
#
def json
Docker::Util.parse_json(connection.get(path_for(:json), {}))
end
# Start the Exec instance. The Exec instance is deleted after this so this
# command can only be run once.
#
# @param options [Hash] Options to dictate behavior of the instance
# @option options [Object] :stdin (nil) The object to pass to STDIN.
# @option options [TrueClass, FalseClass] :detach (false) Whether to attach
# to STDOUT/STDERR.
# @option options [TrueClass, FalseClass] :tty (false) Whether to attach using
# a pseudo-TTY.
#
# @return [Array(Array<String>, Array<String>, Integer)] The STDOUT messages, STDERR messages and exit code
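# An illustrative sketch, assuming `exec_instance` was created via .create;
# with a block, each chunk is yielded as it arrives:
#
#   exec_instance.start!(tty: false) do |stream, chunk|
#     puts "#{stream}: #{chunk}"
#   end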
def start!(options = {}, &block)
# Parse the Options
tty = !!options.delete(:tty)
detached = !!options.delete(:detach)
stdin = options[:stdin]
read_timeout = options[:wait]
# Create API Request Body
body = MultiJson.dump(
'Tty' => tty,
'Detach' => detached
)
excon_params = { body: body }
msgs = Docker::Messages.new
unless detached
if stdin
excon_params[:hijack_block] = Docker::Util.hijack_for(stdin, block,
msgs, tty)
else
excon_params[:response_block] = Docker::Util.attach_for(block,
msgs, tty)
end
end
excon_params[:read_timeout] = read_timeout unless read_timeout.nil?
connection.post(path_for(:start), nil, excon_params)
[msgs.stdout_messages, msgs.stderr_messages, self.json['ExitCode']]
end
# #start! performs the associated action and returns the output.
# #start does the same, but rescues from ServerErrors.
[:start].each do |method|
define_method(method) do |*args|
begin; public_send(:"#{method}!", *args); rescue ServerError; self end
end
end
# Resize the TTY associated with the Exec instance
#
# @param query [Hash] API query parameters
# @option query [Fixnum] h Height of the TTY
# @option query [Fixnum] w Width of the TTY
#
# @return [Docker::Exec] self
def resize(query = {})
connection.post(path_for(:resize), query)
self
end
# Get the request URI for the given endpoint
#
# @param endpoint [Symbol] The endpoint to grab
# @return [String] The full Remote API endpoint with ID
def path_for(endpoint)
"/exec/#{self.id}/#{endpoint}"
end
private :path_for
private_class_method :new
end
docker-api-2.2.0/lib/docker/image.rb 0000664 0000000 0000000 00000027377 14071411272 0017174 0 ustar 00root root 0000000 0000000 # This class represents a Docker Image.
class Docker::Image
include Docker::Base
# Given a command and optional list of streams to attach to, run a command on
# an Image. This will not modify the Image, but rather create a new Container
# to run the Image. If the image has an embedded config, no command is
# necessary; if neither a command nor an embedded config is present, the run fails with a 500.
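# An illustrative sketch; the created container is started but not removed here:
#
#   container = image.run('ls')
#   container.wait
#   puts container.streaming_logs(stdout: true)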
def run(cmd = nil, options = {})
opts = {'Image' => self.id}.merge(options)
opts["Cmd"] = cmd.is_a?(String) ? cmd.split(/\s+/) : cmd
begin
Docker::Container.create(opts, connection)
.tap(&:start!)
rescue ServerError, ClientError => ex
if cmd
raise ex
else
raise ex, "No command specified."
end
end
end
# Push the Image to the Docker registry.
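# An illustrative sketch, assuming the image is already tagged; the
# credentials below are hypothetical:
#
#   image.push(
#     'username' => 'user',
#     'password' => 'secret',
#     'serveraddress' => 'https://index.docker.io/v1/'
#   )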
def push(creds = nil, options = {}, &block)
repo_tag = options.delete(:repo_tag) || ensure_repo_tags.first
raise ArgumentError, "Image is untagged" if repo_tag.nil?
repo, tag = Docker::Util.parse_repo_tag(repo_tag)
raise ArgumentError, "Image does not have a name to push." if repo.nil?
body = ""
credentials = creds || Docker.creds || {}
headers = Docker::Util.build_auth_header(credentials)
opts = {:tag => tag}.merge(options)
connection.post("/images/#{repo}/push", opts, :headers => headers,
:response_block => self.class.response_block(body, &block))
self
end
# Tag the Image.
def tag(opts = {})
self.info['RepoTags'] ||= []
connection.post(path_for(:tag), opts)
repo = opts['repo'] || opts[:repo]
tag = opts['tag'] || opts[:tag] || 'latest'
self.info['RepoTags'] << "#{repo}:#{tag}"
end
# Given the path of a local file and the path at which it should be inserted,
# creates a new Image that contains that file.
def insert_local(opts = {})
local_paths = opts.delete('localPath')
output_path = opts.delete('outputPath')
local_paths = [ local_paths ] unless local_paths.is_a?(Array)
file_hash = Docker::Util.file_hash_from_paths(local_paths)
file_hash['Dockerfile'] = dockerfile_for(file_hash, output_path)
tar = Docker::Util.create_tar(file_hash)
body = connection.post('/build', opts, :body => tar)
self.class.send(:new, connection, 'id' => Docker::Util.extract_id(body))
end
# Remove the Image from the server.
def remove(opts = {})
name = opts.delete(:name)
unless name
if ::Docker.podman?
name = self.id.split(':').last
else
name = self.id
end
end
connection.delete("/images/#{name}", opts)
end
alias_method :delete, :remove
# Return a String representation of the Image.
def to_s
"Docker::Image { :id => #{self.id}, :info => #{self.info.inspect}, "\
":connection => #{self.connection} }"
end
# #json returns extra information about an Image, #history returns its
# history.
[:json, :history].each do |method|
define_method(method) do |opts = {}|
Docker::Util.parse_json(connection.get(path_for(method), opts))
end
end
# Save the image as a tarball
def save(filename = nil)
self.class.save(self.id, filename, connection)
end
# Save the image as a tarball to an IO object.
def save_stream(opts = {}, &block)
self.class.save_stream(self.id, opts, connection, &block)
end
# Update the @info hash, which is the only mutable state in this object.
def refresh!
img = Docker::Image.all({:all => true}, connection).find { |image|
image.id.start_with?(self.id) || self.id.start_with?(image.id)
}
info.merge!(self.json)
img && info.merge!(img.info)
self
end
class << self
# Create a new Image.
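# Pulling an image is done through this endpoint with a `fromImage` option.
# An illustrative sketch:
#
#   Docker::Image.create('fromImage' => 'debian:stable')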
def create(opts = {}, creds = nil, conn = Docker.connection, &block)
credentials = creds.nil? ? Docker.creds : MultiJson.dump(creds)
headers = credentials && Docker::Util.build_auth_header(credentials) || {}
body = ''
conn.post(
'/images/create',
opts,
:headers => headers,
:response_block => response_block(body, &block)
)
# NOTE: see associated tests for why we're looking at image#end_with?
image = opts['fromImage'] || opts[:fromImage]
tag = opts['tag'] || opts[:tag]
image = "#{image}:#{tag}" if tag && !image.end_with?(":#{tag}")
get(image, {}, conn)
end
# Return a specific image.
def get(id, opts = {}, conn = Docker.connection)
image_json = conn.get("/images/#{id}/json", opts)
hash = Docker::Util.parse_json(image_json) || {}
new(conn, hash)
end
# Delete a specific image
def remove(id, opts = {}, conn = Docker.connection)
conn.delete("/images/#{id}", opts)
end
alias_method :delete, :remove
# Prune images
def prune(conn = Docker.connection)
conn.post("/images/prune", {})
end
# Save the raw binary representation of one or more Docker images
#
# @param names [String, Array<String>] The image(s) you wish to save
# @param filename [String] The file to export the data to.
# @param conn [Docker::Connection] The Docker connection to use
#
# @return [NilClass, String] If filename is nil, return the string
# representation of the binary data. If the filename is not nil, then
# return nil.
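# An illustrative sketch; the output path is hypothetical:
#
#   Docker::Image.save('debian:stable', '/tmp/debian-stable.tar')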
def save(names, filename = nil, conn = Docker.connection)
if filename
File.open(filename, 'wb') do |file|
save_stream(names, {}, conn, &response_block_for_save(file))
end
nil
else
string = ''
save_stream(names, {}, conn, &response_block_for_save(string))
string
end
end
# Stream the contents of Docker image(s) to a block.
#
# @param names [String, Array<String>] The image(s) you wish to save
# @param conn [Docker::Connection] The Docker connection to use
# @yield chunk [String] a chunk of the Docker image(s).
def save_stream(names, opts = {}, conn = Docker.connection, &block)
# By using compare_by_identity we can create a Hash that has
# the same key multiple times.
query = {}.tap(&:compare_by_identity)
Array(names).each { |name| query['names'.dup] = name }
conn.get(
'/images/get',
query,
opts.merge(:response_block => block)
)
nil
end
# Load an Image from a tar file path or IO object
def load(tar, opts = {}, conn = Docker.connection, creds = nil, &block)
headers = build_headers(creds)
io = tar.is_a?(String) ? File.open(tar, 'rb') : tar
body = ""
conn.post(
'/images/load',
opts,
:headers => headers,
:response_block => response_block(body, &block)
) { io.read(Excon.defaults[:chunk_size]).to_s }
end
# Check if an image exists.
def exist?(id, opts = {}, conn = Docker.connection)
get(id, opts, conn)
true
rescue Docker::Error::NotFoundError
false
end
# Return every Image.
def all(opts = {}, conn = Docker.connection)
hashes = Docker::Util.parse_json(conn.get('/images/json', opts)) || []
hashes.map { |hash| new(conn, hash) }
end
# Given a query like `{ :term => 'sshd' }`, queries the Docker Registry for
# a corresponding Image.
def search(query = {}, connection = Docker.connection, creds = nil)
credentials = creds.nil? ? Docker.creds : creds.to_json
headers = credentials && Docker::Util.build_auth_header(credentials) || {}
body = connection.get(
'/images/search',
query,
:headers => headers,
)
hashes = Docker::Util.parse_json(body) || []
hashes.map { |hash| new(connection, 'id' => hash['name']) }
end
# Import an Image from the output of Docker::Container#export. The first
# argument may either be a File or URI.
def import(imp, opts = {}, conn = Docker.connection)
require 'open-uri'
# open-uri exposes URI.open on Ruby 2.5 and later; older Rubies only provide the Kernel#open patch
if URI.public_methods.include?(:open)
munged_open = URI.method(:open)
else
munged_open = self.method(:open)
end
munged_open.call(imp) do |io|
import_stream(opts, conn) do
io.read(Excon.defaults[:chunk_size]).to_s
end
end
rescue StandardError
raise Docker::Error::IOError, "Could not import '#{imp}'"
end
def import_stream(options = {}, connection = Docker.connection, &block)
body = connection.post(
'/images/create',
options.merge('fromSrc' => '-'),
:headers => { 'Content-Type' => 'application/tar',
'Transfer-Encoding' => 'chunked' },
&block
)
new(connection, 'id'=> Docker::Util.parse_json(body)['status'])
end
# Given a Dockerfile as a string, builds an Image.
def build(commands, opts = {}, connection = Docker.connection, &block)
body = ""
connection.post(
'/build', opts,
:body => Docker::Util.create_tar('Dockerfile' => commands),
:response_block => response_block(body, &block)
)
new(connection, 'id' => Docker::Util.extract_id(body))
rescue Docker::Error::ServerError
raise Docker::Error::UnexpectedResponseError
end
# Given File like object containing a tar file, builds an Image.
#
# If a block is passed, chunks of output produced by Docker will be passed
# to that block.
def build_from_tar(tar, opts = {}, connection = Docker.connection,
creds = nil, &block)
headers = build_headers(creds)
# The response_block passed to Excon will build up this body variable.
body = ""
connection.post(
'/build', opts,
:headers => headers,
:response_block => response_block(body, &block)
) { tar.read(Excon.defaults[:chunk_size]).to_s }
new(connection,
'id' => Docker::Util.extract_id(body),
:headers => headers)
end
# Given a directory that contains a Dockerfile, builds an Image.
#
# If a block is passed, chunks of output produced by Docker will be passed
# to that block.
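# An illustrative sketch; './app' is a hypothetical directory containing a
# Dockerfile, and each chunk of build output is yielded to the block:
#
#   image = Docker::Image.build_from_dir('./app') { |chunk| print chunk }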
def build_from_dir(dir, opts = {}, connection = Docker.connection,
creds = nil, &block)
tar = Docker::Util.create_dir_tar(dir)
build_from_tar tar, opts, connection, creds, &block
ensure
unless tar.nil?
tar.close
FileUtils.rm(tar.path, force: true)
end
end
end
private
# A method to build the config header and merge it into the
# headers sent by build_from_dir.
def self.build_headers(creds=nil)
credentials = creds || Docker.creds || {}
config_header = Docker::Util.build_config_header(credentials)
headers = { 'Content-Type' => 'application/tar',
'Transfer-Encoding' => 'chunked' }
headers = headers.merge(config_header) if config_header
headers
end
# Convenience method to return the path for a particular resource.
def path_for(resource)
"/images/#{self.id}/#{resource}"
end
# Convenience method to get the Dockerfile for a file hash and a path to
# output to.
def dockerfile_for(file_hash, output_path)
dockerfile = "from #{self.id}\n"
file_hash.keys.each do |basename|
dockerfile << "add #{basename} #{output_path}\n"
end
dockerfile
end
def ensure_repo_tags
refresh! unless info.has_key?('RepoTags')
info['RepoTags']
end
# Generates the block to be passed as a response block to Excon. The returned
# lambda will append Docker output to the first argument, and yield output to
# the passed block, if a block is given.
def self.response_block(body)
lambda do |chunk, remaining, total|
body << chunk
yield chunk if block_given?
end
end
# Generates the block to be passed in to the save request. This lambda will
# append the streaming data to the file provided.
def self.response_block_for_save(file)
lambda do |chunk, remaining, total|
file << chunk
end
end
end
docker-api-2.2.0/lib/docker/messages.rb 0000664 0000000 0000000 00000003001 14071411272 0017673 0 ustar 00root root 0000000 0000000 # This class represents all the messages either received by chunks from attach
class Docker::Messages
attr_accessor :buffer, :stdout_messages, :stderr_messages, :all_messages
def initialize(stdout_messages=[],
stderr_messages=[],
all_messages=[],
buffer="")
@stdout_messages = stdout_messages
@stderr_messages = stderr_messages
@all_messages = all_messages
@buffer = buffer
end
def add_message(source, message)
case source
when 1
stdout_messages << message
when 2
stderr_messages << message
end
all_messages << message
end
def get_message(raw_text)
header = raw_text.slice!(0,8)
if header.length < 8
@buffer = header
return
end
type, length = header.unpack("CxxxN")
message = raw_text.slice!(0,length)
if message.length < length
@buffer = header + message
else
add_message(type, message)
end
end
def append(messages)
@stdout_messages += messages.stdout_messages
@stderr_messages += messages.stderr_messages
@all_messages += messages.all_messages
messages.clear
@all_messages
end
def clear
stdout_messages.clear
stderr_messages.clear
all_messages.clear
end
# Method to break apart application/vnd.docker.raw-stream headers
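# Each frame starts with an 8-byte header: one byte for the stream type
# (1 = stdout, 2 = stderr), three padding bytes, then a big-endian uint32
# payload length (hence the "CxxxN" unpack in #get_message above).
# An illustrative sketch:
#
#   frame = [1, 0, 0, 0, 3].pack('CCCCN') + "hi\n"
#   Docker::Messages.new.decipher_messages(frame).stdout_messages #=> ["hi\n"]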
def decipher_messages(body)
raw_text = buffer + body.dup
messages = Docker::Messages.new
while !raw_text.empty?
messages.get_message(raw_text)
end
messages
end
end
docker-api-2.2.0/lib/docker/messages_stack.rb 0000664 0000000 0000000 00000000766 14071411272 0021077 0 ustar 00root root 0000000 0000000 # This class represents a messages stack
class Docker::MessagesStack
attr_accessor :messages
# Initialize stack with optional size
#
# @param size [Integer]
def initialize(size = -1)
@messages = []
@size = size
end
# Append messages to stack
#
# @param messages [Docker::Messages]
def append(messages)
return if @size == 0
messages.all_messages.each do |msg|
@messages << msg
@messages.shift if @size > -1 && @messages.size > @size
end
end
end
docker-api-2.2.0/lib/docker/network.rb 0000664 0000000 0000000 00000004201 14071411272 0017560 0 ustar 00root root 0000000 0000000 # This class represents a Docker Network.
class Docker::Network
include Docker::Base
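# An illustrative sketch; the network name is hypothetical and `container`
# is assumed to be an existing Docker::Container:
#
#   network = Docker::Network.create('my-net')
#   network.connect(container.id)
#   network.disconnect(container.id)
#   network.remove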
def connect(container, opts = {}, body_opts = {})
body = MultiJson.dump({ container: container }.merge(body_opts))
Docker::Util.parse_json(
connection.post(path_for('connect'), opts, body: body)
)
reload
end
def disconnect(container, opts = {})
body = MultiJson.dump(container: container)
Docker::Util.parse_json(
connection.post(path_for('disconnect'), opts, body: body)
)
reload
end
def remove(opts = {})
connection.delete(path_for, opts)
nil
end
alias_method :delete, :remove
def json(opts = {})
Docker::Util.parse_json(connection.get(path_for, opts))
end
def to_s
"Docker::Network { :id => #{id}, :info => #{info.inspect}, "\
":connection => #{connection} }"
end
def reload
network_json = @connection.get("/networks/#{@id}")
hash = Docker::Util.parse_json(network_json) || {}
@info = hash
end
class << self
def create(name, opts = {}, conn = Docker.connection)
default_opts = MultiJson.dump({
'Name' => name,
'CheckDuplicate' => true
}.merge(opts))
resp = conn.post('/networks/create', {}, body: default_opts)
response_hash = Docker::Util.parse_json(resp) || {}
get(response_hash['Id'], {}, conn) || {}
end
def get(id, opts = {}, conn = Docker.connection)
network_json = conn.get("/networks/#{id}", opts)
hash = Docker::Util.parse_json(network_json) || {}
new(conn, hash)
end
def all(opts = {}, conn = Docker.connection)
hashes = Docker::Util.parse_json(conn.get('/networks', opts)) || []
hashes.map { |hash| new(conn, hash) }
end
def remove(id, opts = {}, conn = Docker.connection)
conn.delete("/networks/#{id}", opts)
nil
end
alias_method :delete, :remove
def prune(conn = Docker.connection)
conn.post("/networks/prune", {})
nil
end
end
# Convenience method to return the path for a particular resource.
def path_for(resource = nil)
["/networks/#{id}", resource].compact.join('/')
end
private :path_for
end
docker-api-2.2.0/lib/docker/rake_task.rb 0000664 0000000 0000000 00000001217 14071411272 0020037 0 ustar 00root root 0000000 0000000 # This class allows image-based tasks to be created.
class Docker::ImageTask < Rake::Task
def self.scope_name(_scope, task_name)
task_name
end
def needed?
!has_repo_tag?
end
private
def has_repo_tag?
images.any? { |image| image.info['RepoTags'].include?(repo_tag) }
end
def images
@images ||= Docker::Image.all(:all => true)
end
def repo
name.split(':')[0]
end
def tag
name.split(':')[1] || 'latest'
end
def repo_tag
"#{repo}:#{tag}"
end
end
# Monkeypatch Rake to add the `image` task.
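# An illustrative Rakefile sketch; the tag and build directory are hypothetical:
#
#   image 'my-app:latest' do
#     Docker::Image.build_from_dir('.').tag('repo' => 'my-app', 'tag' => 'latest')
#   end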
module Rake::DSL
def image(*args, &block)
Docker::ImageTask.define_task(*args, &block)
end
end
docker-api-2.2.0/lib/docker/util.rb 0000664 0000000 0000000 00000022177 14071411272 0017060 0 ustar 00root root 0000000 0000000 require 'set'
# This module holds shared logic that doesn't really belong anywhere else in the
# gem.
module Docker::Util
# http://www.tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm#STANDARD-WILDCARDS
GLOB_WILDCARDS = /[\?\*\[\{\]\}]/
include Docker::Error
module_function
# Attaches to an HTTP stream
#
# @param block
# @param msg_stack [Docker::Messages]
# @param tty [boolean]
def attach_for(block, msg_stack, tty = false)
# If TTY is enabled expect raw data and append to stdout
if tty
attach_for_tty(block, msg_stack)
else
attach_for_multiplex(block, msg_stack)
end
end
def attach_for_tty(block, msg_stack)
messages = Docker::Messages.new
lambda do |c,r,t|
messages.stdout_messages << c
messages.all_messages << c
msg_stack.append(messages)
block.call c if block
end
end
def attach_for_multiplex(block, msg_stack)
messages = Docker::Messages.new
lambda do |c,r,t|
messages = messages.decipher_messages(c)
unless block.nil?
messages.stdout_messages.each do |msg|
block.call(:stdout, msg)
end
messages.stderr_messages.each do |msg|
block.call(:stderr, msg)
end
end
msg_stack.append(messages)
end
end
def debug(msg)
Docker.logger.debug(msg) if Docker.logger
end
def hijack_for(stdin, block, msg_stack, tty)
attach_block = attach_for(block, msg_stack, tty)
lambda do |socket|
debug "hijack: hijacking the HTTP socket"
threads = []
debug "hijack: starting stdin copy thread"
threads << Thread.start do
debug "hijack: copying stdin => socket"
IO.copy_stream stdin, socket
debug "hijack: closing write end of hijacked socket"
close_write(socket)
end
debug "hijack: starting hijacked socket read thread"
threads << Thread.start do
debug "hijack: reading from hijacked socket"
begin
while chunk = socket.readpartial(512)
debug "hijack: got #{chunk.bytesize} bytes from hijacked socket"
attach_block.call chunk, nil, nil
end
rescue EOFError
end
debug "hijack: killing stdin copy thread"
threads.first.kill
end
threads.each(&:join)
end
end
def close_write(socket)
if socket.respond_to?(:close_write)
socket.close_write
elsif socket.respond_to?(:io)
socket.io.close_write
else
raise IOError, 'Cannot close socket'
end
end
def parse_json(body)
MultiJson.load(body) unless body.nil? || body.empty? || (body == 'null')
rescue MultiJson::ParseError => ex
raise UnexpectedResponseError, ex.message
end
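# Split an image reference into [repository, tag]; references without an
# explicit tag yield an empty tag string. Illustrative examples:
#
#   Docker::Util.parse_repo_tag('debian:stable')         #=> ['debian', 'stable']
#   Docker::Util.parse_repo_tag('registry:5000/app:1.0') #=> ['registry:5000/app', '1.0']
#   Docker::Util.parse_repo_tag('debian')                #=> ['debian', '']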
def parse_repo_tag(str)
if match = str.match(/\A(.*):([^:]*)\z/)
match.captures
else
[str, '']
end
end
def fix_json(body)
parse_json("[#{body.gsub(/}\s*{/, '},{')}]")
end
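# Build an in-memory tar archive whose entries map file names to contents
# (or to a hash with :content and :permissions). An illustrative sketch:
#
#   tar = Docker::Util.create_tar('greeting.txt' => "hello\n")
#   # tar is a String holding a tar archive with a single entry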
def create_tar(hash = {})
output = StringIO.new
Gem::Package::TarWriter.new(output) do |tar|
hash.each do |file_name, file_details|
permissions = file_details.is_a?(Hash) ? file_details[:permissions] : 0640
tar.add_file(file_name, permissions) do |tar_file|
content = file_details.is_a?(Hash) ? file_details[:content] : file_details
tar_file.write(content)
end
end
end
output.tap(&:rewind).string
end
def create_dir_tar(directory)
tempfile = create_temp_file
directory += '/' unless directory.end_with?('/')
create_relative_dir_tar(directory, tempfile)
File.new(tempfile.path, 'r')
end
# Return the set of files that form the Docker build context,
# implementing the .dockerignore logic from https://docs.docker.com/engine/reference/builder/#dockerignore-file
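# An illustrative sketch with a hypothetical layout: given
#
#   app/
#     Dockerfile
#     .dockerignore   # contains the single pattern "tmp"
#     tmp/cache.bin
#
# docker_context('app') returns every file under app/ except tmp/cache.bin.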
def docker_context(directory)
all_files = glob_all_files(File.join(directory, "**/*"))
dockerignore = File.join(directory, '.dockerignore')
return all_files unless all_files.include?(dockerignore)
# Iterate over valid lines, starting with the initial glob as working set
File
.read(dockerignore) # https://docs.docker.com/engine/reference/builder/#dockerignore-file
.each_line # "a newline-separated list of patterns"
.map(&:strip) # "A preprocessing step removes leading and trailing whitespace"
.reject(&:empty?) # "Lines that are blank after preprocessing are ignored"
.reject { |p| p.start_with?('#') } # "if [a line starts with `#`], then this line is considered as a comment"
.each_with_object(Set.new(all_files)) do |p, working_set|
# determine the pattern (p) and whether it is to be added or removed from context
add = p.start_with?("!")
# strip leading "!" from pattern p, then prepend the base directory
matches = dockerignore_compatible_glob(File.join(directory, add ? p[1..-1] : p))
# add or remove the matched items as indicated in the ignore file
add ? working_set.merge(matches) : working_set.replace(working_set.difference(matches))
end
.to_a
end
def create_relative_dir_tar(directory, output)
Gem::Package::TarWriter.new(output) do |tar|
files = docker_context(directory)
files.each do |prefixed_file_name|
stat = File.stat(prefixed_file_name)
next unless stat.file?
unprefixed_file_name = prefixed_file_name[directory.length..-1]
add_file_to_tar(
tar, unprefixed_file_name, stat.mode, stat.size, stat.mtime
) do |tar_file|
IO.copy_stream(File.open(prefixed_file_name, 'rb'), tar_file)
end
end
end
end
def add_file_to_tar(tar, name, mode, size, mtime)
tar.check_closed
io = tar.instance_variable_get(:@io)
name, prefix = tar.split_name(name)
header = Gem::Package::TarHeader.new(:name => name, :mode => mode,
:size => size, :prefix => prefix,
:mtime => mtime).to_s
io.write header
os = Gem::Package::TarWriter::BoundedStream.new io, size
yield os if block_given?
min_padding = size - os.written
io.write("\0" * min_padding)
remainder = (512 - (size % 512)) % 512
io.write("\0" * remainder)
tar
end
def create_temp_file
tempfile_name = Dir::Tmpname.create('out') {}
File.open(tempfile_name, 'wb+')
end
def extract_id(body)
body.lines.reverse_each do |line|
if (id = line.match(/Successfully built ([a-f0-9]+)/)) && !id[1].empty?
return id[1]
end
end
raise UnexpectedResponseError, "Couldn't find id: #{body}"
end
# Convenience method to get the file hash corresponding to an array of
# local paths.
def file_hash_from_paths(local_paths)
local_paths.each_with_object({}) do |local_path, file_hash|
unless File.exist?(local_path)
raise ArgumentError, "#{local_path} does not exist."
end
basename = File.basename(local_path)
if File.directory?(local_path)
tar = create_dir_tar(local_path)
file_hash[basename] = {
content: tar.read,
permissions: filesystem_permissions(local_path)
}
tar.close
FileUtils.rm(tar.path)
else
file_hash[basename] = {
content: File.read(local_path, mode: 'rb'),
permissions: filesystem_permissions(local_path)
}
end
end
end
def filesystem_permissions(path)
mode = sprintf("%o", File.stat(path).mode)
mode[(mode.length - 3)...mode.length].to_i(8)
end
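# An illustrative sketch; the credentials are hypothetical:
#
#   Docker::Util.build_auth_header(
#     'username' => 'user',
#     'password' => 'secret',
#     'serveraddress' => 'https://index.docker.io/v1/'
#   ) #=> { 'X-Registry-Auth' => '<base64-encoded JSON>' }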
def build_auth_header(credentials)
credentials = MultiJson.dump(credentials) if credentials.is_a?(Hash)
encoded_creds = Base64.urlsafe_encode64(credentials)
{
'X-Registry-Auth' => encoded_creds
}
end
def build_config_header(credentials)
if credentials.is_a?(String)
credentials = MultiJson.load(credentials, symbolize_keys: true)
end
header = MultiJson.dump(
credentials[:serveraddress].to_s => {
'username' => credentials[:username].to_s,
'password' => credentials[:password].to_s,
'email' => credentials[:email].to_s
}
)
encoded_header = Base64.urlsafe_encode64(header)
{
'X-Registry-Config' => encoded_header
}
end
# do a directory glob that matches .dockerignore behavior
# specifically: matched directories are considered a recursive match
def dockerignore_compatible_glob(pattern)
begin
some_dirs, some_files = glob_all_files(pattern).partition { |f| File.directory?(f) }
# since all directories will be re-processed with a /**/* glob, we can preemptively
# eliminate any whose parent directory is already in this set. This saves significant time.
some_files + some_dirs.reject { |d| some_dirs.any? { |pd| d.start_with?(pd) && d != pd } }
end.each_with_object(Set.new) do |f, acc|
# expand any directories by globbing; flatten results
acc.merge(File.directory?(f) ? glob_all_files("#{f}/**/*") : [f])
end
end
def glob_all_files(pattern)
# globs of "a_dir/**/*" can return "a_dir/.", so explicitly reject those
(Dir.glob(pattern, File::FNM_DOTMATCH) - ['..', '.']).reject { |p| p.end_with?("/.") }
end
end
docker-api-2.2.0/lib/docker/version.rb 0000664 0000000 0000000 00000000115 14071411272 0017554 0 ustar 00root root 0000000 0000000 module Docker
# The version of the docker-api gem.
VERSION = '2.2.0'
end
docker-api-2.2.0/lib/docker/volume.rb 0000664 0000000 0000000 00000002243 14071411272 0017402 0 ustar 00root root 0000000 0000000 # class represents a Docker Volume
class Docker::Volume
include Docker::Base
# DELETE /volumes/volume_name doesn't return anything
def remove(opts = {}, conn = Docker.connection)
conn.delete("/volumes/#{id}")
end
def normalize_hash(hash)
hash['id'] ||= hash['Name']
end
class << self
# get details for a single volume
def get(name, conn = Docker.connection)
resp = conn.get("/volumes/#{name}")
hash = Docker::Util.parse_json(resp) || {}
new(conn, hash)
end
# The /volumes endpoint returns an array of hashes encapsulated in a 'Volumes' key
def all(opts = {}, conn = Docker.connection)
resp = conn.get('/volumes')
json = Docker::Util.parse_json(resp) || {}
hashes = json['Volumes'] || []
hashes.map { |hash| new(conn, hash) }
end
# creates a volume with an arbitrary name
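# An illustrative sketch; the volume name is hypothetical:
#
#   volume = Docker::Volume.create('my-data')
#   volume.remove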
def create(name, opts = {}, conn = Docker.connection)
opts['Name'] = name
resp = conn.post('/volumes/create', {}, body: MultiJson.dump(opts))
hash = Docker::Util.parse_json(resp) || {}
new(conn, hash)
end
def prune(conn = Docker.connection)
conn.post("/volumes/prune")
end
end
end
docker-api-2.2.0/lib/excon/ 0000775 0000000 0000000 00000000000 14071411272 0015412 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/lib/excon/middlewares/ 0000775 0000000 0000000 00000000000 14071411272 0017712 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/lib/excon/middlewares/hijack.rb 0000664 0000000 0000000 00000003204 14071411272 0021467 0 ustar 00root root 0000000 0000000 module Excon
VALID_REQUEST_KEYS << :hijack_block
module Middleware
# Hijack is an Excon middleware which parses response headers and then
# yields the underlying TCP socket for raw TCP communication (used to
# attach to STDIN of containers).
class Hijack < Base
def build_response(status, socket)
response = {
:body => '',
:headers => Excon::Headers.new,
:status => status,
:remote_ip => socket.respond_to?(:remote_ip) &&
socket.remote_ip,
}
if socket.data[:scheme] =~ /^(https?|tcp)$/
# merge! mutates the hash in place so the extra keys survive on the
# response returned below
response.merge!(
:local_port => socket.respond_to?(:local_port) &&
socket.local_port,
:local_address => socket.respond_to?(:local_address) &&
socket.local_address
)
end
response
end
def response_call(datum)
if datum[:hijack_block]
# Need to process the response headers here rather than in
# Excon::Middleware::ResponseParser as the response parser will
# block trying to read the body.
socket = datum[:connection].send(:socket)
# c.f. Excon::Response.parse
until match = /^HTTP\/\d+\.\d+\s(\d{3})\s/.match(socket.readline); end
status = match[1].to_i
datum[:response] = build_response(status, socket)
Excon::Response.parse_headers(socket, datum)
datum[:hijack_block].call socket.instance_variable_get(:@socket)
end
@stack.response_call(datum)
end
end
end
end
docker-api-2.2.0/script/ 0000775 0000000 0000000 00000000000 14071411272 0015034 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/script/docker 0000664 0000000 0000000 00000011226 14071411272 0016230 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -e
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: cgroupfs-mount cgroup-lite
# Should-Stop: cgroupfs-mount cgroup-lite
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Create lightweight, portable, self-sufficient containers.
# Description:
# Docker is an open-source project to easily create lightweight, portable,
# self-sufficient containers from any application. The same container that a
# developer builds and tests on a laptop can run at scale, in production, on
# VMs, bare metal, OpenStack clusters, public clouds and more.
### END INIT INFO
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
# This is the pid file managed by docker itself
DOCKER_PIDFILE=/var/run/$BASE.pid
# This is the pid file created/managed by start-stop-daemon
DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
# Get lsb functions
. /lib/lsb/init-functions
if [ -f /etc/default/$BASE ]; then
. /etc/default/$BASE
fi
# Check docker is present
if [ ! -x $DOCKER ]; then
log_failure_msg "$DOCKER not present or not executable"
exit 1
fi
check_init() {
# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly)
if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
exit 1
fi
}
fail_unless_root() {
if [ "$(id -u)" != '0' ]; then
log_failure_msg "$DOCKER_DESC must be run as root"
exit 1
fi
}
cgroupfs_mount() {
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
return
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
}
case "$1" in
start)
check_init
fail_unless_root
cgroupfs_mount
touch "$DOCKER_LOGFILE"
chgrp docker "$DOCKER_LOGFILE"
ulimit -n 1048576
if [ "$BASH" ]; then
ulimit -u 1048576
else
ulimit -p 1048576
fi
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
--pidfile "$DOCKER_SSD_PIDFILE" \
--make-pidfile \
-- \
-d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
>> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?
;;
stop)
check_init
fail_unless_root
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE"
log_end_msg $?
;;
restart)
check_init
fail_unless_root
docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
$0 start
;;
force-reload)
check_init
fail_unless_root
$0 restart
;;
status)
check_init
status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC"
;;
*)
echo "Usage: service docker {start|stop|restart|status}"
exit 1
;;
esac
docker-api-2.2.0/script/docker.conf 0000664 0000000 0000000 00000003771 14071411272 0017162 0 ustar 00root root 0000000 0000000 description "Docker daemon"
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [!2345]
limit nofile 524288 1048576
limit nproc 524288 1048576
respawn
kill timeout 20
pre-start script
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
|| [ ! -e /proc/cgroups ] \
|| [ ! -d /sys/fs/cgroup ]; then
exit 0
fi
if ! mountpoint -q /sys/fs/cgroup; then
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
fi
(
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
mkdir -p $sys
if ! mountpoint -q $sys; then
if ! mount -n -t cgroup -o $sys cgroup $sys; then
rmdir $sys || true
fi
fi
done
)
end script
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
DOCKER=/usr/bin/$UPSTART_JOB
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
exec "$DOCKER" -d $DOCKER_OPTS
end script
# Don't emit "started" event until docker.sock is ready.
# See https://github.com/docker/docker/issues/6647
post-start script
DOCKER_OPTS=
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then
while ! [ -e /var/run/docker.sock ]; do
initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1
echo "Waiting for /var/run/docker.sock"
sleep 0.1
done
echo "/var/run/docker.sock is up"
fi
end script
docker-api-2.2.0/script/install_docker.sh 0000775 0000000 0000000 00000001711 14071411272 0020370 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -ex
declare -a SEMVER
# argv[0]
DOCKER_VERSION=$1
# argv[1]
DOCKER_CE=$2
# disable travis default installation
systemctl stop docker.service
apt-get -y --purge remove docker docker-engine docker.io containerd runc
# install gpg key for docker rpo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
apt-key fingerprint 0EBFCD88
# enable docker repo
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get update
apt-cache gencaches
set +e
# install package
apt-get install -y docker-ce=${DOCKER_VERSION}
if [ $? -ne 0 ]; then
echo "Error: Could not install ${DOCKER_VERSION}"
echo "Available docker versions:"
apt-cache madison docker-ce
exit 1
fi
set -e
systemctl stop docker.service
echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock --pidfile=/var/run/docker.pid"' > /etc/default/docker
cat /etc/default/docker
systemctl start docker.service
docker-api-2.2.0/script/install_podman.sh 0000775 0000000 0000000 00000000561 14071411272 0020401 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -ex
. /etc/os-release
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_18.04/ /" > /etc/apt/sources.list.d/podman.list
apt-get update
apt-get install -y podman
docker-api-2.2.0/spec/ 0000775 0000000 0000000 00000000000 14071411272 0014462 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/spec/cov_spec.rb 0000664 0000000 0000000 00000000666 14071411272 0016620 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.not_covered!
describe "Coverage" do
it "has coverage for all tests" do
SingleCov.assert_used
end
it "has tests for all files" do
SingleCov.assert_tested untested: %w[
lib/docker/base.rb
lib/docker/error.rb
lib/docker/messages_stack.rb
lib/docker/rake_task.rb
lib/docker/version.rb
lib/docker-api.rb
lib/excon/middlewares/hijack.rb
]
end
end
docker-api-2.2.0/spec/docker/ 0000775 0000000 0000000 00000000000 14071411272 0015731 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/spec/docker/connection_spec.rb 0000664 0000000 0000000 00000006647 14071411272 0021444 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 12
describe Docker::Connection do
subject { described_class.new('http://localhost:4243', {}) }
describe '#initialize' do
let(:url) { 'http://localhost:4243' }
let(:options) { {} }
subject { described_class.new(url, options) }
context 'when the first argument is not a String' do
let(:url) { :lol_not_a_string }
it 'raises an error' do
expect { subject }.to raise_error(Docker::Error::ArgumentError)
end
end
context 'when the first argument is a String' do
context 'and the url is a unix socket' do
let(:url) { ::Docker.env_url || ::Docker.default_socket_url }
it 'sets the socket path in the options' do
expect(subject.url).to eq('unix:///')
expect(subject.options).to include(:socket => url.split('//').last)
end
end
context 'but the second argument is not a Hash' do
let(:options) { :lol_not_a_hash }
it 'raises an error' do
expect { subject }.to raise_error(Docker::Error::ArgumentError)
end
end
context 'and the second argument is a Hash' do
it 'sets the url and options' do
expect(subject.url).to eq url
expect(subject.options).to eq options
end
end
end
context 'url conversion to uri' do
context 'when the url does not contain a scheme' do
let(:url) { 'localhost:4243' }
it 'adds the scheme to the url' do
expect(subject.url).to eq "http://#{url}"
end
end
context 'when the url is a complete uri' do
let(:url) { 'http://localhost:4243' }
it 'leaves the url intact' do
expect(subject.url).to eq url
end
end
end
end
describe '#resource' do
its(:resource) { should be_a Excon::Connection }
end
describe '#request' do
let(:method) { :get }
let(:path) { '/test' }
let(:query) { { :all => true } }
let(:options) { { :expects => 201, :lol => true } }
let(:body) { rand(10000000) }
let(:resource) { double(:resource) }
let(:response) { double(:response, :body => body) }
let(:expected_hash) {
{
:method => method,
:path => path,
:query => query,
:headers => { 'Content-Type' => 'text/plain',
'User-Agent' => "Swipely/Docker-API #{Docker::VERSION}",
},
:expects => 201,
:idempotent => true,
:lol => true
}
}
before do
allow(subject).to receive(:resource).and_return(resource)
expect(resource).to receive(:request).
with(expected_hash).
and_return(response)
end
it 'sends #request to #resource with the compiled params' do
expect(subject.request(method, path, query, options)).to eq body
end
end
[:get, :put, :post, :delete].each do |method|
describe "##{method}" do
it 'is delegated to #request' do
expect(subject).to receive(:request).with(method)
subject.public_send(method)
end
end
end
describe '#to_s' do
let(:url) { 'http://google.com:4000' }
let(:options) { {} }
let(:expected_string) {
"Docker::Connection { :url => #{url}, :options => #{options} }"
}
subject { described_class.new(url, options) }
it 'returns a pretty version with the url and port' do
expect(subject.to_s).to eq expected_string
end
end
end
docker-api-2.2.0/spec/docker/container_spec.rb 0000664 0000000 0000000 00000062167 14071411272 0021266 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 39
describe Docker::Container do
describe '#to_s' do
subject {
described_class.send(:new, Docker.connection, 'id' => rand(10000).to_s)
}
let(:id) { 'bf119e2' }
let(:connection) { Docker.connection }
let(:expected_string) {
"Docker::Container { :id => #{id}, :connection => #{connection} }"
}
before do
{
:@id => id,
:@connection => connection
}.each { |k, v| subject.instance_variable_set(k, v) }
end
its(:to_s) { should == expected_string }
end
describe '#json' do
subject {
described_class.create('Cmd' => %w[true], 'Image' => 'debian:stable')
}
let(:description) { subject.json }
after(:each) { subject.remove }
it 'returns the description as a Hash' do
expect(description).to be_a Hash
expect(description['Id']).to start_with(subject.id)
end
end
describe '#streaming_logs' do
let(:options) { {} }
subject do
described_class.create(
{'Cmd' => ['/bin/bash', '-lc', 'echo hello'], 'Image' => 'debian:stable'}.merge(options)
)
end
before(:each) { subject.tap(&:start).wait }
after(:each) { subject.remove }
context 'when not selecting any stream' do
let(:non_destination) { subject.streaming_logs }
it 'raises a client error' do
expect { non_destination }.to raise_error(Docker::Error::ClientError)
end
end
context 'when selecting stdout' do
let(:stdout) { subject.streaming_logs(stdout: 1) }
it 'returns blank logs' do
expect(stdout).to be_a String
expect(stdout).to match("hello")
end
end
context 'when using a tty' do
let(:options) { { 'Tty' => true } }
let(:output) { subject.streaming_logs(stdout: 1, tty: 1) }
it 'returns `hello`' do
expect(output).to be_a(String)
expect(output).to match("hello")
end
end
context 'when passing a block' do
let(:lines) { [] }
let(:output) { subject.streaming_logs(stdout: 1, follow: 1) { |s,c| lines << c } }
it 'returns `hello`' do
expect(output).to be_a(String)
expect(output).to match("hello")
expect(lines.join).to match("hello")
end
end
end
describe '#stats', :docker_1_9 do
after(:each) do
subject.wait
subject.remove
end
context "when requesting container stats" do
subject {
described_class.create('Cmd' => ['echo', 'hello'], 'Image' => 'debian:stable')
}
let(:output) { subject.stats }
it "returns a Hash" do
skip('Not supported on podman') if ::Docker.podman?
expect(output).to be_a Hash
end
end
context "when streaming container stats" do
subject {
described_class.create(
'Cmd' => ['sleep', '3'],
'Image' => 'debian:stable'
)
}
it "yields a Hash" do
skip('Not supported on podman') if ::Docker.podman?
subject.start! # If the container isn't started, no stats will be streamed
called_count = 0
subject.stats do |output|
expect(output).to be_a Hash
called_count += 1
break if called_count == 2
end
expect(called_count).to eq 2
end
end
end
describe '#logs' do
subject {
described_class.create('Cmd' => ['echo', 'hello'], 'Image' => 'debian:stable')
}
after(:each) { subject.remove }
context "when not selecting any stream" do
let(:non_destination) { subject.logs }
it 'raises a client error' do
expect { non_destination }.to raise_error(Docker::Error::ClientError)
end
end
context "when selecting stdout" do
let(:stdout) { subject.logs(stdout: 1) }
it 'returns blank logs' do
expect(stdout).to be_a String
expect(stdout).to eq ""
end
end
end
describe '#create' do
subject {
described_class.create({
'Cmd' => %w[true],
'Image' => 'debian:stable'
}.merge(opts))
}
context 'when creating a container named bob' do
let(:opts) { {"name" => "bob"} }
after(:each) { subject.remove }
it 'should have name set to bob' do
expect(subject.json["Name"]).to eq("/bob")
end
end
end
describe '#rename' do
subject {
described_class.create({
'name' => 'foo',
'Cmd' => %w[true],
'Image' => 'debian:stable'
})
}
before { subject.start }
after(:each) { subject.tap(&:wait).remove }
it 'renames the container' do
skip('Not supported on podman') if ::Docker.podman?
subject.rename('bar')
expect(subject.json["Name"]).to match(%r{bar})
end
end
describe "#update", :docker_1_10 do
subject {
described_class.create({
"name" => "foo",
'Cmd' => %w[true],
"Image" => "debian:stable",
"HostConfig" => {
"CpuShares" => 60000
}
})
}
before { subject.tap(&:start).tap(&:wait) }
after(:each) { subject.tap(&:wait).remove }
it "updates the container" do
skip('Podman containers are immutable once created') if ::Docker.podman?
subject.refresh!
expect(subject.info.fetch("HostConfig").fetch("CpuShares")).to eq 60000
subject.update("CpuShares" => 50000)
subject.refresh!
expect(subject.info.fetch("HostConfig").fetch("CpuShares")).to eq 50000
end
end
describe '#changes' do
subject {
described_class.create(
'Cmd' => %w[rm -rf /root],
'Image' => 'debian:stable'
)
}
let(:changes) { subject.changes }
before { subject.tap(&:start).tap(&:wait) }
after(:each) { subject.tap(&:wait).remove }
it 'returns the changes as an array' do
expect(changes).to be_a(Array)
expect(changes).to include(
{
"Path" => "/root",
"Kind" => 2
},
)
end
end
describe '#top' do
let(:dir) {
File.join(File.dirname(__FILE__), '..', 'fixtures', 'top')
}
let(:image) { Docker::Image.build_from_dir(dir) }
let(:top_empty) { sleep 1; container.top }
let(:top_ary) { sleep 1; container.top }
let(:top_hash) { sleep 1; container.top(format: :hash) }
let!(:container) { image.run('/while') }
after do
container.kill!.remove
image.remove
end
it 'returns the top commands as an Array' do
expect(top_ary).to be_a Array
expect(top_ary).to_not be_empty
expect(top_ary.first.keys).to include(/PID/)
end
it 'returns the top commands as an Hash' do
expect(top_hash).to be_a Hash
expect(top_hash).to_not be_empty
expect(top_hash.keys).to eq ['Processes', 'Titles']
end
it 'returns nothing when Processes were not returned due to an error' do
expect(Docker::Util).to receive(:parse_json).and_return({}).at_least(:once)
expect(top_empty).to eq []
end
end
describe '#archive_in', :docker_1_8 do
let(:license_path) { File.absolute_path(File.join(__FILE__, '..', '..', '..', 'LICENSE')) }
subject { Docker::Container.create('Image' => 'debian:stable', 'Cmd' => ['/bin/sh']) }
let(:committed_image) { subject.commit }
let(:ls_container) { committed_image.run('ls /').tap(&:wait) }
let(:output) { ls_container.streaming_logs(stdout: true, stderr: true) }
after do
subject.remove
end
context 'when the input is a tar' do
after do
ls_container.remove
committed_image.remove
end
it 'file exists in the container' do
skip('Not supported on podman') if ::Docker.podman?
subject.archive_in(license_path, '/', overwrite: false)
expect(output).to include('LICENSE')
end
end
end
describe '#archive_in_stream', :docker_1_8 do
let(:tar) { StringIO.new(Docker::Util.create_tar('/lol' => 'TEST')) }
subject { Docker::Container.create('Image' => 'debian:stable', 'Cmd' => ['/bin/sh']) }
let(:committed_image) { subject.commit }
let(:ls_container) { committed_image.run('ls /').tap(&:wait) }
let(:output) { ls_container.streaming_logs(stdout: true, stderr: true) }
after do
subject.remove
end
context 'when the input is a tar' do
after do
ls_container.remove
committed_image.remove
end
it 'file exists in the container' do
skip('Not supported on podman') if ::Docker.podman?
subject.archive_in_stream('/', overwrite: false) { tar.read }
expect(output).to include('lol')
end
end
context 'when the input would overwrite a directory with a file' do
let(:tar) { StringIO.new(Docker::Util.create_tar('/etc' => 'TEST')) }
it 'raises an error' do
skip('Not supported on podman') if ::Docker.podman?
# Docs say this should return a client error: clearly wrong
# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.21/
# #extract-an-archive-of-files-or-folders-to-a-directory-in-a-container
expect {
subject.archive_in_stream('/', overwrite: false) { tar.read }
}.to raise_error(Docker::Error::ServerError)
end
end
end
describe '#archive_out', :docker_1_8 do
subject { Docker::Container.create('Image' => 'debian:stable', 'Cmd' => ['touch','/test']) }
after { subject.remove }
context 'when the file does not exist' do
it 'raises an error' do
skip('Not supported on podman') if ::Docker.podman?
subject.start
subject.wait
expect { subject.archive_out('/lol') { |chunk| puts chunk } }
.to raise_error(Docker::Error::NotFoundError)
end
end
context 'when the input is a file' do
it 'yields each chunk of the tarred file' do
skip('Not supported on podman') if ::Docker.podman?
subject.start; subject.wait
chunks = []
subject.archive_out('/test') { |chunk| chunks << chunk }
chunks = chunks.join("\n")
expect(chunks).to be_include('test')
end
end
context 'when the input is a directory' do
it 'yields each chunk of the tarred directory' do
skip('Not supported on podman') if ::Docker.podman?
subject.start; subject.wait
chunks = []
subject.archive_out('/etc/logrotate.d') { |chunk| chunks << chunk }
chunks = chunks.join("\n")
expect(%w[apt dpkg]).to be_all { |file| chunks.include?(file) }
end
end
end
describe "#read_file", :docker_1_8 do
subject {
Docker::Container.create(
"Image" => "debian:stable",
"Cmd" => ["/bin/bash", "-c", "echo \"Hello world\" > /test"]
)
}
after { subject.remove }
before do
subject.start
subject.wait
end
it "reads contents from files" do
skip('Not supported on podman') if ::Docker.podman?
expect(subject.read_file("/test")).to eq "Hello world\n"
end
end
describe "#store_file", :docker_1_8 do
subject { Docker::Container.create('Image' => 'debian:stable', 'Cmd' => ["ls"]) }
after { subject.remove }
it "stores content in files" do
skip('Not supported on podman') if ::Docker.podman?
subject.store_file("/test", "Hello\nWorld")
expect(subject.read_file("/test")).to eq "Hello\nWorld"
end
end
describe '#export' do
subject { described_class.create('Cmd' => %w[/true],
'Image' => 'tianon/true') }
before { subject.start }
after { subject.tap(&:wait).remove }
it 'yields each chunk' do
first = nil
subject.export do |chunk|
first ||= chunk
end
expect(first[257..261]).to eq "ustar" # Make sure the export is a tar.
end
end
describe '#attach' do
subject {
described_class.create(
'Cmd' => ['bash','-c','sleep 2; echo hello'],
'Image' => 'debian:stable'
)
}
before { subject.start }
after(:each) { subject.stop.remove }
context 'with normal sized chunks' do
it 'yields each chunk' do
chunk = nil
subject.attach do |stream, c|
chunk ||= c
end
expect(chunk).to eq("hello\n")
end
end
context 'with very small chunks' do
before do
Docker.options = { :chunk_size => 1 }
end
after do
Docker.options = {}
end
it 'yields each chunk' do
chunk = nil
subject.attach do |stream, c|
chunk ||= c
end
expect(chunk).to eq("hello\n")
end
end
end
describe '#attach with stdin' do
it 'yields the output' do
skip('Currently broken in podman') if ::Docker.podman?
container = described_class.create(
'Cmd' => %w[cat],
'Image' => 'debian:stable',
'OpenStdin' => true,
'StdinOnce' => true
)
chunk = nil
container
.tap(&:start)
.attach(stdin: StringIO.new("foo\nbar\n")) do |stream, c|
chunk ||= c
end
container.tap(&:wait).remove
expect(chunk).to eq("foo\nbar\n")
end
end
describe '#start' do
subject {
described_class.create(
'Cmd' => %w[test -d /foo],
'Image' => 'debian:stable',
'Volumes' => {'/foo' => {}},
'HostConfig' => { 'Binds' => ["/tmp:/foo"] }
)
}
let(:all) { Docker::Container.all(all: true) }
before { subject.start }
after(:each) { subject.remove }
it 'starts the container' do
expect(all.map(&:id)).to be_any { |id| id.start_with?(subject.id) }
expect(subject.wait(10)['StatusCode']).to be_zero
end
end
describe '#stop' do
subject {
described_class.create('Cmd' => %w[true], 'Image' => 'debian:stable')
}
before { subject.tap(&:start).stop('timeout' => '10') }
after { subject.remove }
it 'stops the container' do
expect(described_class.all(:all => true).map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
expect(described_class.all.map(&:id)).to be_none { |id|
id.start_with?(subject.id)
}
end
context 'with a timeout' do
let(:custom_timeout) { 60 }
before do
subject.tap(&:start)
end
it 'extends the Excon timeout ensuring the request does not timeout before Docker' do
expect(subject.connection).to receive(:request).with(
:post,
anything,
anything,
hash_including(read_timeout: custom_timeout + 5, write_timeout: custom_timeout + 5)
).once
allow(subject.connection).to receive(:request).with(:delete, anything, anything)
subject.stop('timeout' => custom_timeout)
end
end
context 'without a timeout' do
before do
subject.tap(&:start)
end
it 'does not adjust the default Excon HTTP timeout' do
expect(subject.connection).to receive(:request).with(
:post,
anything,
anything,
hash_including(body: '{}')
).once
allow(subject.connection).to receive(:request).with(:delete, anything, anything)
subject.stop
end
end
end
describe '#exec' do
subject {
described_class.create(
'Cmd' => %w[sleep 20],
'Image' => 'debian:stable'
).start
}
after { subject.kill!.remove }
context 'when passed only a command' do
let(:output) { subject.exec(['bash','-c','sleep 2; echo hello']) }
it 'returns the stdout/stderr messages and exit code' do
expect(output).to eq([["hello\n"], [], 0])
end
end
context 'when detach is true' do
let(:output) { subject.exec(['date'], detach: true) }
it 'returns the Docker::Exec object' do
expect(output).to be_a Docker::Exec
expect(output.id).to_not be_nil
end
end
context 'when passed a block' do
it 'streams the stdout/stderr messages' do
chunk = nil
subject.exec(['bash','-c','sleep 2; echo hello']) do |stream, c|
chunk ||= c
end
expect(chunk).to eq("hello\n")
end
end
context 'when stdin object is passed' do
let(:output) { subject.exec(['cat'], stdin: StringIO.new("hello")) }
it 'returns the stdout/stderr messages' do
skip('Not supported on podman') if ::Docker.podman?
expect(output).to eq([["hello"],[],0])
end
end
context 'when tty is true' do
let(:command) { [
"bash", "-c",
"if [ -t 1 ]; then echo -n \"I'm a TTY!\"; fi"
] }
let(:output) { subject.exec(command, tty: true) }
it 'returns the raw stdout/stderr output' do
expect(output).to eq([["I'm a TTY!"], [], 0])
end
end
end
describe '#kill' do
let(:command) { ['/bin/bash', '-c', 'while [ 1 ]; do echo hello; done'] }
subject {
described_class.create('Cmd' => command, 'Image' => 'debian:stable')
}
before { subject.start }
after(:each) {subject.remove }
it 'kills the container' do
subject.kill
expect(described_class.all.map(&:id)).to be_none { |id|
id.start_with?(subject.id)
}
expect(described_class.all(:all => true).map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
end
context 'with a kill signal' do
let(:command) {
[
'/bin/bash',
'-c',
'trap echo SIGTERM; while [ 1 ]; do echo hello; done'
]
}
it 'kills the container' do
subject.kill(:signal => "SIGTERM")
expect(described_class.all.map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
expect(described_class.all(:all => true).map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
subject.kill(:signal => "SIGKILL")
expect(described_class.all.map(&:id)).to be_none { |id|
id.start_with?(subject.id)
}
expect(described_class.all(:all => true).map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
end
end
end
describe '#delete' do
subject {
described_class.create('Cmd' => ['ls'], 'Image' => 'debian:stable')
}
it 'deletes the container' do
subject.delete(:force => true)
expect(described_class.all.map(&:id)).to be_none { |id|
id.start_with?(subject.id)
}
end
end
describe '#restart' do
subject {
described_class.create('Cmd' => %w[sleep 10], 'Image' => 'debian:stable')
}
before { subject.start }
after { subject.kill!.remove }
it 'restarts the container' do
expect(described_class.all.map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
subject.stop
expect(described_class.all.map(&:id)).to be_none { |id|
id.start_with?(subject.id)
}
subject.restart('timeout' => '10')
expect(described_class.all.map(&:id)).to be_any { |id|
id.start_with?(subject.id)
}
end
end
describe '#pause' do
subject {
described_class.create(
'Cmd' => %w[sleep 50],
'Image' => 'debian:stable'
).start
}
after { subject.unpause.kill!.remove }
it 'pauses the container' do
skip('Not supported on rootless podman') if (::Docker.podman? && ::Docker.rootless?)
subject.pause
expect(described_class.get(subject.id).info['State']['Paused']).to be true
end
end
describe '#unpause' do
subject {
described_class.create(
'Cmd' => %w[sleep 50],
'Image' => 'debian:stable'
).start
}
before { subject.pause }
after { subject.kill!.remove }
it 'unpauses the container' do
subject.unpause
expect(
described_class.get(subject.id).info['State']['Paused']
).to be false
end
end
describe '#wait' do
subject {
described_class.create(
'Cmd' => %w[tar nonsense],
'Image' => 'debian:stable'
)
}
before { subject.start }
after(:each) { subject.remove }
it 'waits for the command to finish' do
expect(subject.wait['StatusCode']).to_not be_zero
end
context 'when an argument is given' do
subject { described_class.create('Cmd' => %w[sleep 5],
'Image' => 'debian:stable') }
it 'sets the :read_timeout to that amount of time' do
expect(subject.wait(6)['StatusCode']).to be_zero
end
context 'and a command runs for too long' do
it 'raises a ServerError' do
expect{subject.wait(4)}.to raise_error(Docker::Error::TimeoutError)
subject.tap(&:wait)
end
end
end
end
describe '#run' do
let(:run_command) { subject.run('ls') }
context 'when the Container\'s command does not return a status code of 0' do
subject { described_class.create('Cmd' => %w[false],
'Image' => 'debian:stable') }
after do
subject.remove
end
it 'raises an error' do
expect { run_command }
.to raise_error(Docker::Error::UnexpectedResponseError)
end
end
context 'when the Container\'s command returns a status code of 0' do
subject { described_class.create('Cmd' => %w[pwd],
'Image' => 'debian:stable') }
after do
subject.remove
image = run_command.json['Image']
run_command.remove
Docker::Image.get(image).history.each do |layer|
next unless layer['CreatedBy'] == 'pwd'
Docker::Image.get(layer['Id']).remove(:noprune => true)
end
end
it 'creates a new container to run the specified command' do
expect(run_command.wait['StatusCode']).to be_zero
end
end
end
describe '#commit' do
subject {
described_class.create('Cmd' => %w[true], 'Image' => 'debian:stable')
}
let(:image) { subject.commit }
after(:each) do
subject.remove
image.remove
end
it 'creates a new Image from the Container\'s changes' do
subject.tap(&:start).wait
expect(image).to be_a Docker::Image
expect(image.id).to_not be_nil
end
context 'if run is passed, it saves the command in the image' do
let(:image) { subject.commit }
let(:container) { image.run('pwd') }
it 'saves the command' do
skip('Not supported on podman') if ::Docker.podman?
container.wait
expect(container.attach(logs: true, stream: false)).to eql [["/\n"],[]]
container.remove
end
end
end
describe '.create' do
subject { described_class }
context 'when the Container does not yet exist' do
context 'when the HTTP request does not return a 200' do
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :post }, { :status => 400 })
end
after do
Excon.stubs.shift
Docker.options = {}
end
it 'raises an error' do
expect { subject.create }.to raise_error(Docker::Error::ClientError)
end
end
context 'when the HTTP request returns a 200' do
let(:options) do
{
"Cmd" => ["date"],
"Image" => "debian:stable",
}
end
let(:container) { subject.create(options) }
after { container.remove }
it 'sets the id' do
expect(container).to be_a Docker::Container
expect(container.id).to_not be_nil
expect(container.connection).to_not be_nil
end
end
end
end
describe '.get' do
subject { described_class }
context 'when the HTTP response is not a 200' do
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :get }, { :status => 500 })
end
after do
Excon.stubs.shift
Docker.options = {}
end
it 'raises an error' do
expect { subject.get('randomID') }
.to raise_error(Docker::Error::ServerError)
end
end
context 'when the HTTP response is a 200' do
let(:container) {
subject.create('Cmd' => ['ls'], 'Image' => 'debian:stable')
}
after { container.remove }
it 'materializes the Container into a Docker::Container' do
expect(subject.get(container.id)).to be_a Docker::Container
end
end
end
describe '.all' do
subject { described_class }
context 'when the HTTP response is not a 200' do
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :get }, { :status => 500 })
end
after do
Excon.stubs.shift
Docker.options = {}
end
it 'raises an error' do
expect { subject.all }
.to raise_error(Docker::Error::ServerError)
end
end
context 'when the HTTP response is a 200' do
let(:container) {
subject.create('Cmd' => ['ls'], 'Image' => 'debian:stable')
}
before { container }
after { container.remove }
it 'materializes each Container into a Docker::Container' do
expect(subject.all(:all => true)).to be_all { |container|
container.is_a?(Docker::Container)
}
expect(subject.all(:all => true).length).to_not be_zero
end
end
end
describe '.prune', :docker_17_03 => true do
it 'prunes containers' do
expect { Docker::Container.prune }.not_to raise_error
end
end
end
docker-api-2.2.0/spec/docker/event_spec.rb 0000664 0000000 0000000 00000007774 14071411272 0020430 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 5
describe Docker::Event do
let(:api_response) do
{
'Action' => 'start',
'Actor' => {
'Attributes' => {
'image' => 'tianon/true',
'name' => 'true-dat'
},
'ID' => 'bb2c783a32330b726f18d1eb44d80c899ef45771b4f939326e0fefcfc7e05db8'
},
'Type' => 'container',
'from' => 'tianon/true',
'id' => 'bb2c783a32330b726f18d1eb44d80c899ef45771b4f939326e0fefcfc7e05db8',
'status' => 'start',
'time' => 1461083270,
'timeNano' => 1461083270652069004
}
end
describe "#to_s" do
context 'with an old event' do
let(:event) do
described_class.new(
status: status,
id: id,
from: from,
time: time
)
end
let(:status) { "start" }
let(:id) { "398c9f77b5d2" }
let(:from) { "debian:stable" }
let(:time) { 1381956164 }
let(:expected_string) {
"Docker::Event { #{time} #{status} #{id} (from=#{from}) }"
}
it "equals the expected string" do
expect(event.to_s).to eq(expected_string)
end
end
context 'with a new event' do
let(:event) { described_class.new(api_response) }
let(:expected_string) do
'Docker::Event { 1461083270652069004 container start '\
'bb2c783a32330b726f18d1eb44d80c899ef45771b4f939326e0fefcfc7e05db8 '\
'(image=tianon/true, name=true-dat) }'
end
it 'equals the expected string' do
expect(event.to_s).to eq(expected_string)
end
end
end
describe ".stream" do
it 'receives at least 4 events' do
events = 0
stream_thread = Thread.new do
Docker::Event.stream do |event|
puts "#{event}"
events += 1
break if events >= 4
end
end
container = Docker::Image.create('fromImage' => 'debian:stable')
.run('bash')
.tap(&:wait)
stream_thread.join(10) || stream_thread.kill
expect(events).to be >= 4
container.remove
end
end
describe ".since" do
let(:time) { Time.now.to_i + 1 }
it 'receives at least 4 events' do
skip('Not supported on podman') if ::Docker.podman?
events = 0
stream_thread = Thread.new do
Docker::Event.since(time) do |event|
puts "#{event}"
events += 1
break if events >= 4
end
end
container = Docker::Image.create('fromImage' => 'debian:stable')
.run('bash')
.tap(&:wait)
stream_thread.join(10) || stream_thread.kill
expect(events).to be >= 4
container.remove
end
end
describe ".new_event" do
context 'with an old api response' do
let(:event) { Docker::Event.new_event(response_body, nil, nil) }
let(:status) { "start" }
let(:id) { "398c9f77b5d2" }
let(:from) { "debian:stable" }
let(:time) { 1381956164 }
let(:response_body) {
"{\"status\":\"#{status}\",\"id\":\"#{id}\""\
",\"from\":\"#{from}\",\"time\":#{time}}"
}
it "returns a Docker::Event" do
expect(event).to be_kind_of(Docker::Event)
expect(event.status).to eq(status)
expect(event.id).to eq(id)
expect(event.from).to eq(from)
expect(event.time).to eq(time)
end
end
context 'with a new api response' do
let(:event) do
Docker::Event.new_event(
MultiJson.dump(api_response),
nil,
nil
)
end
it 'returns a Docker::Event' do
expect(event).to be_kind_of(Docker::Event)
expect(event.type).to eq('container')
expect(event.action).to eq('start')
expect(
event.actor.id
).to eq('bb2c783a32330b726f18d1eb44d80c899ef45771b4f939326e0fefcfc7e05db8')
expect(event.actor.attributes).to eq('image' => 'tianon/true', 'name' => 'true-dat')
expect(event.time).to eq 1461083270
expect(event.time_nano).to eq 1461083270652069004
end
end
end
end
docker-api-2.2.0/spec/docker/exec_spec.rb 0000664 0000000 0000000 00000011264 14071411272 0020220 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 5
describe Docker::Exec do
let(:container) {
Docker::Container.create(
'Cmd' => %w(sleep 300),
'Image' => 'debian:stable'
).start!
}
describe '#to_s' do
subject {
described_class.send(:new, Docker.connection, 'id' => rand(10000).to_s)
}
let(:id) { 'bf119e2' }
let(:connection) { Docker.connection }
let(:expected_string) {
"Docker::Exec { :id => #{id}, :connection => #{connection} }"
}
before do
{
:@id => id,
:@connection => connection
}.each { |k, v| subject.instance_variable_set(k, v) }
end
its(:to_s) { should == expected_string }
end
describe '.create' do
subject { described_class }
context 'when the HTTP request returns a 201' do
let(:options) do
{
'AttachStdin' => false,
'AttachStdout' => false,
'AttachStderr' => false,
'Tty' => false,
'Cmd' => [
'date'
],
'Container' => container.id
}
end
let(:process) { subject.create(options) }
after { container.kill!.remove }
it 'sets the id' do
expect(process).to be_a Docker::Exec
expect(process.id).to_not be_nil
expect(process.connection).to_not be_nil
end
end
context 'when the parent container does not exist' do
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :get}, { :status => 404 }) # For Podman
Excon.stub({ :method => :post}, { :status => 404 })
end
after do
Excon.stubs.shift
Docker.options = {}
end
it 'raises an error' do
expect { subject.create }.to raise_error(Docker::Error::NotFoundError)
end
end
end
describe '#json' do
subject {
described_class.create(
'Container' => container.id,
'Detach' => true,
'Cmd' => %w[true]
)
}
let(:description) { subject.json }
before { subject.start! }
after { container.kill!.remove }
it 'returns the description as a Hash' do
expect(description).to be_a Hash
expect(description['ID']).to start_with(subject.id)
end
end
describe '#start!' do
context 'when the exec instance does not exist' do
subject do
described_class.send(:new, Docker.connection, 'id' => rand(10000).to_s)
end
it 'raises an error' do
expect { subject.start! }.to raise_error(Docker::Error::NotFoundError)
end
end
context 'when :detach is set to false' do
subject {
described_class.create(
'Container' => container.id,
'AttachStdout' => true,
'Cmd' => ['bash','-c','sleep 2; echo hello']
)
}
after { container.kill!.remove }
it 'returns the stdout and stderr messages' do
expect(subject.start!).to eq([["hello\n"],[],0])
end
context 'block is passed' do
it 'attaches to the stream' do
chunk = nil
result = subject.start! do |stream, c|
chunk ||= c
end
expect(chunk).to eq("hello\n")
expect(result).to eq([["hello\n"], [], 0])
end
end
end
context 'when :detach is set to true' do
subject {
described_class.create('Container' => container.id, 'Cmd' => %w[date])
}
after { container.kill!.remove }
it 'returns empty stdout/stderr messages with exitcode' do
expect(subject.start!(:detach => true).length).to eq(3)
end
end
context 'when :wait set long time value' do
subject {
described_class.create(
'Container' => container.id,
'AttachStdout' => true,
'Cmd' => %w[true]
)
}
after { container.kill!.remove }
it 'returns empty stdout and stderr messages with exitcode' do
expect(subject.start!(:wait => 100)).to eq([[], [], 0])
end
end
context 'when :wait set short time value' do
subject {
described_class.create(
'Container' => container.id,
'AttachStdout' => true,
'Cmd' => ['bash', '-c', 'sleep 2; echo hello']
)
}
after { container.kill!.remove }
it 'raises an error' do
expect { subject.start!(:wait => 1) }.to raise_error(Docker::Error::TimeoutError)
end
end
context 'when the HTTP request returns a 201' do
subject {
described_class.create('Container' => container.id, 'Cmd' => ['date'])
}
after { container.kill!.remove }
it 'starts the exec instance' do
expect { subject.start! }.not_to raise_error
end
end
end
end
docker-api-2.2.0/spec/docker/image_spec.rb 0000664 0000000 0000000 00000060236 14071411272 0020361 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 16
describe Docker::Image do
describe '#to_s' do
subject { described_class.new(Docker.connection, info) }
let(:id) { 'bf119e2' }
let(:connection) { Docker.connection }
let(:info) do
{"id" => "bf119e2", "Repository" => "debian", "Tag" => "stable",
"Created" => 1364102658, "Size" => 24653, "VirtualSize" => 180116135}
end
let(:expected_string) do
"Docker::Image { :id => #{id}, :info => #{info.inspect}, "\
":connection => #{connection} }"
end
its(:to_s) { should == expected_string }
end
describe '#remove' do
context 'when no name is given' do
let(:id) { subject.id }
subject { described_class.create('fromImage' => 'busybox:latest') }
after { described_class.create('fromImage' => 'busybox:latest') }
it 'removes the Image' do
subject.remove(:force => true)
expect(Docker::Image.all.map(&:id)).to_not include(id)
end
end
context 'when using the class' do
let(:id) { subject.id }
subject { described_class.create('fromImage' => 'busybox:latest') }
after { described_class.create('fromImage' => 'busybox:latest') }
it 'removes the Image' do
Docker::Image.remove(id, force: true)
expect(Docker::Image.all.map(&:id)).to_not include(id)
end
end
context 'when a valid tag is given' do
it 'untags the Image'
end
context 'when an invalid tag is given' do
it 'raises an error'
end
end
describe '#insert_local' do
include_context "local paths"
subject { described_class.create('fromImage' => 'debian:stable') }
let(:rm) { false }
let(:new_image) {
opts = {'localPath' => file, 'outputPath' => '/'}
opts[:rm] = true if rm
subject.insert_local(opts)
}
context 'when the local file does not exist' do
let(:file) { '/lol/not/a/file' }
it 'raises an error' do
expect { new_image }.to raise_error(Docker::Error::ArgumentError)
end
end
context 'when the local file does exist' do
let(:file) { File.join(project_dir, 'Gemfile') }
let(:gemfile) { File.read('Gemfile') }
let(:container) { new_image.run('cat /Gemfile').tap(&:wait) }
after do
container.remove
new_image.remove
end
it 'creates a new Image that has that file' do
begin
output = container.streaming_logs(stdout: true)
expect(output).to eq(gemfile)
rescue Docker::Error::UnexpectedResponseError => ex
skip("Could not communicate with DockerHub: #{ex}")
end
end
end
context 'when a directory is passed' do
let(:new_image) {
subject.insert_local(
'localPath' => File.join(project_dir, 'lib'),
'outputPath' => '/lib'
)
}
let(:container) { new_image.run('ls -a /lib/docker') }
let(:response) { container.tap(&:wait).streaming_logs(stdout: true) }
after do
container.tap(&:wait).remove
new_image.remove
end
it 'inserts the directory' do
begin
expect(response.split("\n").sort).to eq(Dir.entries('lib/docker').sort)
rescue Docker::Error::UnexpectedResponseError => ex
skip("Could not communicate with DockerHub: #{ex}")
end
end
end
context 'when there are multiple files passed' do
let(:file) {
[File.join(project_dir, 'Gemfile'), File.join(project_dir, 'LICENSE')]
}
let(:gemfile) { File.read('Gemfile') }
let(:license) { File.read('LICENSE') }
let(:container) { new_image.run('cat /Gemfile /LICENSE') }
let(:response) {
container.tap(&:wait).streaming_logs(stdout: true)
}
after do
container.remove
new_image.remove
end
it 'creates a new Image that has each file' do
begin
expect(response).to eq("#{gemfile}#{license}")
rescue Docker::Error::UnexpectedResponseError => ex
skip("Could not communicate with DockerHub: #{ex}")
end
end
end
context 'when removing intermediate containers' do
let(:rm) { true }
let(:file) { File.join(project_dir, 'Gemfile') }
after(:each) { new_image.remove }
it 'leaves no intermediate containers' do
begin
expect { new_image }.to change {
Docker::Container.all(:all => true).count
}.by 0
rescue Docker::Error::UnexpectedResponseError => ex
skip("Could not communicate with DockerHub: #{ex}")
end
end
it 'creates a new image' do
begin
expect{new_image}.to change{Docker::Image.all.count}.by 1
rescue Docker::Error::UnexpectedResponseError => ex
skip("Could not communicate with DockerHub: #{ex}")
end
end
end
end
describe '#push' do
let(:credentials) {
{
'username' => ENV['DOCKER_API_USER'],
'password' => ENV['DOCKER_API_PASS'],
'serveraddress' => 'https://index.docker.io/v1',
'email' => ENV['DOCKER_API_EMAIL']
}
}
let(:repo_tag) { "#{ENV['DOCKER_API_USER']}/true" }
let(:image) {
described_class.build("FROM tianon/true\n", "t" => repo_tag).refresh!
}
after { image.remove(:name => repo_tag, :noprune => true) }
it 'pushes the Image' do
skip_without_auth
image.push(credentials)
end
it 'streams output from push' do
skip_without_auth
expect { |b| image.push(credentials, &b) }
.to yield_control.at_least(1)
end
context 'when a tag is specified' do
it 'pushes that specific tag'
end
context 'when the image was retrieved by get' do
let(:image) {
described_class.build("FROM tianon/true\n", "t" => repo_tag).refresh!
described_class.get(repo_tag)
}
context 'when no tag is specified' do
it 'looks up the first repo tag' do
skip_without_auth
expect { image.push }.to_not raise_error
end
end
end
context 'when there are no credentials' do
let(:credentials) { nil }
let(:repo_tag) { "localhost:5000/true" }
it 'still pushes' do
begin
image.push
rescue => ex
if ex.message =~ /connection refused/
skip("Registry at #{repo_tag} is not available")
else
expect { raise(ex) }.to_not raise_error
end
end
end
end
end
describe '#tag' do
subject { described_class.create('fromImage' => 'debian:stable') }
after { subject.remove(:name => 'teh:latest', :noprune => true) }
it 'tags the image with the repo name' do
subject.tag(:repo => :teh, :force => true)
expect(subject.info['RepoTags']).to include 'teh:latest'
end
end
describe '#json' do
before { skip_without_auth }
subject { described_class.create('fromImage' => 'debian:stable') }
let(:json) { subject.json }
it 'returns additional information about the image' do
expect(json).to be_a Hash
expect(json.length).to_not be_zero
end
end
describe '#history' do
subject { described_class.create('fromImage' => 'debian:stable') }
let(:history) { subject.history }
it 'returns the history of the Image' do
expect(history).to be_a Array
expect(history.length).to_not be_zero
expect(history).to be_all { |elem| elem.is_a? Hash }
end
end
describe '#run' do
let(:cmd) { nil }
let(:options) { {} }
subject do
described_class.create(
{'fromImage' => 'debian:stable'})
end
let(:container) { subject.run(cmd, options).tap(&:wait) }
let(:output) { container.streaming_logs(stdout: true) }
context 'when cmd is a String' do
let(:cmd) { 'ls /lib64/' }
after { container.remove }
it 'splits the String by spaces and creates a new Container' do
expect(output).to eq("ld-linux-x86-64.so.2\n")
end
end
context 'when cmd is an Array' do
let(:cmd) { %w[which pwd] }
after { container.remove }
it 'creates a new Container' do
expect(output).to eq("/bin/pwd\n")
end
end
context 'when cmd is nil', docker_1_12: true do
let(:cmd) { nil }
context 'no command configured in image' do
subject { described_class.create('fromImage' => 'swipely/base') }
it 'should raise an error if no command is specified' do
begin
container
rescue => ex
expect([Docker::Error::ServerError, Docker::Error::ClientError]).to include(ex.class)
expect(ex.message).to match(/No\ command\ specified/)
end
end
end
end
context "command configured in image" do
let(:cmd) { 'pwd' }
after { container.remove }
it 'should normally show the result if the image has Cmd configured' do
expect(output).to eql "/\n"
end
end
context 'when using cpu shares' do
let(:options) { { 'CpuShares' => 50 } }
after { container.remove }
it 'returns 50' do
skip('Not supported on podman') if ::Docker.podman?
expect(container.json["HostConfig"]["CpuShares"]).to eq 50
end
end
end
describe '#save' do
let(:image) { Docker::Image.get('busybox') }
it 'calls the class method' do
expect(Docker::Image).to receive(:save)
.with(image.id, 'busybox.tar', anything)
image.save('busybox.tar')
end
end
describe '#save_stream' do
let(:image) { Docker::Image.get('busybox') }
let(:block) { proc { |chunk| puts chunk } }
it 'calls the class method' do
expect(Docker::Image).to receive(:save_stream)
.with(image.id, instance_of(Hash), instance_of(Docker::Connection))
image.save_stream(:chunk_size => 1024 * 1024, &block)
end
end
describe '#refresh!' do
let(:image) { Docker::Image.create('fromImage' => 'debian:stable') }
it 'updates the @info hash' do
size = image.info.size
image.refresh!
expect(image.info.size).to be > size
end
context 'with an explicit connection' do
let(:connection) { Docker::Connection.new(Docker.url, Docker.options) }
let(:image) {
Docker::Image.create({'fromImage' => 'debian:stable'}, nil, connection)
}
it 'updates using the provided connection' do
image.refresh!
end
end
end
describe '.load' do
include_context "local paths"
let(:file) { File.join(project_dir, 'spec', 'fixtures', 'load.tar') }
context 'when the argument is a String' do
it 'loads tianon/true image from the file system' do
result = Docker::Image.load(file)
expect(result).to eq("")
end
end
context 'when the argument is an IO' do
let(:io) { File.open(file) }
after { io.close }
it 'loads tianon/true image from the IO' do
result = Docker::Image.load(io)
expect(result).to eq("")
end
end
end
describe '.create' do
subject { described_class }
context 'when the Image does not yet exist and the body is a Hash' do
let(:image) { subject.create('fromImage' => 'swipely/base') }
let(:creds) {
{
:username => ENV['DOCKER_API_USER'],
:password => ENV['DOCKER_API_PASS'],
:email => ENV['DOCKER_API_EMAIL']
}
}
before do
skip_without_auth
Docker::Image.create('fromImage' => 'swipely/base').remove
end
after { Docker::Image.create('fromImage' => 'swipely/base') }
it 'sets the id and sends Docker.creds' do
allow(Docker).to receive(:creds).and_return(creds)
expect(image).to be_a Docker::Image
expect(image.id).to match(/\A(sha256:)?[a-fA-F0-9]+\Z/)
expect(image.id).to_not include('base')
expect(image.id).to_not be_nil
expect(image.id).to_not be_empty
end
end
context 'image with tag' do
it 'pulls the image (string arguments)' do
image = subject.create('fromImage' => 'busybox', 'tag' => 'uclibc')
image.refresh!
expect(image.info['RepoTags']).to include(/busybox:uclibc$/)
end
it 'pulls the image (symbol arguments)' do
image = subject.create(fromImage: 'busybox', tag: 'uclibc')
image.refresh!
expect(image.info['RepoTags']).to include(/busybox:uclibc$/)
end
it 'supports identical fromImage and tag', docker_1_10: true do
# This is here for backwards compatibility. docker-api used to
# completely ignore the "tag" argument, which Docker itself prioritizes
# over a tag found in fromImage, which meant that we had 4 scenarios:
#
# 1 fromImage does not include a tag, and the tag argument is provided
# and isn't the default (i.e. "latest"): docker-api crashes looking
# for fromImage when the image that was pulled is fromImage:tag (or
# returns the wrong image if fromImage:latest exists)
# 2 fromImage does not include a tag, and the tag argument is absent
# or default (i.e. "latest"): docker-api finds the right image.
# 3 fromImage includes a tag, and the tag argument is absent: docker-api
# also finds the right image.
# 4 fromImage includes a tag, and the tag argument is present: works if
# the tag is the same in both.
#
# Adding support for the tag argument to fix 1 above means we'd break 4
# if we didn't explicitly handle the case where both tags are identical.
# This is what this test checks.
#
# Note that providing the tag inline in fromImage is only supported in
# Docker 1.10 and up.
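# Illustrative sketch (comments only, nothing here is executed): with the
# fix in place both of the following calls are expected to resolve to the
# same busybox:uclibc image instead of raising --
#   Docker::Image.create('fromImage' => 'busybox:uclibc')
#   Docker::Image.create(fromImage: 'busybox:uclibc', tag: 'uclibc')
# The example below exercises the second form.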
skip('Not supported on podman') if ::Docker.podman?
image = subject.create(fromImage: 'busybox:uclibc', tag: 'uclibc')
image.refresh!
expect(image.info['RepoTags']).to include('busybox:uclibc')
end
end
context 'with a block capturing create output' do
let(:create_output) { "" }
let(:block) { Proc.new { |chunk| create_output << chunk } }
before do
Docker.creds = nil
subject.create('fromImage' => 'busybox').remove(force: true)
end
it 'calls the block and passes build output' do
subject.create('fromImage' => 'busybox', &block)
expect(create_output).to match(/ulling.*busybox/)
end
end
end
describe '.get' do
subject { described_class }
let(:image) { subject.get(image_name) }
context 'when the image does exist' do
let(:image_name) { 'debian:stable' }
it 'returns the new image' do
expect(image).to be_a Docker::Image
end
end
context 'when the image does not exist' do
let(:image_name) { 'abcdefghijkl' }
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :get }, { :status => 404 })
end
after do
Docker.options = {}
Excon.stubs.shift
end
it 'raises a not found error' do
expect { image }.to raise_error(Docker::Error::NotFoundError)
end
end
end
describe '.save' do
include_context "local paths"
context 'when a filename is specified' do
let(:file) { "#{project_dir}/scratch.tar" }
after { FileUtils.remove(file) }
it 'exports tarball of image to specified file' do
Docker::Image.save('swipely/base', file)
expect(File.exist?(file)).to eq true
expect(File.read(file)).to_not be_nil
end
end
context 'when no filename is specified' do
it 'returns raw binary data as string' do
raw = Docker::Image.save('swipely/base')
expect(raw).to_not be_nil
end
end
end
describe '.save_stream' do
let(:image) { 'busybox:latest' }
let(:non_streamed) do
Docker.connection.get('/images/get', 'names' => image)
end
let(:streamed) { '' }
let(:tar_files) do
proc do |string|
Gem::Package::TarReader
.new(StringIO.new(string, 'rb'))
.map(&:full_name)
.sort
end
end
it 'yields each chunk of the image' do
Docker::Image.save_stream(image) { |chunk| streamed << chunk }
expect(tar_files.call(streamed)).to eq(tar_files.call(non_streamed))
end
end
describe '.exist?' do
subject { described_class }
let(:exists) { subject.exist?(image_name) }
context 'when the image does exist' do
let(:image_name) { 'debian:stable' }
it 'returns true' do
expect(exists).to eq(true)
end
end
context 'when the image does not exist' do
let(:image_name) { 'abcdefghijkl' }
before do
Docker.options = { :mock => true }
Excon.stub({ :method => :get }, { :status => 404 })
end
after do
Docker.options = {}
Excon.stubs.shift
end
it 'returns false' do
expect(exists).to eq(false)
end
end
end
describe '.import' do
include_context "local paths"
subject { described_class }
context 'when the file does not exist' do
let(:file) { '/lol/not/a/file' }
it 'raises an error' do
expect { subject.import(file) }
.to raise_error(Docker::Error::IOError)
end
end
context 'when the file does exist' do
let(:file) { File.join(project_dir, 'spec', 'fixtures', 'export.tar') }
let(:import) { subject.import(file) }
after { import.remove(:noprune => true) }
it 'creates the Image' do
expect(import).to be_a Docker::Image
expect(import.id).to_not be_nil
end
end
context 'when the argument is a URI' do
context 'when the URI is invalid' do
it 'raises an error' do
expect { subject.import('http://google.com') }
.to raise_error(Docker::Error::IOError)
end
end
context 'when the URI is valid' do
let(:uri) { 'http://swipely-pub.s3.amazonaws.com/tianon_true.tar' }
let(:import) { subject.import(uri) }
after { import.remove(:noprune => true) }
it 'returns an Image' do
expect(import).to be_a Docker::Image
expect(import.id).to_not be_nil
end
end
end
end
describe '.all' do
subject { described_class }
let(:images) { subject.all(:all => true) }
before { subject.create('fromImage' => 'debian:stable') }
it 'materializes each Image into a Docker::Image' do
images.each do |image|
expect(image).to_not be_nil
expect(image).to be_a(described_class)
expect(image.id).to_not be_nil
expected = [
'Created',
'Size'
]
expected << 'VirtualSize' unless ::Docker.podman?
expected.each do |key|
expect(image.info).to have_key(key)
end
end
expect(images.length).to_not be_zero
end
end
describe '.prune', :docker_17_03 => true do
it 'prunes images' do
expect { Docker::Image.prune }.not_to raise_error
end
end
unless ::Docker.podman?
describe '.search' do
subject { described_class }
it 'materializes each Image into a Docker::Image' do
expect(subject.search('term' => 'sshd')).to be_all { |image|
!image.id.nil? && image.is_a?(described_class)
}
end
end
end
describe '.build' do
subject { described_class }
context 'with an invalid Dockerfile' do
if ::Docker.podman?
it 'throws an UnexpectedResponseError' do
expect { subject.build('lololol') }
.to raise_error(Docker::Error::UnexpectedResponseError)
end
else
it 'throws an UnexpectedResponseError', docker_17_09: false do
expect { subject.build('lololol') }
.to raise_error(Docker::Error::ClientError)
end
it 'throws a ClientError', docker_17_09: true do
expect { subject.build('lololol') }
.to raise_error(Docker::Error::ClientError)
end
end
end
context 'with a valid Dockerfile' do
context 'without query parameters' do
let(:image) { subject.build("FROM debian:stable\n") }
it 'builds an image' do
expect(image).to be_a Docker::Image
expect(image.id).to_not be_nil
expect(image.connection).to be_a Docker::Connection
end
end
context 'with specifying a repo in the query parameters' do
let(:image) {
subject.build(
"FROM debian:stable\nRUN true\n",
"t" => "#{ENV['DOCKER_API_USER']}/debian:true"
)
}
after { image.remove(:noprune => true) }
it 'builds an image and tags it' do
expect(image).to be_a Docker::Image
expect(image.id).to_not be_nil
expect(image.connection).to be_a Docker::Connection
image.refresh!
expect(image.info["RepoTags"].size).to eq(1)
expect(image.info["RepoTags"].first).to match(%r{#{ENV['DOCKER_API_USER']}/debian:true})
end
end
context 'with a block capturing build output' do
let(:build_output) { "" }
let(:block) { Proc.new { |chunk| build_output << chunk } }
let!(:image) { subject.build("FROM debian:stable\n", &block) }
it 'calls the block and passes build output' do
expect(build_output).to match(/(Step|STEP) \d(\/\d)?\s?: FROM debian:stable/)
end
end
end
end
describe '.build_from_dir' do
subject { described_class }
context 'with a valid Dockerfile' do
let(:dir) {
File.join(File.dirname(__FILE__), '..', 'fixtures', 'build_from_dir')
}
let(:docker_file) { File.new("#{dir}/Dockerfile") }
let(:image) { subject.build_from_dir(dir, opts, &block) }
let(:opts) { {} }
let(:block) { Proc.new {} }
let(:container) do
Docker::Container.create(
'Image' => image.id,
'Cmd' => %w[cat /Dockerfile]
).tap(&:start).tap(&:wait)
end
let(:output) { container.streaming_logs(stdout: true) }
after(:each) do
container.remove
image.remove(:noprune => true)
end
context 'with no query parameters' do
it 'builds the image' do
expect(output).to eq(docker_file.read)
end
end
context 'with specifying a repo in the query parameters' do
let(:opts) { { "t" => "#{ENV['DOCKER_API_USER']}/debian:from_dir" } }
it 'builds the image and tags it' do
expect(output).to eq(docker_file.read)
image.refresh!
expect(image.info["RepoTags"].size).to eq(1)
expect(image.info["RepoTags"].first).to match(%r{#{ENV['DOCKER_API_USER']}/debian:from_dir})
end
end
context 'with a block capturing build output' do
let(:build_output) { "" }
let(:block) { Proc.new { |chunk| build_output << chunk } }
it 'calls the block and passes build output' do
image # Create the image variable, which is lazy-loaded by RSpec
expect(build_output).to match(/(Step|STEP) \d(\/\d)?\s?: FROM debian:stable/)
end
context 'uses a cached version the second time' do
let(:build_output_two) { "" }
let(:block_two) { Proc.new { |chunk| build_output_two << chunk } }
let(:image_two) { subject.build_from_dir(dir, opts, &block_two) }
it 'calls the block and passes build output' do
skip('Not supported on podman') if ::Docker.podman?
image # Create the image variable, which is lazy-loaded by RSpec
expect(build_output).to match(/(Step|STEP) \d(\/\d)?\s?: FROM debian:stable/)
expect(build_output).to_not match(/Using cache/)
image_two # Create the image_two variable, which is lazy-loaded by RSpec
expect(build_output_two).to match(/Using cache/)
end
end
end
context 'with credentials passed' do
let(:creds) {
{
:username => ENV['DOCKER_API_USER'],
:password => ENV['DOCKER_API_PASS'],
:email => ENV['DOCKER_API_EMAIL'],
:serveraddress => 'https://index.docker.io/v1'
}
}
before { Docker.creds = creds }
after { Docker.creds = nil }
it 'sends X-Registry-Config header' do
expect(image.info[:headers].keys).to include('X-Registry-Config')
end
end
end
end
end
docker-api-2.2.0/spec/docker/messages_spec.rb 0000664 0000000 0000000 00000005130 14071411272 0021076 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 4
describe Docker::Messages do
shared_examples_for "two equal messages" do
it "has the same messages as we expect" do
expect(messages.all_messages).to eq(expected.all_messages)
expect(messages.stdout_messages).to eq(expected.stdout_messages)
expect(messages.stderr_messages).to eq(expected.stderr_messages)
expect(messages.buffer).to eq(expected.buffer)
end
end
describe '.decipher_messages' do
shared_examples_for "decipher_messages of raw_test" do
let(:messages) {
subject.decipher_messages(raw_text)
}
it_behaves_like "two equal messages"
end
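# The raw_text fixtures below use Docker's multiplexed-stream framing: each
# frame begins with an 8-byte header -- one byte for the stream type
# (1 = stdout, 2 = stderr), three zero bytes, then a 4-byte big-endian
# payload length -- followed by that many bytes of payload.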
context 'given both standard out and standard error' do
let(:raw_text) {
"\x01\x00\x00\x00\x00\x00\x00\x01a\x02\x00\x00\x00\x00\x00\x00\x01b"
}
let(:expected) {
Docker::Messages.new(["a"], ["b"], ["a","b"], "")
}
it_behaves_like "decipher_messages of raw_test"
end
context 'given a single header' do
let(:raw_text) { "\x01\x00\x00\x00\x00\x00\x00\x01a" }
let(:expected) {
Docker::Messages.new(["a"], [], ["a"], "")
}
it_behaves_like "decipher_messages of raw_test"
end
context 'given two headers' do
let(:raw_text) {
"\x01\x00\x00\x00\x00\x00\x00\x01a\x01\x00\x00\x00\x00\x00\x00\x01b"
}
let(:expected) {
Docker::Messages.new(["a", "b"], [], ["a","b"], "")
}
it_behaves_like "decipher_messages of raw_test"
end
context 'given a header for text longer than 255 characters' do
let(:raw_text) {
"\x01\x00\x00\x00\x00\x00\x01\x01" + ("a" * 257)
}
let(:expected) {
Docker::Messages.new([("a" * 257)], [], [("a" * 257)], "")
}
it_behaves_like "decipher_messages of raw_test"
end
end
describe "#append" do
context "appending one set of messages on another" do
let(:messages) {
Docker::Messages.new([], [], [], "")
}
before do
messages.append(new_messages)
end
context "with a buffer" do
let(:new_messages) {
Docker::Messages.new(["a"], [], ["a"], "b")
}
let(:expected) {
Docker::Messages.new(["a"], [], ["a"], "")
}
it_behaves_like "two equal messages"
end
context "without a buffer" do
let(:new_messages) {
Docker::Messages.new(["a"], [], ["a"], "")
}
let(:expected) {
Docker::Messages.new(["a"], [], ["a"], "")
}
it_behaves_like "two equal messages"
end
end
end
end
docker-api-2.2.0/spec/docker/messages_stack.rb 0000664 0000000 0000000 00000001346 14071411272 0021256 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered!
describe Docker::MessagesStack do
describe '#append' do
context 'without limits' do
it 'does not limit stack size by default' do
data = ['foo', 'bar']
msg = Docker::Messages.new(data, [], data)
expect(subject.messages).not_to receive(:shift)
1000.times { subject.append(msg) }
end
end
context 'with size limit' do
let(:subject) { described_class.new(100) }
it 'limits stack to given size' do
data = ['foo', 'bar']
msg = Docker::Messages.new(data, [], data)
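# 1000 appends of a 2-message batch yields 2000 entries; with the stack
# capped at 100, the remaining 1900 are expected to be shifted off.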
expect(subject.messages).to receive(:shift).exactly(1900).times
1000.times { subject.append(msg) }
end
end
end
end
docker-api-2.2.0/spec/docker/network_spec.rb 0000664 0000000 0000000 00000007773 14071411272 0020777 0 ustar 00root root 0000000 0000000 require 'spec_helper'
unless ::Docker.podman?
SingleCov.covered! uncovered: 2
describe Docker::Network, docker_1_9: true do
let(:name) do |example|
example.description.downcase.gsub(/\s/, '-')
end
describe '#to_s' do
subject { described_class.new(Docker.connection, info) }
let(:connection) { Docker.connection }
let(:id) do
'a6c5ffd25e07a6c906accf804174b5eb6a9d2f9e07bccb8f5aa4f4de5be6d01d'
end
let(:info) do
{
'Name' => 'bridge',
'Scope' => 'local',
'Driver' => 'bridge',
'IPAM' => {
'Driver' => 'default',
'Config' => [{ 'Subnet' => '172.17.0.0/16' }]
},
'Containers' => {},
'Options' => {
'com.docker.network.bridge.default_bridge' => 'true',
'com.docker.network.bridge.enable_icc' => 'true',
'com.docker.network.bridge.enable_ip_masquerade' => 'true',
'com.docker.network.bridge.host_binding_ipv4' => '0.0.0.0',
'com.docker.network.bridge.name' => 'docker0',
'com.docker.network.driver.mtu' => '1500'
},
'id' => id
}
end
let(:expected_string) do
"Docker::Network { :id => #{id}, :info => #{info.inspect}, "\
":connection => #{connection} }"
end
its(:to_s) { should == expected_string }
end
describe '.create' do
let!(:id) { subject.id }
subject { described_class.create(name) }
after { described_class.remove(id) }
it 'creates a Network' do
expect(Docker::Network.all.map(&:id)).to include(id)
end
end
describe '.remove' do
let(:id) { subject.id }
subject { described_class.create(name) }
it 'removes the Network' do
described_class.remove(id)
expect(Docker::Network.all.map(&:id)).to_not include(id)
end
end
describe '.get' do
after do
described_class.remove(name)
end
let!(:network) { described_class.create(name) }
it 'returns a network' do
expect(Docker::Network.get(name).id).to eq(network.id)
end
end
describe '.all' do
let!(:networks) do
5.times.map { |i| described_class.create("#{name}-#{i}") }
end
after do
networks.each(&:remove)
end
it 'should return all networks' do
expect(Docker::Network.all.map(&:id)).to include(*networks.map(&:id))
end
end
describe '.prune', :docker_17_03 => true do
it 'prunes networks' do
expect { Docker::Network.prune }.not_to raise_error
end
end
describe '#connect' do
let!(:container) do
Docker::Container.create(
'Cmd' => %w(sleep 10),
'Image' => 'debian:stable'
)
end
subject { described_class.create(name) }
before(:each) { container.start }
after(:each) do
container.kill!.remove
subject.remove
end
it 'connects a container to a network' do
subject.connect(container.id)
expect(subject.info['Containers']).to include(container.id)
end
end
describe '#disconnect' do
let!(:container) do
Docker::Container.create(
'Cmd' => %w(sleep 10),
'Image' => 'debian:stable'
)
end
subject { described_class.create(name) }
before(:each) do
container.start
sleep 1
subject.connect(container.id)
end
after(:each) do
container.kill!.remove
subject.remove
end
it 'disconnects a container from a network' do
subject.disconnect(container.id)
expect(subject.info['Containers']).not_to include(container.id)
end
end
describe '#remove' do
let(:id) { subject.id }
let(:name) { 'test-network-remove' }
subject { described_class.create(name) }
it 'removes the Network' do
subject.remove
expect(Docker::Network.all.map(&:id)).to_not include(id)
end
end
end
end
docker-api-2.2.0/spec/docker/util_spec.rb 0000664 0000000 0000000 00000022332 14071411272 0020247 0 ustar 00root root 0000000 0000000 require 'spec_helper'
require 'tempfile'
require 'fileutils'
SingleCov.covered! uncovered: 71
describe Docker::Util do
subject { described_class }
describe '.parse_json' do
subject { described_class.parse_json(arg) }
context 'when the argument is nil' do
let(:arg) { nil }
it { should be_nil }
end
context 'when the argument is empty' do
let(:arg) { '' }
it { should be_nil }
end
context 'when the argument is \'null\'' do
let(:arg) { 'null' }
it { should be_nil }
end
context 'when the argument is not valid JSON' do
let(:arg) { '~~lol not valid json~~' }
it 'raises an error' do
expect { subject }.to raise_error Docker::Error::UnexpectedResponseError
end
end
context 'when the argument is valid JSON' do
let(:arg) { '{"yolo":"swag"}' }
it 'parses the JSON into a Hash' do
expect(subject).to eq 'yolo' => 'swag'
end
end
end
describe '.fix_json' do
let(:response) { '{"this":"is"}{"not":"json"}' }
subject { Docker::Util.fix_json(response) }
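# Docker streams progress as back-to-back JSON objects with no delimiter
# between them; fix_json is expected to split such a response into an
# array of hashes, as asserted below.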
it 'fixes the "JSON" response that Docker returns' do
expect(subject).to eq [
{
'this' => 'is'
},
{
'not' => 'json'
}
]
end
end
describe '.create_dir_tar' do
attr_accessor :tmpdir
def files_in_tar(tar)
Gem::Package::TarReader.new(tar) { |content| return content.map(&:full_name).sort }
end
# Writes a small fixture tree under tmpdir (see the sketch after this method)
# for the .dockerignore examples below.
# @param dockerignore_entries [Array] the lines of the desired .dockerignore file; when nil, no .dockerignore is written
def structure_context_dir(dockerignore_entries = nil)
FileUtils.mkdir_p("#{tmpdir}/a_dir/a_subdir")
[
'#edge',
'a_file',
'a_file2',
'a_dir/a_file',
'a_dir/a_subdir/a_file',
].each { |f| File.write("#{tmpdir}/#{f}", 'x') }
File.write("#{tmpdir}/.dockerignore", dockerignore_entries.join("\n")) unless dockerignore_entries.nil?
end
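# For reference, the helper above writes this tree under tmpdir (plus an
# optional .dockerignore when entries are given):
#
#   #edge
#   a_file
#   a_file2
#   a_dir/a_file
#   a_dir/a_subdir/a_file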
def expect_tar_entries(*entries)
expect(files_in_tar(tar)).to contain_exactly(*entries)
end
let(:tar) { subject.create_dir_tar tmpdir }
around do |example|
Dir.mktmpdir do |tmpdir|
self.tmpdir = tmpdir
example.call
FileUtils.rm tar
end
end
it 'creates a tarball' do
tar = subject.create_dir_tar tmpdir
expect(files_in_tar(tar)).to eq []
end
it 'packs regular files' do
File.write("#{tmpdir}/foo", 'bar')
expect(files_in_tar(tar)).to eq ['foo']
end
it 'packs nested files, but not directory entries' do
FileUtils.mkdir("#{tmpdir}/foo")
File.write("#{tmpdir}/foo/bar", 'bar')
expect(files_in_tar(tar)).to eq ['foo/bar']
end
describe '.dockerignore' do
it 'passes all files when there is no .dockerignore' do
structure_context_dir
expect_tar_entries('#edge', 'a_dir/a_file', 'a_dir/a_subdir/a_file', 'a_file', 'a_file2')
end
it 'passes all files when there is an empty .dockerignore' do
structure_context_dir([''])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_dir/a_subdir/a_file', 'a_file', 'a_file2')
end
it 'does not interpret comments' do
structure_context_dir(['#edge'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_dir/a_subdir/a_file', 'a_file', 'a_file2')
end
it 'ignores files' do
structure_context_dir(['a_file'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_dir/a_subdir/a_file', 'a_file2')
end
it 'ignores files with wildcard' do
structure_context_dir(['a_file'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_dir/a_subdir/a_file', 'a_file2')
end
it 'ignores files with dir wildcard' do
structure_context_dir(['**/a_file'])
expect_tar_entries('#edge', '.dockerignore', 'a_file2')
end
it 'ignores files with dir wildcard but handles exceptions' do
structure_context_dir(['**/a_file', '!a_dir/a_file'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_file2')
end
it 'ignores directories' do
structure_context_dir(['a_dir'])
expect_tar_entries('#edge', '.dockerignore', 'a_file', 'a_file2')
end
it 'ignores directories with dir wildcard' do
structure_context_dir(['*/a_subdir'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_file', 'a_file2')
end
it 'ignores directories with dir double wildcard' do
structure_context_dir(['**/a_subdir'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_file', 'a_file', 'a_file2')
end
it 'ignores directories but handles exceptions' do
structure_context_dir(['a_dir', '!a_dir/a_subdir'])
expect_tar_entries('#edge', '.dockerignore', 'a_dir/a_subdir/a_file', 'a_file', 'a_file2')
end
it 'ignores files' do
File.write("#{tmpdir}/foo", 'bar')
File.write("#{tmpdir}/baz", 'bar')
File.write("#{tmpdir}/.dockerignore", "foo")
expect(files_in_tar(tar)).to eq ['.dockerignore', 'baz']
end
it 'ignores folders' do
FileUtils.mkdir("#{tmpdir}/foo")
File.write("#{tmpdir}/foo/bar", 'bar')
File.write("#{tmpdir}/.dockerignore", "foo")
expect(files_in_tar(tar)).to eq ['.dockerignore']
end
it 'ignores based on wildcards' do
File.write("#{tmpdir}/bar", 'bar')
File.write("#{tmpdir}/baz", 'bar')
File.write("#{tmpdir}/.dockerignore", "*z")
expect(files_in_tar(tar)).to eq ['.dockerignore', 'bar']
end
it 'ignores comments' do
File.write("#{tmpdir}/foo", 'bar')
File.write("#{tmpdir}/baz", 'bar')
File.write("#{tmpdir}/.dockerignore", "# nothing here\nfoo")
expect(files_in_tar(tar)).to eq ['.dockerignore', 'baz']
end
it 'ignores whitespace' do
File.write("#{tmpdir}/foo", 'bar')
File.write("#{tmpdir}/baz", 'bar')
File.write("#{tmpdir}/.dockerignore", "foo \n \n\n")
expect(files_in_tar(tar)).to eq ['.dockerignore', 'baz']
end
it 'ignores multiple patterns' do
File.write("#{tmpdir}/foo", 'bar')
File.write("#{tmpdir}/baz", 'bar')
File.write("#{tmpdir}/zig", 'bar')
File.write("#{tmpdir}/.dockerignore", "fo*\nba*")
expect(files_in_tar(tar)).to eq ['.dockerignore', 'zig']
end
end
end
describe '.build_auth_header' do
subject { described_class }
let(:credentials) {
{
:username => 'test',
:password => 'password',
:email => 'test@example.com',
:serveraddress => 'https://registry.com/'
}
}
let(:credential_string) { MultiJson.dump(credentials) }
let(:encoded_creds) { Base64.urlsafe_encode64(credential_string) }
let(:expected_header) {
{
'X-Registry-Auth' => encoded_creds
}
}
context 'given credentials as a Hash' do
it 'returns an X-Registry-Auth header encoded' do
expect(subject.build_auth_header(credentials)).to eq(expected_header)
end
end
context 'given credentials as a String' do
it 'returns an X-Registry-Auth header encoded' do
expect(
subject.build_auth_header(credential_string)
).to eq(expected_header)
end
end
it 'does not contain newlines' do
h = subject.build_auth_header(credentials).fetch('X-Registry-Auth')
expect(h).not_to include("\n")
end
end
describe '.build_config_header' do
subject { described_class }
let(:credentials) {
{
:username => 'test',
:password => 'password',
:email => 'test@example.com',
:serveraddress => 'https://registry.com/'
}
}
let(:credentials_object) do
MultiJson.dump(
:'https://registry.com/' => {
username: 'test',
password: 'password',
email: 'test@example.com'
}
)
end
let(:encoded_creds) { Base64.urlsafe_encode64(credentials_object) }
let(:expected_header) {
{
'X-Registry-Config' => encoded_creds
}
}
context 'given credentials as a Hash' do
it 'returns an X-Registry-Config header encoded' do
expect(subject.build_config_header(credentials)).to eq(expected_header)
end
end
context 'given credentials as a String' do
it 'returns an X-Registry-Config header encoded' do
expect(
subject.build_config_header(MultiJson.dump(credentials))
).to eq(expected_header)
end
end
it 'does not contain newlines' do
h = subject.build_config_header(credentials).fetch('X-Registry-Config')
expect(h).not_to include("\n")
end
end
describe '.filesystem_permissions' do
it 'returns the permissions on a file' do
file = Tempfile.new('test_file')
file.close
expected_permissions = 0600
File.chmod(expected_permissions, file.path)
actual_permissions = subject.filesystem_permissions(file.path)
file.unlink
expect(actual_permissions).to eql(expected_permissions)
end
end
end
docker-api-2.2.0/spec/docker/volume_spec.rb 0000664 0000000 0000000 00000002401 14071411272 0020574 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 1
# Volume requests are actually slow enough to occasionally not work
# Use sleep statements to manage that
describe Docker::Volume, :docker_1_9 do
let(:name) { "ArbitraryNameForTheRakeTestVolume" }
describe '.create' do
let(:volume) { Docker::Volume.create(name) }
after { volume.remove }
it 'creates a volume' do
expect(volume.id).to eq(name)
end
end
describe '.get' do
let(:volume) { Docker::Volume.get(name) }
before { Docker::Volume.create(name); sleep 1 }
after { volume.remove }
it 'gets volume details' do
expect(volume.id).to eq(name)
expect(volume.info).to_not be_empty
end
end
describe '.all' do
after { Docker::Volume.get(name).remove }
it 'gets a list of volumes' do
expect { Docker::Volume.create(name); sleep 1 }.to change { Docker::Volume.all.length }.by(1)
end
end
describe '.prune', :docker_17_03 => true do
it 'prunes volumes' do
expect { Docker::Volume.prune }.not_to raise_error
end
end
describe '#remove' do
it 'removes a volume' do
volume = Docker::Volume.create(name)
sleep 1
expect { volume.remove }.to change { Docker::Volume.all.length }.by(-1)
end
end
end
docker-api-2.2.0/spec/docker_spec.rb 0000664 0000000 0000000 00000016533 14071411272 0017300 0 ustar 00root root 0000000 0000000 require 'spec_helper'
SingleCov.covered! uncovered: 8
describe Docker do
subject { Docker }
it { should be_a Module }
context 'default url and connection' do
context "when the DOCKER_* ENV variables aren't set" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_HOST').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) { should == {} }
its(:url) { should == 'unix:///var/run/docker.sock' }
its(:connection) { should be_a Docker::Connection }
end
context "when the DOCKER_* ENV variables are set" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL')
.and_return('unixs:///var/run/not-docker.sock')
allow(ENV).to receive(:[]).with('DOCKER_HOST').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) { should == {} }
its(:url) { should == 'unixs:///var/run/not-docker.sock' }
its(:connection) { should be_a Docker::Connection }
end
context "when the DOCKER_HOST is set and uses default tcp://" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_HOST').and_return('tcp://')
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) { should == {} }
its(:url) { should == 'tcp://localhost:2375' }
its(:connection) { should be_a Docker::Connection }
end
context "when the DOCKER_HOST ENV variable is set" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_HOST')
.and_return('tcp://someserver:8103')
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) { should == {} }
its(:url) { should == 'tcp://someserver:8103' }
its(:connection) { should be_a Docker::Connection }
end
context "DOCKER_URL should take precedence over DOCKER_HOST" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL')
.and_return('tcp://someotherserver:8103')
allow(ENV).to receive(:[]).with('DOCKER_HOST')
.and_return('tcp://someserver:8103')
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) { should == {} }
its(:url) { should == 'tcp://someotherserver:8103' }
its(:connection) { should be_a Docker::Connection }
end
context "when the DOCKER_CERT_PATH and DOCKER_HOST ENV variables are set" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_HOST')
.and_return('tcp://someserver:8103')
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH')
.and_return('/boot2dockert/cert/path')
allow(ENV).to receive(:[]).with('DOCKER_SSL_VERIFY').and_return(nil)
Docker.reset!
end
after { Docker.reset! }
its(:options) {
should == {
client_cert: '/boot2dockert/cert/path/cert.pem',
client_key: '/boot2dockert/cert/path/key.pem',
ssl_ca_file: '/boot2dockert/cert/path/ca.pem',
scheme: 'https'
}
}
its(:url) { should == 'tcp://someserver:8103' }
its(:connection) { should be_a Docker::Connection }
end
context "when the DOCKER_CERT_PATH and DOCKER_SSL_VERIFY ENV variables are set" do
before do
allow(ENV).to receive(:[]).with('DOCKER_URL').and_return(nil)
allow(ENV).to receive(:[]).with('DOCKER_HOST')
.and_return('tcp://someserver:8103')
allow(ENV).to receive(:[]).with('DOCKER_CERT_PATH')
.and_return('/boot2dockert/cert/path')
allow(ENV).to receive(:[]).with('DOCKER_SSL_VERIFY')
.and_return('false')
Docker.reset!
end
after { Docker.reset! }
its(:options) {
should == {
client_cert: '/boot2dockert/cert/path/cert.pem',
client_key: '/boot2dockert/cert/path/key.pem',
ssl_ca_file: '/boot2dockert/cert/path/ca.pem',
scheme: 'https',
ssl_verify_peer: false
}
}
its(:url) { should == 'tcp://someserver:8103' }
its(:connection) { should be_a Docker::Connection }
end
end
describe '#reset_connection!' do
before { subject.connection }
it 'sets the @connection to nil' do
expect { subject.reset_connection! }
.to change { subject.instance_variable_get(:@connection) }
.to nil
end
end
[:options=, :url=].each do |method|
describe "##{method}" do
before { Docker.reset! }
it 'calls #reset_connection!' do
expect(subject).to receive(:reset_connection!)
subject.public_send(method, nil)
end
end
end
describe '#version' do
before { Docker.reset! }
let(:expected) {
%w[ApiVersion Arch GitCommit GoVersion KernelVersion Os Version]
}
let(:version) { subject.version }
it 'returns the version as a Hash' do
expect(version).to be_a Hash
expect(version.keys.sort).to include(*expected)
end
end
describe '#info' do
before { Docker.reset! }
let(:info) { subject.info }
let(:keys) do
%w(Containers Debug DockerRootDir Driver DriverStatus ID IPv4Forwarding
Images IndexServerAddress KernelVersion Labels MemTotal MemoryLimit
NCPU NEventsListener NFd NGoroutines Name OperatingSystem SwapLimit)
end
it 'returns the info as a Hash' do
expect(info).to be_a Hash
expect(info.keys.sort).to include(*keys)
end
end
describe '#ping' do
before { Docker.reset! }
let(:ping) { subject.ping }
it 'returns the status as a String' do
expect(ping).to eq('OK')
end
end
describe '#authenticate!' do
subject { described_class }
let(:authentication) {
subject.authenticate!(credentials)
}
after { Docker.creds = nil }
context 'with valid credentials' do
let(:credentials) {
{
:username => ENV['DOCKER_API_USER'],
:password => ENV['DOCKER_API_PASS'],
:email => ENV['DOCKER_API_EMAIL'],
:serveraddress => 'https://index.docker.io/v1/'
}
}
it 'logs in and sets the creds' do
skip_without_auth
expect(authentication).to be true
expect(Docker.creds).to eq(MultiJson.dump(credentials))
end
end
context 'with invalid credentials' do
let(:credentials) {
{
:username => 'test',
:password => 'password',
:email => 'test@example.com',
:serveraddress => 'https://index.docker.io/v1/'
}
}
it "raises an error and doesn't set the creds" do
skip('Not supported on podman') if ::Docker.podman?
expect {
authentication
}.to raise_error(Docker::Error::AuthenticationError)
expect(Docker.creds).to be_nil
end
end
end
end
docker-api-2.2.0/spec/fixtures/ 0000775 0000000 0000000 00000000000 14071411272 0016333 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/spec/fixtures/build_from_dir/ 0000775 0000000 0000000 00000000000 14071411272 0021313 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/spec/fixtures/build_from_dir/Dockerfile 0000664 0000000 0000000 00000000033 14071411272 0023301 0 ustar 00root root 0000000 0000000 FROM debian:stable
ADD . /
docker-api-2.2.0/spec/fixtures/export.tar 0000664 0000000 0000000 00000022000 14071411272 0020356 0 ustar 00root root 0000000 0000000 ./ 0040755 0000000 0000000 00000000000 12433474310 007406 5 ustar 00 0000000 0000000 .dockerenv 0100755 0000000 0000000 00000000000 12433474310 011220 0 ustar 00 0000000 0000000 .dockerinit 0100755 0000000 0000000 00000000000 12433474310 011373 0 ustar 00 0000000 0000000 dev/ 0040755 0000000 0000000 00000000000 12433474310 010027 5 ustar 00 0000000 0000000 dev/console 0100755 0000000 0000000 00000000000 12433474310 011402 0 ustar 00 0000000 0000000 dev/pts/ 0040755 0000000 0000000 00000000000 12433474310 010635 5 ustar 00 0000000 0000000 dev/shm/ 0040755 0000000 0000000 00000000000 12433474310 010616 5 ustar 00 0000000 0000000 etc/ 0040755 0000000 0000000 00000000000 12433474310 010024 5 ustar 00 0000000 0000000 etc/hostname 0100755 0000000 0000000 00000000000 12433474310 011553 0 ustar 00 0000000 0000000 etc/hosts 0100755 0000000 0000000 00000000000 12433474310 011075 0 ustar 00 0000000 0000000 etc/mtab 0120777 0000000 0000000 00000000000 12433474310 013160 2/proc/mounts ustar 00 0000000 0000000 etc/resolv.conf 0100755 0000000 0000000 00000000000 12433474310 012173 0 ustar 00 0000000 0000000 proc/ 0040755 0000000 0000000 00000000000 12433474310 010214 5 ustar 00 0000000 0000000 sys/ 0040755 0000000 0000000 00000000000 12433474310 010067 5 ustar 00 0000000 0000000 true 0100755 0000000 0000000 00000000175 12432702576 010165 0 ustar 00 0000000 0000000 ELF > x @ @ @ 8 @ @ } } °<™ docker-api-2.2.0/spec/fixtures/load.tar 0000664 0000000 0000000 00000036000 14071411272 0017761 0 ustar 00root root 0000000 0000000 206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c/ 0040755 0000000 0000000 00000000000 12662156334 017407 5 ustar 00 0000000 0000000 206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c/VERSION 0100644 0000000 0000000 00000000003 12662156334 020445 0 ustar 00 0000000 0000000 1.0 206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c/json 0100644 0000000 0000000 00000000604 12662156334 020300 0 ustar 00 0000000 0000000 {"id":"206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c","created":"0001-01-01T00:00:00Z","container_config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null}} 206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c/layer.tar 0100644 0000000 0000000 00000004000 12662156334 021222 0 ustar 00 0000000 0000000 true 0100755 0000000 0000000 00000000175 12662156334 010165 0 ustar 00 0000000 0000000 ELF > x @ @ @ 8 @ @ } } °<™ 685d8e0cf7ff18ccafb17112b53aa7c918ee2f055c221be7161ca3c2f2583493.json 0100644 0000000 0000000 00000002553 12662156334 021106 0 ustar 00 0000000 0000000 
{"architecture":"amd64","config":{"Hostname":"d0051fd7a9bf","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/true"],"Image":"7231ab843d58059f5cdcbbbec2ea9fce2886fe6f8fc8e261dbdc4032b812aab4","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"a92988e93839af4d3e3d1582253eed052ec9586a6e3cc73d484890297ee6feef","container_config":{"Hostname":"d0051fd7a9bf","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/true\"]"],"Image":"7231ab843d58059f5cdcbbbec2ea9fce2886fe6f8fc8e261dbdc4032b812aab4","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2016-02-20T21:38:36.606403769Z","docker_version":"1.8.3","history":[{"created":"2016-02-20T21:38:36.541950965Z","created_by":"/bin/sh -c #(nop) ADD file:513005a00bb6ce26c9eb571d6f16e0c12378ba40f8e3100bcb484db53008e3b2 in /true"},{"created":"2016-02-20T21:38:36.606403769Z","created_by":"/bin/sh -c #(nop) CMD [\"/true\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:2445b9cd574070bda2431f50d0e9686fbe1778d873ef2db5dbded9441ac06a14","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}} c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e/ 0040755 0000000 0000000 00000000000 12662156334 017645 5 ustar 00 0000000 0000000 c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e/VERSION 0100644 0000000 0000000 00000000003 12662156334 020703 0 ustar 00 0000000 0000000 1.0 c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e/json 0100644 0000000 0000000 00000002070 12662156334 020535 0 ustar 00 0000000 0000000 {"id":"c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e","parent":"206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c","created":"2016-02-20T21:38:36.606403769Z","container":"a92988e93839af4d3e3d1582253eed052ec9586a6e3cc73d484890297ee6feef","container_config":{"Hostname":"d0051fd7a9bf","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/true\"]"],"Image":"7231ab843d58059f5cdcbbbec2ea9fce2886fe6f8fc8e261dbdc4032b812aab4","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.8.3","config":{"Hostname":"d0051fd7a9bf","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/true"],"Image":"7231ab843d58059f5cdcbbbec2ea9fce2886fe6f8fc8e261dbdc4032b812aab4","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"} c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e/layer.tar 0100644 0000000 0000000 00000002000 12662156334 021456 0 ustar 00 0000000 0000000 manifest.json 0100644 0000000 0000000 00000000434 00000000000 011710 0 ustar 00 0000000 0000000 [{"Config":"685d8e0cf7ff18ccafb17112b53aa7c918ee2f055c221be7161ca3c2f2583493.json","RepoTags":["tianon/true:latest"],"Layers":["206614686278cb8afdc8409957da4370450b01c75ca763b1a1d16f714930521c/layer.tar","c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e/layer.tar"]}]
repositories 0100644 0000000 0000000 00000000136 00000000000 011660 0 ustar 00 0000000 0000000 {"tianon/true":{"latest":"c9c2ed6a3344256b3018c03412fa60af26b089ee84660f493918db085c7f194e"}}
docker-api-2.2.0/spec/fixtures/top/ 0000775 0000000 0000000 00000000000 14071411272 0017135 5 ustar 00root root 0000000 0000000 docker-api-2.2.0/spec/fixtures/top/Dockerfile 0000664 0000000 0000000 00000000226 14071411272 0021127 0 ustar 00root root 0000000 0000000 FROM debian:stable
RUN apt-get update
RUN apt-get install -y procps
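# The generated /while script below loops forever, giving containers built from
# this image a long-running process; presumably it is what the `top` fixture
# specs inspect with ps (procps, installed above, provides ps).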
RUN printf '#! /bin/sh\nwhile true\ndo\ntrue\ndone\n' > /while && chmod +x /while
docker-api-2.2.0/spec/spec_helper.rb 0000664 0000000 0000000 00000001613 14071411272 0017301 0 ustar 00root root 0000000 0000000 require 'bundler/setup'
$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
require 'rspec/its'
require 'single_cov'
# avoid coverage failures caused by older docker versions that do not run all tests
SingleCov.setup :rspec
require 'docker'
ENV['DOCKER_API_USER'] ||= 'debbie_docker'
ENV['DOCKER_API_PASS'] ||= '*************'
ENV['DOCKER_API_EMAIL'] ||= 'debbie_docker@example.com'
RSpec.shared_context "local paths" do
def project_dir
File.expand_path(File.join(File.dirname(__FILE__), '..'))
end
end
module SpecHelpers
def skip_without_auth
skip "Disabled because of missing auth" if ENV['DOCKER_API_USER'] == 'debbie_docker'
end
end
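# Example of how the helper above is used (mirroring the pattern in the specs):
# examples that need real Docker Hub credentials call it first and are skipped
# while the placeholder DOCKER_API_USER default is still in place.
#
#   it 'logs in and sets the creds' do
#     skip_without_auth
#     expect(Docker.authenticate!(credentials)).to be true
#   end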
Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each { |f| require f }
RSpec.configure do |config|
config.mock_with :rspec
config.color = true
config.formatter = :documentation
config.tty = true
config.include SpecHelpers
end