"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "ec2api/api/image.py" between
ec2-api-14.0.1.tar.gz and ec2-api-15.0.0.tar.gz

About: OpenStack EC2 API provides a standalone EC2 (and VPC) API service.
The "Zed" series (latest release).

image.py (ec2-api-14.0.1) vs. image.py (ec2-api-15.0.0)
skipping to change at line 36 skipping to change at line 36
from cryptography.hazmat import backends from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives import serialization
import eventlet import eventlet
from glanceclient.common import exceptions as glance_exception from glanceclient.common import exceptions as glance_exception
from lxml import etree from lxml import etree
from oslo_concurrency import processutils from oslo_concurrency import processutils
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
import six
from ec2api.api import common from ec2api.api import common
from ec2api.api import ec2utils from ec2api.api import ec2utils
from ec2api.api import instance as instance_api from ec2api.api import instance as instance_api
from ec2api import clients from ec2api import clients
from ec2api.db import api as db_api from ec2api.db import api as db_api
from ec2api import exception from ec2api import exception
from ec2api.i18n import _ from ec2api.i18n import _
import urllib.parse as parse
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
s3_opts = [ s3_opts = [
cfg.StrOpt('image_decryption_dir', cfg.StrOpt('image_decryption_dir',
default='/tmp', default='/tmp',
help='Parent directory for tempdir used for image decryption'), help='Parent directory for tempdir used for image decryption'),
cfg.StrOpt('s3_url', cfg.StrOpt('s3_url',
default='http://$my_ip:3334', default='http://$my_ip:3334',
help='URL to S3 server'), help='URL to S3 server'),
skipping to change at line 224 skipping to change at line 224
# TODO(ft): check parameters # TODO(ft): check parameters
metadata = {} metadata = {}
if name: if name:
# TODO(ft): check the name is unique (at least for EBS image case) # TODO(ft): check the name is unique (at least for EBS image case)
metadata['name'] = name metadata['name'] = name
if image_location: if image_location:
# Resolve the import type # Resolve the import type
metadata['image_location'] = image_location metadata['image_location'] = image_location
parsed_url = six.moves.urllib.parse.urlparse(image_location) parsed_url = parse.urlparse(image_location)
is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3') is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
is_url_import = not is_s3_import is_url_import = not is_s3_import
# Check if the name is in the metadata # Check if the name is in the metadata
if 'name' not in metadata: if 'name' not in metadata:
# NOTE(ft): it's needed for backward compatibility # NOTE(ft): it's needed for backward compatibility
metadata['name'] = image_location metadata['name'] = image_location
if root_device_name: if root_device_name:
metadata['root_device_name'] = root_device_name metadata['root_device_name'] = root_device_name
cinder = clients.cinder(context) cinder = clients.cinder(context)
skipping to change at line 828 skipping to change at line 828
'untarring': 'pending', 'untarring': 'pending',
'failed_untar': 'failed', 'failed_untar': 'failed',
'uploading': 'pending', 'uploading': 'pending',
'failed_upload': 'failed', 'failed_upload': 'failed',
'available': 'available'} 'available': 'available'}
def _s3_create(context, metadata): def _s3_create(context, metadata):
"""Gets a manifest from s3 and makes an image.""" """Gets a manifest from s3 and makes an image."""
# Parse the metadata into bucket and manifest path # Parse the metadata into bucket and manifest path
parsed_url = six.moves.urllib.parse.urlparse(metadata['image_location']) parsed_url = parse.urlparse(metadata['image_location'])
if parsed_url.hostname is not None: if parsed_url.hostname is not None:
# Handle s3://<BUCKET_NAME>/<KEY_PATH> case # Handle s3://<BUCKET_NAME>/<KEY_PATH> case
bucket_name = parsed_url.hostname bucket_name = parsed_url.hostname
manifest_path = parsed_url.path[1:] manifest_path = parsed_url.path[1:]
else: else:
# Handle <BUCKET_NAME>/<KEY_PATH> case # Handle <BUCKET_NAME>/<KEY_PATH> case
bucket_name = parsed_url.path.split('/')[0] bucket_name = parsed_url.path.split('/')[0]
manifest_path = '/'.join(parsed_url.path.split('/')[1:]) manifest_path = '/'.join(parsed_url.path.split('/')[1:])
# Continue with S3 import # Continue with S3 import
s3_client = _s3_conn(context) s3_client = _s3_conn(context)
image_location = '/'.join([bucket_name, manifest_path]) image_location = '/'.join([bucket_name, manifest_path])
key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path) key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path)
body = key['Body'] body = key['Body']
if isinstance(body, six.string_types): if isinstance(body, str):
manifest = body manifest = body
else: else:
# TODO(andrey-mp): check big objects # TODO(andrey-mp): check big objects
manifest = body.read() manifest = body.read()
(image_metadata, image_parts, (image_metadata, image_parts,
encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest) encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest)
metadata.update(image_metadata) metadata.update(image_metadata)
metadata.update({'image_state': 'pending', metadata.update({'image_state': 'pending',
'visibility': 'private'}) 'visibility': 'private'})
End of changes: 5 change blocks; 4 lines changed or deleted, 4 lines changed or added.

Home  |  About  |  Features  |  All  |  Newest  |  Dox  |  Diffs  |  RSS Feeds  |  Screenshots  |  Comments  |  Imprint  |  Privacy  |  HTTP(S)