Merged
28 commits
- 5cf2ee5: 2016-10-16 REST Service basic updates. (Apr 27, 2017)
- 1eac246: Added tests for 8TB page blobs (zezha-msft, Jun 12, 2017)
- 5de2547: File Encryption at REST (Jun 22, 2017)
- d6fb054: Merge pull request #315 from zezha-msft/eight-tb-page-blob-test (Jun 23, 2017)
- f014c98: Return etag and lmt for create_from_* (Jun 28, 2017)
- 0a79e55: Merge pull request #318 from jofriedm-msft/dev (zezha-msft, Jun 30, 2017)
- b186508: Fixed syntax error in _convert_json_response_to_entities (zezha-msft, Jun 28, 2017)
- 6adc23b: Fixed table batch commit bug where the operation urls missed the acco… (zezha-msft, Jun 22, 2017)
- e6c3586: Merge pull request #320 from zezha-msft/emulator-table-batch-url-problem (Jul 5, 2017)
- da8ac63: Merge branch 'dev' into syntax-error-fix-convert-table-entities (Jul 5, 2017)
- 7d08b4a: Minor cosmetic fix for changelog (zezha-msft, Jul 5, 2017)
- 6306cf7: Merge pull request #321 from zezha-msft/syntax-error-fix-convert-tabl… (Jul 5, 2017)
- 187c2cd: Fix flaky tests (zezha-msft, Jun 26, 2017)
- a70e56d: Fail gracefully when last_sync_time on service_stats is empty (zezha-msft, Jun 28, 2017)
- 8fb0b2a: Merge pull request #323 from zezha-msft/fix-flaky-tests (Jul 10, 2017)
- 81db183: Merge pull request #324 from zezha-msft/fail-gracefully-when-queue-se… (Jul 10, 2017)
- e037459: Premium Page Blob Tiers (Jul 11, 2017)
- f711775: Merge pull request #1 from jofriedm-msft/dev (zezha-msft, Jul 11, 2017)
- 228d7db: 2017-04-17 REST Service update for v0.35.0 (zezha-msft, Jul 11, 2017)
- b43ab7a: Merge pull request #2 from zezha-msft/april-2017-update (zezha-msft, Jul 11, 2017)
- a2f349a: Merge pull request #3 from wastore/file_encryption_headers (zezha-msft, Jul 11, 2017)
- 252ed04: Merge branch 'dev' into azure_dev_07_11 (zezha-msft, Jul 11, 2017)
- cdd199c: Merge pull request #4 from zezha-msft/azure_dev_07_11 (zezha-msft, Jul 11, 2017)
- 2644dcd: Fixed several failing tests which verify create_from* methods on blob… (zezha-msft, Jul 12, 2017)
- 9c174e0: Merge pull request #5 from zezha-msft/fix-tests-0.35.0 (zezha-msft, Jul 13, 2017)
- 3bf49d1: Bump version to 0.35.0 (zezha-msft, Jul 13, 2017)
- a025f12: Test recordings for 0.35.0 (zezha-msft, Jul 13, 2017)
- 8dc8c8d: Merge pull request #6 from zezha-msft/bump-version-0.35.0 (zezha-msft, Jul 13, 2017)
19 changes: 18 additions & 1 deletion ChangeLog.md
@@ -2,6 +2,23 @@

> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.

## Version 0.35.0:

### All:
- Support for the 2017-04-17 REST version. Please see our REST API documentation and blogs for information about the newly added features. If you are using the Storage Emulator, please update to Emulator version 5.2.
- Fixed a bug where deserialization of service stats would throw a TypeError when the service is unavailable.

### Blob:
- For Premium Accounts only, added support for getting and setting the tier on a page blob. The tier can also be set when creating or copying from an existing page blob.
- create_from_* and append_blob_from_* methods will return response properties containing the etag and last modified time.

### Table:
- Fixed syntax error in _convert_json_response_to_entities.
- Fixed a bug where the URLs were not correctly formed when calling commit_batch against the emulator.

### File:
- The `server_encrypted` file property will now be populated when calling `get_directory_properties`, `get_file`, and `get_file_properties`. This value is set to True if the file data (for files) and application metadata are completely encrypted.

## Version 0.34.3:
- All: Made the socket timeout configurable. Increased the default socket timeout to 20 seconds.
- All: Fixed a bug where SAS tokens were being duplicated on retries.
@@ -179,4 +196,4 @@
- Client-side validation added for ranges used in APIs.
- Metadata returned for shares, directories, and files will be returned without the 'x-ms-meta' prefix on the keys. Namely, metadata will be returned as it is received.
- get_share_properties, get_directory_properties, and get_file_properties return parsed Share, Directory, and File objects, respectively, instead of string header dictionaries.
- copy_file returns a parsed CopyProperties object instead of a string header dictionary.
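
A minimal usage sketch of the Blob and File changelog entries above (premium page blob tiers and the `server_encrypted` file property). The names `PageBlobService`, `PremiumPageBlobTier`, `set_premium_page_blob_tier`, and `FileService` are assumptions based on the changelog wording; they are not shown in this diff.

```python
# Sketch only: assumes the PageBlobService tier methods and the PremiumPageBlobTier
# enum exist as implied by the changelog entries above.
from azure.storage.blob import PageBlobService
from azure.storage.blob.models import PremiumPageBlobTier
from azure.storage.file import FileService

blob_service = PageBlobService(account_name='premiumaccount', account_key='<key>')
blob_service.create_container('vhds')

# Create a 1 GiB page blob directly at tier P10 (premium accounts only).
blob_service.create_blob('vhds', 'disk.vhd', 1024 * 1024 * 1024,
                         premium_page_blob_tier=PremiumPageBlobTier.P10)

# Change the tier later; the properties expose the tier and whether it was inferred.
blob_service.set_premium_page_blob_tier('vhds', 'disk.vhd', PremiumPageBlobTier.P20)
props = blob_service.get_blob_properties('vhds', 'disk.vhd').properties
print(props.blob_tier, props.blob_tier_inferred)

# File: server_encrypted is populated by get_file_properties / get_directory_properties.
file_service = FileService(account_name='myaccount', account_key='<key>')
file_props = file_service.get_file_properties('myshare', None, 'data.bin')
print(file_props.properties.server_encrypted)
```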
1 change: 1 addition & 0 deletions azure/storage/_connection.py
@@ -55,6 +55,7 @@ def __init__(self, service, account_name=None, account_key=None, sas_token=None,
self.account_key = account_key
self.sas_token = sas_token
self.protocol = protocol or DEFAULT_PROTOCOL
self.is_emulated = is_emulated

if is_emulated:
self.account_name = DEV_ACCOUNT_NAME
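
The `is_emulated` flag stored on the connection parameters above is presumably what the Table changelog entry relies on to build well-formed batch URLs against the emulator. A minimal sketch of emulator batch usage, assuming the standard TableService/TableBatch surface of this package:

```python
# Sketch only: assumes TableService, TableBatch and commit_batch behave as in the
# released azure-storage package. The changelog says batch operation URLs were
# previously malformed against the emulator (the truncated commit message suggests
# the account name was missing from them).
from azure.storage.table import TableService, Entity, TableBatch

table_service = TableService(is_emulated=True)   # targets the local Storage Emulator
table_service.create_table('tasks')

batch = TableBatch()
for i in range(3):
    entity = Entity()
    entity.PartitionKey = 'batch'
    entity.RowKey = str(i)
    batch.insert_entity(entity)

table_service.commit_batch('tasks', batch)
```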
4 changes: 2 additions & 2 deletions azure/storage/_constants.py
@@ -15,10 +15,10 @@
import platform

__author__ = 'Microsoft Corp. <[email protected]>'
__version__ = '0.34.3'
__version__ = '0.35.0'

# x-ms-version for storage service.
X_MS_VERSION = '2016-05-31'
X_MS_VERSION = '2017-04-17'

# UserAgent string sample: 'Azure-Storage/0.32.0 (Python CPython 3.4.2; Windows 8)'
USER_AGENT_STRING = 'Azure-Storage/{} (Python {} {}; {} {})'.format(__version__, platform.python_implementation(), platform.python_version(), platform.system(), platform.release())
8 changes: 7 additions & 1 deletion azure/storage/_deserialization.py
@@ -37,6 +37,9 @@ def _int_to_str(value):
def _bool(value):
return value.lower() == 'true'

def _to_upper_str(value):
return _to_str(value).upper() if value is not None else None

def _get_download_size(start_range, end_range, resource_size):
if start_range is not None:
end_range = end_range if end_range else (resource_size if resource_size else None)
@@ -55,6 +58,8 @@ def _get_download_size(start_range, end_range, resource_size):
'content-range': (None, 'content_range', _to_str),
'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _int_to_str),
'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _int_to_str),
'x-ms-access-tier': (None, 'blob_tier', _to_upper_str),
'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
'x-ms-share-quota': (None, 'quota', _int_to_str),
'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
'content-type': ('content_settings', 'content_type', _to_str),
@@ -184,7 +189,8 @@ def _convert_xml_to_service_stats(response):

geo_replication = GeoReplication()
geo_replication.status = geo_replication_element.find('Status').text
geo_replication.last_sync_time = parser.parse(geo_replication_element.find('LastSyncTime').text)
last_sync_time = geo_replication_element.find('LastSyncTime').text
geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None

service_stats = ServiceStats()
service_stats.geo_replication = geo_replication
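
The `last_sync_time` guard above is the "fail gracefully" fix from the changelog: when the secondary is unavailable, the `<LastSyncTime>` element is empty, its `.text` is None, and `parser.parse(None)` raised a TypeError. Callers can now check for None, roughly as follows (assuming `get_blob_service_stats()` exists on the blob service client; service stats require an RA-GRS account):

```python
# Sketch only: get_blob_service_stats() is assumed to return the ServiceStats object
# populated by _convert_xml_to_service_stats above.
from azure.storage.blob import BlockBlobService

service = BlockBlobService(account_name='myaccount', account_key='<key>')
stats = service.get_blob_service_stats()

geo = stats.geo_replication
if geo.last_sync_time is None:
    # Secondary not yet available; only the replication status is meaningful.
    print('Geo-replication status: {}, no sync time yet'.format(geo.status))
else:
    print('Last sync time: {}'.format(geo.last_sync_time))
```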
2 changes: 2 additions & 0 deletions azure/storage/blob/_deserialization.py
@@ -233,6 +233,7 @@ def _convert_xml_to_containers(response):
'CopyProgress': ('copy', 'progress', _to_str),
'CopyCompletionTime': ('copy', 'completion_time', _to_str),
'CopyStatusDescription': ('copy', 'status_description', _to_str),
'AccessTier': (None, 'blob_tier', _to_str)
}

def _convert_xml_to_blob_list(response):
@@ -267,6 +268,7 @@ def _convert_xml_to_blob_list(response):
<CopyProgress>bytes copied/bytes total</CopyProgress>
<CopyCompletionTime>datetime</CopyCompletionTime>
<CopyStatusDescription>error string</CopyStatusDescription>
<AccessTier>P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60</AccessTier>
</Properties>
<Metadata>
<Name>value</Name>
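
With the `AccessTier` mapping added above, page blobs listed from a premium account carry their tier on `blob.properties.blob_tier`. A minimal sketch, assuming the usual list_blobs surface:

```python
# Sketch only: blob_tier is populated from the new 'AccessTier' element and
# 'x-ms-access-tier' header mappings added in this PR (values P4..P60 on premium accounts).
from azure.storage.blob import PageBlobService

service = PageBlobService(account_name='premiumaccount', account_key='<key>')
for blob in service.list_blobs('vhds'):
    print(blob.name, blob.properties.blob_tier)
```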
15 changes: 14 additions & 1 deletion azure/storage/blob/_upload_chunking.py
@@ -15,6 +15,7 @@
import sys
from threading import Lock
from time import sleep

from cryptography.hazmat.primitives.padding import PKCS7
from .._common_conversion import _encode_base64
from .._serialization import (
@@ -37,7 +38,7 @@ def _upload_blob_chunks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
progress_callback, validate_content, lease_id, uploader_class,
maxsize_condition=None, if_match=None, timeout=None,
content_encryption_key=None, initialization_vector=None):
content_encryption_key=None, initialization_vector=None, resource_properties=None):

encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector,
uploader_class is not _PageBlobChunkUploader)
@@ -104,6 +105,10 @@ def _upload_blob_chunks(blob_service, container_name, blob_name,
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]

if resource_properties:
resource_properties.last_modified = uploader.last_modified
resource_properties.etag = uploader.etag

return range_ids

def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
@@ -249,6 +254,10 @@ def _upload_substream_block_with_progress(self, block_id, block_stream):
self._update_progress(len(block_stream))
return range_id

def set_response_properties(self, resp):
self.etag = resp.etag
self.last_modified = resp.last_modified

class _BlockBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
@@ -297,6 +306,8 @@ def _upload_chunk(self, chunk_start, chunk_data):
if not self.parallel:
self.if_match = resp.etag

self.set_response_properties(resp)

class _AppendBlobChunkUploader(_BlobChunkUploader):
def _upload_chunk(self, chunk_offset, chunk_data):
if not hasattr(self, 'current_length'):
@@ -323,6 +334,8 @@ def _upload_chunk(self, chunk_offset, chunk_data):
timeout=self.timeout,
)

self.set_response_properties(resp)

class _SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
86 changes: 52 additions & 34 deletions azure/storage/blob/appendblobservice.py
@@ -33,7 +33,10 @@
_AppendBlobChunkUploader,
_upload_blob_chunks,
)
from .models import _BlobTypes
from .models import (
_BlobTypes,
ResourceProperties
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
@@ -325,6 +328,8 @@ def append_blob_from_path(
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -333,16 +338,16 @@

count = path.getsize(file_path)
with open(file_path, 'rb') as stream:
self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
progress_callback=progress_callback,
lease_id=lease_id,
timeout=timeout)
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
progress_callback=progress_callback,
lease_id=lease_id,
timeout=timeout)

def append_blob_from_bytes(
self, container_name, blob_name, blob, index=0, count=None,
@@ -387,6 +392,8 @@ def append_blob_from_bytes(
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -404,16 +411,16 @@
stream = BytesIO(blob)
stream.seek(index)

self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
return self.append_blob_from_stream(
container_name,
blob_name,
stream,
count=count,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)

def append_blob_from_text(
self, container_name, blob_name, text, encoding='utf-8',
@@ -455,6 +462,8 @@ def append_blob_from_text(
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
@@ -465,17 +474,17 @@
_validate_not_none('encoding', encoding)
text = text.encode(encoding)

self.append_blob_from_bytes(
container_name,
blob_name,
text,
index=0,
count=len(text),
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)
return self.append_blob_from_bytes(
container_name,
blob_name,
text,
index=0,
count=len(text),
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
progress_callback=progress_callback,
timeout=timeout)

def append_blob_from_stream(
self, container_name, blob_name, stream, count=None,
@@ -518,12 +527,18 @@ def append_blob_from_stream(
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: ETag and last modified properties for the Append Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('stream', stream)
_validate_encryption_unsupported(self.require_encryption, self.key_encryption_key)

# _upload_blob_chunks returns the block ids for block blobs, so resource_properties
# is passed in to collect the last_modified and etag for page and append blobs.
# Block blobs do not need this because the subsequent _put_block_list call returns that info.
resource_properties = ResourceProperties()
_upload_blob_chunks(
blob_service=self,
container_name=container_name,
Expand All @@ -537,5 +552,8 @@ def append_blob_from_stream(
lease_id=lease_id,
uploader_class=_AppendBlobChunkUploader,
maxsize_condition=maxsize_condition,
timeout=timeout
)
timeout=timeout,
resource_properties=resource_properties
)

return resource_properties
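
As the docstrings above state, the append_blob_from_* helpers now return a ResourceProperties holding the final etag and last-modified time, propagated from the chunk uploader via the resource_properties parameter. A minimal sketch:

```python
# Sketch only: AppendBlobService and append_blob_from_text are the public helpers
# whose docstrings in this diff document the new ResourceProperties return value.
from azure.storage.blob import AppendBlobService

service = AppendBlobService(account_name='myaccount', account_key='<key>')
service.create_container('logs')
service.create_blob('logs', 'app.log')          # append blob must exist before appending

props = service.append_blob_from_text('logs', 'app.log', 'first line\n')
print(props.etag, props.last_modified)          # usable for optimistic concurrency checks
```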
5 changes: 4 additions & 1 deletion azure/storage/blob/baseblobservice.py
@@ -2981,6 +2981,7 @@ def copy_blob(self, container_name, blob_name, copy_source,
'''
return self._copy_blob(container_name, blob_name, copy_source,
metadata,
None,
source_if_modified_since, source_if_unmodified_since,
source_if_match, source_if_none_match,
destination_if_modified_since,
@@ -2993,6 +2994,7 @@ def copy_blob(self, container_name, blob_name, copy_source,

def _copy_blob(self, container_name, blob_name, copy_source,
metadata=None,
premium_page_blob_tier=None,
source_if_modified_since=None,
source_if_unmodified_since=None,
source_if_match=None, source_if_none_match=None,
@@ -3053,7 +3055,8 @@ def _copy_blob(self, container_name, blob_name, copy_source,
'If-Match': _to_str(destination_if_match),
'If-None-Match': _to_str(destination_if_none_match),
'x-ms-lease-id': _to_str(destination_lease_id),
'x-ms-source-lease-id': _to_str(source_lease_id)
'x-ms-source-lease-id': _to_str(source_lease_id),
'x-ms-access-tier': _to_str(premium_page_blob_tier)
}
_add_metadata_headers(metadata, request)
