repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
bmweiner/skillful | skillful/validate.py | Valid.request | python | def request(self, app_id=None, body=None, stamp=None, url=None, sig=None):
if self.app_id:
if not self.application_id(app_id):
return False
if (url or sig):
if not (body and stamp and url and sig):
raise ValueError('Unable to validate sender, check arguments.')
else:
if not self.sender(body, stamp, url, sig):
return False
return True | Validate application ID and request is from Alexa. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/validate.py#L260-L273 | [
"def application_id(self, app_id):\n \"\"\"Validate request application id matches true application id.\n\n Verifying the Application ID matches: https://goo.gl/qAdqe4.\n\n Args:\n app_id: str. Request application_id.\n\n Returns:\n bool: True if valid, False otherwise.\n \"\"\"\n if... | class Valid(object):
"""Alexa request validator.
Attributes:
app_id: str. Skill application ID.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message. Used to cache valid url.
cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon
signing certificate. Used to cache valid cert.
"""
def __init__(self, app_id=None):
"""Init validator."""
self.app_id = app_id
self.url = None
self.cert = None
def application_id(self, app_id):
"""Validate request application id matches true application id.
Verifying the Application ID matches: https://goo.gl/qAdqe4.
Args:
app_id: str. Request application_id.
Returns:
bool: True if valid, False otherwise.
"""
if self.app_id != app_id:
warnings.warn('Application ID is invalid.')
return False
return True
def sender(self, body, stamp, url, sig):
"""Validate request is from Alexa.
Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5.
Checking the Signature of the Request: https://goo.gl/FDkjBN.
Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ
Args:
body: str. HTTPS request body.
stamp: str. Value of timestamp within request object of HTTPS
request body.
url: str. SignatureCertChainUrl header value sent
by request.
sig: str. Signature header value sent by request.
Returns:
bool: True if valid, False otherwise.
"""
if not timestamp(stamp):
return False
if self.url != url:
if not signature_cert_chain_url(url):
return False
certs = retrieve(url)
if not certs:
return False
if not cert_chain(certs):
return False
self.url = url
self.cert = certs[0]
if not signature(self.cert, sig, body):
return False
return True
|
bmweiner/skillful | skillful/interface.py | _snake_to_camel | python | def _snake_to_camel(name, strict=False):
if strict:
name = name.lower()
terms = name.split('_')
return terms[0] + ''.join([term.capitalize() for term in terms[1:]]) | Converts parameter names from snake_case to camelCase.
Args:
name, str. Snake case.
strict: bool, default True. If True, will set name to lowercase before
converting, otherwise assumes original name is proper camel case.
Set to False if name may already be in camelCase.
Returns:
str: CamelCase. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L565-L580 | null | """Implements request and response body objects."""
from __future__ import absolute_import, division, print_function
import json
import six
class DefaultAttrMixin(object):
"""Sets default attributes"""
def _set_default_attr(self, default_attr):
"""Sets default attributes when None.
Args:
default_attr: dict. Key-val of attr, default-value.
"""
for attr, val in six.iteritems(default_attr):
if getattr(self, attr, None) is None:
setattr(self, attr, val)
class Body(DefaultAttrMixin):
"""Base HTTP body class"""
def __repr__(self):
return ('<' + self.__class__.__name__ + ' ' +
self.to_json(False, False) + '>')
def __len__(self):
return len(self.__dict__)
def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
"""Serialize self as JSON
Args:
drop_null: bool, default True. Remove 'empty' attributes. See
to_dict.
camel: bool, default True. Convert keys to camelCase.
indent: int, default None. See json built-in.
sort_keys: bool, default False. See json built-in.
Return:
str: object params.
"""
return json.dumps(self.to_dict(drop_null, camel), indent=indent,
sort_keys=sort_keys)
def to_dict(self, drop_null=True, camel=False):
"""Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
camel: bool, default True. Convert keys to camelCase.
Return:
dict: object params.
"""
#return _to_dict(self, drop_null, camel)
def to_dict(obj, drop_null, camel):
"""Recursively constructs the dict."""
if isinstance(obj, (Body, BodyChild)):
obj = obj.__dict__
if isinstance(obj, dict):
data = {}
for attr, val in six.iteritems(obj):
if camel:
attr = _snake_to_camel(attr)
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data[attr] = to_dict(val, drop_null, camel)
return data
elif isinstance(obj, list):
data = []
for val in obj:
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data.append(to_dict(val, drop_null, camel))
return data
else:
return obj
return to_dict(self, drop_null, camel)
class BodyChild(DefaultAttrMixin):
"""Base HTTP Body child class"""
def __repr__(self):
return '<' + self.__class__.__name__ + ' ' + str(self.__dict__) + '>'
def __len__(self):
return len(self.__dict__)
# implement request
class RequestBody(Body):
"""Implements the HTTP body for a custom request.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Attributes:
version: str. Version specifier for the request.
session: obj. Context associated with the session see Session class.
request: obj. Context association with the user, see Request class.
"""
def __init__(self, version=None, session=None, request=None):
"""Inits a RequestBody class with placeholder params."""
default_attr = dict(version=str(),
session=Session(),
request=Request())
self.version = version
self.session = session
self.request = request
self._set_default_attr(default_attr)
def parse(self, body):
"""Parse JSON request, storing content in object attributes.
Args:
body: str. HTTP request body.
Returns:
self
"""
if isinstance(body, six.string_types):
body = json.loads(body)
# version
version = body['version']
self.version = version
# session
session = body['session']
self.session.new = session['new']
self.session.session_id = session['sessionId']
application_id = session['application']['applicationId']
self.session.application.application_id = application_id
if 'attributes' in session and session['attributes']:
self.session.attributes = session.get('attributes', {})
else:
self.session.attributes = {}
self.session.user.user_id = session['user']['userId']
self.session.user.access_token = session['user'].get('accessToken', 0)
# request
request = body['request']
# launch request
if request['type'] == 'LaunchRequest':
self.request = LaunchRequest()
# intent request
elif request['type'] == 'IntentRequest':
self.request = IntentRequest()
self.request.intent = Intent()
intent = request['intent']
self.request.intent.name = intent['name']
if 'slots' in intent and intent['slots']:
for name, slot in six.iteritems(intent['slots']):
self.request.intent.slots[name] = Slot()
self.request.intent.slots[name].name = slot['name']
self.request.intent.slots[name].value = slot.get('value')
# session ended request
elif request['type'] == 'SessionEndedRequest':
self.request = SessionEndedRequest()
self.request.reason = request['reason']
# common - keep after specific requests to prevent param overwrite
self.request.type = request['type']
self.request.request_id = request['requestId']
self.request.timestamp = request['timestamp']
return self
class Session(BodyChild):
"""Request context associated with the session.
Attributes
new: bool. Indicates if session is new.
session_id: str. Unique identifier for user's active session, consistent
while the session is active.
attributes: dict. Key-value pairs for attribute name-value. Empty for
new requests.
application: obj. Context associated with the application. See
Application class.
user: obj. Context associated with the user. See User class.
"""
def __init__(self, new=None, session_id=None, attributes=None,
application=None, user=None):
"""Inits a Session class with placeholder params."""
default_attr = dict(new=bool(),
session_id=str(),
attributes=dict(),
application=Application(),
user=User())
self.new = new
self.session_id = session_id
self.attributes = attributes
self.application = application
self.user = user
self._set_default_attr(default_attr)
class Application(BodyChild):
"""Request context association with the application.
Attributes:
application_id: str. Intended application id for request. Used when
Skill class initialized with application_id for validation.
"""
def __init__(self, application_id=None):
"""Inits an Application class with placeholder params."""
default_attr = dict(application_id=None)
self.application_id = application_id
self._set_default_attr(default_attr)
class User(BodyChild):
"""Request context associated with the user.
Arguments:
user_id: str. Unique identifier for the user. Length can vary, but is
always less than 255 characters. A user_id will change if a user
disables then re-enables the skill.
access_token: str. User token for a linked account, if configured.
"""
def __init__(self, user_id=None, access_token=None):
"""Inits a User class with placeholder params."""
default_attr = dict(user_id=str(),
access_token=str())
self.user_id = user_id
self.access_token = access_token
self._set_default_attr(default_attr)
class Request(BodyChild):
"""Request context associated with all requests.
Attributes:
type: str. Type of request, possible values are: ['LaunchRequest',
'IntentRequest', 'SessionEndedRequest'].
request_id: str. Unique identifier for request.
timestamp: str. ISO 8601 formatted time and date when request sent.
"""
def __init__(self, type_=None, request_id=None, timestamp=None):
"""Inits a Request class with placeholder params."""
default_attr = dict(type=str(),
request_id=str(),
timestamp=str())
self.type = type_
self.request_id = request_id
self.timestamp = timestamp
self._set_default_attr(default_attr)
class LaunchRequest(Request):
"""Request context associated with a launch request.
See Request base class for additional info.
"""
def __init__(self):
"""Inits a LaunchRequest class with placeholder params."""
super(LaunchRequest, self).__init__()
class IntentRequest(Request):
"""Request context associated with an intent request.
See Request base class for additional info.
Arguments:
intent: obj. Context associated with the intent.
"""
def __init__(self, intent=None):
"""Inits an IntentRequest class with placeholder params."""
super(IntentRequest, self).__init__()
default_attr = dict(intent=Intent())
self.intent = intent
self._set_default_attr(default_attr)
class Intent(BodyChild):
"""Request context associated with an intent.
Arguments:
name: str. Name of the intent.
slots: dict. Key-value pairs containing slot values per predefined
schema.
"""
def __init__(self, name=None, slots=None):
"""Inits an Intent class with placeholder params."""
default_attr = dict(name=str(),
slots=dict())
self.name = name
self.slots = slots
self._set_default_attr(default_attr)
class Slot(BodyChild):
"""Request context associated with an intent slot.
Arguments:
name: str. Name of slot.
value: str. Value of the slot.
"""
def __init__(self, name=None, value=None):
"""Inits a Slot class with placeholder params."""
default_attr = dict(name=str(),
value=str())
self.name = name
self.value = value
self._set_default_attr(default_attr)
class SessionEndedRequest(Request):
"""Request context associated with a session ended request.
See Request base class for additional info.
Attributes:
reason: str. Reason session ended, possible values are:
['USER_INITIATED', 'ERROR', 'EXCEEDED_MAX_REPROMPTS']
"""
def __init__(self, reason=None):
"""Inits a SessionEndedRequest class with placeholder params."""
super(SessionEndedRequest, self).__init__()
default_attr = dict(reason=str())
self.reason = reason
self._set_default_attr(default_attr)
# implement response
class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
class Response(BodyChild):
"""Response context associated with all responses.
Attributes:
outputSpeech: obj. Context associated with the output speech.
card: obj. Context associated with the card.
reprompt: obj. Context associated with a reprompt.
shouldEndSession: bool. If True, end the session.
"""
def __init__(self, outputSpeech=None, card=None, reprompt=None,
shouldEndSession=None):
"""Inits a Response class with placeholder params."""
default_attr = dict(outputSpeech=OutputSpeech(),
card=Card(),
reprompt=Reprompt(),
shouldEndSession=False)
self.outputSpeech = outputSpeech
self.card = card
self.reprompt = reprompt
self.shouldEndSession = shouldEndSession
self._set_default_attr(default_attr)
class OutputSpeech(BodyChild):
"""Response context associated with output speech.
Attributes:
type: str. Type of speech, possible values are ['PlainText', 'SSML'].
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
def __init__(self, type_=None, text=None, ssml=None):
"""Inits a OutputSpeech class with placeholder params."""
default_attr = dict(type=str(),
text=str(),
ssml=str())
self.type = type_
self.text = text
self.ssml = ssml
self._set_default_attr(default_attr)
class Card(BodyChild):
"""Response context associated with card to render.
title, content, text, and image cannot exceed 8,000 characters.
Attributes:
type: str. Type of card, possible values are ['Simple', 'Standard',
'LinkAccount']
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
text: str. Content of Standard type card.
image: str. Context associated with a card image.
"""
def __init__(self, type_=None, title=None, content=None, text=None,
image=None):
"""Inits a Card class with placeholder params."""
default_attr = dict(type=str(),
title=str(),
content=str(),
text=str(),
image=Image())
self.type = type_
self.title = title
self.content = content
self.text = text
self.image = image
self._set_default_attr(default_attr)
class Image(BodyChild):
"""Response context associated with a card image.
Attributes:
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
def __init__(self, smallImageUrl=None, largeImageUrl=None):
"""Inits an Image class with placeholder params."""
default_attr = dict(smallImageUrl=str(),
largeImageUrl=str())
self.smallImageUrl = smallImageUrl
self.largeImageUrl = largeImageUrl
self._set_default_attr(default_attr)
class Reprompt(BodyChild):
"""Response context associated with a reprompt.
Attributes:
outputSpeech: obj. Context associated with the output speech.
"""
def __init__(self, outputSpeech=None):
"""Inits a Reprompt class with placeholder params."""
default_attr = dict(outputSpeech=OutputSpeech())
self.outputSpeech = outputSpeech
self._set_default_attr(default_attr)
def error_response(msg='Unknown'):
"""Returns an internal server error message.
Args:
msg: str, default is Unknown. Error message.
Returns:
str: JSON formatted error message.
"""
return """{{"InternalServerError":"{}"}}""".format(msg)
|
bmweiner/skillful | skillful/interface.py | DefaultAttrMixin._set_default_attr | python | def _set_default_attr(self, default_attr):
for attr, val in six.iteritems(default_attr):
if getattr(self, attr, None) is None:
setattr(self, attr, val) | Sets default attributes when None.
Args:
default_attr: dict. Key-val of attr, default-value. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L10-L18 | null | class DefaultAttrMixin(object):
"""Sets default attributes"""
|
bmweiner/skillful | skillful/interface.py | Body.to_json | python | def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
return json.dumps(self.to_dict(drop_null, camel), indent=indent,
sort_keys=sort_keys) | Serialize self as JSON
Args:
drop_null: bool, default True. Remove 'empty' attributes. See
to_dict.
camel: bool, default True. Convert keys to camelCase.
indent: int, default None. See json built-in.
sort_keys: bool, default False. See json built-in.
Return:
str: object params. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L29-L43 | [
"def to_dict(self, drop_null=True, camel=False):\n \"\"\"Serialize self as dict.\n\n Args:\n drop_null: bool, default True. Remove 'empty' attributes.\n camel: bool, default True. Convert keys to camelCase.\n\n Return:\n dict: object params.\n \"\"\"\n #return _to_dict(self, drop... | class Body(DefaultAttrMixin):
"""Base HTTP body class"""
def __repr__(self):
return ('<' + self.__class__.__name__ + ' ' +
self.to_json(False, False) + '>')
def __len__(self):
return len(self.__dict__)
def to_dict(self, drop_null=True, camel=False):
"""Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
camel: bool, default True. Convert keys to camelCase.
Return:
dict: object params.
"""
#return _to_dict(self, drop_null, camel)
def to_dict(obj, drop_null, camel):
"""Recursively constructs the dict."""
if isinstance(obj, (Body, BodyChild)):
obj = obj.__dict__
if isinstance(obj, dict):
data = {}
for attr, val in six.iteritems(obj):
if camel:
attr = _snake_to_camel(attr)
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data[attr] = to_dict(val, drop_null, camel)
return data
elif isinstance(obj, list):
data = []
for val in obj:
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data.append(to_dict(val, drop_null, camel))
return data
else:
return obj
return to_dict(self, drop_null, camel)
|
bmweiner/skillful | skillful/interface.py | Body.to_dict | python | def to_dict(self, drop_null=True, camel=False):
#return _to_dict(self, drop_null, camel)
def to_dict(obj, drop_null, camel):
"""Recursively constructs the dict."""
if isinstance(obj, (Body, BodyChild)):
obj = obj.__dict__
if isinstance(obj, dict):
data = {}
for attr, val in six.iteritems(obj):
if camel:
attr = _snake_to_camel(attr)
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data[attr] = to_dict(val, drop_null, camel)
return data
elif isinstance(obj, list):
data = []
for val in obj:
valid_null = (isinstance(val, bool) or val == 0 or
(val and to_dict(val, drop_null, camel)))
if not drop_null or (drop_null and valid_null):
data.append(to_dict(val, drop_null, camel))
return data
else:
return obj
return to_dict(self, drop_null, camel) | Serialize self as dict.
Args:
drop_null: bool, default True. Remove 'empty' attributes.
camel: bool, default True. Convert keys to camelCase.
Return:
dict: object params. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L45-L80 | [
"def to_dict(obj, drop_null, camel):\n \"\"\"Recursively constructs the dict.\"\"\"\n if isinstance(obj, (Body, BodyChild)):\n obj = obj.__dict__\n if isinstance(obj, dict):\n data = {}\n for attr, val in six.iteritems(obj):\n if camel:\n attr = _snake_to_came... | class Body(DefaultAttrMixin):
"""Base HTTP body class"""
def __repr__(self):
return ('<' + self.__class__.__name__ + ' ' +
self.to_json(False, False) + '>')
def __len__(self):
return len(self.__dict__)
def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
"""Serialize self as JSON
Args:
drop_null: bool, default True. Remove 'empty' attributes. See
to_dict.
camel: bool, default True. Convert keys to camelCase.
indent: int, default None. See json built-in.
sort_keys: bool, default False. See json built-in.
Return:
str: object params.
"""
return json.dumps(self.to_dict(drop_null, camel), indent=indent,
sort_keys=sort_keys)
|
bmweiner/skillful | skillful/interface.py | RequestBody.parse | python | def parse(self, body):
if isinstance(body, six.string_types):
body = json.loads(body)
# version
version = body['version']
self.version = version
# session
session = body['session']
self.session.new = session['new']
self.session.session_id = session['sessionId']
application_id = session['application']['applicationId']
self.session.application.application_id = application_id
if 'attributes' in session and session['attributes']:
self.session.attributes = session.get('attributes', {})
else:
self.session.attributes = {}
self.session.user.user_id = session['user']['userId']
self.session.user.access_token = session['user'].get('accessToken', 0)
# request
request = body['request']
# launch request
if request['type'] == 'LaunchRequest':
self.request = LaunchRequest()
# intent request
elif request['type'] == 'IntentRequest':
self.request = IntentRequest()
self.request.intent = Intent()
intent = request['intent']
self.request.intent.name = intent['name']
if 'slots' in intent and intent['slots']:
for name, slot in six.iteritems(intent['slots']):
self.request.intent.slots[name] = Slot()
self.request.intent.slots[name].name = slot['name']
self.request.intent.slots[name].value = slot.get('value')
# session ended request
elif request['type'] == 'SessionEndedRequest':
self.request = SessionEndedRequest()
self.request.reason = request['reason']
# common - keep after specific requests to prevent param overwrite
self.request.type = request['type']
self.request.request_id = request['requestId']
self.request.timestamp = request['timestamp']
return self | Parse JSON request, storing content in object attributes.
Args:
body: str. HTTP request body.
Returns:
self | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L112-L170 | null | class RequestBody(Body):
"""Implements the HTTP body for a custom request.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Attributes:
version: str. Version specifier for the request.
session: obj. Context associated with the session see Session class.
request: obj. Context association with the user, see Request class.
"""
def __init__(self, version=None, session=None, request=None):
"""Inits a RequestBody class with placeholder params."""
default_attr = dict(version=str(),
session=Session(),
request=Request())
self.version = version
self.session = session
self.request = request
self._set_default_attr(default_attr)
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_speech_text | python | def set_speech_text(self, text):
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text | Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L365-L373 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_speech_ssml | python | def set_speech_ssml(self, ssml):
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml | Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L375-L384 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_card_simple | python | def set_card_simple(self, title, content):
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content | Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L386-L397 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_card_standard | python | def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl | Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L399-L419 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_reprompt_text | python | def set_reprompt_text(self, text):
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text | Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L425-L433 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_ssml(self, ssml):
"""Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/interface.py | ResponseBody.set_reprompt_ssml | python | def set_reprompt_ssml(self, ssml):
self.response.reprompt.outputSpeech.type = 'SSML'
self.response.reprompt.outputSpeech.ssml = ssml | Set response reprompt output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L435-L444 | null | class ResponseBody(Body):
"""Implements the HTTP body for a custom response.
Compliant with version 1.0 of the JSON Interface Reference for Custom
Skills: https://goo.gl/JpVGm4.
Total response size cannot exceed 24 kilobytes.
Attributes:
version: str, default '1.0'. Version specifier for the response.
sessionAttributes: dict. Key-value pairs for attribute name-value.
response: obj. Context association with the response, see Response
class.
"""
def __init__(self, version=None, sessionAttributes=None, response=None):
"""Inits a ResponseBody class with placeholder params."""
default_attr = dict(version='1.0',
sessionAttributes=dict(),
response=Response())
self.version = version
self.sessionAttributes = sessionAttributes
self.response = response
self._set_default_attr(default_attr)
def set_session_attribute(self, key, value):
"""Store session attribute.
Args:
key: str. Attribute name.
value: object. Attribute value.
"""
self.sessionAttributes[key] = value
def get_session_attribute(self, key):
"""Get session attribute.
Args:
key: str. Attribute name.
Returns:
value: object.
"""
return self.sessionAttributes.get(key, None)
def set_speech_text(self, text):
"""Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
"""
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
def set_speech_ssml(self, ssml):
"""Set response output speech as SSML type.
Args:
ssml: str. Response speech used when type is 'SSML', should be formatted
with Speech Synthesis Markup Language. Cannot exceed 8,000
characters.
"""
self.response.outputSpeech.type = 'SSML'
self.response.outputSpeech.ssml = ssml
def set_card_simple(self, title, content):
"""Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card.
"""
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content
def set_card_standard(self, title, text, smallImageUrl=None,
largeImageUrl=None):
"""Set response card as standard type.
title, text, and image cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
text: str. Content of Standard type card.
smallImageUrl: str. URL of small image. Cannot exceed 2,000
characters. Recommended pixel size: 720w x 480h.
largeImageUrl: str. URL of large image. Cannot exceed 2,000
characters. Recommended pixel size: 1200w x 800h.
"""
self.response.card.type = 'Standard'
self.response.card.title = title
self.response.card.text = text
if smallImageUrl:
self.response.card.image.smallImageUrl = smallImageUrl
if largeImageUrl:
self.response.card.image.largeImageUrl = largeImageUrl
def set_card_link(self):
"""Set response card as link account type."""
self.response.card.type = 'LinkAccount'
def set_reprompt_text(self, text):
"""Set response reprompt output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot
exceed 8,000 characters.
"""
self.response.reprompt.outputSpeech.type = 'PlainText'
self.response.reprompt.outputSpeech.text = text
def set_end_session(self, end):
"""Set response should end session
Args:
shouldEndSession: bool. If True, end the session.
"""
self.response.shouldEndSession = end
|
bmweiner/skillful | skillful/controller.py | Skill.register | python | def register(self, name):
def decorator(func):
"""Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
"""
self.logic[name] = func
@wraps(func)
def wrapper():
"""Wrapper, not used directly."""
raise RuntimeError('working outside of request context')
return wrapper
return decorator | Decorator for registering a named function in the sesion logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/controller.py#L49-L81 | null | class Skill(object):
"""Class for parsing, validation, logic registering, and dispatch.
References:
- JSON Interface Reference for Custom Skills: https://goo.gl/JpVGm4.
- Providing Home Cards for the Amazon Alexa App: https://goo.gl/mX9P5o.
- Speech Synthesis Markup Language (SSML) Reference: https://goo.gl/2BHQjz.
Attributes:
valid: skillful.validate.Valid. Request validator.
request: skillful.Request. Proxy for HTTP request body.
response: skillful.Response. HTTP response body.
logic: dict. Containes function logic for processing requests,
key-value corresponds to name-func.
launch: obj. Decorator for registering the launch request function.
See register() for additional info.
intent: obj. Decorator for registering a named intent request
function. See register() for additional info.
session_ended: obj. Decorator for registering the session ended
request function. See register() for additional info.
"""
def __init__(self, app_id=None):
"""Inits a Skill class with proxy request and response.
Args:
app_id: str, default None. Skill application ID, declare
to validate against application ID in the request.
"""
self.valid = Valid(app_id)
self.request = RequestBody()
self.response = ResponseBody()
self.logic = dict()
self.launch = self.register('LaunchRequest')
self.intent = self.register
self.session_ended = self.register('SessionEndedRequest')
def pass_session_attributes(self):
"""Copies request attributes to response"""
for key, value in six.iteritems(self.request.session.attributes):
self.response.sessionAttributes[key] = value
def terminate(self):
"""Convenience function to call response.set_end_session True."""
self.response.set_end_session(True)
def dispatch(self):
"""Calls the matching logic function by request type or intent name."""
if self.request.request.type == 'IntentRequest':
name = self.request.request.intent.name
else:
name = self.request.request.type
if name in self.logic:
self.logic[name]()
else:
error = 'Unable to find a registered logic function named: {}'
raise KeyError(error.format(name))
def process(self, body, url=None, sig=None):
"""Process request body given skill logic.
To validate a request, both, url and sig are required.
Attributes received through body will be automatically added to the
response.
Args:
body: str. HTTP request body.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message.
sig: str. Signature header value sent by request. Base64-encoded
signature of the request body.
Return:
str or bool: HTTP response body or False if the request is invalid.
"""
self.request = RequestBody()
self.response = ResponseBody()
self.request.parse(body)
app_id = self.request.session.application.application_id
stamp = self.request.request.timestamp
if not self.valid.request(app_id, body, stamp, url, sig):
return False
self.pass_session_attributes()
self.dispatch()
if self.request.request.type == 'SessionEndedRequest':
self.terminate()
return self.response.to_json()
|
bmweiner/skillful | skillful/controller.py | Skill.pass_session_attributes | python | def pass_session_attributes(self):
for key, value in six.iteritems(self.request.session.attributes):
self.response.sessionAttributes[key] = value | Copies request attributes to response | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/controller.py#L83-L86 | null | class Skill(object):
"""Class for parsing, validation, logic registering, and dispatch.
References:
- JSON Interface Reference for Custom Skills: https://goo.gl/JpVGm4.
- Providing Home Cards for the Amazon Alexa App: https://goo.gl/mX9P5o.
- Speech Synthesis Markup Language (SSML) Reference: https://goo.gl/2BHQjz.
Attributes:
valid: skillful.validate.Valid. Request validator.
request: skillful.Request. Proxy for HTTP request body.
response: skillful.Response. HTTP response body.
logic: dict. Containes function logic for processing requests,
key-value corresponds to name-func.
launch: obj. Decorator for registering the launch request function.
See register() for additional info.
intent: obj. Decorator for registering a named intent request
function. See register() for additional info.
session_ended: obj. Decorator for registering the session ended
request function. See register() for additional info.
"""
def __init__(self, app_id=None):
"""Inits a Skill class with proxy request and response.
Args:
app_id: str, default None. Skill application ID, declare
to validate against application ID in the request.
"""
self.valid = Valid(app_id)
self.request = RequestBody()
self.response = ResponseBody()
self.logic = dict()
self.launch = self.register('LaunchRequest')
self.intent = self.register
self.session_ended = self.register('SessionEndedRequest')
def register(self, name):
"""Decorator for registering a named function in the sesion logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions.
"""
def decorator(func):
"""Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
"""
self.logic[name] = func
@wraps(func)
def wrapper():
"""Wrapper, not used directly."""
raise RuntimeError('working outside of request context')
return wrapper
return decorator
def terminate(self):
"""Convenience function to call response.set_end_session True."""
self.response.set_end_session(True)
def dispatch(self):
"""Calls the matching logic function by request type or intent name."""
if self.request.request.type == 'IntentRequest':
name = self.request.request.intent.name
else:
name = self.request.request.type
if name in self.logic:
self.logic[name]()
else:
error = 'Unable to find a registered logic function named: {}'
raise KeyError(error.format(name))
def process(self, body, url=None, sig=None):
"""Process request body given skill logic.
To validate a request, both, url and sig are required.
Attributes received through body will be automatically added to the
response.
Args:
body: str. HTTP request body.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message.
sig: str. Signature header value sent by request. Base64-encoded
signature of the request body.
Return:
str or bool: HTTP response body or False if the request is invalid.
"""
self.request = RequestBody()
self.response = ResponseBody()
self.request.parse(body)
app_id = self.request.session.application.application_id
stamp = self.request.request.timestamp
if not self.valid.request(app_id, body, stamp, url, sig):
return False
self.pass_session_attributes()
self.dispatch()
if self.request.request.type == 'SessionEndedRequest':
self.terminate()
return self.response.to_json()
|
bmweiner/skillful | skillful/controller.py | Skill.dispatch | python | def dispatch(self):
if self.request.request.type == 'IntentRequest':
name = self.request.request.intent.name
else:
name = self.request.request.type
if name in self.logic:
self.logic[name]()
else:
error = 'Unable to find a registered logic function named: {}'
raise KeyError(error.format(name)) | Calls the matching logic function by request type or intent name. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/controller.py#L92-L104 | null | class Skill(object):
"""Class for parsing, validation, logic registering, and dispatch.
References:
- JSON Interface Reference for Custom Skills: https://goo.gl/JpVGm4.
- Providing Home Cards for the Amazon Alexa App: https://goo.gl/mX9P5o.
- Speech Synthesis Markup Language (SSML) Reference: https://goo.gl/2BHQjz.
Attributes:
valid: skillful.validate.Valid. Request validator.
request: skillful.Request. Proxy for HTTP request body.
response: skillful.Response. HTTP response body.
logic: dict. Containes function logic for processing requests,
key-value corresponds to name-func.
launch: obj. Decorator for registering the launch request function.
See register() for additional info.
intent: obj. Decorator for registering a named intent request
function. See register() for additional info.
session_ended: obj. Decorator for registering the session ended
request function. See register() for additional info.
"""
def __init__(self, app_id=None):
"""Inits a Skill class with proxy request and response.
Args:
app_id: str, default None. Skill application ID, declare
to validate against application ID in the request.
"""
self.valid = Valid(app_id)
self.request = RequestBody()
self.response = ResponseBody()
self.logic = dict()
self.launch = self.register('LaunchRequest')
self.intent = self.register
self.session_ended = self.register('SessionEndedRequest')
def register(self, name):
"""Decorator for registering a named function in the sesion logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions.
"""
def decorator(func):
"""Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
"""
self.logic[name] = func
@wraps(func)
def wrapper():
"""Wrapper, not used directly."""
raise RuntimeError('working outside of request context')
return wrapper
return decorator
def pass_session_attributes(self):
"""Copies request attributes to response"""
for key, value in six.iteritems(self.request.session.attributes):
self.response.sessionAttributes[key] = value
def terminate(self):
"""Convenience function to call response.set_end_session True."""
self.response.set_end_session(True)
def process(self, body, url=None, sig=None):
"""Process request body given skill logic.
To validate a request, both, url and sig are required.
Attributes received through body will be automatically added to the
response.
Args:
body: str. HTTP request body.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message.
sig: str. Signature header value sent by request. Base64-encoded
signature of the request body.
Return:
str or bool: HTTP response body or False if the request is invalid.
"""
self.request = RequestBody()
self.response = ResponseBody()
self.request.parse(body)
app_id = self.request.session.application.application_id
stamp = self.request.request.timestamp
if not self.valid.request(app_id, body, stamp, url, sig):
return False
self.pass_session_attributes()
self.dispatch()
if self.request.request.type == 'SessionEndedRequest':
self.terminate()
return self.response.to_json()
|
bmweiner/skillful | skillful/controller.py | Skill.process | python | def process(self, body, url=None, sig=None):
self.request = RequestBody()
self.response = ResponseBody()
self.request.parse(body)
app_id = self.request.session.application.application_id
stamp = self.request.request.timestamp
if not self.valid.request(app_id, body, stamp, url, sig):
return False
self.pass_session_attributes()
self.dispatch()
if self.request.request.type == 'SessionEndedRequest':
self.terminate()
return self.response.to_json() | Process request body given skill logic.
To validate a request, both, url and sig are required.
Attributes received through body will be automatically added to the
response.
Args:
body: str. HTTP request body.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message.
sig: str. Signature header value sent by request. Base64-encoded
signature of the request body.
Return:
str or bool: HTTP response body or False if the request is invalid. | train | https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/controller.py#L106-L142 | [
"def parse(self, body):\n \"\"\"Parse JSON request, storing content in object attributes.\n\n Args:\n body: str. HTTP request body.\n\n Returns:\n self\n \"\"\"\n if isinstance(body, six.string_types):\n body = json.loads(body)\n\n # version\n version = body['version']\n ... | class Skill(object):
"""Class for parsing, validation, logic registering, and dispatch.
References:
- JSON Interface Reference for Custom Skills: https://goo.gl/JpVGm4.
- Providing Home Cards for the Amazon Alexa App: https://goo.gl/mX9P5o.
- Speech Synthesis Markup Language (SSML) Reference: https://goo.gl/2BHQjz.
Attributes:
valid: skillful.validate.Valid. Request validator.
request: skillful.Request. Proxy for HTTP request body.
response: skillful.Response. HTTP response body.
logic: dict. Containes function logic for processing requests,
key-value corresponds to name-func.
launch: obj. Decorator for registering the launch request function.
See register() for additional info.
intent: obj. Decorator for registering a named intent request
function. See register() for additional info.
session_ended: obj. Decorator for registering the session ended
request function. See register() for additional info.
"""
def __init__(self, app_id=None):
"""Inits a Skill class with proxy request and response.
Args:
app_id: str, default None. Skill application ID, declare
to validate against application ID in the request.
"""
# validator checks app id (and request signature) before dispatch
self.valid = Valid(app_id)
self.request = RequestBody()
self.response = ResponseBody()
# name -> handler function mapping, filled by the register() decorator
self.logic = dict()
# convenience decorator aliases: @skill.launch, @skill.intent(name),
# @skill.session_ended -- all delegate to register()
self.launch = self.register('LaunchRequest')
self.intent = self.register
self.session_ended = self.register('SessionEndedRequest')
def register(self, name):
"""Decorator for registering a named function in the session logic.
Args:
name: str. Function name.
func: obj. Parameterless function to register.
The following named functions must be registered:
'LaunchRequest' - logic for launch request.
'SessionEndedRequest': logic for session ended request.
In addition, all intents must be registered by their names specified
in the intent schema.
The aliased decorators: @launch, @intent(name), and @session_ended exist
as a convenience for registering specific functions.
"""
def decorator(func):
"""Inner decorator, not used directly.
Args:
func: obj. Parameterless function to register.
Returns:
func: decorated function.
"""
# store the real handler under `name`; dispatch() looks it up here
self.logic[name] = func
@wraps(func)
def wrapper():
"""Wrapper, not used directly."""
# the decorated name is never meant to be called directly --
# handlers only run through dispatch() during request processing
raise RuntimeError('working outside of request context')
return wrapper
return decorator
def pass_session_attributes(self):
    """Copy every session attribute from the request into the response."""
    attributes = self.request.session.attributes
    for name in attributes:
        self.response.sessionAttributes[name] = attributes[name]
def terminate(self):
    """Flag the response so that Alexa ends the session."""
    self.response.set_end_session(True)
def dispatch(self):
    """Route the request to the handler registered for its type/intent.

    Raises:
        KeyError: If no logic function was registered under the name.
    """
    request = self.request.request
    if request.type == 'IntentRequest':
        name = request.intent.name
    else:
        name = request.type

    handler = self.logic.get(name)
    if handler is None:
        raise KeyError(
            'Unable to find a registered logic function named: {}'.format(name)
        )
    handler()
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/eperiodical_semantic_info.py | EPeriodicalSemanticInfo.from_xml | python | def from_xml(xml):
hasAcquisitionFields = False
acquisitionFields = []
isClosed = False
isSummaryRecord = False
contentOfFMT = ""
parsedSummaryRecordSysNumber = ""
summaryRecordSysNumber = ""
parsed = xml
if not isinstance(xml, MARCXMLRecord):
parsed = MARCXMLRecord(str(xml))
# handle FMT record
if "FMT" in parsed.controlfields:
contentOfFMT = parsed["FMT"]
if contentOfFMT == "SE":
isSummaryRecord = True
if "HLD" in parsed.datafields or "HLD" in parsed.controlfields:
hasAcquisitionFields = True
if "STZ" in parsed.datafields:
acquisitionFields.extend(parsed["STZa"])
acquisitionFields.extend(parsed["STZb"])
def sign_and_author(sign):
"""
Sign is stored in ISTa, author's name is in ISTb.
Sign is MarcSubrecord obj with pointers to other subrecords, so it
is possible to pick references to author's name from signs.
"""
return [sign.replace(" ", "")] + sign.other_subfields.get("b", [])
# look for catalogization fields
for orig_sign in parsed["ISTa"]:
sign = orig_sign.replace(" ", "") # remove spaces
if sign.startswith("sk"):
hasAcquisitionFields = True
acquisitionFields.extend(sign_and_author(orig_sign))
# look whether the record was 'closed' by catalogizators
for status in parsed["BASa"]:
if status == "90":
isClosed = True
# if multiple PJM statuses are present, join them together
status = "\n".join([x for x in parsed["PJMa"]])
# detect link to 'new' record, if the old one was 'closed'
if status.strip():
summaryRecordSysNumber = status
parsedSummaryRecordSysNumber = _parse_summaryRecordSysNumber(
summaryRecordSysNumber
)
return EPeriodicalSemanticInfo(
hasAcquisitionFields=hasAcquisitionFields,
acquisitionFields=acquisitionFields,
isClosed=isClosed,
isSummaryRecord=isSummaryRecord,
contentOfFMT=contentOfFMT,
parsedSummaryRecordSysNumber=parsedSummaryRecordSysNumber,
summaryRecordSysNumber=summaryRecordSysNumber,
) | Pick informations from :class:`.MARCXMLRecord` object and use it to
build :class:`.SemanticInfo` structure.
Args:
xml (str/MARCXMLRecord): MarcXML which will be converted to
SemanticInfo. In case of str, ``<record>`` tag is required.
Returns:
structure: :class:`.SemanticInfo`. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/eperiodical_semantic_info.py#L53-L131 | [
"def _parse_summaryRecordSysNumber(summaryRecordSysNumber):\n \"\"\"\n Try to parse vague, not likely machine-readable description and return\n first token, which contains enough numbers in it.\n \"\"\"\n def number_of_digits(token):\n digits = filter(lambda x: x.isdigit(), token)\n ret... | class EPeriodicalSemanticInfo(namedtuple("EPeriodicalSemanticInfo", [
"hasAcquisitionFields",
"acquisitionFields",
"isClosed",
"isSummaryRecord",
"contentOfFMT",
"parsedSummaryRecordSysNumber",
"summaryRecordSysNumber"])):
"""
This structure is used to represent informations about export progress in
Aleph.
It contains informations about state of the record, so it can be tracked
from edeposit project.
Attributes:
hasAcquisitionFields (bool): Was the record aproved by acquisition?
acquisitionFields (list): Acquisition fields if it the record was
signed.
isClosed (bool): Was the record closed? This sometimes happen when bad
ISBN is given by creator of the record, but different is in the
book.
isSummaryRecord (bool): Is the content of FMT == "SE"?
contentOfFMT (str, default ""): Content of FMT subrecord.
parsedSummaryRecordSysNumber (str): Same as
:attr:`summaryRecordSysNumber` but without natural language
details.
summaryRecordSysNumber (str): Identificator of the new record if
`.isClosed` is True. Format of the string is not specified and can
be different for each record.
"""
@staticmethod
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | getListOfBases | python | def getListOfBases():
downer = Downloader()
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
lambda x: "href" in x.params and "local_base" in x.params["href"],
dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
base_links = map(
lambda x: x.params["href"].replace("?", "&", 1).split("&"),
base_links
)
# filter only sections containing bases
bases = map(
lambda link: filter(lambda base: "local_base=" in base, link)[0],
base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
return list(set(bases)) | This function is here mainly for purposes of unittest
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L235-L268 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can then download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
# Relative X-Services URL templates; placeholders use the string.Template
# "$NAME" convention and are filled by Template(...).substitute() below.
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
# Hard cap on how many records a single downloadRecords() call fetches.
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
    """
    Base class for all errors raised by this module.

    Exception tree::

        - AlephException
         |- InvalidAlephBaseException
         |- InvalidAlephFieldException
         |- LibraryNotFoundException
         `- DocumentNotFoundException
    """
    # The original subclasses each defined an __init__ that only forwarded
    # the message to the parent -- pure boilerplate; Exception already
    # stores and renders positional args, so the overrides were removed.


class InvalidAlephBaseException(AlephException):
    """Raised when the requested (logical) base is unknown to Aleph."""


class InvalidAlephFieldException(AlephException):
    """Raised when a search field is not in VALID_ALEPH_FIELDS."""


class LibraryNotFoundException(AlephException):
    """Raised when Aleph rejects the library passed to an X-Service call."""


class DocumentNotFoundException(AlephException):
    """Raised when a document with the requested ID cannot be found."""
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
    """
    Lightweight pointer to a single document stored in Aleph.

    Attributes:
        id (int): ID of the document.
        library (str): Library code; may differ per document, depending on
            the installation.
        base (str): Logical base name (usually "nkc", but depends on the
            bases defined on the Aleph server).
    """
    pass
# list(set()) is same as unique()
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
"""
Convert part of non-nested XML to :py:class:`dict`.
Args:
dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).
Returns:
dict: with python data
"""
result = {}
for i in dom.childs:
if not i.isOpeningTag():
continue
keyword = i.getTagName().strip()
value = _tryConvertToInt(i.getContent().strip())
# if there are multiple tags with same keyword, add values into
# array, instead of rewriting existing value at given keyword
if keyword in result: # if it is already there ..
if isinstance(result[keyword], list): # and it is list ..
result[keyword].append(value) # add it to list
else: # or make it array
result[keyword] = [result[keyword], value]
else: # if it is not in result, add it
result[keyword] = value
return result
def searchInAleph(base, phrase, considerSimilar, field):
    """
    Ask Aleph's X-Services "find" operation to build a result set.

    The returned dictionary is mostly useful as input for
    :func:`getDocumentIDs` / :func:`downloadRecords`, which fetch the
    actual records belonging to the set.

    Args:
        base (str): Database (logical base) to search in.
        phrase (str): Search phrase.
        considerSimilar (bool): Fuzzy-search flag; note the URL template
            has no $SIMILAR placeholder, so it does not affect the query.
        field (str): Field to search in (see :attr:`VALID_ALEPH_FIELDS`).

    Returns:
        dict: ``no_entries``, ``no_records``, ``set_number``,
        ``session-id`` and ``base`` keys (plus ``error`` for a soft
        failure such as an empty result set).

    Raises:
        AlephException: If Aleph returns no usable answer or reports an
            error other than an empty set.
        InvalidAlephFieldException: If `field` is not valid.
    """
    downer = Downloader()

    if field.lower() not in VALID_ALEPH_FIELDS:
        raise InvalidAlephFieldException("Unknown field '" + field + "'!")

    query = Template(SEARCH_URL_TEMPLATE).substitute(
        PHRASE=quote_plus(phrase),
        BASE=base,
        FIELD=field,
        SIMILAR="Y" if considerSimilar else "N",  # ignored by the template
    )
    response = downer.download(ALEPH_URL + query)

    find_tags = dhtmlparser.parseString(response).find("find")
    if not find_tags:
        raise AlephException("Aleph didn't returned any information.")

    summary = _alephResultToDict(find_tags[0])
    summary["base"] = base

    if "error" not in summary:
        return summary

    # an empty result set is a legitimate answer; anything else is fatal
    if summary["error"] == "empty set":
        summary["no_entries"] = 0
        return summary

    raise AlephException(summary["error"])
def downloadRecords(search_result, from_doc=1):
    """
    Fetch up to :attr:`MAX_RECORDS` records belonging to `search_result`.

    Args:
        search_result (dict): Dictionary returned by :func:`searchInAleph`.
        from_doc (int, default 1): Number of the first document to fetch.

    Returns:
        list: MARC OAI XML strings, one per downloaded record.
    """
    downer = Downloader()

    if "set_number" not in search_result:
        return []

    # Aleph expects the set number left-padded with zeros to six chars.
    set_number = str(search_result["set_number"]).rjust(6, "0")

    total = search_result["no_records"]
    records = []
    for offset in range(total):
        doc_number = from_doc + offset
        if offset >= MAX_RECORDS or doc_number > total:
            break

        url = ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
            SET_NUM=set_number,
            RECORD_NUM=doc_number,
        )
        records.append(downer.download(url))

    return records
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
    """
    Resolve a search result into :class:`DocumentID` named tuples.

    Args:
        aleph_search_result (dict): Returned from :func:`searchInAleph`.
        number_of_docs (int, optional): How many IDs to fetch from the set;
            -1 (default) means all of them.

    Returns:
        list: :class:`DocumentID` tuples usable with
        :func:`downloadMARCXML`.

    Raises:
        AlephException: If Aleph returns unknown format of data.
    """
    downer = Downloader()

    if "set_number" not in aleph_search_result:
        return []

    # Aleph expects the set number left-padded with zeros to six chars.
    set_number = str(aleph_search_result["set_number"]).rjust(6, "0")

    if number_of_docs <= 0:
        number_of_docs = aleph_search_result["no_entries"]

    set_data = downer.download(
        ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
            SET_NUMBER=set_number,
            NUMBER_OF_DOCS=number_of_docs,
        )
    )

    set_tags = dhtmlparser.parseString(set_data).find("ill-get-set")
    if not set_tags:
        raise AlephException("Aleph didn't returned set data.")

    ids = []
    for library in set_tags:
        documents = _alephResultToDict(library)

        if "error" in documents:
            raise AlephException("getDocumentIDs: " + documents["error"])

        # "doc-number" is a scalar for a single hit, a list for many
        doc_numbers = documents["doc-number"]
        if not isinstance(doc_numbers, list):
            doc_numbers = [doc_numbers]

        for number in set(doc_numbers):
            ids.append(
                DocumentID(
                    number,
                    documents["set-library"],
                    aleph_search_result["base"],
                )
            )

    return ids
def downloadMARCXML(doc_id, library, base="nkc"):
    """
    Download the MARC XML document `doc_id` from `library`.

    Args:
        doc_id (DocumentID): Obtained from :func:`getDocumentIDs`.
        library (str): Library code, e.g. "NKC01"; also carried inside the
            :class:`DocumentID` tuple.

    Returns:
        str: MARC XML unicode string.

    Raises:
        LibraryNotFoundException: When Aleph rejects the library.
        DocumentNotFoundException: When the document does not exist.
    """
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            LIBRARY=library,
        )
    )
    dom = dhtmlparser.parseString(data)

    # a bad library shows up as <login><error>...</error></login>
    login_tags = dom.find("login")
    if login_tags:
        error_msg = login_tags[0].find("error")
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' " +
                "(probably bad library: '" + library + "')!\nMessage: " +
                "\n".join(e.getContent() for e in error_msg)
            )

    # a missing document is reported inside <ill-get-doc>
    doc_tags = dom.find("ill-get-doc")
    if doc_tags:
        error_msg = doc_tags[0].find("error")
        if error_msg:
            raise DocumentNotFoundException(
                "\n".join(e.getContent() for e in error_msg)
            )

    return data  # MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
    """
    Download the MARC OAI document `doc_id` from the (logical) `base`.

    Some documents are available in full text only through this call.

    Args:
        doc_id (str): Obtained from :func:`getDocumentIDs`.
        base (str): Logical base to download from.

    Returns:
        str: MARC XML unicode string.

    Raises:
        InvalidAlephBaseException: When the base probably doesn't exist.
        DocumentNotFoundException: When the document can't be read.
    """
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            BASE=base,
        )
    )

    errors = dhtmlparser.parseString(data).find("error")
    if not errors:
        return data

    message = errors[0].getContent()
    if "Error reading document" in message:
        raise DocumentNotFoundException(str(message))

    raise InvalidAlephBaseException(
        message + "\n" +
        "The base you are trying to access probably doesn't exist."
    )
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records matching `isbn` in `base`.

    Args:
        isbn (str): ISBN of the books you want to get.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, isbn, False, "sbn"))


def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records matching `issn` in `base`.

    Args:
        issn (str): ISSN of the books you want to get.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, issn, False, "ssn"))


def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records for books by `author` in `base`.

    Args:
        author (str): Name of the author of the books you want to get.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, author, False, "wau"))


def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records for books by `publisher` in `base`.

    Args:
        publisher (str): Name of the publisher of the books you want.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, publisher, False, "wpb"))


def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records for books with `title` in `base`.

    Args:
        title (str): Title of the books you want to get.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, title, False, "wtl"))


def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
    """Return full OAI XML records for `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Strings with the full **OAI** XML record representation.
    """
    return downloadRecords(searchInAleph(base, icz, False, "icz"))
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
    """Return :class:`DocumentID` tuples of documents with given `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, isbn, False, "sbn")
    return getDocumentIDs(search)


def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
    """Return :class:`DocumentID` tuples of documents by `author`.

    Args:
        author (str): Author's name/lastname in UTF-8.
        base (str, optional): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, author, False, "wau")
    return getDocumentIDs(search)


def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
    """Return :class:`DocumentID` tuples of documents by `publisher`.

    Args:
        publisher (str): Publisher name used to search Aleph.
        base (str, optional): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, publisher, False, "wpb")
    return getDocumentIDs(search)


def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
    """Return :class:`DocumentID` tuples of documents with given `title`.

    Args:
        title (str): Title (name) of the book used to search Aleph.
        base (str, optional): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, title, False, "wtl")
    return getDocumentIDs(search)


def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
    """Return :class:`DocumentID` tuples for `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Base to search in; default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, icz, False, "icz")
    return getDocumentIDs(search)
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Base on which the search is performed.
            Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    return searchInAleph(base, isbn, False, "sbn")["no_entries"]


def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `author`.

    Args:
        author (str): Author's name/lastname in UTF-8.
        base (str, optional): Base on which the search is performed.
            Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    return searchInAleph(base, author, False, "wau")["no_entries"]


def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `publisher`.

    Args:
        publisher (str): Publisher name used to search Aleph.
        base (str, optional): Base on which the search is performed.
            Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    return searchInAleph(base, publisher, False, "wpb")["no_entries"]


def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `title`.

    Args:
        title (str): Title (name) of the book used to search Aleph.
        base (str, optional): Base on which the search is performed.
            Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    return searchInAleph(base, title, False, "wtl")["no_entries"]


def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `icz`.

    (The original docstring said "title" here -- a copy-paste slip; this
    function searches by identification number.)

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Base on which the search is performed.
            Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | _alephResultToDict | python | def _alephResultToDict(dom):
result = {}
for i in dom.childs:
if not i.isOpeningTag():
continue
keyword = i.getTagName().strip()
value = _tryConvertToInt(i.getContent().strip())
# if there are multiple tags with same keyword, add values into
# array, instead of rewriting existing value at given keyword
if keyword in result: # if it is already there ..
if isinstance(result[keyword], list): # and it is list ..
result[keyword].append(value) # add it to list
else: # or make it array
result[keyword] = [result[keyword], value]
else: # if it is not in result, add it
result[keyword] = value
return result | Convert part of non-nested XML to :py:class:`dict`.
Args:
dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).
Returns:
dict: with python data | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L285-L313 | [
"def _tryConvertToInt(s):\n \"\"\"\n Try convert value from `s` to int.\n\n Returns:\n int(s): If the value was successfully converted, or `s` when conversion\n failed.\n \"\"\"\n try:\n return int(s)\n except ValueError:\n return s\n"
] | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can then download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
# URL templates for Aleph X-Services endpoints; $PLACEHOLDER fields are
# filled in via string.Template.substitute() by the functions below.
# NOTE(review): there is no $SIMILAR placeholder in SEARCH_URL_TEMPLATE,
# yet searchInAleph() passes SIMILAR= to .substitute() (extra keywords are
# ignored), so the `considerSimilar` flag has no effect -- confirm intended.
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
                   "&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30  # hard cap on documents fetched by downloadRecords()
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
    """
    Base exception for all errors raised by this module.

    Exception tree::

        - AlephException
          |- InvalidAlephBaseException
          |- InvalidAlephFieldException
          |- LibraryNotFoundException
          `- DocumentNotFoundException
    """
    # The subclasses previously overrode __init__ only to forward the
    # message unchanged to the parent -- dead code; the default
    # Exception.__init__ already does exactly that.


class InvalidAlephBaseException(AlephException):
    """Raised when the requested (logical) base doesn't exist in Aleph."""


class InvalidAlephFieldException(AlephException):
    """Raised when the search field is not in :attr:`VALID_ALEPH_FIELDS`."""


class LibraryNotFoundException(AlephException):
    """Raised when a document can't be fetched from the given library."""


class DocumentNotFoundException(AlephException):
    """Raised when the requested document doesn't exist in Aleph."""
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
    """
    Lightweight "pointer" to one document stored in Aleph.

    Attributes:
        id (int): ID of the document.
        library (str): Library code; may differ per document, depending on
            the Aleph installation.
        base (str): Logical base, typically "``nkc``", but this really
            depends on the bases defined on your Aleph server.
    """
def getListOfBases():
    """
    Collect the list of valid Aleph bases (mainly for unittest purposes).

    Returns:
        list of str: Valid bases, exactly as they appear as URL parameters
            in the links on the Aleph main page.
    """
    downer = Downloader()
    html = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
    dom = dhtmlparser.parseString(html.lower())

    # <a> tags whose href points to a concrete base (..&local_base=XXX)
    hrefs = [
        anchor.params["href"]
        for anchor in dom.find("a")
        if "href" in anchor.params and "local_base" in anchor.params["href"]
    ]

    bases = []
    for href in hrefs:
        # normalize the first '?' to '&' so the query splits uniformly
        sections = href.replace("?", "&", 1).split("&")
        base_section = [s for s in sections if "local_base=" in s][0]
        bases.append(base_section.split("=")[1].strip())

    return list(set(bases))  # deduplicate
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def searchInAleph(base, phrase, considerSimilar, field):
    """
    Create a search request in the Aleph search engine.

    The returned dictionary is not very useful on its own, but it serves as
    input for :func:`getDocumentIDs`, which fetches the actual records.

    Args:
        base (str): which database you want to use
        phrase (str): what do you want to search
        considerSimilar (bool): fuzzy search, which is not working at all,
                        so don't use it
        field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)

    Returns:
        dict: with the following keys:

            | error (optional): present if there was some form of error
            | no_entries (int): number of entries that can be fetched
            | no_records (int): always >= `no_entries`
            | set_number (int): important - something like ID of the request
            | session-id (str): used to count users for licensing purposes
            | base (str): the `base` argument, echoed back

    Raises:
        AlephException: if Aleph doesn't return any information
        InvalidAlephFieldException: if specified field is not valid
    """
    if field.lower() not in VALID_ALEPH_FIELDS:
        raise InvalidAlephFieldException("Unknown field '" + field + "'!")

    query = Template(SEARCH_URL_TEMPLATE).substitute(
        PHRASE=quote_plus(phrase),  # urlencode the phrase
        BASE=base,
        FIELD=field,
        SIMILAR="Y" if considerSimilar else "N",
    )

    downer = Downloader()
    response = downer.download(ALEPH_URL + query)

    # the answer is wrapped in a <find> element
    find_tags = dhtmlparser.parseString(response).find("find")
    if not find_tags:
        raise AlephException("Aleph didn't returned any information.")

    result = _alephResultToDict(find_tags[0])
    result["base"] = base

    if "error" not in result:
        return result

    # an empty result set is reported by Aleph as an "error"
    if result["error"] == "empty set":
        result["no_entries"] = 0
        return result

    raise AlephException(result["error"])
def downloadRecords(search_result, from_doc=1):
    """
    Download up to `MAX_RECORDS` documents from `search_result`, starting
    at document number `from_doc`.

    Args:
        search_result (dict): returned from :func:`searchInAleph`.
        from_doc (int, default 1): number of the first downloaded document.

    Returns:
        list: XML strings with the documents in MARC OAI.
    """
    downer = Downloader()

    if "set_number" not in search_result:
        return []

    # Aleph expects the set number zero-padded to six digits
    set_number = str(search_result["set_number"]).zfill(6)

    records = []
    total = search_result["no_records"]
    for offset in range(total):
        if offset >= MAX_RECORDS:
            break

        record_number = from_doc + offset
        if record_number > total:
            break

        records.append(
            downer.download(
                ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
                    SET_NUM=set_number,
                    RECORD_NUM=record_number,
                )
            )
        )

    return records
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
    """
    Resolve a search result into :class:`DocumentID` named tuples, which can
    be used as parameters for other functions (e.g. :func:`downloadMARCXML`).

    Args:
        aleph_search_result (dict): returned from :func:`searchInAleph`
        number_of_docs (int, optional): how many :class:`DocumentID` from
            the set given by `aleph_search_result` should be returned.
            Default -1 for all of them.

    Returns:
        list: :class:`DocumentID` named tuples for `aleph_search_result`.

    Raises:
        AlephException: If Aleph returns unknown format of data.
    """
    downer = Downloader()

    if "set_number" not in aleph_search_result:
        return []

    # Aleph expects the set number zero-padded to six digits
    set_number = str(aleph_search_result["set_number"]).zfill(6)

    # any non-positive count means "everything in the set"
    if number_of_docs <= 0:
        number_of_docs = aleph_search_result["no_entries"]

    set_data = downer.download(
        ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
            SET_NUMBER=set_number,
            NUMBER_OF_DOCS=number_of_docs,
        )
    )

    set_tags = dhtmlparser.parseString(set_data).find("ill-get-set")
    if not set_tags:
        raise AlephException("Aleph didn't returned set data.")

    ids = []
    for library_tag in set_tags:
        documents = _alephResultToDict(library_tag)

        if "error" in documents:
            raise AlephException("getDocumentIDs: " + documents["error"])

        # <doc-number> may appear once (scalar) or many times (list);
        # normalize to an iterable, deduplicating the list case
        doc_numbers = documents["doc-number"]
        if isinstance(doc_numbers, list):
            doc_numbers = set(doc_numbers)
        else:
            doc_numbers = [doc_numbers]

        for doc_number in doc_numbers:
            ids.append(
                DocumentID(
                    doc_number,
                    documents["set-library"],
                    aleph_search_result["base"],
                )
            )

    return ids
def downloadMARCXML(doc_id, library, base="nkc"):
    """
    Download MARC XML document with given `doc_id` from given `library`.

    Args:
        doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
        library (str): "``NKC01``" in our case, but don't worry,
                       :func:`getDocumentIDs` adds library specification
                       into :class:`DocumentID` named tuple.
        base (str, optional): not used by this function; kept for
                       backward compatibility of the signature.

    Returns:
        str: MARC XML unicode string.

    Raises:
        LibraryNotFoundException
        DocumentNotFoundException
    """
    data = Downloader().download(
        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            LIBRARY=library,
        )
    )
    dom = dhtmlparser.parseString(data)

    def error_contents(container_name):
        # contents of <error> tags nested in the first <container_name>
        containers = dom.find(container_name)
        if not containers:
            return []
        return [err.getContent() for err in containers[0].find("error")]

    # bad library is reported inside a <login> element
    login_errors = error_contents("login")
    if login_errors:
        raise LibraryNotFoundException(
            "Can't download document doc_id: '" + str(doc_id) + "' " +
            "(probably bad library: '" + library + "')!\nMessage: " +
            "\n".join(login_errors)
        )

    # missing document is reported inside <ill-get-doc>
    doc_errors = error_contents("ill-get-doc")
    if doc_errors:
        raise DocumentNotFoundException("\n".join(doc_errors))

    return data  # MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
    """
    Download MARC OAI document with given `doc_id` from given (logical)
    `base`. Some documents can be obtained in their full text only through
    this function.

    Args:
        doc_id (str): You will get this from :func:`getDocumentIDs`.
        base (str): Base from which you want to download the Aleph document.
            This seems to be duplicite with :func:`searchInAleph`
            parameters, but it's just something Aleph's X-Services wants.

    Returns:
        str: MARC XML Unicode string.

    Raises:
        InvalidAlephBaseException
        DocumentNotFoundException
    """
    data = Downloader().download(
        ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            BASE=base,
        )
    )

    errors = dhtmlparser.parseString(data).find("error")
    if not errors:
        return data

    message = errors[0].getContent()
    if "Error reading document" in message:
        raise DocumentNotFoundException(str(message))

    raise InvalidAlephBaseException(
        message + "\n" +
        "The base you are trying to access probably doesn't exist."
    )
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for books with the given `isbn`.

    Args:
        isbn (str): ISBN of the books you want to get.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return downloadRecords(search_result)


def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for periodicals with the given `issn`.

    Args:
        issn (str): ISSN of the records you want to get.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, issn, False, "ssn")
    return downloadRecords(search_result)


def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for books written by `author`.

    Args:
        author (str): Name of the author of the books you want to get.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return downloadRecords(search_result)


def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for books published by `publisher`.

    Args:
        publisher (str): Name of the publisher of the books you want to get.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return downloadRecords(search_result)


def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for books with the given `title`.

    Args:
        title (str): Title of the books you want to get.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return downloadRecords(search_result)


def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full OAI XML records for the given `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str): Base on which the search is performed. Default
                    :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: Full **OAI** XML strings, one per matching record.
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return downloadRecords(search_result)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `isbn` to :class:`DocumentID` objects.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return getDocumentIDs(search_result)


def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `author` to :class:`DocumentID` objects.

    Args:
        author (str): Author's name/lastname in UTF-8.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, author, False, "wau")
    return getDocumentIDs(search_result)


def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `publisher` to :class:`DocumentID` objects.

    Args:
        publisher (str): Name of the publisher used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return getDocumentIDs(search_result)


def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
    """
    Resolve book `title` to :class:`DocumentID` objects.

    Args:
        title (str): Title (name) of the book used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return getDocumentIDs(search_result)


def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `icz` (identification number) to :class:`DocumentID` objects.

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return getDocumentIDs(search_result)
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching the given `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return search_result["no_entries"]


def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching the given `author`.

    Args:
        author (str): Author's name/lastname in UTF-8.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return search_result["no_entries"]


def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching the given `publisher`.

    Args:
        publisher (str): Name of the publisher used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return search_result["no_entries"]


def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching the given `title`.

    Args:
        title (str): Title (name) of the book used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return search_result["no_entries"]


def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching the given `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Base on which the search is performed.
              Default :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return search_result["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | searchInAleph | python | def searchInAleph(base, phrase, considerSimilar, field):
downer = Downloader()
if field.lower() not in VALID_ALEPH_FIELDS:
raise InvalidAlephFieldException("Unknown field '" + field + "'!")
param_url = Template(SEARCH_URL_TEMPLATE).substitute(
PHRASE=quote_plus(phrase), # urlencode phrase
BASE=base,
FIELD=field,
SIMILAR="Y" if considerSimilar else "N"
)
result = downer.download(ALEPH_URL + param_url)
dom = dhtmlparser.parseString(result)
find = dom.find("find") # find <find> element :)
if len(find) <= 0:
raise AlephException("Aleph didn't returned any information.")
find = find[0]
# convert aleph result into dictionary
result = _alephResultToDict(find)
# add informations about base into result
result["base"] = base
if "error" not in result:
return result
# handle errors
if result["error"] == "empty set":
result["no_entries"] = 0 # empty set have 0 entries
return result
else:
raise AlephException(result["error"]) | Send request to the aleph search engine.
Request itself is pretty useless, but it can be later used as parameter
for :func:`getDocumentIDs`, which can fetch records from Aleph.
Args:
base (str): which database you want to use
phrase (str): what do you want to search
considerSimilar (bool): fuzzy search, which is not working at all, so
don't use it
field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)
Returns:
dictionary: consisting from following fields:
| error (optional): present if there was some form of error
| no_entries (int): number of entries that can be fetch from aleph
| no_records (int): no idea what is this, but it is always >= than
`no_entries`
| set_number (int): important - something like ID of your request
| session-id (str): used to count users for licensing purposes
Example:
Returned dict::
{
'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB',
'set_number': 36520,
'no_records': 1,
'no_entries': 1
}
Raises:
AlephException: if Aleph doesn't return any information
InvalidAlephFieldException: if specified field is not valid | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L316-L388 | [
"def _alephResultToDict(dom):\n \"\"\"\n Convert part of non-nested XML to :py:class:`dict`.\n\n Args:\n dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).\n\n Returns:\n dict: with python data\n \"\"\"\n result = {}\n for i in dom.childs:\n if not i.isOpeningTag():\... | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can theh download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
"""
Exception tree::
- AlephException
|- InvalidAlephBaseException
|- InvalidAlephFieldException
|- LibraryNotFoundException
`- DocumentNotFoundException
"""
def __init__(self, message):
Exception.__init__(self, message)
class InvalidAlephBaseException(AlephException):
def __init__(self, message):
super(InvalidAlephBaseException, self).__init__(message)
class InvalidAlephFieldException(AlephException):
def __init__(self, message):
super(InvalidAlephFieldException, self).__init__(message)
class LibraryNotFoundException(AlephException):
def __init__(self, message):
super(LibraryNotFoundException, self).__init__(message)
class DocumentNotFoundException(AlephException):
def __init__(self, message):
super(DocumentNotFoundException, self).__init__(message)
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
"""
This structure is used to store `"pointer"` to document in aleph.
Attributes:
id (int): ID of document.
library (str): This can be different for each document, depend on your
system.
base (str): Default "``nkc``", but really depends on what bases you
have defined in your Aleph server.
"""
pass
def getListOfBases():
"""
This function is here mainly for purposes of unittest
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page.
"""
downer = Downloader()
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
lambda x: "href" in x.params and "local_base" in x.params["href"],
dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
base_links = map(
lambda x: x.params["href"].replace("?", "&", 1).split("&"),
base_links
)
# filter only sections containing bases
bases = map(
lambda link: filter(lambda base: "local_base=" in base, link)[0],
base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
return list(set(bases)) # list(set()) is same as unique()
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
    """
    Flatten one level of parsed XML into a :py:class:`dict`.

    Args:
        dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).

    Returns:
        dict: Tag names mapped to their (int-converted, when possible)
              contents; repeated tags are collected into a list under the
              shared key.
    """
    result = {}
    for child in dom.childs:
        if not child.isOpeningTag():
            continue

        key = child.getTagName().strip()
        value = _tryConvertToInt(child.getContent().strip())

        if key not in result:
            result[key] = value
        elif isinstance(result[key], list):
            # third and later occurrence: append to the existing list
            result[key].append(value)
        else:
            # second occurrence: promote the scalar to a list
            result[key] = [result[key], value]

    return result
def downloadRecords(search_result, from_doc=1):
"""
Download `MAX_RECORDS` documents from `search_result` starting from
`from_doc`.
Attr:
search_result (dict): returned from :func:`searchInAleph`.
from_doc (int, default 1): Start from document number `from_doc`.
Returns:
list: List of XML strings with documents in MARC OAI.
"""
downer = Downloader()
if "set_number" not in search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# download all no_records
records = []
for cnt in range(search_result["no_records"]):
doc_number = from_doc + cnt
if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]:
break
set_data = downer.download(
ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
SET_NUM=set_number,
RECORD_NUM=doc_number,
)
)
records.append(set_data)
return records
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
"""
Get IDs, which can be used as parameters for other functions.
Args:
aleph_search_result (dict): returned from :func:`searchInAleph`
number_of_docs (int, optional): how many :class:`DocumentID` from set
given by `aleph_search_result` should be returned.
Default -1 for all of them.
Returns:
list: :class:`DocumentID` named tuples to given `aleph_search_result`.
Raises:
AlephException: If Aleph returns unknown format of data.
Note:
Returned :class:`DocumentID` can be used as parameters to
:func:`downloadMARCXML`.
"""
downer = Downloader()
if "set_number" not in aleph_search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(aleph_search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# limit number of fetched documents, if -1, download all
if number_of_docs <= 0:
number_of_docs = aleph_search_result["no_entries"]
# download data about given set
set_data = downer.download(
ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
SET_NUMBER=set_number,
NUMBER_OF_DOCS=number_of_docs,
)
)
# parse data
dom = dhtmlparser.parseString(set_data)
set_data = dom.find("ill-get-set")
# there should be at least one <ill-get-set> field
if len(set_data) <= 0:
raise AlephException("Aleph didn't returned set data.")
ids = []
for library in set_data:
documents = _alephResultToDict(library)
if "error" in documents:
raise AlephException("getDocumentIDs: " + documents["error"])
# convert all document records to DocumentID named tuple and extend
# them to 'ids' array
if isinstance(documents["doc-number"], list):
ids.extend(
map(
lambda x: DocumentID(
x,
documents["set-library"],
aleph_search_result["base"]
),
set(documents["doc-number"])
)
)
else:
ids.append(
DocumentID(
documents["doc-number"],
documents["set-library"],
aleph_search_result["base"]
)
)
return ids
def downloadMARCXML(doc_id, library, base="nkc"):
    """
    Download MARC XML document with given `doc_id` from given `library`.

    Args:
        doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
        library (str): "``NKC01``" in our case; :func:`getDocumentIDs`
            already puts the library into the :class:`DocumentID` tuple.
        base (str, optional): not used by the current implementation; kept
            for backward compatibility of the signature.

    Returns:
        str: MARC XML unicode string.

    Raises:
        LibraryNotFoundException
        DocumentNotFoundException
    """
    downer = Downloader()

    url = ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
        DOC_ID=doc_id,
        LIBRARY=library
    )
    data = downer.download(url)
    dom = dhtmlparser.parseString(data)

    # <login><error> in the response signals a bad/unknown library
    login_tags = dom.find("login")
    if login_tags:
        error_msg = login_tags[0].find("error")
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' " +
                "(probably bad library: '" + library + "')!\nMessage: " +
                "\n".join(e.getContent() for e in error_msg)
            )

    # <ill-get-doc><error> signals a missing document
    doc_tags = dom.find("ill-get-doc")
    if doc_tags:
        error_msg = doc_tags[0].find("error")
        if error_msg:
            raise DocumentNotFoundException(
                "\n".join(e.getContent() for e in error_msg)
            )

    return data  # MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
    """
    Download MARC OAI document with given `doc_id` from given (logical) `base`.

    Some documents can be obtained in full text only through this call.

    Args:
        doc_id (str): You will get this from :func:`getDocumentIDs`.
        base (str): Aleph base to download the document from.

    Returns:
        str: MARC XML Unicode string.

    Raises:
        InvalidAlephBaseException
        DocumentNotFoundException
    """
    downer = Downloader()

    url = ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
        DOC_ID=doc_id,
        BASE=base
    )
    data = downer.download(url)

    dom = dhtmlparser.parseString(data)

    # success path: no <error> tag in the response
    errors = dom.find("error")
    if not errors:
        return data

    message = errors[0].getContent()
    if "Error reading document" in message:
        raise DocumentNotFoundException(str(message))

    raise InvalidAlephBaseException(
        message + "\n" +
        "The base you are trying to access probably doesn't exist."
    )
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `isbn`.

    Args:
        isbn (str): ISBN of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return downloadRecords(search_result)
def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `issn`.

    Args:
        issn (str): ISSN of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, issn, False, "ssn")
    return downloadRecords(search_result)
def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books by `author`.

    Args:
        author (str): Name of the author of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return downloadRecords(search_result)
def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books from `publisher`.

    Args:
        publisher (str): Name of the publisher of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return downloadRecords(search_result)
def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books with given `title`.

    Args:
        title (str): Title of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return downloadRecords(search_result)
def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return downloadRecords(search_result)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `isbn` into :class:`DocumentID` objects.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return getDocumentIDs(search_result)
def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `author` into :class:`DocumentID` objects of their books.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, author, False, "wau")
    return getDocumentIDs(search_result)
def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `publisher` into :class:`DocumentID` objects of their books.

    Args:
        publisher (str): Name of publisher which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return getDocumentIDs(search_result)
def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
    """
    Resolve book `title` into :class:`DocumentID` objects.

    Args:
        title (str): Title (name) of the book used to search in Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return getDocumentIDs(search_result)
def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `icz` (identification number) into :class:`DocumentID` objects.

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return getDocumentIDs(search_result)
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return search_result["no_entries"]
def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `author`.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return search_result["no_entries"]
def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `publisher`.

    Args:
        publisher (str): Name of publisher which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return search_result["no_entries"]
def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching book `title`.

    Args:
        title (str): Title (name) of book which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return search_result["no_entries"]
def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `icz`
    (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): base on which will be search performed. Default
                              :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    # NOTE: docstring previously said "given `title`" (copy-paste error);
    # this function searches by identification number ("icz" field).
    return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | downloadRecords | python | def downloadRecords(search_result, from_doc=1):
downer = Downloader()
if "set_number" not in search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# download all no_records
records = []
for cnt in range(search_result["no_records"]):
doc_number = from_doc + cnt
if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]:
break
set_data = downer.download(
ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
SET_NUM=set_number,
RECORD_NUM=doc_number,
)
)
records.append(set_data)
return records | Download `MAX_RECORDS` documents from `search_result` starting from
`from_doc`.
Attr:
search_result (dict): returned from :func:`searchInAleph`.
from_doc (int, default 1): Start from document number `from_doc`.
Returns:
list: List of XML strings with documents in MARC OAI. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L391-L430 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can theh download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
    """
    Base class for all errors raised by this module.

    Exception tree::

        - AlephException
          |- InvalidAlephBaseException
          |- InvalidAlephFieldException
          |- LibraryNotFoundException
          `- DocumentNotFoundException
    """
    def __init__(self, message):
        # Plain pass-through to Exception; subclasses reuse this behavior.
        Exception.__init__(self, message)
class InvalidAlephBaseException(AlephException):
    """Raised when the requested (logical) Aleph base doesn't exist."""
    def __init__(self, message):
        super(InvalidAlephBaseException, self).__init__(message)
class InvalidAlephFieldException(AlephException):
    """Raised when a search field is not in :attr:`VALID_ALEPH_FIELDS`."""
    def __init__(self, message):
        super(InvalidAlephFieldException, self).__init__(message)
class LibraryNotFoundException(AlephException):
    """Raised when Aleph reports an unknown library in a document request."""
    def __init__(self, message):
        super(LibraryNotFoundException, self).__init__(message)
class DocumentNotFoundException(AlephException):
    """Raised when the requested document can't be found in Aleph."""
    def __init__(self, message):
        super(DocumentNotFoundException, self).__init__(message)
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
    """
    This structure is used to store `"pointer"` to document in aleph.

    Attributes:
        id (int): ID of document.
        library (str): This can be different for each document, depend on your
                       system.
        base (str): Default "``nkc``", but really depends on what bases you
                    have defined in your Aleph server.

    Note:
        Instances are produced by :func:`getDocumentIDs` and consumed by
        :func:`downloadMARCXML` / :func:`downloadMARCOAI`.
    """
    pass
def getListOfBases():
    """
    Scrape the Aleph main page for the list of valid bases.

    Returns:
        list of str: Valid bases as they are used as URL parameters in links
        at Aleph main page.
    """
    downer = Downloader()
    data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
    dom = dhtmlparser.parseString(data.lower())

    # keep only <a> tags whose href carries a local_base parameter
    links = [
        a for a in dom.find("a")
        if "href" in a.params and "local_base" in a.params["href"]
    ]

    # split hrefs by '&' - we need only XXX from link.tld/..&local_base=XXX
    href_parts = [
        a.params["href"].replace("?", "&", 1).split("&")
        for a in links
    ]

    # pick the 'local_base=XXX' chunk from each link ...
    base_chunks = [
        [part for part in parts if "local_base=" in part][0]
        for parts in href_parts
    ]

    # ... and strip it down to the bare base name
    bases = [chunk.split("=")[1].strip() for chunk in base_chunks]

    return list(set(bases))  # deduplicate
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
    """
    Convert part of non-nested XML to :py:class:`dict`.

    Args:
        dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).

    Returns:
        dict: tag name -> content; when the same tag repeats, its values are
        collected into a list in document order.
    """
    result = {}
    for child in dom.childs:
        if not child.isOpeningTag():
            continue

        key = child.getTagName().strip()
        value = _tryConvertToInt(child.getContent().strip())

        if key not in result:
            result[key] = value
        elif isinstance(result[key], list):
            result[key].append(value)  # already a list - just extend it
        else:
            result[key] = [result[key], value]  # promote scalar to list

    return result
def searchInAleph(base, phrase, considerSimilar, field):
    """
    Send a search request to the Aleph X-Services search engine.

    The returned dictionary is mostly useful as input for
    :func:`getDocumentIDs`, which fetches the actual records.

    Args:
        base (str): which database you want to use
        phrase (str): what do you want to search
        considerSimilar (bool): fuzzy search flag (doesn't really work, avoid)
        field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)

    Returns:
        dictionary: with keys ``error`` (optional), ``no_entries`` (int),
        ``no_records`` (int), ``set_number`` (int), ``session-id`` (str)
        plus ``base`` (str) echoing the queried base.

    Example:
        Returned dict::

            {
             'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB',
             'set_number': 36520,
             'no_records': 1,
             'no_entries': 1
            }

    Raises:
        AlephException: if Aleph doesn't return any information
        InvalidAlephFieldException: if specified field is not valid
    """
    if field.lower() not in VALID_ALEPH_FIELDS:
        raise InvalidAlephFieldException("Unknown field '" + field + "'!")

    downer = Downloader()
    query = Template(SEARCH_URL_TEMPLATE).substitute(
        PHRASE=quote_plus(phrase),  # urlencode phrase
        BASE=base,
        FIELD=field,
        SIMILAR="Y" if considerSimilar else "N"
    )
    response = downer.download(ALEPH_URL + query)

    dom = dhtmlparser.parseString(response)
    find_tags = dom.find("find")  # find <find> element :)
    if not find_tags:
        raise AlephException("Aleph didn't returned any information.")

    # convert aleph result into dictionary and remember the queried base
    result = _alephResultToDict(find_tags[0])
    result["base"] = base

    error = result.get("error")
    if error is None:
        return result

    if error == "empty set":
        result["no_entries"] = 0  # empty set have 0 entries
        return result

    raise AlephException(error)
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
    """
    Get IDs, which can be used as parameters for other functions.

    Args:
        aleph_search_result (dict): returned from :func:`searchInAleph`
        number_of_docs (int, optional): how many :class:`DocumentID` from set
                         given by `aleph_search_result` should be returned.
                         Default -1 for all of them.

    Returns:
        list: :class:`DocumentID` named tuples to given `aleph_search_result`.

    Raises:
        AlephException: If Aleph returns unknown format of data.

    Note:
        Returned :class:`DocumentID` can be used as parameters to
        :func:`downloadMARCXML`.
    """
    downer = Downloader()

    if "set_number" not in aleph_search_result:
        return []

    # Aleph expects the set number zero-padded to (at least) six digits.
    set_number = str(aleph_search_result["set_number"]).zfill(6)

    # non-positive count means "fetch everything in the set"
    if number_of_docs <= 0:
        number_of_docs = aleph_search_result["no_entries"]

    # download data about given set
    set_data = downer.download(
        ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
            SET_NUMBER=set_number,
            NUMBER_OF_DOCS=number_of_docs,
        )
    )

    # parse data
    dom = dhtmlparser.parseString(set_data)
    set_data = dom.find("ill-get-set")

    # there should be at least one <ill-get-set> field
    if not set_data:
        raise AlephException("Aleph didn't returned set data.")

    ids = []
    for library in set_data:
        documents = _alephResultToDict(library)

        if "error" in documents:
            raise AlephException("getDocumentIDs: " + documents["error"])

        # a single document comes back as a scalar, multiple documents as a
        # list; normalize both cases to one iterable (lists are deduplicated,
        # matching the original behavior)
        doc_numbers = documents["doc-number"]
        if isinstance(doc_numbers, list):
            doc_numbers = set(doc_numbers)
        else:
            doc_numbers = [doc_numbers]

        for doc_number in doc_numbers:
            ids.append(
                DocumentID(
                    doc_number,
                    documents["set-library"],
                    aleph_search_result["base"]
                )
            )

    return ids
def downloadMARCXML(doc_id, library, base="nkc"):
    """
    Download MARC XML document with given `doc_id` from given `library`.

    Args:
        doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
        library (str): "``NKC01``" in our case; :func:`getDocumentIDs`
            already puts the library into the :class:`DocumentID` tuple.
        base (str, optional): not used by the current implementation; kept
            for backward compatibility of the signature.

    Returns:
        str: MARC XML unicode string.

    Raises:
        LibraryNotFoundException
        DocumentNotFoundException
    """
    downer = Downloader()

    url = ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
        DOC_ID=doc_id,
        LIBRARY=library
    )
    data = downer.download(url)
    dom = dhtmlparser.parseString(data)

    # <login><error> in the response signals a bad/unknown library
    login_tags = dom.find("login")
    if login_tags:
        error_msg = login_tags[0].find("error")
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' " +
                "(probably bad library: '" + library + "')!\nMessage: " +
                "\n".join(e.getContent() for e in error_msg)
            )

    # <ill-get-doc><error> signals a missing document
    doc_tags = dom.find("ill-get-doc")
    if doc_tags:
        error_msg = doc_tags[0].find("error")
        if error_msg:
            raise DocumentNotFoundException(
                "\n".join(e.getContent() for e in error_msg)
            )

    return data  # MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
    """
    Download MARC OAI document with given `doc_id` from given (logical) `base`.

    Some documents can be obtained in full text only through this call.

    Args:
        doc_id (str): You will get this from :func:`getDocumentIDs`.
        base (str): Aleph base to download the document from.

    Returns:
        str: MARC XML Unicode string.

    Raises:
        InvalidAlephBaseException
        DocumentNotFoundException
    """
    downer = Downloader()

    url = ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
        DOC_ID=doc_id,
        BASE=base
    )
    data = downer.download(url)

    dom = dhtmlparser.parseString(data)

    # success path: no <error> tag in the response
    errors = dom.find("error")
    if not errors:
        return data

    message = errors[0].getContent()
    if "Error reading document" in message:
        raise DocumentNotFoundException(str(message))

    raise InvalidAlephBaseException(
        message + "\n" +
        "The base you are trying to access probably doesn't exist."
    )
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `isbn`.

    Args:
        isbn (str): ISBN of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return downloadRecords(search_result)
def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `issn`.

    Args:
        issn (str): ISSN of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, issn, False, "ssn")
    return downloadRecords(search_result)
def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books by `author`.

    Args:
        author (str): Name of the author of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return downloadRecords(search_result)
def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books from `publisher`.

    Args:
        publisher (str): Name of the publisher of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return downloadRecords(search_result)
def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records for books with given `title`.

    Args:
        title (str): Title of the books you want to get.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return downloadRecords(search_result)
def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
    """
    Fetch full **OAI** XML records matching `icz` (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with full OAI XML representation of each record.
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return downloadRecords(search_result)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `isbn` into :class:`DocumentID` objects.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return getDocumentIDs(search_result)
def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `author` into :class:`DocumentID` objects of their books.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, author, False, "wau")
    return getDocumentIDs(search_result)
def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `publisher` into :class:`DocumentID` objects of their books.

    Args:
        publisher (str): Name of publisher which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return getDocumentIDs(search_result)
def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
    """
    Resolve book `title` into :class:`DocumentID` objects.

    Args:
        title (str): Title (name) of the book used to search in Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return getDocumentIDs(search_result)
def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
    """
    Resolve `icz` (identification number) into :class:`DocumentID` objects.

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search_result = searchInAleph(base, icz, False, "icz")
    return getDocumentIDs(search_result)
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, isbn, False, "sbn")
    return search_result["no_entries"]
def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `author`.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, author, False, "wau")
    return search_result["no_entries"]
def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching `publisher`.

    Args:
        publisher (str): Name of publisher which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, publisher, False, "wpb")
    return search_result["no_entries"]
def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
    """
    Count Aleph records matching book `title`.

    Args:
        title (str): Title (name) of book which will be used to search Aleph.
        base (str, optional): Aleph base to search; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search_result = searchInAleph(base, title, False, "wtl")
    return search_result["no_entries"]
def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `title`.
Args:
icz (str): Identification number used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | getDocumentIDs | python | def getDocumentIDs(aleph_search_result, number_of_docs=-1):
downer = Downloader()
if "set_number" not in aleph_search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(aleph_search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# limit number of fetched documents, if -1, download all
if number_of_docs <= 0:
number_of_docs = aleph_search_result["no_entries"]
# download data about given set
set_data = downer.download(
ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
SET_NUMBER=set_number,
NUMBER_OF_DOCS=number_of_docs,
)
)
# parse data
dom = dhtmlparser.parseString(set_data)
set_data = dom.find("ill-get-set")
# there should be at least one <ill-get-set> field
if len(set_data) <= 0:
raise AlephException("Aleph didn't returned set data.")
ids = []
for library in set_data:
documents = _alephResultToDict(library)
if "error" in documents:
raise AlephException("getDocumentIDs: " + documents["error"])
# convert all document records to DocumentID named tuple and extend
# them to 'ids' array
if isinstance(documents["doc-number"], list):
ids.extend(
map(
lambda x: DocumentID(
x,
documents["set-library"],
aleph_search_result["base"]
),
set(documents["doc-number"])
)
)
else:
ids.append(
DocumentID(
documents["doc-number"],
documents["set-library"],
aleph_search_result["base"]
)
)
return ids | Get IDs, which can be used as parameters for other functions.
Args:
aleph_search_result (dict): returned from :func:`searchInAleph`
number_of_docs (int, optional): how many :class:`DocumentID` from set
given by `aleph_search_result` should be returned.
Default -1 for all of them.
Returns:
list: :class:`DocumentID` named tuples to given `aleph_search_result`.
Raises:
AlephException: If Aleph returns unknown format of data.
Note:
Returned :class:`DocumentID` can be used as parameters to
:func:`downloadMARCXML`. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L433-L512 | [
"def _alephResultToDict(dom):\n \"\"\"\n Convert part of non-nested XML to :py:class:`dict`.\n\n Args:\n dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).\n\n Returns:\n dict: with python data\n \"\"\"\n result = {}\n for i in dom.childs:\n if not i.isOpeningTag():\... | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can theh download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
"""
Exception tree::
- AlephException
|- InvalidAlephBaseException
|- InvalidAlephFieldException
|- LibraryNotFoundException
`- DocumentNotFoundException
"""
def __init__(self, message):
Exception.__init__(self, message)
class InvalidAlephBaseException(AlephException):
def __init__(self, message):
super(InvalidAlephBaseException, self).__init__(message)
class InvalidAlephFieldException(AlephException):
def __init__(self, message):
super(InvalidAlephFieldException, self).__init__(message)
class LibraryNotFoundException(AlephException):
def __init__(self, message):
super(LibraryNotFoundException, self).__init__(message)
class DocumentNotFoundException(AlephException):
def __init__(self, message):
super(DocumentNotFoundException, self).__init__(message)
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
"""
This structure is used to store `"pointer"` to document in aleph.
Attributes:
id (int): ID of document.
library (str): This can be different for each document, depend on your
system.
base (str): Default "``nkc``", but really depends on what bases you
have defined in your Aleph server.
"""
pass
def getListOfBases():
"""
This function is here mainly for purposes of unittest
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page.
"""
downer = Downloader()
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
lambda x: "href" in x.params and "local_base" in x.params["href"],
dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
base_links = map(
lambda x: x.params["href"].replace("?", "&", 1).split("&"),
base_links
)
# filter only sections containing bases
bases = map(
lambda link: filter(lambda base: "local_base=" in base, link)[0],
base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
return list(set(bases)) # list(set()) is same as unique()
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
"""
Convert part of non-nested XML to :py:class:`dict`.
Args:
dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).
Returns:
dict: with python data
"""
result = {}
for i in dom.childs:
if not i.isOpeningTag():
continue
keyword = i.getTagName().strip()
value = _tryConvertToInt(i.getContent().strip())
# if there are multiple tags with same keyword, add values into
# array, instead of rewriting existing value at given keyword
if keyword in result: # if it is already there ..
if isinstance(result[keyword], list): # and it is list ..
result[keyword].append(value) # add it to list
else: # or make it array
result[keyword] = [result[keyword], value]
else: # if it is not in result, add it
result[keyword] = value
return result
def searchInAleph(base, phrase, considerSimilar, field):
"""
Send request to the aleph search engine.
Request itself is pretty useless, but it can be later used as parameter
for :func:`getDocumentIDs`, which can fetch records from Aleph.
Args:
base (str): which database you want to use
phrase (str): what do you want to search
considerSimilar (bool): fuzzy search, which is not working at all, so
don't use it
field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)
Returns:
dictionary: consisting from following fields:
| error (optional): present if there was some form of error
| no_entries (int): number of entries that can be fetch from aleph
| no_records (int): no idea what is this, but it is always >= than
`no_entries`
| set_number (int): important - something like ID of your request
| session-id (str): used to count users for licensing purposes
Example:
Returned dict::
{
'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB',
'set_number': 36520,
'no_records': 1,
'no_entries': 1
}
Raises:
AlephException: if Aleph doesn't return any information
InvalidAlephFieldException: if specified field is not valid
"""
downer = Downloader()
if field.lower() not in VALID_ALEPH_FIELDS:
raise InvalidAlephFieldException("Unknown field '" + field + "'!")
param_url = Template(SEARCH_URL_TEMPLATE).substitute(
PHRASE=quote_plus(phrase), # urlencode phrase
BASE=base,
FIELD=field,
SIMILAR="Y" if considerSimilar else "N"
)
result = downer.download(ALEPH_URL + param_url)
dom = dhtmlparser.parseString(result)
find = dom.find("find") # find <find> element :)
if len(find) <= 0:
raise AlephException("Aleph didn't returned any information.")
find = find[0]
# convert aleph result into dictionary
result = _alephResultToDict(find)
# add informations about base into result
result["base"] = base
if "error" not in result:
return result
# handle errors
if result["error"] == "empty set":
result["no_entries"] = 0 # empty set have 0 entries
return result
else:
raise AlephException(result["error"])
def downloadRecords(search_result, from_doc=1):
"""
Download `MAX_RECORDS` documents from `search_result` starting from
`from_doc`.
Attr:
search_result (dict): returned from :func:`searchInAleph`.
from_doc (int, default 1): Start from document number `from_doc`.
Returns:
list: List of XML strings with documents in MARC OAI.
"""
downer = Downloader()
if "set_number" not in search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# download all no_records
records = []
for cnt in range(search_result["no_records"]):
doc_number = from_doc + cnt
if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]:
break
set_data = downer.download(
ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
SET_NUM=set_number,
RECORD_NUM=doc_number,
)
)
records.append(set_data)
return records
def downloadMARCXML(doc_id, library, base="nkc"):
"""
Download MARC XML document with given `doc_id` from given `library`.
Args:
doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
library (str): "``NKC01``" in our case, but don't worry,
:func:`getDocumentIDs` adds library specification into
:class:`DocumentID` named tuple.
Returns:
str: MARC XML unicode string.
Raises:
LibraryNotFoundException
DocumentNotFoundException
"""
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
LIBRARY=library
)
)
dom = dhtmlparser.parseString(data)
# check if there are any errors
# bad library error
error = dom.find("login")
if error:
error_msg = error[0].find("error")
if error_msg:
raise LibraryNotFoundException(
"Can't download document doc_id: '" + str(doc_id) + "' " +
"(probably bad library: '" + library + "')!\nMessage: " +
"\n".join(map(lambda x: x.getContent(), error_msg))
)
# another error - document not found
error = dom.find("ill-get-doc")
if error:
error_msg = error[0].find("error")
if error_msg:
raise DocumentNotFoundException(
"\n".join(map(lambda x: x.getContent(), error_msg))
)
return data # MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
"""
Download MARC OAI document with given `doc_id` from given (logical) `base`.
Funny part is, that some documents can be obtained only with this function
in their full text.
Args:
doc_id (str): You will get this from :func:`getDocumentIDs`.
base (str, optional): Base from which you want to download Aleph
document.
This seems to be duplicite with
:func:`searchInAleph` parameters, but it's just
something Aleph's X-Services wants, so ..
Returns:
str: MARC XML Unicode string.
Raises:
InvalidAlephBaseException
DocumentNotFoundException
"""
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
BASE=base
)
)
dom = dhtmlparser.parseString(data)
# check for errors
error = dom.find("error")
if len(error) <= 0: # no errors
return data
if "Error reading document" in error[0].getContent():
raise DocumentNotFoundException(
str(error[0].getContent())
)
else:
raise InvalidAlephBaseException(
error[0].getContent() + "\n" +
"The base you are trying to access probably doesn't exist."
)
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `isbn` in `base`.
Args:
isbn (str): ISBN of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
isbn,
False,
"sbn"
)
)
def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `issn` in `base`.
Args:
issn (str): ISSN of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
issn,
False,
"ssn"
)
)
def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `author` in `base`.
Args:
author (str): Name of the `author` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
author,
False,
"wau"
)
)
def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `publisher` in `base`.
Args:
publisher (str): Name of the `publisher` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
publisher,
False,
"wpb"
)
)
def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `title` in `base`.
Args:
title (str): `title` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
title,
False,
"wtl"
)
)
def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `icz` (identification number) in `base`.
Args:
icz (str): Identification number used to search Aleph.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
icz,
False,
"icz"
)
)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given `isbn`.
Args:
isbn (str): ISBN string.
base (str, optional): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, isbn, False, "sbn"))
def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given `author`.
Args:
author (str): Authors name/lastname in UTF-8.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, author, False, "wau"))
def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`publisher`.
Args:
publisher (str): Name of publisher which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, publisher, False, "wpb"))
def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`title`.
Args:
title (str): Title (name) of the book which will be used to search in
Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, title, False, "wtl"))
def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`icz` (identification number).
Args:
icz (str): Identification number used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, icz, False, "icz"))
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `isbn`.
Args:
isbn (str): ISBN string.
base (str, optional): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, isbn, False, "sbn")["no_entries"]
def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `author`.
Args:
author (str): Authors name/lastname in UTF-8.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, author, False, "wau")["no_entries"]
def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `publisher`.
Args:
publisher (str): Name of publisher which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, publisher, False, "wpb")["no_entries"]
def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `title`.
Args:
title (str): Title (name) of book which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, title, False, "wtl")["no_entries"]
def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `title`.
Args:
icz (str): Identification number used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | downloadMARCXML | python | def downloadMARCXML(doc_id, library, base="nkc"):
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
LIBRARY=library
)
)
dom = dhtmlparser.parseString(data)
# check if there are any errors
# bad library error
error = dom.find("login")
if error:
error_msg = error[0].find("error")
if error_msg:
raise LibraryNotFoundException(
"Can't download document doc_id: '" + str(doc_id) + "' " +
"(probably bad library: '" + library + "')!\nMessage: " +
"\n".join(map(lambda x: x.getContent(), error_msg))
)
# another error - document not found
error = dom.find("ill-get-doc")
if error:
error_msg = error[0].find("error")
if error_msg:
raise DocumentNotFoundException(
"\n".join(map(lambda x: x.getContent(), error_msg))
)
return data | Download MARC XML document with given `doc_id` from given `library`.
Args:
doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
library (str): "``NKC01``" in our case, but don't worry,
:func:`getDocumentIDs` adds library specification into
:class:`DocumentID` named tuple.
Returns:
str: MARC XML unicode string.
Raises:
LibraryNotFoundException
DocumentNotFoundException | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L515-L566 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in strange way, that he won't allow you to access desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can theh download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
"""
Exception tree::
- AlephException
|- InvalidAlephBaseException
|- InvalidAlephFieldException
|- LibraryNotFoundException
`- DocumentNotFoundException
"""
def __init__(self, message):
Exception.__init__(self, message)
class InvalidAlephBaseException(AlephException):
def __init__(self, message):
super(InvalidAlephBaseException, self).__init__(message)
class InvalidAlephFieldException(AlephException):
def __init__(self, message):
super(InvalidAlephFieldException, self).__init__(message)
class LibraryNotFoundException(AlephException):
def __init__(self, message):
super(LibraryNotFoundException, self).__init__(message)
class DocumentNotFoundException(AlephException):
def __init__(self, message):
super(DocumentNotFoundException, self).__init__(message)
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
"""
This structure is used to store `"pointer"` to document in aleph.
Attributes:
id (int): ID of document.
library (str): This can be different for each document, depend on your
system.
base (str): Default "``nkc``", but really depends on what bases you
have defined in your Aleph server.
"""
pass
def getListOfBases():
"""
This function is here mainly for purposes of unittest
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page.
"""
downer = Downloader()
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
lambda x: "href" in x.params and "local_base" in x.params["href"],
dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
base_links = map(
lambda x: x.params["href"].replace("?", "&", 1).split("&"),
base_links
)
# filter only sections containing bases
bases = map(
lambda link: filter(lambda base: "local_base=" in base, link)[0],
base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
return list(set(bases)) # list(set()) is same as unique()
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
    """
    Flatten one level of a non-nested XML subtree into a :py:class:`dict`.

    Args:
        dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).

    Returns:
        dict: tag name -> converted content; repeated tags are collected
        into a list.
    """
    result = {}
    for child in dom.childs:
        if not child.isOpeningTag():
            continue

        key = child.getTagName().strip()
        value = _tryConvertToInt(child.getContent().strip())

        if key not in result:
            # first occurrence - store the plain value
            result[key] = value
        elif isinstance(result[key], list):
            # already collecting duplicates - just append
            result[key].append(value)
        else:
            # second occurrence - promote the scalar to a list
            result[key] = [result[key], value]

    return result
def searchInAleph(base, phrase, considerSimilar, field):
    """
    Ask the Aleph search engine to create a result set.

    The returned dict is mostly useful as input for :func:`getDocumentIDs`,
    which actually fetches the records.

    Args:
        base (str): which database you want to use
        phrase (str): what do you want to search
        considerSimilar (bool): fuzzy search flag (known not to work, avoid)
        field (str): where you want to look (see :attr:`VALID_ALEPH_FIELDS`)

    Returns:
        dictionary: with keys ``error`` (optional), ``no_entries`` (int),
        ``no_records`` (int, always >= ``no_entries``), ``set_number`` (int,
        something like ID of your request), ``session-id`` (str, used to
        count users for licensing purposes) and ``base`` (str, echoed back).

    Raises:
        AlephException: if Aleph doesn't return any information
        InvalidAlephFieldException: if specified field is not valid
    """
    downer = Downloader()

    if field.lower() not in VALID_ALEPH_FIELDS:
        raise InvalidAlephFieldException("Unknown field '" + field + "'!")

    query = Template(SEARCH_URL_TEMPLATE).substitute(
        PHRASE=quote_plus(phrase),  # urlencode phrase
        BASE=base,
        FIELD=field,
        SIMILAR="Y" if considerSimilar else "N",
    )

    dom = dhtmlparser.parseString(downer.download(ALEPH_URL + query))

    find_tags = dom.find("find")  # locate the <find> element
    if len(find_tags) <= 0:
        raise AlephException("Aleph didn't returned any information.")

    # convert aleph result into dictionary and echo the base back
    result = _alephResultToDict(find_tags[0])
    result["base"] = base

    if "error" not in result:
        return result

    # "empty set" is not a real failure - it simply means zero hits
    if result["error"] == "empty set":
        result["no_entries"] = 0  # empty set have 0 entries
        return result

    raise AlephException(result["error"])
def downloadRecords(search_result, from_doc=1):
    """
    Fetch up to `MAX_RECORDS` documents from `search_result`, starting at
    document number `from_doc`.

    Attr:
        search_result (dict): returned from :func:`searchInAleph`.
        from_doc (int, default 1): first document number to download.

    Returns:
        list: List of XML strings with documents in MARC OAI.
    """
    downer = Downloader()

    if "set_number" not in search_result:
        return []

    # Aleph expects the set number zero-padded to six digits.
    set_number = str(search_result["set_number"]).zfill(6)

    records = []
    total = search_result["no_records"]
    for offset in range(total):
        doc_number = from_doc + offset
        if offset >= MAX_RECORDS or doc_number > total:
            break

        url = ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
            SET_NUM=set_number,
            RECORD_NUM=doc_number,
        )
        records.append(downer.download(url))

    return records
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
    """
    Resolve a search result into :class:`DocumentID` named tuples.

    Args:
        aleph_search_result (dict): returned from :func:`searchInAleph`
        number_of_docs (int, optional): how many :class:`DocumentID` from
            the set should be returned. Default -1 for all of them.

    Returns:
        list: :class:`DocumentID` named tuples for `aleph_search_result`.

    Raises:
        AlephException: If Aleph returns unknown format of data.

    Note:
        Returned :class:`DocumentID` can be used as parameters to
        :func:`downloadMARCXML`.
    """
    downer = Downloader()

    if "set_number" not in aleph_search_result:
        return []

    # Aleph expects the set number zero-padded to six digits.
    set_number = str(aleph_search_result["set_number"]).zfill(6)

    # non-positive count means "everything in the set"
    if number_of_docs <= 0:
        number_of_docs = aleph_search_result["no_entries"]

    # download data about given set
    set_data = downer.download(
        ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
            SET_NUMBER=set_number,
            NUMBER_OF_DOCS=number_of_docs,
        )
    )

    # there should be at least one <ill-get-set> field
    libraries = dhtmlparser.parseString(set_data).find("ill-get-set")
    if len(libraries) <= 0:
        raise AlephException("Aleph didn't returned set data.")

    ids = []
    for library in libraries:
        documents = _alephResultToDict(library)

        if "error" in documents:
            raise AlephException("getDocumentIDs: " + documents["error"])

        doc_numbers = documents["doc-number"]
        if isinstance(doc_numbers, list):
            # several documents in this library - deduplicate and wrap each
            for doc_number in set(doc_numbers):
                ids.append(
                    DocumentID(
                        doc_number,
                        documents["set-library"],
                        aleph_search_result["base"],
                    )
                )
        else:
            ids.append(
                DocumentID(
                    doc_numbers,
                    documents["set-library"],
                    aleph_search_result["base"],
                )
            )

    return ids
# MARCxml of document with given doc_id
def downloadMARCOAI(doc_id, base):
    """
    Fetch the MARC OAI record for `doc_id` from given (logical) `base`.

    Funny part is, that some documents can be obtained only with this
    function in their full text.

    Args:
        doc_id (str): You will get this from :func:`getDocumentIDs`.
        base (str): Aleph base to download from. Seems duplicate with the
            :func:`searchInAleph` parameter, but X-Services wants it.

    Returns:
        str: MARC XML Unicode string.

    Raises:
        InvalidAlephBaseException
        DocumentNotFoundException
    """
    url = ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
        DOC_ID=doc_id,
        BASE=base,
    )
    data = Downloader().download(url)

    errors = dhtmlparser.parseString(data).find("error")
    if len(errors) <= 0:  # no errors
        return data

    message = errors[0].getContent()
    if "Error reading document" in message:
        raise DocumentNotFoundException(str(message))

    raise InvalidAlephBaseException(
        message + "\n" +
        "The base you are trying to access probably doesn't exist."
    )
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for given `isbn` in `base`.

    Args:
        isbn (str): ISBN of the books you want to get.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, isbn, False, "sbn")
    return downloadRecords(search)
def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for given `issn` in `base`.

    Args:
        issn (str): ISSN of the periodicals you want to get.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, issn, False, "ssn")
    return downloadRecords(search)
def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for books by given `author` in `base`.

    Args:
        author (str): Name of the `author` of the books you want to get.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, author, False, "wau")
    return downloadRecords(search)
def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for books by given `publisher` in `base`.

    Args:
        publisher (str): Name of the `publisher` of the books you want to
            get.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, publisher, False, "wpb")
    return downloadRecords(search)
def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for books with given `title` in `base`.

    Args:
        title (str): `title` of the books you want to get.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, title, False, "wtl")
    return downloadRecords(search)
def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
    """
    Download full XML records for given `icz` (identification number) in
    `base`.

    Args:
        icz (str): Identification number used to search Aleph.
        base (str): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: strings with the full **OAI** XML representation of each
        matching record.
    """
    search = searchInAleph(base, icz, False, "icz")
    return downloadRecords(search)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Get :class:`DocumentID` objects of documents with given `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, isbn, False, "sbn")
    return getDocumentIDs(search)
def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
    """
    Get :class:`DocumentID` objects of documents by given `author`.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, author, False, "wau")
    return getDocumentIDs(search)
def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Get :class:`DocumentID` objects of documents by given `publisher`.

    Args:
        publisher (str): Name of publisher which will be used to search
            Aleph.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, publisher, False, "wpb")
    return getDocumentIDs(search)
def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
    """
    Get :class:`DocumentID` objects of documents with given `title`.

    Args:
        title (str): Title (name) of the book which will be used to search
            in Aleph.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, title, False, "wtl")
    return getDocumentIDs(search)
def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
    """
    Get :class:`DocumentID` objects of documents with given `icz`
    (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        list: of :class:`DocumentID` objects
    """
    search = searchInAleph(base, icz, False, "icz")
    return getDocumentIDs(search)
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `isbn`.

    Args:
        isbn (str): ISBN string.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search = searchInAleph(base, isbn, False, "sbn")
    return search["no_entries"]
def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `author`.

    Args:
        author (str): Authors name/lastname in UTF-8.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search = searchInAleph(base, author, False, "wau")
    return search["no_entries"]
def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `publisher`.

    Args:
        publisher (str): Name of publisher which will be used to search
            Aleph.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search = searchInAleph(base, publisher, False, "wpb")
    return search["no_entries"]
def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `title`.

    Args:
        title (str): Title (name) of book which will be used to search
            Aleph.
        base (str, optional): base to search in; defaults to
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    search = searchInAleph(base, title, False, "wtl")
    return search["no_entries"]
def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
    """
    Get number of records in Aleph which match given `icz`
    (identification number).

    Args:
        icz (str): Identification number used to search Aleph.
        base (str, optional): base on which will be search performed. Default
            :attr:`aleph.settings.ALEPH_DEFAULT_BASE`.

    Returns:
        int: Number of matching documents in Aleph.
    """
    # Docstring previously said "match given `title`" (copy-paste error);
    # this function searches by the "icz" identification-number field.
    return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/aleph.py | downloadMARCOAI | python | def downloadMARCOAI(doc_id, base):
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(OAI_DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
BASE=base
)
)
dom = dhtmlparser.parseString(data)
# check for errors
error = dom.find("error")
if len(error) <= 0: # no errors
return data
if "Error reading document" in error[0].getContent():
raise DocumentNotFoundException(
str(error[0].getContent())
)
else:
raise InvalidAlephBaseException(
error[0].getContent() + "\n" +
"The base you are trying to access probably doesn't exist."
) | Download MARC OAI document with given `doc_id` from given (logical) `base`.
Funny part is, that some documents can be obtained only with this function
in their full text.
Args:
doc_id (str): You will get this from :func:`getDocumentIDs`.
base (str, optional): Base from which you want to download Aleph
document.
This seems to be duplicite with
:func:`searchInAleph` parameters, but it's just
something Aleph's X-Services wants, so ..
Returns:
str: MARC XML Unicode string.
Raises:
InvalidAlephBaseException
DocumentNotFoundException | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/aleph.py#L569-L615 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
Aleph X-Service wrapper.
This module allows you to query Aleph's X-Services_ module (Aleph server is
defined by :attr:`aleph.settings.ALEPH_URL` in :mod:`settings.py
<aleph.settings>`).
.. _X-Services: http://www.exlibrisgroup.com/category/MetaLibXServer
There are two levels of abstraction.
Lowlevel
========
You can use this functions to access Aleph::
searchInAleph(base, phrase, considerSimilar, field)
downloadRecords(search_result, [from_doc])
getDocumentIDs(aleph_search_result, [number_of_docs])
downloadMARCXML(doc_id, library)
downloadMARCOAI(doc_id, base)
Workflow
********
Aleph works in a strange way: it won't let you access the desired
information directly.
You have to create search request by calling :func:`searchInAleph` first, which
will return dictionary with few important informations about session.
This dictionary can be later used as parameter to :func:`getDocumentIDs`
function, which will give you list of :class:`DocumentID` named tuples.
Note:
:py:func:`~collections.namedtuple` is used, because to access your
document, you don't need just `document ID` number, but also `library ID`
string.
Depending on your system, there may be just only one accessible library, or
multiple ones, and then you will be glad, that you get both of this
informations together.
:class:`DocumentID` can be used as parameter to :func:`downloadMARCXML`.
Lets look at some code::
ids = getDocumentIDs(searchInAleph("nkc", "test", False, "wrd"))
for id_num, library in ids:
XML = downloadMARCXML(id_num, library)
# processDocument(XML)
High-level
==========
XML wrappers
************
This wrappers returns full XML records from Aleph:
- :func:`getISBNsXML`
- :func:`getAuthorsBooksXML`
- :func:`getPublishersBooksXML`
- :func:`getBooksTitleXML`
- :func:`getICZBooksXML`
ID wrappers
***********
There are wrappers, which returns ID's of matching document in Aleph:
- :func:`getISBNsIDs`
- :func:`getAuthorsBooksIDs`
- :func:`getPublishersBooksIDs`
- :func:`getBooksTitleIDs`
- :func:`getICZBooksIDs`
You can then download them using :func:`downloadMARCXML` or
:func:`downloadMARCOAI`.
Count wrappers
**************
Count wrappers returns just the number of records with given parameters are
there in aleph.
- :func:`getISBNCount`
- :func:`getAuthorsBooksCount`
- :func:`getPublishersBooksCount`
- :func:`getBooksTitleCount`
- :func:`getICZBooksCount`
Note:
Counting functions are by one request faster than just counting results
from standard getters. It is preferred to use them to reduce load to Aleph.
Other noteworthy properties
===========================
List of valid bases can be obtained by calling :func:`getListOfBases`, which
returns list of strings.
There is also defined exception tree - see :class:`AlephException` doc-string
for details.
"""
from collections import namedtuple
from string import Template
from urllib import quote_plus
import dhtmlparser
from httpkie import Downloader
from settings import *
# Variables ===================================================================
# String.Template() variable convention is used
SEARCH_URL_TEMPLATE = "/X?op=find&request=$FIELD=$PHRASE&base=$BASE"
SET_URL_TEMPLATE = "/X?op=ill_get_set&set_number=$SET_NUMBER" + \
"&start_point=1&no_docs=$NUMBER_OF_DOCS"
DOC_URL_TEMPLATE = "/X?op=ill_get_doc&doc_number=$DOC_ID&library=$LIBRARY"
OAI_DOC_URL_TEMPLATE = "/X?op=find_doc&doc_num=$DOC_ID&base=$BASE"
RECORD_URL_TEMPLATE = "/X?op=present&set_number=$SET_NUM&set_entry=$RECORD_NUM"
MAX_RECORDS = 30
VALID_ALEPH_FIELDS = [
"wrd",
"wtl",
"wau",
"wkw",
"txt",
"wpb",
"wpp",
"wyr",
"ssn",
"sbn",
"isn",
"ob",
"wpf",
"wpv",
"wln",
"wlo",
"wtp",
"sg",
"bar",
"cnb",
"icz",
"sys",
"wpk",
]
"""
- ``wrd`` - Všechny údaje [`All fields`]
- ``wtl`` - Název [`Title/name of the book`]
- ``wau`` - Autor (osoba, korporace) [`Author (person, corporation)`]
- ``wkw`` - Předmět (klíčová slova) [`Subject (keywords)`]
- ``txt`` - Slova z obsahu (table of cont.) [`Words from table of content`]
- ``wpb`` - Nakladatel [`Publisher`]
- ``wpp`` - Místo vydání [`Place of publication`]
- ``wyr`` - Rok vydání [`Year of publication`]
- ``ssn`` - ISSN
- ``sbn`` - ISBN / ISMN
- ``isn`` - ISBN / ISMN / ISSN
- ``ob`` - Obsazení (hudební díla) [`Cast (musical works)`]
- ``wpf`` - Periodicita [`Periodicity`]
- ``wpv`` - Kód země vydání [`Country code`]
- ``wln`` - Kód jazyka dokumentu [`Language code`]
- ``wlo`` - Kód jazyka originálu [`Lanugage code of original`]
- ``wtp`` - Druh dokumentu [`Type of document`]
- ``sg`` - Signatura [`Signature`]
- ``bar`` - Čárový kód [`Barcode`]
- ``cnb`` - Číslo národní bibl. [`Number of national bibl.`]
- ``icz`` - Identifikační číslo [`Identification number`]
- ``sys`` - Systémové číslo [`System number`]
- ``wpk``
"""
dhtmlparser.NONPAIR_TAGS = [] # used for parsing XML - see documentation
# Functions & objects =========================================================
class AlephException(Exception):
    """
    Base class of all exceptions raised by this module.

    Exception tree::

        - AlephException
          |- InvalidAlephBaseException
          |- InvalidAlephFieldException
          |- LibraryNotFoundException
          `- DocumentNotFoundException
    """

    def __init__(self, message):
        super(AlephException, self).__init__(message)
class InvalidAlephBaseException(AlephException):
def __init__(self, message):
super(InvalidAlephBaseException, self).__init__(message)
class InvalidAlephFieldException(AlephException):
def __init__(self, message):
super(InvalidAlephFieldException, self).__init__(message)
class LibraryNotFoundException(AlephException):
def __init__(self, message):
super(LibraryNotFoundException, self).__init__(message)
class DocumentNotFoundException(AlephException):
def __init__(self, message):
super(DocumentNotFoundException, self).__init__(message)
class DocumentID(namedtuple("DocumentID", ["id", "library", "base"])):
"""
This structure is used to store `"pointer"` to document in aleph.
Attributes:
id (int): ID of document.
library (str): This can be different for each document, depend on your
system.
base (str): Default "``nkc``", but really depends on what bases you
have defined in your Aleph server.
"""
pass
def getListOfBases():
"""
This function is here mainly for purposes of unittest
Returns:
list of str: Valid bases as they are used as URL parameters in links at
Aleph main page.
"""
downer = Downloader()
data = downer.download(ALEPH_URL + "/F/?func=file&file_name=base-list")
dom = dhtmlparser.parseString(data.lower())
# from default aleph page filter links containing local_base in their href
base_links = filter(
lambda x: "href" in x.params and "local_base" in x.params["href"],
dom.find("a")
)
# split links by & - we will need only XXX from link.tld/..&local_base=XXX
base_links = map(
lambda x: x.params["href"].replace("?", "&", 1).split("&"),
base_links
)
# filter only sections containing bases
bases = map(
lambda link: filter(lambda base: "local_base=" in base, link)[0],
base_links
)
# filter bases from base sections
bases = map(lambda x: x.split("=")[1].strip(), bases)
return list(set(bases)) # list(set()) is same as unique()
def _tryConvertToInt(s):
"""
Try convert value from `s` to int.
Returns:
int(s): If the value was successfully converted, or `s` when conversion
failed.
"""
try:
return int(s)
except ValueError:
return s
def _alephResultToDict(dom):
"""
Convert part of non-nested XML to :py:class:`dict`.
Args:
dom (HTMLElement tree): pre-parsed XML (see dhtmlparser).
Returns:
dict: with python data
"""
result = {}
for i in dom.childs:
if not i.isOpeningTag():
continue
keyword = i.getTagName().strip()
value = _tryConvertToInt(i.getContent().strip())
# if there are multiple tags with same keyword, add values into
# array, instead of rewriting existing value at given keyword
if keyword in result: # if it is already there ..
if isinstance(result[keyword], list): # and it is list ..
result[keyword].append(value) # add it to list
else: # or make it array
result[keyword] = [result[keyword], value]
else: # if it is not in result, add it
result[keyword] = value
return result
def searchInAleph(base, phrase, considerSimilar, field):
"""
Send request to the aleph search engine.
Request itself is pretty useless, but it can be later used as parameter
for :func:`getDocumentIDs`, which can fetch records from Aleph.
Args:
base (str): which database you want to use
phrase (str): what do you want to search
considerSimilar (bool): fuzzy search, which is not working at all, so
don't use it
field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`)
Returns:
dictionary: consisting from following fields:
| error (optional): present if there was some form of error
| no_entries (int): number of entries that can be fetch from aleph
| no_records (int): no idea what is this, but it is always >= than
`no_entries`
| set_number (int): important - something like ID of your request
| session-id (str): used to count users for licensing purposes
Example:
Returned dict::
{
'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB',
'set_number': 36520,
'no_records': 1,
'no_entries': 1
}
Raises:
AlephException: if Aleph doesn't return any information
InvalidAlephFieldException: if specified field is not valid
"""
downer = Downloader()
if field.lower() not in VALID_ALEPH_FIELDS:
raise InvalidAlephFieldException("Unknown field '" + field + "'!")
param_url = Template(SEARCH_URL_TEMPLATE).substitute(
PHRASE=quote_plus(phrase), # urlencode phrase
BASE=base,
FIELD=field,
SIMILAR="Y" if considerSimilar else "N"
)
result = downer.download(ALEPH_URL + param_url)
dom = dhtmlparser.parseString(result)
find = dom.find("find") # find <find> element :)
if len(find) <= 0:
raise AlephException("Aleph didn't returned any information.")
find = find[0]
# convert aleph result into dictionary
result = _alephResultToDict(find)
# add informations about base into result
result["base"] = base
if "error" not in result:
return result
# handle errors
if result["error"] == "empty set":
result["no_entries"] = 0 # empty set have 0 entries
return result
else:
raise AlephException(result["error"])
def downloadRecords(search_result, from_doc=1):
"""
Download `MAX_RECORDS` documents from `search_result` starting from
`from_doc`.
Attr:
search_result (dict): returned from :func:`searchInAleph`.
from_doc (int, default 1): Start from document number `from_doc`.
Returns:
list: List of XML strings with documents in MARC OAI.
"""
downer = Downloader()
if "set_number" not in search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# download all no_records
records = []
for cnt in range(search_result["no_records"]):
doc_number = from_doc + cnt
if cnt >= MAX_RECORDS or doc_number > search_result["no_records"]:
break
set_data = downer.download(
ALEPH_URL + Template(RECORD_URL_TEMPLATE).substitute(
SET_NUM=set_number,
RECORD_NUM=doc_number,
)
)
records.append(set_data)
return records
def getDocumentIDs(aleph_search_result, number_of_docs=-1):
"""
Get IDs, which can be used as parameters for other functions.
Args:
aleph_search_result (dict): returned from :func:`searchInAleph`
number_of_docs (int, optional): how many :class:`DocumentID` from set
given by `aleph_search_result` should be returned.
Default -1 for all of them.
Returns:
list: :class:`DocumentID` named tuples to given `aleph_search_result`.
Raises:
AlephException: If Aleph returns unknown format of data.
Note:
Returned :class:`DocumentID` can be used as parameters to
:func:`downloadMARCXML`.
"""
downer = Downloader()
if "set_number" not in aleph_search_result:
return []
# set numbers should be probably aligned to some length
set_number = str(aleph_search_result["set_number"])
if len(set_number) < 6:
set_number = (6 - len(set_number)) * "0" + set_number
# limit number of fetched documents, if -1, download all
if number_of_docs <= 0:
number_of_docs = aleph_search_result["no_entries"]
# download data about given set
set_data = downer.download(
ALEPH_URL + Template(SET_URL_TEMPLATE).substitute(
SET_NUMBER=set_number,
NUMBER_OF_DOCS=number_of_docs,
)
)
# parse data
dom = dhtmlparser.parseString(set_data)
set_data = dom.find("ill-get-set")
# there should be at least one <ill-get-set> field
if len(set_data) <= 0:
raise AlephException("Aleph didn't returned set data.")
ids = []
for library in set_data:
documents = _alephResultToDict(library)
if "error" in documents:
raise AlephException("getDocumentIDs: " + documents["error"])
# convert all document records to DocumentID named tuple and extend
# them to 'ids' array
if isinstance(documents["doc-number"], list):
ids.extend(
map(
lambda x: DocumentID(
x,
documents["set-library"],
aleph_search_result["base"]
),
set(documents["doc-number"])
)
)
else:
ids.append(
DocumentID(
documents["doc-number"],
documents["set-library"],
aleph_search_result["base"]
)
)
return ids
def downloadMARCXML(doc_id, library, base="nkc"):
    """
    Download MARC XML document with given `doc_id` from given `library`.

    Args:
        doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
        library (str): "``NKC01``" in our case, but don't worry,
            :func:`getDocumentIDs` adds library specification into
            :class:`DocumentID` named tuple.
        base (str, optional): NOTE(review): accepted but never used by this
            function - kept only for API symmetry with
            :func:`downloadMARCOAI`; confirm before relying on it.

    Returns:
        str: MARC XML unicode string.

    Raises:
        LibraryNotFoundException: when Aleph answers with a <login> error
            (typically a bad library code).
        DocumentNotFoundException: when the <ill-get-doc> answer carries an
            error element.
    """
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            LIBRARY=library
        )
    )
    dom = dhtmlparser.parseString(data)
    # check if there are any errors
    # bad library error
    error = dom.find("login")
    if error:
        error_msg = error[0].find("error")
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' " +
                "(probably bad library: '" + library + "')!\nMessage: " +
                "\n".join(map(lambda x: x.getContent(), error_msg))
            )
    # another error - document not found
    error = dom.find("ill-get-doc")
    if error:
        error_msg = error[0].find("error")
        if error_msg:
            raise DocumentNotFoundException(
                "\n".join(map(lambda x: x.getContent(), error_msg))
            )
    return data  # MARCxml of document with given doc_id
# High level API ==============================================================
def getISBNsXML(isbn, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `isbn` in `base`.
Args:
isbn (str): ISBN of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
isbn,
False,
"sbn"
)
)
def getISSNsXML(issn, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `issn` in `base`.
Args:
issn (str): ISSN of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
issn,
False,
"ssn"
)
)
def getAuthorsBooksXML(author, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `author` in `base`.
Args:
author (str): Name of the `author` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
author,
False,
"wau"
)
)
def getPublishersBooksXML(publisher, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `publisher` in `base`.
Args:
publisher (str): Name of the `publisher` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
publisher,
False,
"wpb"
)
)
def getBooksTitleXML(title, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `title` in `base`.
Args:
title (str): `title` of the books you want to get.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
title,
False,
"wtl"
)
)
def getICZBooksXML(icz, base=ALEPH_DEFAULT_BASE):
"""
Download full XML record for given `icz` (identification number) in `base`.
Args:
icz (str): Identification number used to search Aleph.
base (str): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: List of strings with full **OAI** XML representation of the \
record.
"""
return downloadRecords(
searchInAleph(
base,
icz,
False,
"icz"
)
)
# ID getters ==================================================================
def getISBNsIDs(isbn, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given `isbn`.
Args:
isbn (str): ISBN string.
base (str, optional): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, isbn, False, "sbn"))
def getAuthorsBooksIDs(author, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given `author`.
Args:
author (str): Authors name/lastname in UTF-8.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, author, False, "wau"))
def getPublishersBooksIDs(publisher, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`publisher`.
Args:
publisher (str): Name of publisher which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, publisher, False, "wpb"))
def getBooksTitleIDs(title, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`title`.
Args:
title (str): Title (name) of the book which will be used to search in
Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, title, False, "wtl"))
def getICZBooksIDs(icz, base=ALEPH_DEFAULT_BASE):
"""
Get list of :class:`DocumentID` objects of documents with given
`icz` (identification number).
Args:
icz (str): Identification number used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
list: of :class:`DocumentID` objects
"""
return getDocumentIDs(searchInAleph(base, icz, False, "icz"))
# Counters ====================================================================
def getISBNCount(isbn, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `isbn`.
Args:
isbn (str): ISBN string.
base (str, optional): Base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, isbn, False, "sbn")["no_entries"]
def getAuthorsBooksCount(author, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `author`.
Args:
author (str): Authors name/lastname in UTF-8.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, author, False, "wau")["no_entries"]
def getPublishersBooksCount(publisher, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `publisher`.
Args:
publisher (str): Name of publisher which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, publisher, False, "wpb")["no_entries"]
def getBooksTitleCount(title, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `title`.
Args:
title (str): Title (name) of book which will be used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, title, False, "wtl")["no_entries"]
def getICZBooksCount(icz, base=ALEPH_DEFAULT_BASE):
"""
Get number of records in Aleph which match given `title`.
Args:
icz (str): Identification number used to search Aleph.
base (str, optional): base on which will be search performed. Default
:attr:`aleph.settings.ALEPH_DEFAULT_BASE`.
Returns:
int: Number of matching documents in Aleph.
"""
return searchInAleph(base, icz, False, "icz")["no_entries"]
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/doc_number.py | getDocNumber | python | def getDocNumber(xml):
dom = dhtmlparser.parseString(xml)
doc_number_tag = dom.find("doc_number")
if not doc_number_tag:
return "-1"
return doc_number_tag[0].getContent().strip() | Parse <doc_number> tag from `xml`.
Args:
xml (str): XML string returned from :func:`aleph.aleph.downloadRecords`
Returns:
str: Doc ID as string or "-1" if not found. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/doc_number.py#L11-L28 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import dhtmlparser
# Functions & objects =========================================================
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/epublication.py | EPublication.from_xml | python | def from_xml(xml):
parsed = xml
if not isinstance(xml, MARCXMLRecord):
parsed = MARCXMLRecord(str(xml))
# check whether the document was deleted
if "DEL" in parsed.datafields:
raise DocumentNotFoundException("Document was deleted.")
# convert Persons objects to amqp's Authors namedtuple
authors = map(
lambda a: Author(
(a.name + " " + a.second_name).strip(),
a.surname,
a.title
),
parsed.get_authors()
)
# i know, that this is not PEP8, but you dont want to see it without
# proper formating (it looks bad, really bad)
return EPublication(
ISBN = parsed.get_ISBNs(),
invalid_ISBN = parsed.get_invalid_ISBNs(),
id_number = parsed.controlfields.get("001", None),
nazev = parsed.get_name(),
podnazev = parsed.get_subname(),
vazba = _first_or_blank_string(parsed.get_binding()),
cena = parsed.get_price(),
castDil = parsed.get_part(),
nazevCasti = parsed.get_part_name(),
nakladatelVydavatel = parsed.get_publisher(),
datumVydani = parsed.get_pub_date(),
poradiVydani = parsed.get_pub_order(),
zpracovatelZaznamu = _first_or_blank_string(parsed["040a"]),
format = parsed.get_format(),
url = parsed.get_urls(),
mistoVydani = parsed.get_pub_place(),
ISBNSouboruPublikaci= [],
autori = authors,
originaly = parsed.get_originals(),
internal_url = parsed.get_internal_urls(),
anotace = None, # TODO: read the annotation
) | Convert :class:`.MARCXMLRecord` object to :class:`.EPublication`
namedtuple.
Args:
xml (str/MARCXMLRecord): MARC XML which will be converted to
EPublication. In case of str, ``<record>`` tag is required.
Returns:
structure: :class:`.EPublication` namedtuple with data about \
publication. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/epublication.py#L90-L145 | [
"def _first_or_blank_string(items):\n \"\"\"\n Return first `item` from `items` or blank string.\n\n Args:\n items (list/tuple): Indexable object.\n\n Returns:\n str: Content of first item, or blank string.\n \"\"\"\n if not items:\n return \"\"\n\n return items[0]\n"
] | class EPublication(namedtuple("EPublication", ["ISBN",
"invalid_ISBN",
"id_number",
'nazev',
'podnazev',
'vazba',
'cena',
'castDil',
'nazevCasti',
'nakladatelVydavatel',
'datumVydani',
'poradiVydani',
'zpracovatelZaznamu',
'format',
'url',
'mistoVydani',
'ISBNSouboruPublikaci',
'autori',
'originaly',
'internal_url',
'anotace'])):
"""
This structure is returned as result of users :class:`.SearchRequest`.
In case of :class:`Search <.SearchRequest>`/:class:`Count <.CountRequest>`
requests, this structure is filled with data from MARC XML record.
Attributes:
url (str): Url specified by publisher (THIS IS NOT INTERNAL URL!).
ISBN (list): List of ISBNs for the book.
cena (str): Price of the book.
vazba (str): Bidding of the book.
nazev (str): Name of the book.
format (str): Format of the book - see :class:`FormatEnum`.
autori (list): List of :class:`Author` objects.
castDil (str): Which part of the series of books is this.
anotace (str): Anotation. Max lenght: 500 chars..
podnazev (str): Subname of the book.
id_number (str): Identification number in aleph - starts.
originaly (list): List of (str) ISBN's of original books in case of
translations.
nazevCasti (str): Name of part of the series.
datumVydani (str): Date of publication.
mistoVydani (str): City/country origin of the publication.
internal_url (str): Link to edeposit/kramerius system.
poradiVydani (str): Order of publication.
invalid_ISBN (list): List of INVALID ISBNs for this book.
zpracovatelZaznamu (str): Processor/manufacturer of record.
with nkc - ``nkc20150003133``.
nakladatelVydavatel (str): Publisher's name.
ISBNSouboruPublikaci (list): List of strings with ISBN of the book
series.
"""
@staticmethod
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | _sendPostDict | python | def _sendPostDict(post_dict):
downer = Downloader()
downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER
data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict)
rheaders = downer.response_headers
error_msg = rheaders.get("aleph-info", "").lower().strip()
if "aleph-info" in rheaders and error_msg.startswith("error"):
raise ExportRejectedException(
"Export request was rejected by import webform: %s" %
rheaders["aleph-info"]
)
return data | Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.
Args:
post_dict (dict): dictionary from :class:`PostData.get_POST_data()`
Returns:
str: Reponse from webform. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L321-L343 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module is used to put data to Aleph. It is based on custom made webform,
which is currently used to report new books by publishers.
Most important function from this module is :func:`exportEPublication`,
which will do everything, that is needed to do, to export
:class:`.EPublication` structure to the Aleph.
Warning:
This whole module is highly dependent on processes, which are defined as
import processes at the Czech National Library.
Warning:
If you want to use export ability in your library, you should rewrite this
and take care, that you are sending data somewhere, where someone will
process them. Otherwise, you can fill your library's database with crap.
Note:
Source code of the webform is not available at this moment (it was created
by third party), but it is possible, that it will be in future. This will
highly depend on number of people, which will use this project.
"""
# Imports =====================================================================
import isbn_validator
from httpkie import Downloader
import settings
from datastructures import Author
from datastructures import FormatEnum
from datastructures import EPublication
# Variables ===================================================================
ANNOTATION_PREFIX = "Nakladatelská anotace: "
# Functions & objects =========================================================
class ExportException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class InvalidISBNException(ExportException):
def __init__(self, message):
super(InvalidISBNException, self).__init__(message)
class ExportRejectedException(ExportException):
def __init__(self, message):
super(ExportRejectedException, self).__init__(message)
class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
def _removeSpecialCharacters(epub):
"""
Remove most of the unnecessary interpunction from epublication, which can
break unimark if not used properly.
"""
special_chars = "/:,- "
epub_dict = epub._asdict()
for key in epub_dict.keys():
if isinstance(epub_dict[key], basestring):
epub_dict[key] = epub_dict[key].strip(special_chars)
elif type(epub_dict[key]) in [tuple, list]:
out = []
for item in epub_dict[key]:
if not isinstance(item, Author):
out.append(item)
continue
new_item = item._asdict()
for key in new_item.keys():
new_item[key] = new_item[key].strip(special_chars)
out.append(Author(**new_item))
epub_dict[key] = out
return EPublication(**epub_dict)
def exportEPublication(epub):
"""
Send `epub` :class:`.EPublication` object to Aleph, where it will be
processed by librarians.
Args:
epub (EPublication): structure for export
Warning:
The export function is expecting some of the EPublication properties to
be filled with non-blank data.
Specifically:
- :attr:`.EPublication.ISBN`
- :attr:`.EPublication.nazev`
- :attr:`.EPublication.mistoVydani`
- :attr:`.EPublication.datumVydani`
- :attr:`.EPublication.poradiVydani`
- :attr:`.EPublication.zpracovatelZaznamu`
- :attr:`.EPublication.vazba`
- :attr:`.EPublication.format`
- :attr:`.EPublication.format`
- :attr:`.EPublication.nakladatelVydavatel`
"""
epub = _removeSpecialCharacters(epub)
post_dict = PostData(epub).get_POST_data()
return _sendPostDict(post_dict)
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | _removeSpecialCharacters | python | def _removeSpecialCharacters(epub):
special_chars = "/:,- "
epub_dict = epub._asdict()
for key in epub_dict.keys():
if isinstance(epub_dict[key], basestring):
epub_dict[key] = epub_dict[key].strip(special_chars)
elif type(epub_dict[key]) in [tuple, list]:
out = []
for item in epub_dict[key]:
if not isinstance(item, Author):
out.append(item)
continue
new_item = item._asdict()
for key in new_item.keys():
new_item[key] = new_item[key].strip(special_chars)
out.append(Author(**new_item))
epub_dict[key] = out
return EPublication(**epub_dict) | Remove most of the unnecessary interpunction from epublication, which can
break unimark if not used properly. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L346-L375 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module is used to put data to Aleph. It is based on custom made webform,
which is currently used to report new books by publishers.
Most important function from this module is :func:`exportEPublication`,
which will do everything, that is needed to do, to export
:class:`.EPublication` structure to the Aleph.
Warning:
This whole module is highly dependent on processes, which are defined as
import processes at the Czech National Library.
Warning:
If you want to use export ability in your library, you should rewrite this
and take care, that you are sending data somewhere, where someone will
process them. Otherwise, you can fill your library's database with crap.
Note:
Source code of the webform is not available at this moment (it was created
by third party), but it is possible, that it will be in future. This will
highly depend on number of people, which will use this project.
"""
# Imports =====================================================================
import isbn_validator
from httpkie import Downloader
import settings
from datastructures import Author
from datastructures import FormatEnum
from datastructures import EPublication
# Variables ===================================================================
ANNOTATION_PREFIX = "Nakladatelská anotace: "
# Functions & objects =========================================================
class ExportException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class InvalidISBNException(ExportException):
def __init__(self, message):
super(InvalidISBNException, self).__init__(message)
class ExportRejectedException(ExportException):
def __init__(self, message):
super(ExportRejectedException, self).__init__(message)
class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
def _sendPostDict(post_dict):
"""
Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.
Args:
post_dict (dict): dictionary from :class:`PostData.get_POST_data()`
Returns:
str: Reponse from webform.
"""
downer = Downloader()
downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER
data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict)
rheaders = downer.response_headers
error_msg = rheaders.get("aleph-info", "").lower().strip()
if "aleph-info" in rheaders and error_msg.startswith("error"):
raise ExportRejectedException(
"Export request was rejected by import webform: %s" %
rheaders["aleph-info"]
)
return data
def exportEPublication(epub):
    """
    Send `epub` :class:`.EPublication` object to Aleph, where it will be
    processed by librarians.

    Args:
        epub (EPublication): structure for export

    Returns:
        str: response from the import webform.

    Warning:
        The export function is expecting some of the EPublication properties to
        be filled with non-blank data.

        Specifically:

        - :attr:`.EPublication.ISBN`
        - :attr:`.EPublication.nazev`
        - :attr:`.EPublication.mistoVydani`
        - :attr:`.EPublication.datumVydani`
        - :attr:`.EPublication.poradiVydani`
        - :attr:`.EPublication.zpracovatelZaznamu`
        - :attr:`.EPublication.vazba`
        - :attr:`.EPublication.format`
        - :attr:`.EPublication.nakladatelVydavatel`
    """
    # strip stray interpunction first, then serialize to webform POST fields
    cleaned = _removeSpecialCharacters(epub)
    return _sendPostDict(PostData(cleaned).get_POST_data())
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | exportEPublication | python | def exportEPublication(epub):
epub = _removeSpecialCharacters(epub)
post_dict = PostData(epub).get_POST_data()
return _sendPostDict(post_dict) | Send `epub` :class:`.EPublication` object to Aleph, where it will be
processed by librarians.
Args:
epub (EPublication): structure for export
Warning:
The export function is expecting some of the EPublication properties to
be filled with non-blank data.
Specifically:
- :attr:`.EPublication.ISBN`
- :attr:`.EPublication.nazev`
- :attr:`.EPublication.mistoVydani`
- :attr:`.EPublication.datumVydani`
- :attr:`.EPublication.poradiVydani`
- :attr:`.EPublication.zpracovatelZaznamu`
- :attr:`.EPublication.vazba`
- :attr:`.EPublication.format`
- :attr:`.EPublication.format`
- :attr:`.EPublication.nakladatelVydavatel` | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L378-L405 | [
"def _sendPostDict(post_dict):\n \"\"\"\n Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.\n\n Args:\n post_dict (dict): dictionary from :class:`PostData.get_POST_data()`\n\n Returns:\n str: Reponse from webform.\n \"\"\"\n downer = Downloader()\n downer.headers[\"Referer\"] = s... | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module is used to put data to Aleph. It is based on custom made webform,
which is currently used to report new books by publishers.
Most important function from this module is :func:`exportEPublication`,
which will do everything, that is needed to do, to export
:class:`.EPublication` structure to the Aleph.
Warning:
This whole module is highly dependent on processes, which are defined as
import processes at the Czech National Library.
Warning:
If you want to use export ability in your library, you should rewrite this
and take care, that you are sending data somewhere, where someone will
process them. Otherwise, you can fill your library's database with crap.
Note:
Source code of the webform is not available at this moment (it was created
by third party), but it is possible, that it will be in future. This will
highly depend on number of people, which will use this project.
"""
# Imports =====================================================================
import isbn_validator
from httpkie import Downloader
import settings
from datastructures import Author
from datastructures import FormatEnum
from datastructures import EPublication
# Variables ===================================================================
ANNOTATION_PREFIX = "Nakladatelská anotace: "
# Functions & objects =========================================================
class ExportException(Exception):
    """Base class for every error raised by this export module."""
    def __init__(self, message):
        super(ExportException, self).__init__(message)
class InvalidISBNException(ExportException):
    """Raised when an ISBN fails checksum validation."""
    def __init__(self, message):
        super(InvalidISBNException, self).__init__(message)
class ExportRejectedException(ExportException):
    """Raised when the Aleph import webform rejects the export request."""
    def __init__(self, message):
        super(ExportRejectedException, self).__init__(message)
class PostData(object):
    """
    This class is used to transform data from
    :class:`.EPublication` to dictionary, which is sent as POST request to
    Aleph third-party webform_.

    .. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn

    Note:
        Class is used instead of simple function, because there is 29 POST
        parameters with internal dependencies, which need to be processed and
        validated before they can be passed to webform.

    Args:
        epub (EPublication): structure, which will be converted (see
                             :class:`.EPublication` for details).

    Attr:
        _POST (dict): dictionary with parsed data
        mapping (dict): dictionary with some of mapping, which are applied to
                :attr:`._POST` dict in post processing

    Warning:
        Don't manipulate :attr:`._POST` property directly, if you didn't really
        know the internal structure and how the :attr:`.mapping` is applied.
    """
    def __init__(self, epub):
        # field names below are the raw form-field identifiers of the NKP
        # import webform; the trailing comments describe their meaning
        self._POST = {
            "sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
            "P0100LDR__": "-----nam-a22------a-4500",
            "P0200FMT__": "BK",
            "P0300BAS__a": "30",  # base; e-books use "49"
            "P0501010__a": "",  # ISBN (uppercase)
            "P0502010__b": "online",  # binding/form
            "P0504010__d": "",  # price
            # "P1201901__b": "",  # EAN
            "P0601010__a": "",  # ISBN of the publication set
            "P0602010__b": "",  # same thing (display variant)
            "P07012001_a": "",  # title
            "P07022001_b": "",  # filled in based on binding/form
            "P07032001_e": "",  # subtitle
            "P07042001_h": "",  # part (volume)
            "P07052001_i": "",  # part title
            "P1301ZAK__b": "",  # author
            "P1302ZAK__c": "",  # second author
            "P1303ZAK__c": "",  # third author
            # "P10012252_a": "",  # series
            # "P10022252_v": "",  # volume number
            "P110185640u": "",  # URL
            "P0503010__x": "",  # format (e-publications only)
            "P0901210__a": "",  # place of publication
            "P0902210__c": "",  # publisher
            "P0903210__d": "",  # month and year of publication
            "P1401PJM__a": "",  # published in co-edition with
            "P0801205__a": "",  # order of issue
            "P1501IST1_a": "ow",  # record processor (hidden field)
            "P1502IST1_b": "",  # record processor (visible field)
            "P1601ISB__a": "",  # ISBN2 - validated (hidden)
            "P1801URL__u": "",  # internal URL
            # "REPEAT": "Y",  # pre-fill the record
            "P1001330__a": "",  # annotation
        }
        # per-media-type values consumed by ._apply_mapping():
        # [LDR, FMT, base, document-type label, processor code]
        self.mapping = {
            "mapa": [
                "-----nem-a22------a-4500",
                "MP",
                "30",
                "kartografický dokument",
                "ow"
            ],
            "CD-ROM": [
                "-----nam-a22------a-4500",
                "BK",
                "30",
                "elektronický zdroj",
                "ow"
            ],
            "online": [
                "-----nam-a22------a-4500",
                "BK",
                "49",
                "elektronický zdroj",
                "ox",
            ],
            # "else": [
            #     "-----nam-a22------a-4500",
            #     "BK",
            #     "30",
            #     "",
            #     "ow"
            # ],
            "else": [
                "-----nam-a22------a-4500",
                "BK",
                "49",
                "elektronický zdroj",
                "ox",
            ]
        }
        self.mapping["DVD"] = self.mapping["CD-ROM"]

        self._import_epublication(epub)

    def _import_epublication(self, epub):
        """
        Fill internal property ._POST dictionary with data from EPublication.
        """
        # mrs. Svobodová requires that annotation exported by us have this
        # prefix
        prefixed_annotation = ANNOTATION_PREFIX + epub.anotace

        self._POST["P0501010__a"] = epub.ISBN
        self._POST["P07012001_a"] = epub.nazev
        self._POST["P07032001_e"] = epub.podnazev
        self._POST["P0502010__b"] = epub.vazba
        self._POST["P0504010__d"] = epub.cena
        self._POST["P07042001_h"] = epub.castDil
        self._POST["P07052001_i"] = epub.nazevCasti
        self._POST["P0902210__c"] = epub.nakladatelVydavatel
        self._POST["P0903210__d"] = epub.datumVydani
        self._POST["P0801205__a"] = epub.poradiVydani
        self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
        self._POST["P0503010__x"] = epub.format
        self._POST["P110185640u"] = epub.url or ""
        self._POST["P0901210__a"] = epub.mistoVydani
        self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
        self._POST["P1801URL__u"] = epub.internal_url
        self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""

        # the webform has only three author fields; surplus authors are
        # collapsed into the third one
        # NOTE(review): item assignment / attribute rebinding assumes
        # `epub.autori` is mutable and `epub` allows attribute assignment
        # (i.e. not a plain namedtuple) -- confirm against the caller
        if len(epub.autori) > 3:
            epub.autori[2] = ", ".join(epub.autori[2:])
            epub.autori = epub.autori[:3]

        # check whether the autors have required type (string)
        for author in epub.autori:
            error_msg = "Bad type of author (%s) (str is required)."
            assert isinstance(author, basestring), (error_msg % type(author))

        authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
        self._POST.update(dict(zip(authors_fields, epub.autori)))

    def _apply_mapping(self, mapping):
        """
        Map some case specific data to the fields in internal dictionary.

        Args:
            mapping (list): one of the value lists from :attr:`mapping` --
                [LDR, FMT, base, document-type label, processor code].
        """
        self._POST["P0100LDR__"] = mapping[0]
        self._POST["P0200FMT__"] = mapping[1]
        self._POST["P0300BAS__a"] = mapping[2]
        self._POST["P07022001_b"] = mapping[3]
        self._POST["P1501IST1_a"] = mapping[4]

    def _validate_isbn(self, raw_isbn, accept_blank=False):
        """
        Normalize and checksum-validate an ISBN.

        Args:
            raw_isbn (str/list/tuple): ISBN, possibly wrapped in a sequence.
            accept_blank (bool, default False): allow a blank value.

        Returns:
            str: uppercased ISBN, or blank string when allowed.

        Raises:
            InvalidISBNException: if the checksum is invalid.
        """
        # the ISBN sometimes arrives wrapped in a list/tuple - take the first
        if raw_isbn and type(raw_isbn) in [tuple, list]:
            raw_isbn = raw_isbn[0]

        # blank list -> blank str
        raw_isbn = raw_isbn or ""

        if not raw_isbn and accept_blank:
            return raw_isbn

        if not isbn_validator.is_valid_isbn(raw_isbn):
            raise InvalidISBNException(
                raw_isbn + " has invalid ISBN checksum!"
            )

        return raw_isbn.upper()

    def _postprocess(self):
        """
        Move data between internal fields, validate them and make sure, that
        everything is as it should be.
        """
        # validate series ISBN
        self._POST["P0601010__a"] = self._validate_isbn(
            self._POST["P0601010__a"],
            accept_blank=True
        )

        # NOTE(review): __init__ pre-fills "P0602010__b", but this writes
        # "P0601010__b" (a key not in the initial dict) -- possible field-name
        # mismatch; confirm the actual webform field identifier
        if self._POST["P0601010__a"] != "":
            self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]

        # validate ISBN of the book
        self._POST["P0501010__a"] = self._validate_isbn(
            self._POST["P0501010__a"],
            accept_blank=False
        )
        self._POST["P1601ISB__a"] = self._POST["P0501010__a"]

    @staticmethod
    def _czech_isbn_check(isbn_field):
        # czech ISBN prefixes: 80 (ISBN-10) / 97880 (ISBN-13)
        isbn_field = isbn_field.replace("-", "").strip()

        return any([
            isbn_field.startswith("80"),
            isbn_field.startswith("97880"),
        ])

    def _check_required_fields(self):
        """
        Make sure, that internal dictionary contains all fields, which are
        required by the webform.
        """
        assert self._POST["P0501010__a"] != "", "ISBN is required!"

        # export script accepts only czech ISBNs
        for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
            check = PostData._czech_isbn_check(self._POST[isbn_field_name])
            assert check, "Only czech ISBN is accepted!"

        assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
        assert self._POST["P07012001_a"] != "", "Nazev is required!"
        assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
        assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
        assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"

        # record processor (hidden + visible variants)
        assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
        assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"

        # binding/form
        assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
        # assert self._POST["P110185640u"] != "", "URL is required!"

        # format (e-publications only)
        if self._POST["P0502010__b"] == FormatEnum.ONLINE:
            assert self._POST["P0503010__x"] != "", "Format is required!"

        assert self._POST["P0902210__c"] != "", "Nakladatel is required!"

        def to_unicode(inp):
            # decode byte strings so len() counts characters, not UTF-8 bytes
            try:
                return unicode(inp)
            except UnicodeDecodeError:
                return unicode(inp, "utf-8")

        # check length of the annotation field - convert string to unicode
        # first, to count characters, not byte combinations
        annotation_length = len(to_unicode(self._POST["P1001330__a"]))
        annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
        assert annotation_length <= 500, "Annotation is too long (> 500)."

    def get_POST_data(self):
        """
        Returns:
            dict: POST data, which can be sent to webform using \
                  :py:mod:`urllib` or similar library
        """
        self._postprocess()

        # some fields need to be remapped (depends on type of media)
        self._apply_mapping(
            self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
        )

        self._check_required_fields()

        return self._POST
def _sendPostDict(post_dict):
    """
    Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`.

    Args:
        post_dict (dict): dictionary from :class:`PostData.get_POST_data()`

    Returns:
        str: Reponse from webform.

    Raises:
        ExportRejectedException: if the webform reports an error in its
            ``aleph-info`` response header.
    """
    downloader = Downloader()
    downloader.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER

    response = downloader.download(settings.ALEPH_EXPORT_URL, post=post_dict)
    headers = downloader.response_headers

    # the webform signals rejection via the "aleph-info" header, whose value
    # then starts with "error"
    aleph_info = headers.get("aleph-info", "").lower().strip()
    if "aleph-info" in headers and aleph_info.startswith("error"):
        raise ExportRejectedException(
            "Export request was rejected by import webform: %s" %
            headers["aleph-info"]
        )

    return response
def _removeSpecialCharacters(epub):
    """
    Remove most of the unnecessary interpunction from epublication, which can
    break unimark if not used properly.

    Args:
        epub (EPublication): structure whose string fields will be cleaned.

    Returns:
        EPublication: new structure with leading/trailing ``"/:,- "``
        stripped from every string field and from every :class:`Author`
        member of list/tuple fields.
    """
    special_chars = "/:,- "

    epub_dict = epub._asdict()
    for key in epub_dict.keys():
        if isinstance(epub_dict[key], basestring):
            epub_dict[key] = epub_dict[key].strip(special_chars)
        elif type(epub_dict[key]) in [tuple, list]:
            out = []
            for item in epub_dict[key]:
                if not isinstance(item, Author):
                    out.append(item)
                    continue

                new_item = item._asdict()
                # Bugfix: the inner loop used to reuse the name `key`,
                # clobbering the outer loop variable -- the subsequent
                # `epub_dict[key] = out` then stored the cleaned list under
                # a wrong (Author attribute) key whenever the list contained
                # an Author.
                for author_key in new_item.keys():
                    new_item[author_key] = new_item[author_key].strip(
                        special_chars
                    )
                out.append(Author(**new_item))

            epub_dict[key] = out

    return EPublication(**epub_dict)
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData._import_epublication | python | def _import_epublication(self, epub):
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori))) | Fill internal property ._POST dictionary with data from EPublication. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L162-L198 | null | class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData._apply_mapping | python | def _apply_mapping(self, mapping):
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4] | Map some case specific data to the fields in internal dictionary. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L200-L208 | null | class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData._postprocess | python | def _postprocess(self):
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"] | Move data between internal fields, validate them and make sure, that
everything is as it should be. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L227-L245 | [
"def _validate_isbn(self, raw_isbn, accept_blank=False):\n if raw_isbn and type(raw_isbn) in [tuple, list]:\n raw_isbn = raw_isbn[0]\n\n # blank list -> blank str\n raw_isbn = raw_isbn or \"\"\n\n if not raw_isbn and accept_blank:\n return raw_isbn\n\n if not isbn_validator.is_valid_isb... | class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData._check_required_fields | python | def _check_required_fields(self):
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)." | Make sure, that internal dictionary contains all fields, which are
required by the webform. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L256-L301 | [
"def _czech_isbn_check(isbn_field):\n isbn_field = isbn_field.replace(\"-\", \"\").strip()\n\n return any([\n isbn_field.startswith(\"80\"),\n isbn_field.startswith(\"97880\"),\n ])\n",
"def to_unicode(inp):\n try:\n return unicode(inp)\n except UnicodeDecodeError:\n ret... | class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/export.py | PostData.get_POST_data | python | def get_POST_data(self):
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST | Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/export.py#L303-L318 | [
"def _apply_mapping(self, mapping):\n \"\"\"\n Map some case specific data to the fields in internal dictionary.\n \"\"\"\n self._POST[\"P0100LDR__\"] = mapping[0]\n self._POST[\"P0200FMT__\"] = mapping[1]\n self._POST[\"P0300BAS__a\"] = mapping[2]\n self._POST[\"P07022001_b\"] = mapping[3]\n ... | class PostData(object):
"""
This class is used to transform data from
:class:`.EPublication` to dictionary, which is sent as POST request to
Aleph third-party webform_.
.. _webform: http://aleph.nkp.cz/F/?func=file&file_name=service-isbn
Note:
Class is used instead of simple function, because there is 29 POST
parameters with internal dependencies, which need to be processed and
validated before they can be passed to webform.
Args:
epub (EPublication): structure, which will be converted (see
:class:`.EPublication` for details).
Attr:
_POST (dict): dictionary with parsed data
mapping (dict): dictionary with some of mapping, which are applied to
:attr:`._POST` dict in post processing
Warning:
Don't manipulate :attr:`._POST` property directly, if you didn't really
know the internal structure and how the :attr:`.mapping` is applied.
"""
def __init__(self, epub):
self._POST = {
"sid": settings.EDEPOSIT_EXPORT_SIGNATURE,
"P0100LDR__": "-----nam-a22------a-4500",
"P0200FMT__": "BK",
"P0300BAS__a": "30", # Báze, pro eknihy 49
"P0501010__a": "", # ISBN (uppercase)
"P0502010__b": "online", # vazba/forma
"P0504010__d": "", # cena
# "P1201901__b": "", # ean
"P0601010__a": "", # ISBN souboru
"P0602010__b": "", # same thing
"P07012001_a": "", # název
"P07022001_b": "", # vyplneno na zaklade vazby/formy
"P07032001_e": "", # podnázev
"P07042001_h": "", # Část (svazek, díl)
"P07052001_i": "", # Název části
"P1301ZAK__b": "", # autor
"P1302ZAK__c": "", # autor2
"P1303ZAK__c": "", # autor3
# "P10012252_a": "", # edice
# "P10022252_v": "", # Číslo svazku
"P110185640u": "", # URL
"P0503010__x": "", # Formát (poze pro epublikace)
"P0901210__a": "", # Místo vydání
"P0902210__c": "", # Nakladatel
"P0903210__d": "", # Měsíc a rok vydání
"P1401PJM__a": "", # Vydáno v koedici s
"P0801205__a": "", # Pořadí vydání
"P1501IST1_a": "ow", # Zpracovatel záznamu (hidden)
"P1502IST1_b": "", # Zpracovatel záznamu (viditelna)
"P1601ISB__a": "", # ISBN2 - validated (hidden)
"P1801URL__u": "", # internal URL
# "REPEAT": "Y", # predvyplnit zaznam
"P1001330__a": "", # anotace
}
self.mapping = {
"mapa": [
"-----nem-a22------a-4500",
"MP",
"30",
"kartografický dokument",
"ow"
],
"CD-ROM": [
"-----nam-a22------a-4500",
"BK",
"30",
"elektronický zdroj",
"ow"
],
"online": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
],
# "else": [
# "-----nam-a22------a-4500",
# "BK",
# "30",
# "",
# "ow"
# ],
"else": [
"-----nam-a22------a-4500",
"BK",
"49",
"elektronický zdroj",
"ox",
]
}
self.mapping["DVD"] = self.mapping["CD-ROM"]
self._import_epublication(epub)
def _import_epublication(self, epub):
"""
Fill internal property ._POST dictionary with data from EPublication.
"""
# mrs. Svobodová requires that annotation exported by us have this
# prefix
prefixed_annotation = ANNOTATION_PREFIX + epub.anotace
self._POST["P0501010__a"] = epub.ISBN
self._POST["P07012001_a"] = epub.nazev
self._POST["P07032001_e"] = epub.podnazev
self._POST["P0502010__b"] = epub.vazba
self._POST["P0504010__d"] = epub.cena
self._POST["P07042001_h"] = epub.castDil
self._POST["P07052001_i"] = epub.nazevCasti
self._POST["P0902210__c"] = epub.nakladatelVydavatel
self._POST["P0903210__d"] = epub.datumVydani
self._POST["P0801205__a"] = epub.poradiVydani
self._POST["P1502IST1_b"] = epub.zpracovatelZaznamu
self._POST["P0503010__x"] = epub.format
self._POST["P110185640u"] = epub.url or ""
self._POST["P0901210__a"] = epub.mistoVydani
self._POST["P0601010__a"] = epub.ISBNSouboruPublikaci
self._POST["P1801URL__u"] = epub.internal_url
self._POST["P1001330__a"] = prefixed_annotation if epub.anotace else ""
if len(epub.autori) > 3:
epub.autori[2] = ", ".join(epub.autori[2:])
epub.autori = epub.autori[:3]
# check whether the autors have required type (string)
for author in epub.autori:
error_msg = "Bad type of author (%s) (str is required)."
assert isinstance(author, basestring), (error_msg % type(author))
authors_fields = ("P1301ZAK__b", "P1302ZAK__c", "P1303ZAK__c")
self._POST.update(dict(zip(authors_fields, epub.autori)))
def _apply_mapping(self, mapping):
"""
Map some case specific data to the fields in internal dictionary.
"""
self._POST["P0100LDR__"] = mapping[0]
self._POST["P0200FMT__"] = mapping[1]
self._POST["P0300BAS__a"] = mapping[2]
self._POST["P07022001_b"] = mapping[3]
self._POST["P1501IST1_a"] = mapping[4]
def _validate_isbn(self, raw_isbn, accept_blank=False):
if raw_isbn and type(raw_isbn) in [tuple, list]:
raw_isbn = raw_isbn[0]
# blank list -> blank str
raw_isbn = raw_isbn or ""
if not raw_isbn and accept_blank:
return raw_isbn
if not isbn_validator.is_valid_isbn(raw_isbn):
raise InvalidISBNException(
raw_isbn + " has invalid ISBN checksum!"
)
return raw_isbn.upper()
def _postprocess(self):
"""
Move data between internal fields, validate them and make sure, that
everything is as it should be.
"""
# validate series ISBN
self._POST["P0601010__a"] = self._validate_isbn(
self._POST["P0601010__a"],
accept_blank=True
)
if self._POST["P0601010__a"] != "":
self._POST["P0601010__b"] = "soubor : " + self._POST["P0601010__a"]
# validate ISBN of the book
self._POST["P0501010__a"] = self._validate_isbn(
self._POST["P0501010__a"],
accept_blank=False
)
self._POST["P1601ISB__a"] = self._POST["P0501010__a"]
@staticmethod
def _czech_isbn_check(isbn_field):
isbn_field = isbn_field.replace("-", "").strip()
return any([
isbn_field.startswith("80"),
isbn_field.startswith("97880"),
])
def _check_required_fields(self):
"""
Make sure, that internal dictionary contains all fields, which are
required by the webform.
"""
assert self._POST["P0501010__a"] != "", "ISBN is required!"
# export script accepts only czech ISBNs
for isbn_field_name in ("P0501010__a", "P1601ISB__a"):
check = PostData._czech_isbn_check(self._POST[isbn_field_name])
assert check, "Only czech ISBN is accepted!"
assert self._POST["P1601ISB__a"] != "", "Hidden ISBN field is required!"
assert self._POST["P07012001_a"] != "", "Nazev is required!"
assert self._POST["P0901210__a"] != "", "Místo vydání is required!"
assert self._POST["P0903210__d"] != "", "Datum vydání is required!"
assert self._POST["P0801205__a"] != "", "Pořadí vydání is required!"
# Zpracovatel záznamu
assert self._POST["P1501IST1_a"] != "", "Zpracovatel is required! (H)"
assert self._POST["P1502IST1_b"] != "", "Zpracovatel is required! (V)"
# vazba/forma
assert self._POST["P0502010__b"] != "", "Vazba/forma is required!"
# assert self._POST["P110185640u"] != "", "URL is required!"
# Formát (pouze pro epublikace)
if self._POST["P0502010__b"] == FormatEnum.ONLINE:
assert self._POST["P0503010__x"] != "", "Format is required!"
assert self._POST["P0902210__c"] != "", "Nakladatel is required!"
def to_unicode(inp):
try:
return unicode(inp)
except UnicodeDecodeError:
return unicode(inp, "utf-8")
# check lenght of annotation field - try to convert string to unicode,
# to count characters, not combination bytes
annotation_length = len(to_unicode(self._POST["P1001330__a"]))
annotation_length -= len(to_unicode(ANNOTATION_PREFIX))
assert annotation_length <= 500, "Annotation is too long (> 500)."
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/eperiodical.py | EPeriodical.from_xml | python | def from_xml(xml):
parsed = xml
if not isinstance(xml, MARCXMLRecord):
parsed = MARCXMLRecord(str(xml))
# check whether the document was deleted
if "DEL" in parsed.datafields:
raise DocumentNotFoundException("Document was deleted.")
# i know, that this is not PEP8, but you dont want to see it without
# proper formating (it looks bad, really bad)
return EPeriodical(
url=parsed.get_urls(),
ISSN=parsed.get_ISSNs(),
nazev=parsed.get_name(),
anotace=None, # TODO: read the annotation
podnazev=parsed.get_subname(),
id_number=parsed.controlfields.get("001", None),
datumVydani=parsed.get_pub_date(),
mistoVydani=parsed.get_pub_place(),
internal_url=parsed.get_internal_urls(),
invalid_ISSNs=parsed.get_invalid_ISSNs(),
nakladatelVydavatel=parsed.get_publisher(),
ISSNSouboruPublikaci=parsed.get_linking_ISSNs(),
) | Convert :class:`.MARCXMLRecord` object to :class:`.EPublication`
namedtuple.
Args:
xml (str/MARCXMLRecord): MARC XML which will be converted to
EPublication. In case of str, ``<record>`` tag is required.
Returns:
structure: :class:`.EPublication` namedtuple with data about \
publication. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/eperiodical.py#L53-L89 | null | class EPeriodical(namedtuple("EPeriodical", ["url",
"ISSN",
"invalid_ISSNs",
"nazev",
"anotace",
"podnazev",
"id_number",
"mistoVydani",
"datumVydani",
"internal_url",
"nakladatelVydavatel",
"ISSNSouboruPublikaci"])):
"""
This structure is returned as result of users :class:`.SearchRequest`.
In case of :class:`Search <.SearchRequest>`/:class:`Count <.CountRequest>`
requests, this structure is filled with data from MARC XML record.
Attributes:
url (str): Url specified by publisher (THIS IS NOT INTERNAL URL!).
ISSN (list): List of ISSNs for the periodical.
invalid_ISSNs (list): List of INVALID ISSNs for this book.
nazev (str): Name of the periodical.
anotace (str): Anotation. Max lenght: 500 chars.
podnazev (str): Subname of the book.
id_number (str): Identification number in aleph.
mistoVydani (str): City/country origin of the publication.
datumVydani (str): Date of publication.
internal_url (str): Link to edeposit/kramerius system.
nakladatelVydavatel (str): Publisher's name.
ISSNSouboruPublikaci (list): ISSN links to other things.
"""
@staticmethod
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/__init__.py | _iiOfAny | python | def _iiOfAny(instance, classes):
if type(classes) not in [list, tuple]:
classes = [classes]
return any(map(lambda x: type(instance).__name__ == x.__name__, classes)) | Returns true, if `instance` is instance of any (_iiOfAny) of the `classes`.
This function doesn't use :func:`isinstance` check, it just compares the
class names.
This can be generally dangerous, but it is really useful when you are
comparing class serialized in one module and deserialized in another.
This causes, that module paths in class internals are different and
:func:`isinstance` and :func:`type` comparsions thus fails.
Use this function instead, if you want to check what type is your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just the class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` is instance of any of the `classes`. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/__init__.py#L359-L387 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
Query workflow
==============
AQMP is handled by `edeposit.amqp <http://edeposit-amqp.readthedocs.org>`_
module, this package provides just datastructures and
:func:`reactToAMQPMessage` function, which is used in daemon to translate
highlevel requests to lowlevel queries to Aleph's webapi.
AMQP query
----------
To query Aleph thru AMQP, run :class:`.edeposit_amqp_alephdaemon` (from
:mod:`edeposit.amqp` package) and create one of the Queries -
:class:`ISBNQuery` for example and put it into :class:`.SearchRequest` wrapper
and send the message to the Aleph's exchange::
request = SearchRequest(
ISBNQuery("80-251-0225-4")
)
amqp.send( # you can use pika library to send data to AMQP queue
message = serialize(request),
properties = "..",
exchange = "ALEPH'S_EXCHANGE"
)
and you will get back AMQP message with :class:`.SearchResult`.
Note:
You don't have to import all structures from :class:`datastructures`, they
should be automatically imported and made global in ``__init__.py``.
Count requests
--------------
If you want to just get count of how many items is there in Aleph, just wrap
the :class:`.ISBNQuery` with :class:`.CountRequest` class::
isbnq = ISBNQuery("80-251-0225-4")
request = CountRequest(isbnq)
# rest is same..
and you will get back :class:`.CountResult`.
Note:
You should always use :class:`.CountRequest` instead of just calling
:py:func:`len()` to :attr:`.SearchResult.records` - it doesn't put that
much load to Aleph. Also Aleph is restricted to 150 requests per second.
Direct queries
--------------
As I said, this module provides only direct access to Aleph, AMQP communication
is handled in :mod:`edeposit.amqp`.
If you want to access module directly, you can use :func:`reactToAMQPMessage`
wrapper, or query :mod:`aleph <aleph.aleph>` submodule directly.
:func:`reactToAMQPMessage` is preferred, because in that case, you don't have
to deal with Aleph lowlevel API, which can be little bit annoying.
Diagrams
--------
Here is ASCII flow diagram for you::
ISBNQuery ----. ,--> CountResult
AuthorQuery ----| | `- num_of_records
PublisherQuery ----| |
TitleQuery ----| ExportRequest |--> SearchResult
GenericQuery ----| ISBNValidationRequest | `- AlephRecord
DocumentQuery ----| | |
ICZQuery ----| | |--> ISBNValidationResult
| | | - ISBN
V | |
Count/Search/ExportRequest | |--> ExportResult
| | |
| | |
| | |
V | |
serialize()<----------' deserialize()
| ^
V Client |
AMQPMessage ------> AMQP -------> AMQPMessage
| ^
V |
| ^
V |
| ^
V |
AMQPMessage <------ AMQP <-------- AMQPMessage
| Service ^
| |
V |
reactToAMQPMessage() ............... magic_happens()
and here is (pseudo) UML:
.. image:: /_static/reactoamqpmessage.png
Neat, isn't it?
API
---
"""
# Imports =====================================================================
from collections import namedtuple
import isbn_validator
import aleph
import export
import settings
import doc_number
from datastructures import *
# Queries =====================================================================
class _QueryTemplate(object):
"""
This class is here to just save some effort by using common ancestor with
same .getSearchResult() and .getCountResult() definition.
You probably shouldn't use it.
"""
def getSearchResult(self):
records = []
for xml in self._getXML():
records.append(
AlephRecord(
base=self.base,
library=settings.DEFAULT_LIBRARY,
docNumber=doc_number.getDocNumber(xml),
xml=xml
)
)
return SearchResult(records)
def getCountResult(self):
return CountResult(self._getCount())
class GenericQuery(namedtuple("GenericQuery", ['base',
'phrase',
'considerSimilar',
'field']),
_QueryTemplate):
"""
Used for generic queries to Aleph.
Args:
base (str): Which base in Aleph will be queried. This depends on
settings of your server. See :func:`aleph.getListOfBases`
for details.
phrase (str): What are you looking for.
considerSimilar (bool): Don't use this, it usually doesn't work.
field (str): Which field you want to use for search.
See :attr:`aleph.VALID_ALEPH_FIELDS` for list of valid
bases.
For details of base/phrase/.. parameters, see :func:`aleph.searchInAleph`.
All parameters also serves as properties.
This is used mainly if you want to search by your own parameters and don't
want to use prepared wrappers (:class:`AuthorQuery`/:class:`ISBNQuery`/..).
"""
def _getXML(self):
return aleph.downloadRecords(
aleph.searchInAleph(
self.base,
self.phrase,
self.considerSimilar,
self.field,
)
)
def _getCount(self):
return aleph.searchInAleph(
self.base,
self.phrase,
self.considerSimilar,
self.field
)["no_entries"]
class DocumentQuery(namedtuple("DocumentQuery", ["doc_id", "library"])):
"""
Query Aleph when you know the Document ID.
Args:
doc_id (str): ID number as string.
library (str, default settings.DEFAULT_LIBRARY): Library.
"""
def __new__(cls, doc_id, library=settings.DEFAULT_LIBRARY):
return super(DocumentQuery, cls).__new__(
cls,
doc_id,
library
)
def getSearchResult(self):
"""
Returns:
object: :class:`SearchResult` document with given `doc_id`.
Raises:
aleph.DocumentNotFoundException: When document is not found.
"""
xml = aleph.downloadMARCOAI(self.doc_id, self.library)
return SearchResult([
AlephRecord(
None,
self.library,
self.doc_id,
xml
)
])
def getCountResult(self):
"""
Returns:
int: 0/1 whether the document is found or not.
"""
try:
self.getSearchResult()
except aleph.DocumentNotFoundException:
return 0
return 1
class ISBNQuery(namedtuple("ISBNQuery", ["ISBN", "base"]), _QueryTemplate):
"""
Used to query Aleph to get books by ISBN.
Args:
ISBN (str): ISBN 10/13.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
Note:
ISBN is not unique, so you can get back lot of books with same ISBN.
Some books also have two or more ISBNs.
"""
def __new__(self, ISBN, base=settings.ALEPH_DEFAULT_BASE):
return super(ISBNQuery, self).__new__(self, ISBN, base)
def _getXML(self):
return aleph.getISBNsXML(self.ISBN, base=self.base)
def _getCount(self):
return aleph.getISBNCount(self.ISBN, base=self.base)
class AuthorQuery(namedtuple("AuthorQuery", ["author", "base"]),
_QueryTemplate):
"""
Used to query Aleph to get books by Author.
Args:
author (str): Author's name/lastname in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, author, base=settings.ALEPH_DEFAULT_BASE):
return super(AuthorQuery, self).__new__(self, author, base)
def _getXML(self):
return aleph.getAuthorsBooksXML(self.author, base=self.base)
def _getCount(self):
return aleph.getAuthorsBooksCount(self.author, base=self.base)
class PublisherQuery(namedtuple("PublisherQuery", ["publisher", "base"]),
_QueryTemplate):
"""
Used to query Aleph to get books by Publisher.
Args:
publisher (str): Publisher's name in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, publisher, base=settings.ALEPH_DEFAULT_BASE):
return super(PublisherQuery, self).__new__(self, publisher, base)
def _getXML(self):
return aleph.getPublishersBooksXML(self.publisher, base=self.base)
def _getCount(self):
return aleph.getPublishersBooksCount(self.publisher, base=self.base)
class TitleQuery(_QueryTemplate,
namedtuple("TitleQuery", ["title", "base"])):
"""
Used to query Aleph to get books by book's title/name.
Args:
title (str): Book's title in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, title, base=settings.ALEPH_DEFAULT_BASE):
return super(TitleQuery, self).__new__(self, title, base)
def _getXML(self):
return aleph.getBooksTitleXML(self.title, base=self.base)
def _getCount(self):
return aleph.getBooksTitleCount(self.title, base=self.base)
class ICZQuery(_QueryTemplate, namedtuple("ICZQuery", ["icz", "base"])):
"""
Used to query Aleph to get books by record's identification number `icz`.
Args:
icz (str): Identification number (``nkc20150003029`` for example).
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, icz, base=settings.ALEPH_DEFAULT_BASE):
return super(ICZQuery, self).__new__(self, icz, base)
def _getXML(self):
return aleph.getICZBooksXML(self.icz, base=self.base)
def _getCount(self):
return aleph.getICZBooksCount(self.icz, base=self.base)
# Variables ===================================================================
QUERY_TYPES = [
ISBNQuery,
AuthorQuery,
PublisherQuery,
TitleQuery,
GenericQuery,
DocumentQuery,
ICZQuery,
]
REQUEST_TYPES = [
SearchRequest,
CountRequest,
ExportRequest,
ISBNValidationRequest,
]
# Interface for an external world =============================================
# Functions ===================================================================
def reactToAMQPMessage(req, send_back):
"""
React to given (AMQP) message.
This function is used by :mod:`edeposit.amqp.alephdaemon`. It works as
highlevel wrapper for whole module.
Example:
>>> import aleph
>>> request = aleph.SearchRequest(
... aleph.ISBNQuery("80-251-0225-4")
... )
>>> request
SearchRequest(query=ISBNQuery(ISBN='80-251-0225-4', base='nkc'))
>>> response = aleph.reactToAMQPMessage(request, None)
>>> response # formated by hand for purposes of example
SearchResult(
records=[
AlephRecord(
base='nkc',
library='NKC01',
docNumber=1492461,
xml='HERE IS WHOLE MARC OAI RECORD',
epublication=EPublication(
ISBN=['80-251-0225-4'],
nazev='Umění programování v UNIXu /',
podnazev='',
vazba='(bro\xc5\xbe.) :',
cena='K\xc4\x8d 590,00',
castDil='',
nazevCasti='',
nakladatelVydavatel='Computer Press,',
datumVydani='2004',
poradiVydani='1. vyd.',
zpracovatelZaznamu='BOA001',
format='23 cm',
url='',
mistoVydani='Brno :',
ISBNSouboruPublikaci=[],
autori=[
Author(
firstName='Eric S.',
lastName='Raymond',
title=''
)
],
originaly=[
'Art of UNIX programming'
],
internal_url=''
)
)
]
)
Args:
req (Request class): Any of the Request class from
:class:`aleph.datastructures.requests`.
send_back (fn reference): Reference to function for responding. This is
useful for progress monitoring for example. Function takes
one parameter, which may be response structure/namedtuple, or
string or whatever would be normally returned.
Returns:
Result class: Result of search in Aleph. \
See :mod:`aleph.datastructures.results` submodule.
Raises:
ValueError: If bad type of `req` structure is given.
"""
if not _iiOfAny(req, REQUEST_TYPES):
raise ValueError(
"Unknown type of request: '" + str(type(req)) + "'!"
)
if _iiOfAny(req, CountRequest) and _iiOfAny(req.query, QUERY_TYPES):
return req.query.getCountResult()
elif _iiOfAny(req, SearchRequest) and _iiOfAny(req.query, QUERY_TYPES):
return req.query.getSearchResult()
elif _iiOfAny(req, ISBNValidationRequest):
ISBN = req.ISBN
if _iiOfAny(ISBN, ISBNQuery):
ISBN = ISBN.ISBN
return ISBNValidationResult(isbn_validator.is_valid_isbn(ISBN))
elif _iiOfAny(req, ExportRequest):
export.exportEPublication(req.epublication)
return ExportResult(req.epublication.ISBN)
raise ValueError(
"Unknown type of request: '" + str(type(req)) + "' or query: '" +
str(type(req.query)) + "'!"
)
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/__init__.py | reactToAMQPMessage | python | def reactToAMQPMessage(req, send_back):
if not _iiOfAny(req, REQUEST_TYPES):
raise ValueError(
"Unknown type of request: '" + str(type(req)) + "'!"
)
if _iiOfAny(req, CountRequest) and _iiOfAny(req.query, QUERY_TYPES):
return req.query.getCountResult()
elif _iiOfAny(req, SearchRequest) and _iiOfAny(req.query, QUERY_TYPES):
return req.query.getSearchResult()
elif _iiOfAny(req, ISBNValidationRequest):
ISBN = req.ISBN
if _iiOfAny(ISBN, ISBNQuery):
ISBN = ISBN.ISBN
return ISBNValidationResult(isbn_validator.is_valid_isbn(ISBN))
elif _iiOfAny(req, ExportRequest):
export.exportEPublication(req.epublication)
return ExportResult(req.epublication.ISBN)
raise ValueError(
"Unknown type of request: '" + str(type(req)) + "' or query: '" +
str(type(req.query)) + "'!"
) | React to given (AMQP) message.
This function is used by :mod:`edeposit.amqp.alephdaemon`. It works as
highlevel wrapper for whole module.
Example:
>>> import aleph
>>> request = aleph.SearchRequest(
... aleph.ISBNQuery("80-251-0225-4")
... )
>>> request
SearchRequest(query=ISBNQuery(ISBN='80-251-0225-4', base='nkc'))
>>> response = aleph.reactToAMQPMessage(request, None)
>>> response # formated by hand for purposes of example
SearchResult(
records=[
AlephRecord(
base='nkc',
library='NKC01',
docNumber=1492461,
xml='HERE IS WHOLE MARC OAI RECORD',
epublication=EPublication(
ISBN=['80-251-0225-4'],
nazev='Umění programování v UNIXu /',
podnazev='',
vazba='(bro\xc5\xbe.) :',
cena='K\xc4\x8d 590,00',
castDil='',
nazevCasti='',
nakladatelVydavatel='Computer Press,',
datumVydani='2004',
poradiVydani='1. vyd.',
zpracovatelZaznamu='BOA001',
format='23 cm',
url='',
mistoVydani='Brno :',
ISBNSouboruPublikaci=[],
autori=[
Author(
firstName='Eric S.',
lastName='Raymond',
title=''
)
],
originaly=[
'Art of UNIX programming'
],
internal_url=''
)
)
]
)
Args:
req (Request class): Any of the Request class from
:class:`aleph.datastructures.requests`.
send_back (fn reference): Reference to function for responding. This is
useful for progress monitoring for example. Function takes
one parameter, which may be response structure/namedtuple, or
string or whatever would be normally returned.
Returns:
Result class: Result of search in Aleph. \
See :mod:`aleph.datastructures.results` submodule.
Raises:
ValueError: If bad type of `req` structure is given. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/__init__.py#L391-L490 | [
"def _iiOfAny(instance, classes):\n \"\"\"\n Returns true, if `instance` is instance of any (_iiOfAny) of the `classes`.\n\n This function doesn't use :func:`isinstance` check, it just compares the\n class names.\n\n This can be generally dangerous, but it is really useful when you are\n comparing... | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
Query workflow
==============
AQMP is handled by `edeposit.amqp <http://edeposit-amqp.readthedocs.org>`_
module, this package provides just datastructures and
:func:`reactToAMQPMessage` function, which is used in daemon to translate
highlevel requests to lowlevel queries to Aleph's webapi.
AMQP query
----------
To query Aleph thru AMQP, run :class:`.edeposit_amqp_alephdaemon` (from
:mod:`edeposit.amqp` package) and create one of the Queries -
:class:`ISBNQuery` for example and put it into :class:`.SearchRequest` wrapper
and send the message to the Aleph's exchange::
request = SearchRequest(
ISBNQuery("80-251-0225-4")
)
amqp.send( # you can use pika library to send data to AMQP queue
message = serialize(request),
properties = "..",
exchange = "ALEPH'S_EXCHANGE"
)
and you will get back AMQP message with :class:`.SearchResult`.
Note:
You don't have to import all structures from :class:`datastructures`, they
should be automatically imported and made global in ``__init__.py``.
Count requests
--------------
If you want to just get count of how many items is there in Aleph, just wrap
the :class:`.ISBNQuery` with :class:`.CountRequest` class::
isbnq = ISBNQuery("80-251-0225-4")
request = CountRequest(isbnq)
# rest is same..
and you will get back :class:`.CountResult`.
Note:
You should always use :class:`.CountRequest` instead of just calling
:py:func:`len()` to :attr:`.SearchResult.records` - it doesn't put that
much load to Aleph. Also Aleph is restricted to 150 requests per second.
Direct queries
--------------
As I said, this module provides only direct access to Aleph, AMQP communication
is handled in :mod:`edeposit.amqp`.
If you want to access module directly, you can use :func:`reactToAMQPMessage`
wrapper, or query :mod:`aleph <aleph.aleph>` submodule directly.
:func:`reactToAMQPMessage` is preferred, because in that case, you don't have
to deal with Aleph lowlevel API, which can be little bit annoying.
Diagrams
--------
Here is ASCII flow diagram for you::
ISBNQuery ----. ,--> CountResult
AuthorQuery ----| | `- num_of_records
PublisherQuery ----| |
TitleQuery ----| ExportRequest |--> SearchResult
GenericQuery ----| ISBNValidationRequest | `- AlephRecord
DocumentQuery ----| | |
ICZQuery ----| | |--> ISBNValidationResult
| | | - ISBN
V | |
Count/Search/ExportRequest | |--> ExportResult
| | |
| | |
| | |
V | |
serialize()<----------' deserialize()
| ^
V Client |
AMQPMessage ------> AMQP -------> AMQPMessage
| ^
V |
| ^
V |
| ^
V |
AMQPMessage <------ AMQP <-------- AMQPMessage
| Service ^
| |
V |
reactToAMQPMessage() ............... magic_happens()
and here is (pseudo) UML:
.. image:: /_static/reactoamqpmessage.png
Neat, isn't it?
API
---
"""
# Imports =====================================================================
from collections import namedtuple
import isbn_validator
import aleph
import export
import settings
import doc_number
from datastructures import *
# Queries =====================================================================
class _QueryTemplate(object):
"""
This class is here to just save some effort by using common ancestor with
same .getSearchResult() and .getCountResult() definition.
You probably shouldn't use it.
"""
def getSearchResult(self):
records = []
for xml in self._getXML():
records.append(
AlephRecord(
base=self.base,
library=settings.DEFAULT_LIBRARY,
docNumber=doc_number.getDocNumber(xml),
xml=xml
)
)
return SearchResult(records)
def getCountResult(self):
return CountResult(self._getCount())
class GenericQuery(namedtuple("GenericQuery", ['base',
'phrase',
'considerSimilar',
'field']),
_QueryTemplate):
"""
Used for generic queries to Aleph.
Args:
base (str): Which base in Aleph will be queried. This depends on
settings of your server. See :func:`aleph.getListOfBases`
for details.
phrase (str): What are you looking for.
considerSimilar (bool): Don't use this, it usually doesn't work.
field (str): Which field you want to use for search.
See :attr:`aleph.VALID_ALEPH_FIELDS` for list of valid
bases.
For details of base/phrase/.. parameters, see :func:`aleph.searchInAleph`.
All parameters also serves as properties.
This is used mainly if you want to search by your own parameters and don't
want to use prepared wrappers (:class:`AuthorQuery`/:class:`ISBNQuery`/..).
"""
def _getXML(self):
return aleph.downloadRecords(
aleph.searchInAleph(
self.base,
self.phrase,
self.considerSimilar,
self.field,
)
)
def _getCount(self):
return aleph.searchInAleph(
self.base,
self.phrase,
self.considerSimilar,
self.field
)["no_entries"]
class DocumentQuery(namedtuple("DocumentQuery", ["doc_id", "library"])):
"""
Query Aleph when you know the Document ID.
Args:
doc_id (str): ID number as string.
library (str, default settings.DEFAULT_LIBRARY): Library.
"""
def __new__(cls, doc_id, library=settings.DEFAULT_LIBRARY):
return super(DocumentQuery, cls).__new__(
cls,
doc_id,
library
)
def getSearchResult(self):
"""
Returns:
object: :class:`SearchResult` document with given `doc_id`.
Raises:
aleph.DocumentNotFoundException: When document is not found.
"""
xml = aleph.downloadMARCOAI(self.doc_id, self.library)
return SearchResult([
AlephRecord(
None,
self.library,
self.doc_id,
xml
)
])
def getCountResult(self):
"""
Returns:
int: 0/1 whether the document is found or not.
"""
try:
self.getSearchResult()
except aleph.DocumentNotFoundException:
return 0
return 1
class ISBNQuery(namedtuple("ISBNQuery", ["ISBN", "base"]), _QueryTemplate):
"""
Used to query Aleph to get books by ISBN.
Args:
ISBN (str): ISBN 10/13.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
Note:
ISBN is not unique, so you can get back lot of books with same ISBN.
Some books also have two or more ISBNs.
"""
def __new__(self, ISBN, base=settings.ALEPH_DEFAULT_BASE):
return super(ISBNQuery, self).__new__(self, ISBN, base)
def _getXML(self):
return aleph.getISBNsXML(self.ISBN, base=self.base)
def _getCount(self):
return aleph.getISBNCount(self.ISBN, base=self.base)
class AuthorQuery(namedtuple("AuthorQuery", ["author", "base"]),
_QueryTemplate):
"""
Used to query Aleph to get books by Author.
Args:
author (str): Author's name/lastname in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, author, base=settings.ALEPH_DEFAULT_BASE):
return super(AuthorQuery, self).__new__(self, author, base)
def _getXML(self):
return aleph.getAuthorsBooksXML(self.author, base=self.base)
def _getCount(self):
return aleph.getAuthorsBooksCount(self.author, base=self.base)
class PublisherQuery(namedtuple("PublisherQuery", ["publisher", "base"]),
_QueryTemplate):
"""
Used to query Aleph to get books by Publisher.
Args:
publisher (str): Publisher's name in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, publisher, base=settings.ALEPH_DEFAULT_BASE):
return super(PublisherQuery, self).__new__(self, publisher, base)
def _getXML(self):
return aleph.getPublishersBooksXML(self.publisher, base=self.base)
def _getCount(self):
return aleph.getPublishersBooksCount(self.publisher, base=self.base)
class TitleQuery(_QueryTemplate,
namedtuple("TitleQuery", ["title", "base"])):
"""
Used to query Aleph to get books by book's title/name.
Args:
title (str): Book's title in UTF-8.
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, title, base=settings.ALEPH_DEFAULT_BASE):
return super(TitleQuery, self).__new__(self, title, base)
def _getXML(self):
return aleph.getBooksTitleXML(self.title, base=self.base)
def _getCount(self):
return aleph.getBooksTitleCount(self.title, base=self.base)
class ICZQuery(_QueryTemplate, namedtuple("ICZQuery", ["icz", "base"])):
"""
Used to query Aleph to get books by record's identification number `icz`.
Args:
icz (str): Identification number (``nkc20150003029`` for example).
base (str, optional): If not set, :attr:`settings.ALEPH_DEFAULT_BASE`
is used.
"""
def __new__(self, icz, base=settings.ALEPH_DEFAULT_BASE):
return super(ICZQuery, self).__new__(self, icz, base)
def _getXML(self):
return aleph.getICZBooksXML(self.icz, base=self.base)
def _getCount(self):
return aleph.getICZBooksCount(self.icz, base=self.base)
# Variables ===================================================================
QUERY_TYPES = [
ISBNQuery,
AuthorQuery,
PublisherQuery,
TitleQuery,
GenericQuery,
DocumentQuery,
ICZQuery,
]
REQUEST_TYPES = [
SearchRequest,
CountRequest,
ExportRequest,
ISBNValidationRequest,
]
# Interface for an external world =============================================
def _iiOfAny(instance, classes):
"""
Returns true, if `instance` is instance of any (_iiOfAny) of the `classes`.
This function doesn't use :func:`isinstance` check, it just compares the
class names.
This can be generally dangerous, but it is really useful when you are
comparing class serialized in one module and deserialized in another.
This causes, that module paths in class internals are different and
:func:`isinstance` and :func:`type` comparsions thus fails.
Use this function instead, if you want to check what type is your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just the class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` is instance of any of the `classes`.
"""
if type(classes) not in [list, tuple]:
classes = [classes]
return any(map(lambda x: type(instance).__name__ == x.__name__, classes))
# Functions ===================================================================
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/__init__.py | DocumentQuery.getSearchResult | python | def getSearchResult(self):
xml = aleph.downloadMARCOAI(self.doc_id, self.library)
return SearchResult([
AlephRecord(
None,
self.library,
self.doc_id,
xml
)
]) | Returns:
object: :class:`SearchResult` document with given `doc_id`.
Raises:
aleph.DocumentNotFoundException: When document is not found. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/__init__.py#L205-L222 | [
"def downloadMARCOAI(doc_id, base):\n \"\"\"\n Download MARC OAI document with given `doc_id` from given (logical) `base`.\n\n Funny part is, that some documents can be obtained only with this function\n in their full text.\n\n Args:\n doc_id (str): You will get this from :func:`getDoc... | class DocumentQuery(namedtuple("DocumentQuery", ["doc_id", "library"])):
"""
Query Aleph when you know the Document ID.
Args:
doc_id (str): ID number as string.
library (str, default settings.DEFAULT_LIBRARY): Library.
"""
def __new__(cls, doc_id, library=settings.DEFAULT_LIBRARY):
return super(DocumentQuery, cls).__new__(
cls,
doc_id,
library
)
def getCountResult(self):
"""
Returns:
int: 0/1 whether the document is found or not.
"""
try:
self.getSearchResult()
except aleph.DocumentNotFoundException:
return 0
return 1
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/semanticinfo.py | _parse_summaryRecordSysNumber | python | def _parse_summaryRecordSysNumber(summaryRecordSysNumber):
def number_of_digits(token):
digits = filter(lambda x: x.isdigit(), token)
return len(digits)
tokens = map(
lambda x: remove_hairs(x, r" .,:;<>(){}[]\/"),
summaryRecordSysNumber.split()
)
# pick only tokens that contains 3 digits
contains_digits = filter(lambda x: number_of_digits(x) > 3, tokens)
if not contains_digits:
return ""
return contains_digits[0] | Try to parse vague, not likely machine-readable description and return
first token, which contains enough numbers in it. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/semanticinfo.py#L19-L39 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
Definition of structures, which are used to hold informations about
catalogization process.
"""
# Imports =====================================================================
from collections import namedtuple
from remove_hairs import remove_hairs
from marcxml_parser import MARCXMLRecord
# Functions ===================================================================
# Structures ==================================================================
class SemanticInfo(namedtuple("SemanticInfo", ["hasAcquisitionFields",
"acquisitionFields",
"ISBNAgencyFields",
"descriptiveCatFields",
"descriptiveCatReviewFields",
"subjectCatFields",
"subjectCatReviewFields",
"isClosed",
"isSummaryRecord",
"contentOfFMT",
"parsedSummaryRecordSysNumber",
"summaryRecordSysNumber"])):
"""
This structure is used to represent informations about export progress in
Aleph.
It contains informations about state of the record, so it can be tracked
from edeposit project.
See :func:`.toSemanticInfo` for details of parsing of those attributes.
Attributes:
hasAcquisitionFields (bool): Was the record aproved by acquisition?
acquisitionFields (list): Acquisition fields if it the record was
signed.
ISBNAgencyFields (list): Was the record approved by ISBN agency?
Contains list of signs if it the record was signed.
descriptiveCatFields (list): Did the record get thru name description
(jmenný popis). Contains list of signs if it the record was signed.
descriptiveCatReviewFields (list): Did the record get thru name
revision (jmenná revize). Contains list of signs if it the record
was signed.
subjectCatFields (list): Did the record get thru subject description
(věcný popis). Contains list of signs if it the record was signed.
subjectCatReviewFields (list): Did the record get thru subject revision
(věcná revize). Contains list of signs if the record was signed.
isClosed (bool): Was the record closed? This sometimes happen when bad
ISBN is given by creator of the record, but different is in the
book.
isSummaryRecord (bool): Is the content of FMT == "SE"?
contentOfFMT (str, default ""): Content of FMT subrecord.
parsedSummaryRecordSysNumber (str): Same as
:attr:`summaryRecordSysNumber` but without natural language
details.
summaryRecordSysNumber (str): Identificator of the new record if
`.isClosed` is True. Format of the string is not specified and can
be different for each record.
"""
@staticmethod
def from_xml(xml):
"""
Pick informations from :class:`.MARCXMLRecord` object and use it to
build :class:`.SemanticInfo` structure.
Args:
xml (str/MARCXMLRecord): MarcXML which will be converted to
SemanticInfo. In case of str, ``<record>`` tag is required.
Returns:
structure: :class:`.SemanticInfo`.
"""
hasAcquisitionFields = False
acquisitionFields = []
ISBNAgencyFields = []
descriptiveCatFields = []
descriptiveCatReviewFields = []
subjectCatFields = []
subjectCatReviewFields = []
isClosed = False
summaryRecordSysNumber = ""
parsedSummaryRecordSysNumber = ""
isSummaryRecord = False
contentOfFMT = ""
parsed = xml
if not isinstance(xml, MARCXMLRecord):
parsed = MARCXMLRecord(str(xml))
# handle FMT record
if "FMT" in parsed.controlfields:
contentOfFMT = parsed["FMT"]
if contentOfFMT == "SE":
isSummaryRecord = True
if "HLD" in parsed.datafields or "HLD" in parsed.controlfields:
hasAcquisitionFields = True
if "STZ" in parsed.datafields:
acquisitionFields.extend(parsed["STZa"])
acquisitionFields.extend(parsed["STZb"])
def sign_and_author(sign):
"""
Sign is stored in ISTa, author's name is in ISTb.
Sign is MarcSubrecord obj with pointers to other subrecords, so it
is possible to pick references to author's name from signs.
"""
return [sign.replace(" ", "")] + sign.other_subfields.get("b", [])
# look for catalogization fields
for orig_sign in parsed["ISTa"]:
sign = orig_sign.replace(" ", "") # remove spaces
if sign.startswith("jp2"):
descriptiveCatFields.extend(sign_and_author(orig_sign))
elif sign.startswith("jr2"):
descriptiveCatReviewFields.extend(sign_and_author(orig_sign))
elif sign.startswith("vp"):
subjectCatFields.extend(sign_and_author(orig_sign))
elif sign.startswith("vr"):
subjectCatReviewFields.extend(sign_and_author(orig_sign))
elif sign.startswith("ii2"):
ISBNAgencyFields.extend(sign_and_author(orig_sign))
# look whether the record was 'closed' by catalogizators
for status in parsed["BASa"]:
if status == "90":
isClosed = True
# if multiple PJM statuses are present, join them together
status = "\n".join([x for x in parsed["PJMa"]])
# detect link to 'new' record, if the old one was 'closed'
if status.strip():
summaryRecordSysNumber = status
parsedSummaryRecordSysNumber = _parse_summaryRecordSysNumber(
summaryRecordSysNumber
)
return SemanticInfo(
hasAcquisitionFields=hasAcquisitionFields,
acquisitionFields=acquisitionFields,
ISBNAgencyFields=ISBNAgencyFields,
descriptiveCatFields=descriptiveCatFields,
descriptiveCatReviewFields=descriptiveCatReviewFields,
subjectCatFields=subjectCatFields,
subjectCatReviewFields=subjectCatReviewFields,
isClosed=isClosed,
isSummaryRecord=isSummaryRecord,
contentOfFMT=contentOfFMT,
parsedSummaryRecordSysNumber=parsedSummaryRecordSysNumber,
summaryRecordSysNumber=summaryRecordSysNumber,
)
|
edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/semanticinfo.py | SemanticInfo.from_xml | python | def from_xml(xml):
hasAcquisitionFields = False
acquisitionFields = []
ISBNAgencyFields = []
descriptiveCatFields = []
descriptiveCatReviewFields = []
subjectCatFields = []
subjectCatReviewFields = []
isClosed = False
summaryRecordSysNumber = ""
parsedSummaryRecordSysNumber = ""
isSummaryRecord = False
contentOfFMT = ""
parsed = xml
if not isinstance(xml, MARCXMLRecord):
parsed = MARCXMLRecord(str(xml))
# handle FMT record
if "FMT" in parsed.controlfields:
contentOfFMT = parsed["FMT"]
if contentOfFMT == "SE":
isSummaryRecord = True
if "HLD" in parsed.datafields or "HLD" in parsed.controlfields:
hasAcquisitionFields = True
if "STZ" in parsed.datafields:
acquisitionFields.extend(parsed["STZa"])
acquisitionFields.extend(parsed["STZb"])
def sign_and_author(sign):
"""
Sign is stored in ISTa, author's name is in ISTb.
Sign is MarcSubrecord obj with pointers to other subrecords, so it
is possible to pick references to author's name from signs.
"""
return [sign.replace(" ", "")] + sign.other_subfields.get("b", [])
# look for catalogization fields
for orig_sign in parsed["ISTa"]:
sign = orig_sign.replace(" ", "") # remove spaces
if sign.startswith("jp2"):
descriptiveCatFields.extend(sign_and_author(orig_sign))
elif sign.startswith("jr2"):
descriptiveCatReviewFields.extend(sign_and_author(orig_sign))
elif sign.startswith("vp"):
subjectCatFields.extend(sign_and_author(orig_sign))
elif sign.startswith("vr"):
subjectCatReviewFields.extend(sign_and_author(orig_sign))
elif sign.startswith("ii2"):
ISBNAgencyFields.extend(sign_and_author(orig_sign))
# look whether the record was 'closed' by catalogizators
for status in parsed["BASa"]:
if status == "90":
isClosed = True
# if multiple PJM statuses are present, join them together
status = "\n".join([x for x in parsed["PJMa"]])
# detect link to 'new' record, if the old one was 'closed'
if status.strip():
summaryRecordSysNumber = status
parsedSummaryRecordSysNumber = _parse_summaryRecordSysNumber(
summaryRecordSysNumber
)
return SemanticInfo(
hasAcquisitionFields=hasAcquisitionFields,
acquisitionFields=acquisitionFields,
ISBNAgencyFields=ISBNAgencyFields,
descriptiveCatFields=descriptiveCatFields,
descriptiveCatReviewFields=descriptiveCatReviewFields,
subjectCatFields=subjectCatFields,
subjectCatReviewFields=subjectCatReviewFields,
isClosed=isClosed,
isSummaryRecord=isSummaryRecord,
contentOfFMT=contentOfFMT,
parsedSummaryRecordSysNumber=parsedSummaryRecordSysNumber,
summaryRecordSysNumber=summaryRecordSysNumber,
) | Pick informations from :class:`.MARCXMLRecord` object and use it to
build :class:`.SemanticInfo` structure.
Args:
xml (str/MARCXMLRecord): MarcXML which will be converted to
SemanticInfo. In case of str, ``<record>`` tag is required.
Returns:
structure: :class:`.SemanticInfo`. | train | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/semanticinfo.py#L93-L188 | [
"def _parse_summaryRecordSysNumber(summaryRecordSysNumber):\n \"\"\"\n Try to parse vague, not likely machine-readable description and return\n first token, which contains enough numbers in it.\n \"\"\"\n def number_of_digits(token):\n digits = filter(lambda x: x.isdigit(), token)\n ret... | class SemanticInfo(namedtuple("SemanticInfo", ["hasAcquisitionFields",
"acquisitionFields",
"ISBNAgencyFields",
"descriptiveCatFields",
"descriptiveCatReviewFields",
"subjectCatFields",
"subjectCatReviewFields",
"isClosed",
"isSummaryRecord",
"contentOfFMT",
"parsedSummaryRecordSysNumber",
"summaryRecordSysNumber"])):
"""
This structure is used to represent informations about export progress in
Aleph.
It contains informations about state of the record, so it can be tracked
from edeposit project.
See :func:`.toSemanticInfo` for details of parsing of those attributes.
Attributes:
hasAcquisitionFields (bool): Was the record aproved by acquisition?
acquisitionFields (list): Acquisition fields if it the record was
signed.
ISBNAgencyFields (list): Was the record approved by ISBN agency?
Contains list of signs if it the record was signed.
descriptiveCatFields (list): Did the record get thru name description
(jmenný popis). Contains list of signs if it the record was signed.
descriptiveCatReviewFields (list): Did the record get thru name
revision (jmenná revize). Contains list of signs if it the record
was signed.
subjectCatFields (list): Did the record get thru subject description
(věcný popis). Contains list of signs if it the record was signed.
subjectCatReviewFields (list): Did the record get thru subject revision
(věcná revize). Contains list of signs if the record was signed.
isClosed (bool): Was the record closed? This sometimes happen when bad
ISBN is given by creator of the record, but different is in the
book.
isSummaryRecord (bool): Is the content of FMT == "SE"?
contentOfFMT (str, default ""): Content of FMT subrecord.
parsedSummaryRecordSysNumber (str): Same as
:attr:`summaryRecordSysNumber` but without natural language
details.
summaryRecordSysNumber (str): Identificator of the new record if
`.isClosed` is True. Format of the string is not specified and can
be different for each record.
"""
@staticmethod
|
capnproto/pycapnp | buildutils/bundle.py | localpath | python | def localpath(*args):
plist = [ROOT] + list(args)
return os.path.abspath(pjoin(*plist)) | construct an absolute path from a list relative to the root pycapnp directory | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L53-L56 | null | """utilities for fetching build dependencies."""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# This bundling code is largely adapted from pyzmq-static's get.sh by
# Brandon Craig-Rhodes, which is itself BSD licensed.
#-----------------------------------------------------------------------------
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import os
import shutil
import stat
import sys
import tarfile
from glob import glob
from subprocess import Popen, PIPE
try:
# py2
from urllib2 import urlopen
except ImportError:
# py3
from urllib.request import urlopen
from .msg import fatal, debug, info, warn
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
bundled_version = (0,6,1)
libcapnp = "capnproto-c++-%i.%i.%i.tar.gz" % (bundled_version)
libcapnp_url = "https://capnproto.org/" + libcapnp
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def untgz(archive):
return archive.replace('.tar.gz', '')
def fetch_archive(savedir, url, fname, force=False):
"""download an archive to a specific location"""
dest = pjoin(savedir, fname)
if os.path.exists(dest) and not force:
info("already have %s" % fname)
return dest
info("fetching %s into %s" % (url, savedir))
if not os.path.exists(savedir):
os.makedirs(savedir)
req = urlopen(url)
with open(dest, 'wb') as f:
f.write(req.read())
return dest
#-----------------------------------------------------------------------------
# libcapnp
#-----------------------------------------------------------------------------
def fetch_libcapnp(savedir, url=None):
"""download and extract libcapnp"""
is_preconfigured = False
if url is None:
url = libcapnp_url
is_preconfigured = True
dest = pjoin(savedir, 'capnproto-c++')
if os.path.exists(dest):
info("already have %s" % dest)
return
fname = fetch_archive(savedir, url, libcapnp)
tf = tarfile.open(fname)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
if is_preconfigured:
shutil.move(with_version, dest)
else:
cpp_dir = os.path.join(with_version, 'c++')
conf = Popen(['autoreconf', '-i'], cwd=cpp_dir)
returncode = conf.wait()
if returncode != 0:
raise RuntimeError('Autoreconf failed. Make sure autotools are installed on your system.')
shutil.move(cpp_dir, dest)
def stage_platform_hpp(capnproot):
"""stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
"""
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
def copy_and_patch_libcapnp(capnp, libcapnp):
"""copy libcapnp into source dir, and patch it if necessary.
This command is necessary prior to running a bdist on Linux or OS X.
"""
if sys.platform.startswith('win'):
return
# copy libcapnp into capnp for bdist
local = localpath('capnp',libcapnp)
if not capnp and not os.path.exists(local):
fatal("Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually prior to running bdist.")
try:
# resolve real file through symlinks
lib = os.path.realpath(pjoin(capnp, 'lib', libcapnp))
print ("copying %s -> %s"%(lib, local))
shutil.copy(lib, local)
except Exception:
if not os.path.exists(local):
fatal("Could not copy libcapnp into capnp/, which is necessary for bdist. "
"Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually.")
if sys.platform == 'darwin':
# chmod u+w on the lib,
# which can be user-read-only for some reason
mode = os.stat(local).st_mode
os.chmod(local, mode | stat.S_IWUSR)
# patch install_name on darwin, instead of using rpath
cmd = ['install_name_tool', '-id', '@loader_path/../%s'%libcapnp, local]
try:
p = Popen(cmd, stdout=PIPE,stderr=PIPE)
except OSError:
fatal("install_name_tool not found, cannot patch libcapnp for bundling.")
out,err = p.communicate()
if p.returncode:
fatal("Could not patch bundled libcapnp install_name: %s"%err, p.returncode)
|
capnproto/pycapnp | buildutils/bundle.py | fetch_archive | python | def fetch_archive(savedir, url, fname, force=False):
dest = pjoin(savedir, fname)
if os.path.exists(dest) and not force:
info("already have %s" % fname)
return dest
info("fetching %s into %s" % (url, savedir))
if not os.path.exists(savedir):
os.makedirs(savedir)
req = urlopen(url)
with open(dest, 'wb') as f:
f.write(req.read())
return dest | download an archive to a specific location | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L58-L70 | [
"def info(msg):\n logger.info(msg)\n"
] | """utilities for fetching build dependencies."""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# This bundling code is largely adapted from pyzmq-static's get.sh by
# Brandon Craig-Rhodes, which is itself BSD licensed.
#-----------------------------------------------------------------------------
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import os
import shutil
import stat
import sys
import tarfile
from glob import glob
from subprocess import Popen, PIPE
try:
# py2
from urllib2 import urlopen
except ImportError:
# py3
from urllib.request import urlopen
from .msg import fatal, debug, info, warn
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
bundled_version = (0,6,1)
libcapnp = "capnproto-c++-%i.%i.%i.tar.gz" % (bundled_version)
libcapnp_url = "https://capnproto.org/" + libcapnp
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def untgz(archive):
return archive.replace('.tar.gz', '')
def localpath(*args):
"""construct an absolute path from a list relative to the root pycapnp directory"""
plist = [ROOT] + list(args)
return os.path.abspath(pjoin(*plist))
#-----------------------------------------------------------------------------
# libcapnp
#-----------------------------------------------------------------------------
def fetch_libcapnp(savedir, url=None):
"""download and extract libcapnp"""
is_preconfigured = False
if url is None:
url = libcapnp_url
is_preconfigured = True
dest = pjoin(savedir, 'capnproto-c++')
if os.path.exists(dest):
info("already have %s" % dest)
return
fname = fetch_archive(savedir, url, libcapnp)
tf = tarfile.open(fname)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
if is_preconfigured:
shutil.move(with_version, dest)
else:
cpp_dir = os.path.join(with_version, 'c++')
conf = Popen(['autoreconf', '-i'], cwd=cpp_dir)
returncode = conf.wait()
if returncode != 0:
raise RuntimeError('Autoreconf failed. Make sure autotools are installed on your system.')
shutil.move(cpp_dir, dest)
def stage_platform_hpp(capnproot):
"""stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
"""
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
def copy_and_patch_libcapnp(capnp, libcapnp):
"""copy libcapnp into source dir, and patch it if necessary.
This command is necessary prior to running a bdist on Linux or OS X.
"""
if sys.platform.startswith('win'):
return
# copy libcapnp into capnp for bdist
local = localpath('capnp',libcapnp)
if not capnp and not os.path.exists(local):
fatal("Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually prior to running bdist.")
try:
# resolve real file through symlinks
lib = os.path.realpath(pjoin(capnp, 'lib', libcapnp))
print ("copying %s -> %s"%(lib, local))
shutil.copy(lib, local)
except Exception:
if not os.path.exists(local):
fatal("Could not copy libcapnp into capnp/, which is necessary for bdist. "
"Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually.")
if sys.platform == 'darwin':
# chmod u+w on the lib,
# which can be user-read-only for some reason
mode = os.stat(local).st_mode
os.chmod(local, mode | stat.S_IWUSR)
# patch install_name on darwin, instead of using rpath
cmd = ['install_name_tool', '-id', '@loader_path/../%s'%libcapnp, local]
try:
p = Popen(cmd, stdout=PIPE,stderr=PIPE)
except OSError:
fatal("install_name_tool not found, cannot patch libcapnp for bundling.")
out,err = p.communicate()
if p.returncode:
fatal("Could not patch bundled libcapnp install_name: %s"%err, p.returncode)
|
capnproto/pycapnp | buildutils/bundle.py | fetch_libcapnp | python | def fetch_libcapnp(savedir, url=None):
is_preconfigured = False
if url is None:
url = libcapnp_url
is_preconfigured = True
dest = pjoin(savedir, 'capnproto-c++')
if os.path.exists(dest):
info("already have %s" % dest)
return
fname = fetch_archive(savedir, url, libcapnp)
tf = tarfile.open(fname)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
if is_preconfigured:
shutil.move(with_version, dest)
else:
cpp_dir = os.path.join(with_version, 'c++')
conf = Popen(['autoreconf', '-i'], cwd=cpp_dir)
returncode = conf.wait()
if returncode != 0:
raise RuntimeError('Autoreconf failed. Make sure autotools are installed on your system.')
shutil.move(cpp_dir, dest) | download and extract libcapnp | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L76-L100 | [
"def info(msg):\n logger.info(msg)\n",
"def fetch_archive(savedir, url, fname, force=False):\n \"\"\"download an archive to a specific location\"\"\"\n dest = pjoin(savedir, fname)\n if os.path.exists(dest) and not force:\n info(\"already have %s\" % fname)\n return dest\n info(\"fetc... | """utilities for fetching build dependencies."""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# This bundling code is largely adapted from pyzmq-static's get.sh by
# Brandon Craig-Rhodes, which is itself BSD licensed.
#-----------------------------------------------------------------------------
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import os
import shutil
import stat
import sys
import tarfile
from glob import glob
from subprocess import Popen, PIPE
try:
# py2
from urllib2 import urlopen
except ImportError:
# py3
from urllib.request import urlopen
from .msg import fatal, debug, info, warn
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
bundled_version = (0,6,1)
libcapnp = "capnproto-c++-%i.%i.%i.tar.gz" % (bundled_version)
libcapnp_url = "https://capnproto.org/" + libcapnp
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def untgz(archive):
return archive.replace('.tar.gz', '')
def localpath(*args):
"""construct an absolute path from a list relative to the root pycapnp directory"""
plist = [ROOT] + list(args)
return os.path.abspath(pjoin(*plist))
def fetch_archive(savedir, url, fname, force=False):
"""download an archive to a specific location"""
dest = pjoin(savedir, fname)
if os.path.exists(dest) and not force:
info("already have %s" % fname)
return dest
info("fetching %s into %s" % (url, savedir))
if not os.path.exists(savedir):
os.makedirs(savedir)
req = urlopen(url)
with open(dest, 'wb') as f:
f.write(req.read())
return dest
#-----------------------------------------------------------------------------
# libcapnp
#-----------------------------------------------------------------------------
def stage_platform_hpp(capnproot):
"""stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
"""
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
def copy_and_patch_libcapnp(capnp, libcapnp):
"""copy libcapnp into source dir, and patch it if necessary.
This command is necessary prior to running a bdist on Linux or OS X.
"""
if sys.platform.startswith('win'):
return
# copy libcapnp into capnp for bdist
local = localpath('capnp',libcapnp)
if not capnp and not os.path.exists(local):
fatal("Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually prior to running bdist.")
try:
# resolve real file through symlinks
lib = os.path.realpath(pjoin(capnp, 'lib', libcapnp))
print ("copying %s -> %s"%(lib, local))
shutil.copy(lib, local)
except Exception:
if not os.path.exists(local):
fatal("Could not copy libcapnp into capnp/, which is necessary for bdist. "
"Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually.")
if sys.platform == 'darwin':
# chmod u+w on the lib,
# which can be user-read-only for some reason
mode = os.stat(local).st_mode
os.chmod(local, mode | stat.S_IWUSR)
# patch install_name on darwin, instead of using rpath
cmd = ['install_name_tool', '-id', '@loader_path/../%s'%libcapnp, local]
try:
p = Popen(cmd, stdout=PIPE,stderr=PIPE)
except OSError:
fatal("install_name_tool not found, cannot patch libcapnp for bundling.")
out,err = p.communicate()
if p.returncode:
fatal("Could not patch bundled libcapnp install_name: %s"%err, p.returncode)
|
capnproto/pycapnp | buildutils/bundle.py | stage_platform_hpp | python | def stage_platform_hpp(capnproot):
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp) | stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated. | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L103-L138 | [
"def info(msg):\n logger.info(msg)\n"
] | """utilities for fetching build dependencies."""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# This bundling code is largely adapted from pyzmq-static's get.sh by
# Brandon Craig-Rhodes, which is itself BSD licensed.
#-----------------------------------------------------------------------------
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import os
import shutil
import stat
import sys
import tarfile
from glob import glob
from subprocess import Popen, PIPE
try:
# py2
from urllib2 import urlopen
except ImportError:
# py3
from urllib.request import urlopen
from .msg import fatal, debug, info, warn
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
bundled_version = (0,6,1)
libcapnp = "capnproto-c++-%i.%i.%i.tar.gz" % (bundled_version)
libcapnp_url = "https://capnproto.org/" + libcapnp
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def untgz(archive):
return archive.replace('.tar.gz', '')
def localpath(*args):
"""construct an absolute path from a list relative to the root pycapnp directory"""
plist = [ROOT] + list(args)
return os.path.abspath(pjoin(*plist))
def fetch_archive(savedir, url, fname, force=False):
"""download an archive to a specific location"""
dest = pjoin(savedir, fname)
if os.path.exists(dest) and not force:
info("already have %s" % fname)
return dest
info("fetching %s into %s" % (url, savedir))
if not os.path.exists(savedir):
os.makedirs(savedir)
req = urlopen(url)
with open(dest, 'wb') as f:
f.write(req.read())
return dest
#-----------------------------------------------------------------------------
# libcapnp
#-----------------------------------------------------------------------------
def fetch_libcapnp(savedir, url=None):
"""download and extract libcapnp"""
is_preconfigured = False
if url is None:
url = libcapnp_url
is_preconfigured = True
dest = pjoin(savedir, 'capnproto-c++')
if os.path.exists(dest):
info("already have %s" % dest)
return
fname = fetch_archive(savedir, url, libcapnp)
tf = tarfile.open(fname)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
if is_preconfigured:
shutil.move(with_version, dest)
else:
cpp_dir = os.path.join(with_version, 'c++')
conf = Popen(['autoreconf', '-i'], cwd=cpp_dir)
returncode = conf.wait()
if returncode != 0:
raise RuntimeError('Autoreconf failed. Make sure autotools are installed on your system.')
shutil.move(cpp_dir, dest)
def copy_and_patch_libcapnp(capnp, libcapnp):
"""copy libcapnp into source dir, and patch it if necessary.
This command is necessary prior to running a bdist on Linux or OS X.
"""
if sys.platform.startswith('win'):
return
# copy libcapnp into capnp for bdist
local = localpath('capnp',libcapnp)
if not capnp and not os.path.exists(local):
fatal("Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually prior to running bdist.")
try:
# resolve real file through symlinks
lib = os.path.realpath(pjoin(capnp, 'lib', libcapnp))
print ("copying %s -> %s"%(lib, local))
shutil.copy(lib, local)
except Exception:
if not os.path.exists(local):
fatal("Could not copy libcapnp into capnp/, which is necessary for bdist. "
"Please specify capnp prefix via `setup.py configure --capnp=/path/to/capnp` "
"or copy libcapnp into capnp/ manually.")
if sys.platform == 'darwin':
# chmod u+w on the lib,
# which can be user-read-only for some reason
mode = os.stat(local).st_mode
os.chmod(local, mode | stat.S_IWUSR)
# patch install_name on darwin, instead of using rpath
cmd = ['install_name_tool', '-id', '@loader_path/../%s'%libcapnp, local]
try:
p = Popen(cmd, stdout=PIPE,stderr=PIPE)
except OSError:
fatal("install_name_tool not found, cannot patch libcapnp for bundling.")
out,err = p.communicate()
if p.returncode:
fatal("Could not patch bundled libcapnp install_name: %s"%err, p.returncode)
|
capnproto/pycapnp | buildutils/patch.py | _find_library | python | def _find_library(lib, path):
for d in path[::-1]:
real_lib = os.path.join(d, lib)
if os.path.exists(real_lib):
return real_lib | Find a library | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/patch.py#L30-L35 | null | """utils for patching libraries"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import re
import sys
import os
import logging
from .misc import get_output_error
pjoin = os.path.join
# LIB_PAT from delocate
LIB_PAT = re.compile(r"\s*(.*) \(compatibility version (\d+\.\d+\.\d+), "
r"current version (\d+\.\d+\.\d+)\)")
def _get_libs(fname):
rc, so, se = get_output_error(['otool', '-L', fname])
if rc:
logging.error("otool -L %s failed: %r" % (fname, se))
return
for line in so.splitlines()[1:]:
m = LIB_PAT.match(line)
if m:
yield m.group(1)
def _install_name_change(fname, lib, real_lib):
rc, so, se = get_output_error(['install_name_tool', '-change', lib, real_lib, fname])
if rc:
logging.error("Couldn't update load path: %s", se)
def patch_lib_paths(fname, library_dirs):
"""Load any weakly-defined libraries from their real location
(only on OS X)
- Find libraries with `otool -L`
- Update with `install_name_tool -change`
"""
if sys.platform != 'darwin':
return
libs = _get_libs(fname)
for lib in libs:
if not lib.startswith(('@', '/')):
real_lib = _find_library(lib, library_dirs)
if real_lib:
_install_name_change(fname, lib, real_lib)
__all__ = ['patch_lib_paths'] |
capnproto/pycapnp | buildutils/patch.py | patch_lib_paths | python | def patch_lib_paths(fname, library_dirs):
if sys.platform != 'darwin':
return
libs = _get_libs(fname)
for lib in libs:
if not lib.startswith(('@', '/')):
real_lib = _find_library(lib, library_dirs)
if real_lib:
_install_name_change(fname, lib, real_lib) | Load any weakly-defined libraries from their real location
(only on OS X)
- Find libraries with `otool -L`
- Update with `install_name_tool -change` | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/patch.py#L42-L58 | null | """utils for patching libraries"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import re
import sys
import os
import logging
from .misc import get_output_error
pjoin = os.path.join
# LIB_PAT from delocate
LIB_PAT = re.compile(r"\s*(.*) \(compatibility version (\d+\.\d+\.\d+), "
r"current version (\d+\.\d+\.\d+)\)")
def _get_libs(fname):
rc, so, se = get_output_error(['otool', '-L', fname])
if rc:
logging.error("otool -L %s failed: %r" % (fname, se))
return
for line in so.splitlines()[1:]:
m = LIB_PAT.match(line)
if m:
yield m.group(1)
def _find_library(lib, path):
"""Find a library"""
for d in path[::-1]:
real_lib = os.path.join(d, lib)
if os.path.exists(real_lib):
return real_lib
def _install_name_change(fname, lib, real_lib):
rc, so, se = get_output_error(['install_name_tool', '-change', lib, real_lib, fname])
if rc:
logging.error("Couldn't update load path: %s", se)
def patch_lib_paths(fname, library_dirs):
"""Load any weakly-defined libraries from their real location
(only on OS X)
- Find libraries with `otool -L`
- Update with `install_name_tool -change`
"""
if sys.platform != 'darwin':
return
libs = _get_libs(fname)
for lib in libs:
if not lib.startswith(('@', '/')):
real_lib = _find_library(lib, library_dirs)
if real_lib:
_install_name_change(fname, lib, real_lib)
__all__ = ['patch_lib_paths'] |
capnproto/pycapnp | examples/calculator_client.py | main | python | def main(host):
client = capnp.TwoPartyClient(host)
# Pass "calculator" to ez_restore (there's also a `restore` function that
# takes a struct or AnyPointer as an argument), and then cast the returned
# capability to it's proper type. This casting is due to capabilities not
# having a reference to their schema
calculator = client.bootstrap().cast_as(calculator_capnp.Calculator)
'''Make a request that just evaluates the literal value 123.
What's interesting here is that evaluate() returns a "Value", which is
another interface and therefore points back to an object living on the
server. We then have to call read() on that object to read it.
However, even though we are making two RPC's, this block executes in
*one* network round trip because of promise pipelining: we do not wait
for the first call to complete before we send the second call to the
server.'''
print('Evaluating a literal... ', end="")
# Make the request. Note we are using the shorter function form (instead
# of evaluate_request), and we are passing a dictionary that represents a
# struct and its member to evaluate
eval_promise = calculator.evaluate({"literal": 123})
# This is equivalent to:
'''
request = calculator.evaluate_request()
request.expression.literal = 123
# Send it, which returns a promise for the result (without blocking).
eval_promise = request.send()
'''
# Using the promise, create a pipelined request to call read() on the
# returned object. Note that here we are using the shortened method call
# syntax read(), which is mostly just sugar for read_request().send()
read_promise = eval_promise.value.read()
# Now that we've sent all the requests, wait for the response. Until this
# point, we haven't waited at all!
response = read_promise.wait()
assert response.value == 123
print("PASS")
'''Make a request to evaluate 123 + 45 - 67.
The Calculator interface requires that we first call getOperator() to
get the addition and subtraction functions, then call evaluate() to use
them. But, once again, we can get both functions, call evaluate(), and
then read() the result -- four RPCs -- in the time of *one* network
round trip, because of promise pipelining.'''
print("Using add and subtract... ", end='')
# Get the "add" function from the server.
add = calculator.getOperator(op='add').func
# Get the "subtract" function from the server.
subtract = calculator.getOperator(op='subtract').func
# Build the request to evaluate 123 + 45 - 67. Note the form is 'evaluate'
# + '_request', where 'evaluate' is the name of the method we want to call
request = calculator.evaluate_request()
subtract_call = request.expression.init('call')
subtract_call.function = subtract
subtract_params = subtract_call.init('params', 2)
subtract_params[1].literal = 67.0
add_call = subtract_params[0].init('call')
add_call.function = add
add_params = add_call.init('params', 2)
add_params[0].literal = 123
add_params[1].literal = 45
# Send the evaluate() request, read() the result, and wait for read() to finish.
eval_promise = request.send()
read_promise = eval_promise.value.read()
response = read_promise.wait()
assert response.value == 101
print("PASS")
'''
Note: a one liner version of building the previous request (I highly
recommend not doing it this way for such a complicated structure, but I
just wanted to demonstrate it is possible to set all of the fields with a
dictionary):
eval_promise = calculator.evaluate(
{'call': {'function': subtract,
'params': [{'call': {'function': add,
'params': [{'literal': 123},
{'literal': 45}]}},
{'literal': 67.0}]}})
'''
'''Make a request to evaluate 4 * 6, then use the result in two more
requests that add 3 and 5.
Since evaluate() returns its result wrapped in a `Value`, we can pass
that `Value` back to the server in subsequent requests before the first
`evaluate()` has actually returned. Thus, this example again does only
one network round trip.'''
print("Pipelining eval() calls... ", end="")
# Get the "add" function from the server.
add = calculator.getOperator(op='add').func
# Get the "multiply" function from the server.
multiply = calculator.getOperator(op='multiply').func
# Build the request to evaluate 4 * 6
request = calculator.evaluate_request()
multiply_call = request.expression.init("call")
multiply_call.function = multiply
multiply_params = multiply_call.init("params", 2)
multiply_params[0].literal = 4
multiply_params[1].literal = 6
multiply_result = request.send().value
# Use the result in two calls that add 3 and add 5.
add_3_request = calculator.evaluate_request()
add_3_call = add_3_request.expression.init("call")
add_3_call.function = add
add_3_params = add_3_call.init("params", 2)
add_3_params[0].previousResult = multiply_result
add_3_params[1].literal = 3
add_3_promise = add_3_request.send().value.read()
add_5_request = calculator.evaluate_request()
add_5_call = add_5_request.expression.init("call")
add_5_call.function = add
add_5_params = add_5_call.init("params", 2)
add_5_params[0].previousResult = multiply_result
add_5_params[1].literal = 5
add_5_promise = add_5_request.send().value.read()
# Now wait for the results.
assert add_3_promise.wait().value == 27
assert add_5_promise.wait().value == 29
print("PASS")
'''Our calculator interface supports defining functions. Here we use it
to define two functions and then make calls to them as follows:
f(x, y) = x * 100 + y
g(x) = f(x, x + 1) * 2;
f(12, 34)
g(21)
Once again, the whole thing takes only one network round trip.'''
print("Defining functions... ", end="")
# Get the "add" function from the server.
add = calculator.getOperator(op='add').func
# Get the "multiply" function from the server.
multiply = calculator.getOperator(op='multiply').func
# Define f.
request = calculator.defFunction_request()
request.paramCount = 2
# Build the function body.
add_call = request.body.init("call")
add_call.function = add
add_params = add_call.init("params", 2)
add_params[1].parameter = 1 # y
multiply_call = add_params[0].init("call")
multiply_call.function = multiply
multiply_params = multiply_call.init("params", 2)
multiply_params[0].parameter = 0 # x
multiply_params[1].literal = 100
f = request.send().func
# Define g.
request = calculator.defFunction_request()
request.paramCount = 1
# Build the function body.
multiply_call = request.body.init("call")
multiply_call.function = multiply
multiply_params = multiply_call.init("params", 2)
multiply_params[1].literal = 2
f_call = multiply_params[0].init("call")
f_call.function = f
f_params = f_call.init("params", 2)
f_params[0].parameter = 0
add_call = f_params[1].init("call")
add_call.function = add
add_params = add_call.init("params", 2)
add_params[0].parameter = 0
add_params[1].literal = 1
g = request.send().func
# OK, we've defined all our functions. Now create our eval requests.
# f(12, 34)
f_eval_request = calculator.evaluate_request()
f_call = f_eval_request.expression.init("call")
f_call.function = f
f_params = f_call.init("params", 2)
f_params[0].literal = 12
f_params[1].literal = 34
f_eval_promise = f_eval_request.send().value.read()
# g(21)
g_eval_request = calculator.evaluate_request()
g_call = g_eval_request.expression.init("call")
g_call.function = g
g_call.init('params', 1)[0].literal = 21
g_eval_promise = g_eval_request.send().value.read()
# Wait for the results.
assert f_eval_promise.wait().value == 1234
assert g_eval_promise.wait().value == 4244
print("PASS")
'''Make a request that will call back to a function defined locally.
Specifically, we will compute 2^(4 + 5). However, exponent is not
defined by the Calculator server. So, we'll implement the Function
interface locally and pass it to the server for it to use when
evaluating the expression.
This example requires two network round trips to complete, because the
server calls back to the client once before finishing. In this
particular case, this could potentially be optimized by using a tail
call on the server side -- see CallContext::tailCall(). However, to
keep the example simpler, we haven't implemented this optimization in
the sample server.'''
print("Using a callback... ", end="")
# Get the "add" function from the server.
add = calculator.getOperator(op='add').func
# Build the eval request for 2^(4+5).
request = calculator.evaluate_request()
pow_call = request.expression.init("call")
pow_call.function = PowerFunction()
pow_params = pow_call.init("params", 2)
pow_params[0].literal = 2
add_call = pow_params[1].init("call")
add_call.function = add
add_params = add_call.init("params", 2)
add_params[0].literal = 4
add_params[1].literal = 5
# Send the request and wait.
response = request.send().value.read().wait()
assert response.value == 512
print("PASS") | Make a request that just evaluates the literal value 123.
What's interesting here is that evaluate() returns a "Value", which is
another interface and therefore points back to an object living on the
server. We then have to call read() on that object to read it.
However, even though we are making two RPC's, this block executes in
*one* network round trip because of promise pipelining: we do not wait
for the first call to complete before we send the second call to the
server. | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/examples/calculator_client.py#L35-L303 | null | #!/usr/bin/env python
from __future__ import print_function
import argparse
import socket
import capnp
import calculator_capnp
class PowerFunction(calculator_capnp.Calculator.Function.Server):
'''An implementation of the Function interface wrapping pow(). Note that
we're implementing this on the client side and will pass a reference to
the server. The server will then be able to make calls back to the client.'''
def call(self, params, **kwargs):
'''Note the **kwargs. This is very necessary to include, since
protocols can add parameters over time. Also, by default, a _context
variable is passed to all server methods, but you can also return
results directly as python objects, and they'll be added to the
results struct in the correct order'''
return pow(params[0], params[1])
def parse_args():
    """Parse the command line: one positional HOST:PORT argument."""
    parser = argparse.ArgumentParser(
        usage='Connects to the Calculator server at the given address and does some RPCs')
    parser.add_argument("host", help="HOST:PORT")
    return parser.parse_args()
if __name__ == '__main__':
main(parse_args().host)
|
capnproto/pycapnp | buildutils/misc.py | get_output_error | python | def get_output_error(cmd):
if not isinstance(cmd, list):
cmd = [cmd]
logging.debug("Running: %s", ' '.join(map(quote, cmd)))
try:
result = Popen(cmd, stdout=PIPE, stderr=PIPE)
except IOError as e:
return -1, u(''), u('Failed to run %r: %r' % (cmd, e))
so, se = result.communicate()
# unicode:
so = so.decode('utf8', 'replace')
se = se.decode('utf8', 'replace')
return result.returncode, so, se | Return the exit status, stdout, stderr of a command | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/misc.py#L50-L64 | [
"u = lambda x: x\n"
] | """misc build utility functions"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
import logging
from distutils import ccompiler
from distutils.sysconfig import customize_compiler
from pipes import quote
from subprocess import Popen, PIPE
pjoin = os.path.join
if sys.version_info[0] >= 3:
u = lambda x: x
else:
u = lambda x: x.decode('utf8', 'replace')
def customize_mingw(cc):
    """Patch a mingw32 compiler object in place.

    Drops the obsolete ``-mno-cygwin`` flag from each command line
    (Python issue #12641) and removes the problematic ``msvcr90``
    runtime from the DLL link list.
    """
    command_lines = (cc.compiler, cc.compiler_cxx, cc.compiler_so,
                     cc.linker_exe, cc.linker_so)
    for cmd in command_lines:
        # strip -mno-cygwin (first occurrence only), if present
        try:
            cmd.remove('-mno-cygwin')
        except ValueError:
            pass
    # remove problematic msvcr90
    try:
        cc.dll_libraries.remove('msvcr90')
    except ValueError:
        pass
def get_compiler(compiler, **compiler_attrs):
    """Return a distutils compiler with *compiler_attrs* set on it.

    *compiler* may be ``None`` or a compiler-type name, in which case a
    fresh compiler is created and customized, or an existing compiler
    object, which is used as-is.
    """
    if compiler is not None and not isinstance(compiler, str):
        # already a compiler object: use it directly
        cc = compiler
    else:
        cc = ccompiler.new_compiler(compiler=compiler)
        customize_compiler(cc)
        if cc.compiler_type == 'mingw32':
            customize_mingw(cc)
    for attr, value in compiler_attrs.items():
        setattr(cc, attr, value)
    return cc
|
capnproto/pycapnp | buildutils/config.py | load_config | python | def load_config(name, base='conf'):
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg | Load config dict from JSON | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L31-L42 | [
"def warn(msg):\n logger.error(\"Warning: \" + msg)\n"
] | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def save_config(name, data, base='conf'):
    """Serialize *data* as JSON to ``<base>/<name>.json``.

    Creates the *base* directory if it does not yet exist.
    """
    if not os.path.exists(base):
        os.mkdir(base)
    target = pjoin(base, name + '.json')
    with open(target, 'w') as fp:
        json.dump(data, fp, indent=2)
def v_str(v_tuple):
    """Render a version tuple such as ``(2, 0, 1)`` as ``'2.0.1'``."""
    return ".".join(map(str, v_tuple))
def get_eargs():
    """Collect build options from environment variables.

    Currently only ``ZMQ_PREFIX`` is honored; returns a (possibly
    empty) settings dict.
    """
    settings = {}
    prefix = os.environ.get("ZMQ_PREFIX")
    if prefix is not None:
        debug("Found environ var ZMQ_PREFIX=%s" % prefix)
        settings['zmq_prefix'] = prefix
    return settings
def cfg2dict(cfg):
    """Convert a ConfigParser into a plain nested dict.

    ConfigParser objects are awkward to merge, so flatten them into
    ``{section: {option: value}}``.
    """
    return {section: dict(cfg.items(section)) for section in cfg.sections()}
def get_cfg_args():
    """Look for build options in ``setup.cfg``.

    Returns a dict of the file's sections with the ``[global]``
    section hoisted to the top level; known boolean flags are parsed
    from their string form.  Returns ``{}`` when no setup.cfg exists.
    """
    import ast  # local import: only used to parse the boolean flags

    if not os.path.exists('setup.cfg'):
        return {}
    cfg = ConfigParser()
    cfg.read('setup.cfg')
    cfg = cfg2dict(cfg)
    g = cfg.setdefault('global', {})
    # boolean keys: stored as strings ("True"/"False") in the ini file
    for key in ['libzmq_extension',
                'bundle_libzmq_dylib',
                'no_libzmq_extension',
                'have_sys_un_h',
                'skip_check_zmq',
                ]:
        if key in g:
            # ast.literal_eval rather than eval: parses "True"/"False"
            # identically but cannot execute arbitrary expressions from
            # an untrusted setup.cfg.
            g[key] = ast.literal_eval(g[key])
    # globals go to top level
    cfg.update(cfg.pop('global'))
    return cfg
def config_from_prefix(prefix):
    """Derive libzmq settings from a prefix string.

    ``'default'``/``'auto'``/``''`` mean autodetect,
    ``'bundled'``/``'extension'`` mean build the bundled extension,
    and anything else is treated as an install prefix path.
    """
    lowered = prefix.lower()
    if lowered in ('default', 'auto', ''):
        return {
            'zmq_prefix': '',
            'libzmq_extension': False,
            'no_libzmq_extension': False,
        }
    if lowered in ('bundled', 'extension'):
        return {
            'zmq_prefix': '',
            'libzmq_extension': True,
            'no_libzmq_extension': False,
        }
    return {
        'zmq_prefix': prefix,
        'libzmq_extension': False,
        'no_libzmq_extension': True,
    }
def merge(into, d):
    """Recursively merge container *d* into *into*.

    Dicts are merged in place (values from *d* win on conflicts via
    recursion), lists are concatenated, and any other type is simply
    replaced by *d*.
    """
    if isinstance(into, dict):
        for key, value in d.items():
            into[key] = merge(into[key], value) if key in into else value
        return into
    if isinstance(into, list):
        return into + d
    return d
def discover_settings(conf_base=None):
    """Assemble ZMQ build settings.

    Sources are applied from lowest to highest priority: built-in
    defaults, the cached JSON config (only when *conf_base* is given),
    setup.cfg, and finally environment variables.
    """
    settings = dict(
        zmq_prefix='',
        libzmq_extension=False,
        no_libzmq_extension=False,
        skip_check_zmq=False,
        build_ext={},
        bdist_egg={},
    )
    if sys.platform.startswith('win'):
        settings['have_sys_un_h'] = False
    overrides = []
    if conf_base:
        # cached config has the lowest priority of the override sources
        overrides.append(load_config('config', conf_base))
    overrides.append(get_cfg_args())
    overrides.append(get_eargs())
    for source in overrides:
        merge(settings, source)
    return settings
|
capnproto/pycapnp | buildutils/config.py | save_config | python | def save_config(name, data, base='conf'):
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2) | Save config dict to JSON | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L45-L51 | null | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
    """Load the JSON config file ``<base>/<name>.json`` as a dict.

    Returns an empty dict when the file does not exist; on read or
    parse errors a warning is logged and ``{}`` is returned as well.
    """
    path = pjoin(base, name + '.json')
    if not os.path.exists(path):
        return {}
    try:
        with open(path) as fp:
            return json.load(fp)
    except Exception as e:
        warn("Couldn't load %s: %s" % (path, e))
        return {}
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def get_eargs():
""" Look for options in environment vars """
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def get_cfg_args():
""" Look for options in setup.cfg """
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg
def config_from_prefix(prefix):
"""Get config from zmq prefix"""
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | get_eargs | python | def get_eargs():
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings | Look for options in environment vars | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L58-L68 | [
"def debug(msg):\n logger.debug(msg)\n"
] | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
"""Save config dict to JSON"""
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2)
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def get_cfg_args():
""" Look for options in setup.cfg """
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg
def config_from_prefix(prefix):
"""Get config from zmq prefix"""
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | cfg2dict | python | def cfg2dict(cfg):
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d | turn a ConfigParser into a nested dict
because ConfigParser objects are dumb. | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L70-L78 | null | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
"""Save config dict to JSON"""
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2)
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def get_eargs():
""" Look for options in environment vars """
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def get_cfg_args():
""" Look for options in setup.cfg """
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg
def config_from_prefix(prefix):
"""Get config from zmq prefix"""
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | get_cfg_args | python | def get_cfg_args():
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg | Look for options in setup.cfg | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L80-L102 | [
"def cfg2dict(cfg):\n \"\"\"turn a ConfigParser into a nested dict\n\n because ConfigParser objects are dumb.\n \"\"\"\n d = {}\n for section in cfg.sections():\n d[section] = dict(cfg.items(section))\n return d\n"
] | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
"""Save config dict to JSON"""
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2)
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def get_eargs():
""" Look for options in environment vars """
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def config_from_prefix(prefix):
"""Get config from zmq prefix"""
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | config_from_prefix | python | def config_from_prefix(prefix):
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings | Get config from zmq prefix | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L104-L119 | null | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
"""Save config dict to JSON"""
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2)
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def get_eargs():
""" Look for options in environment vars """
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def get_cfg_args():
""" Look for options in setup.cfg """
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | merge | python | def merge(into, d):
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d | merge two containers
into is updated, d has priority | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L121-L136 | [
"def merge(into, d):\n \"\"\"merge two containers\n\n into is updated, d has priority\n \"\"\"\n if isinstance(into, dict):\n for key in d.keys():\n if key not in into:\n into[key] = d[key]\n else:\n into[key] = merge(into[key], d[key])\n ... | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
"""Save config dict to JSON"""
if not os.path.exists(base):
os.mkdir(base)
fname = pjoin(base, name+'.json')
with open(fname, 'w') as f:
json.dump(data, f, indent=2)
def v_str(v_tuple):
"""turn (2,0,1) into '2.0.1'."""
return ".".join(str(x) for x in v_tuple)
def get_eargs():
""" Look for options in environment vars """
settings = {}
zmq = os.environ.get("ZMQ_PREFIX", None)
if zmq is not None:
debug("Found environ var ZMQ_PREFIX=%s" % zmq)
settings['zmq_prefix'] = zmq
return settings
def cfg2dict(cfg):
"""turn a ConfigParser into a nested dict
because ConfigParser objects are dumb.
"""
d = {}
for section in cfg.sections():
d[section] = dict(cfg.items(section))
return d
def get_cfg_args():
""" Look for options in setup.cfg """
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
cfg.read('setup.cfg')
cfg = cfg2dict(cfg)
g = cfg.setdefault('global', {})
# boolean keys:
for key in ['libzmq_extension',
'bundle_libzmq_dylib',
'no_libzmq_extension',
'have_sys_un_h',
'skip_check_zmq',
]:
if key in g:
g[key] = eval(g[key])
# globals go to top level
cfg.update(cfg.pop('global'))
return cfg
def config_from_prefix(prefix):
"""Get config from zmq prefix"""
settings = {}
if prefix.lower() in ('default', 'auto', ''):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = False
elif prefix.lower() in ('bundled', 'extension'):
settings['zmq_prefix'] = ''
settings['libzmq_extension'] = True
settings['no_libzmq_extension'] = False
else:
settings['zmq_prefix'] = prefix
settings['libzmq_extension'] = False
settings['no_libzmq_extension'] = True
return settings
def merge(into, d):
"""merge two containers
into is updated, d has priority
"""
if isinstance(into, dict):
for key in d.keys():
if key not in into:
into[key] = d[key]
else:
into[key] = merge(into[key], d[key])
return into
elif isinstance(into, list):
return into + d
else:
return d
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings
|
capnproto/pycapnp | buildutils/config.py | discover_settings | python | def discover_settings(conf_base=None):
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings | Discover custom settings for ZMQ path | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/config.py#L138-L157 | [
"def load_config(name, base='conf'):\n \"\"\"Load config dict from JSON\"\"\"\n fname = pjoin(base, name + '.json')\n if not os.path.exists(fname):\n return {}\n try:\n with open(fname) as f:\n cfg = json.load(f)\n except Exception as e:\n warn(\"Couldn't load %s: %s\"... | """Config functions"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import os
import json
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join
from .msg import debug, fatal, warn
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def load_config(name, base='conf'):
"""Load config dict from JSON"""
fname = pjoin(base, name + '.json')
if not os.path.exists(fname):
return {}
try:
with open(fname) as f:
cfg = json.load(f)
except Exception as e:
warn("Couldn't load %s: %s" % (fname, e))
cfg = {}
return cfg
def save_config(name, data, base='conf'):
    """Serialize *data* to ``<base>/<name>.json`` with 2-space indentation.

    Parameters
    ----------
    name : str
        Basename of the config file (without the ``.json`` suffix).
    data : dict
        JSON-serializable configuration to write.
    base : str
        Directory receiving the file; created (including parents) when it
        does not exist yet.
    """
    # makedirs(exist_ok=True) is race-free and handles a multi-level `base`,
    # unlike the exists()/mkdir() pair it replaces.
    os.makedirs(base, exist_ok=True)
    fname = os.path.join(base, name + '.json')
    with open(fname, 'w') as f:
        json.dump(data, f, indent=2)
def v_str(v_tuple):
    """Render a version tuple such as ``(2, 0, 1)`` as the string ``'2.0.1'``."""
    return ".".join(map(str, v_tuple))
def get_eargs():
    """Collect build options from environment variables.

    Only ``ZMQ_PREFIX`` is honoured; when set, it is logged via debug()
    and returned under the ``'zmq_prefix'`` key.
    """
    prefix = os.environ.get("ZMQ_PREFIX", None)
    if prefix is None:
        return {}
    debug("Found environ var ZMQ_PREFIX=%s" % prefix)
    return {'zmq_prefix': prefix}
def cfg2dict(cfg):
    """Flatten a ConfigParser into a plain ``{section: {option: value}}`` dict.

    ConfigParser objects are awkward to consume directly, so convert once
    here and work with ordinary nested dicts afterwards.
    """
    return {section: dict(cfg.items(section)) for section in cfg.sections()}
def get_cfg_args():
    """Read build options from ``setup.cfg`` in the current directory.

    Returns
    -------
    dict
        Nested ``{section: {option: value}}`` mapping with the ``[global]``
        section hoisted to the top level and known boolean keys converted
        from strings to Python values.  Empty when no ``setup.cfg`` exists.
    """
    import ast

    if not os.path.exists('setup.cfg'):
        return {}
    cfg = ConfigParser()
    cfg.read('setup.cfg')
    cfg = cfg2dict(cfg)
    g = cfg.setdefault('global', {})
    # Boolean keys arrive as strings ("True", "0", ...); parse them safely.
    # ast.literal_eval accepts the same literal spellings eval() did, but
    # cannot execute arbitrary expressions found in the config file.
    for key in ['libzmq_extension',
                'bundle_libzmq_dylib',
                'no_libzmq_extension',
                'have_sys_un_h',
                'skip_check_zmq',
                ]:
        if key in g:
            g[key] = ast.literal_eval(g[key])
    # globals go to top level
    cfg.update(cfg.pop('global'))
    return cfg
def config_from_prefix(prefix):
    """Translate a zmq prefix string into build-configuration flags.

    ``'default'``/``'auto'``/``''`` mean autodetect, ``'bundled'``/
    ``'extension'`` mean build the bundled extension, and anything else is
    treated as an installation prefix path.
    """
    lowered = prefix.lower()
    if lowered in ('default', 'auto', ''):
        return {
            'zmq_prefix': '',
            'libzmq_extension': False,
            'no_libzmq_extension': False,
        }
    if lowered in ('bundled', 'extension'):
        return {
            'zmq_prefix': '',
            'libzmq_extension': True,
            'no_libzmq_extension': False,
        }
    return {
        'zmq_prefix': prefix,
        'libzmq_extension': False,
        'no_libzmq_extension': True,
    }
def merge(into, d):
    """Recursively merge container *d* into *into*.

    *into* is updated in place for dicts; at the leaves (and for lists,
    which are concatenated) the values from *d* win.
    """
    if isinstance(into, dict):
        for key, value in d.items():
            into[key] = merge(into[key], value) if key in into else value
        return into
    if isinstance(into, list):
        return into + d
    return d
def discover_settings(conf_base=None):
    """Assemble ZMQ build settings from defaults, a cached config file,
    ``setup.cfg`` and environment variables (lowest to highest priority)."""
    settings = {
        'zmq_prefix': '',
        'libzmq_extension': False,
        'no_libzmq_extension': False,
        'skip_check_zmq': False,
        'build_ext': {},
        'bdist_egg': {},
    }
    if sys.platform.startswith('win'):
        # sys/un.h (unix domain sockets) is never available on Windows
        settings['have_sys_un_h'] = False
    if conf_base:
        # cached config file has the lowest priority of the three sources
        merge(settings, load_config('config', conf_base))
    merge(settings, get_cfg_args())
    merge(settings, get_eargs())
    return settings
|
capnproto/pycapnp | examples/calculator_server.py | evaluate_impl | python | def evaluate_impl(expression, params=None):
'''Implementation of CalculatorImpl::evaluate(), also shared by
FunctionImpl::call(). In the latter case, `params` are the parameter
values passed to the function; in the former case, `params` is just an
empty list.'''
which = expression.which()
if which == 'literal':
return capnp.Promise(expression.literal)
elif which == 'previousResult':
return read_value(expression.previousResult)
elif which == 'parameter':
assert expression.parameter < len(params)
return capnp.Promise(params[expression.parameter])
elif which == 'call':
call = expression.call
func = call.function
# Evaluate each parameter.
paramPromises = [evaluate_impl(param, params) for param in call.params]
joinedParams = capnp.join_promises(paramPromises)
# When the parameters are complete, call the function.
ret = (joinedParams
.then(lambda vals: func.call(vals))
.then(lambda result: result.value))
return ret
else:
raise ValueError("Unknown expression type: " + which) | Implementation of CalculatorImpl::evaluate(), also shared by
FunctionImpl::call(). In the latter case, `params` are the parameter
values passed to the function; in the former case, `params` is just an
empty list. | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/examples/calculator_server.py#L20-L50 | [
"def read_value(value):\n '''Helper function to asynchronously call read() on a Calculator::Value and\n return a promise for the result. (In the future, the generated code might\n include something like this automatically.)'''\n\n return value.read().then(lambda result: result.value)\n"
] | #!/usr/bin/env python
from __future__ import print_function
import argparse
import socket
import random
import capnp
import calculator_capnp
def read_value(value):
    '''Asynchronously call read() on a Calculator::Value and return a
    promise for the numeric result.  (In the future, the generated code
    might include something like this automatically.)'''
    def _extract(result):
        return result.value

    return value.read().then(_extract)
class ValueImpl(calculator_capnp.Calculator.Value.Server):
    "Simple implementation of the Calculator.Value Cap'n Proto interface."
    # Wraps a plain Python number as a capability; read() hands the wrapped
    # value back to the RPC layer.
    def __init__(self, value):
        # value: the number this capability exposes to clients.
        self.value = value
    def read(self, **kwargs):
        # RPC method; **kwargs absorbs any extra framework-supplied
        # arguments (presumably _context etc. — not used here).
        return self.value
class FunctionImpl(calculator_capnp.Calculator.Function.Server):
    '''Implementation of the Calculator.Function Cap'n Proto interface, where the
    function is defined by a Calculator.Expression.'''
    def __init__(self, paramCount, body):
        # paramCount: arity the expression expects; asserted on every call.
        self.paramCount = paramCount
        # as_builder() keeps a mutable copy of the expression — presumably so
        # it outlives the originating request message; TODO confirm.
        self.body = body.as_builder()
    def call(self, params, _context, **kwargs):
        '''Note that we're returning a Promise object here, and bypassing the
        helper functionality that normally sets the results struct from the
        returned object. Instead, we set _context.results directly inside of
        another promise'''
        assert len(params) == self.paramCount
        # using setattr because '=' is not allowed inside of lambdas
        return evaluate_impl(self.body, params).then(lambda value: setattr(_context.results, 'value', value))
class OperatorImpl(calculator_capnp.Calculator.Function.Server):
    '''Calculator.Function capability wrapping one of the four basic binary
    arithmetic operators.'''
    def __init__(self, op):
        # op: operator name — one of 'add', 'subtract', 'multiply', 'divide'.
        self.op = op
    def call(self, params, **kwargs):
        assert len(params) == 2
        first, second = params[0], params[1]
        name = self.op
        if name == 'add':
            return first + second
        if name == 'subtract':
            return first - second
        if name == 'multiply':
            return first * second
        if name == 'divide':
            return first / second
        raise ValueError('Unknown operator')
class CalculatorImpl(calculator_capnp.Calculator.Server):
    "Implementation of the Calculator Cap'n Proto interface."
    def evaluate(self, expression, _context, **kwargs):
        # Evaluate the expression tree asynchronously, then wrap the result
        # in a ValueImpl capability and store it on the RPC results struct
        # (setattr because '=' is not allowed inside a lambda).
        return evaluate_impl(expression).then(lambda value: setattr(_context.results, 'value', ValueImpl(value)))
    def defFunction(self, paramCount, body, _context, **kwargs):
        # Capture the expression body as a callable Function capability.
        return FunctionImpl(paramCount, body)
    def getOperator(self, op, **kwargs):
        # op names one of the basic operators handled by OperatorImpl.
        return OperatorImpl(op)
def parse_args():
    """Parse the command line; expects a single ADDRESS[:PORT] argument."""
    parser = argparse.ArgumentParser(usage='''Runs the server bound to the\
given address/port ADDRESS may be '*' to bind to all local addresses.\
:PORT may be omitted to choose a port automatically. ''')
    parser.add_argument("address", help="ADDRESS[:PORT]")
    return parser.parse_args()
def main():
    """Start a Cap'n Proto calculator server bound to the CLI-supplied
    address and serve until interrupted."""
    bind_address = parse_args().address
    server = capnp.TwoPartyServer(bind_address, bootstrap=CalculatorImpl())
    server.run_forever()


if __name__ == '__main__':
    main()
|
capnproto/pycapnp | examples/calculator_server.py | FunctionImpl.call | python | def call(self, params, _context, **kwargs):
'''Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise'''
assert len(params) == self.paramCount
# using setattr because '=' is not allowed inside of lambdas
return evaluate_impl(self.body, params).then(lambda value: setattr(_context.results, 'value', value)) | Note that we're returning a Promise object here, and bypassing the
helper functionality that normally sets the results struct from the
returned object. Instead, we set _context.results directly inside of
another promise | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/examples/calculator_server.py#L73-L81 | [
"def evaluate_impl(expression, params=None):\n '''Implementation of CalculatorImpl::evaluate(), also shared by\n FunctionImpl::call(). In the latter case, `params` are the parameter\n values passed to the function; in the former case, `params` is just an\n empty list.'''\n\n which = expression.which... | class FunctionImpl(calculator_capnp.Calculator.Function.Server):
'''Implementation of the Calculator.Function Cap'n Proto interface, where the
function is defined by a Calculator.Expression.'''
def __init__(self, paramCount, body):
self.paramCount = paramCount
self.body = body.as_builder()
|
capnproto/pycapnp | buildutils/detect.py | detect_version | python | def detect_version(basedir, compiler=None, **compiler_attrs):
if compiler is None:
compiler = get_default_compiler()
cfile = pjoin(basedir, 'vers.cpp')
shutil.copy(pjoin(os.path.dirname(__file__), 'vers.cpp'), cfile)
# check if we need to link against Realtime Extensions library
if sys.platform.startswith('linux'):
cc = ccompiler.new_compiler(compiler=compiler)
cc.output_dir = basedir
if not cc.has_function('timer_create'):
if 'libraries' not in compiler_attrs:
compiler_attrs['libraries'] = []
compiler_attrs['libraries'].append('rt')
cc = get_compiler(compiler=compiler, **compiler_attrs)
efile = test_compilation(cfile, compiler=cc)
patch_lib_paths(efile, cc.library_dirs)
rc, so, se = get_output_error([efile])
if rc:
msg = "Error running version detection script:\n%s\n%s" % (so,se)
logging.error(msg)
raise IOError(msg)
handlers = {'vers': lambda val: tuple(int(v) for v in val.split('.'))}
props = {}
for line in (x for x in so.split('\n') if x):
key, val = line.split(':')
props[key] = handlers[key](val)
return props | Compile, link & execute a test program, in empty directory `basedir`.
The C compiler will be updated with any keywords given via setattr.
Parameters
----------
basedir : path
The location where the test program will be compiled and run
compiler : str
The distutils compiler key (e.g. 'unix', 'msvc', or 'mingw32')
**compiler_attrs : dict
Any extra compiler attributes, which will be set via ``setattr(cc)``.
Returns
-------
A dict of properties for zmq compilation, with the following two keys:
vers : tuple
The ZMQ version as a tuple of ints, e.g. (2,2,0)
settings : dict
The compiler options used to compile the test function, e.g. `include_dirs`,
`library_dirs`, `libs`, etc. | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/detect.py#L87-L144 | [
"def get_compiler(compiler, **compiler_attrs):\n \"\"\"get and customize a compiler\"\"\"\n if compiler is None or isinstance(compiler, str):\n cc = ccompiler.new_compiler(compiler=compiler)\n customize_compiler(cc)\n if cc.compiler_type == 'mingw32':\n customize_mingw(cc)\n ... | """Detect zmq version"""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
#
# This file is part of pyzmq, copied and adapted from h5py.
# h5py source used under the New BSD license
#
# h5py: <http://code.google.com/p/h5py/>
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#
# Adapted for use in pycapnp from pyzmq. See https://github.com/zeromq/pyzmq
# for original project.
import shutil
import sys
import os
import logging
import platform
from distutils import ccompiler
from distutils.ccompiler import get_default_compiler
from subprocess import Popen, PIPE
import tempfile
from .misc import get_compiler, get_output_error
from .patch import patch_lib_paths
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Utility functions (adapted from h5py: http://h5py.googlecode.com)
#-----------------------------------------------------------------------------
def test_compilation(cfile, compiler=None, **compiler_attrs):
    """Compile and link *cfile* into an executable with the given settings.

    Parameters
    ----------
    cfile : path
        C/C++ source file to build.
    compiler : str, compiler instance, or None
        distutils compiler key or object; extra attributes are applied via
        get_compiler(**compiler_attrs).

    Returns
    -------
    Path of the linked executable (the source path minus its extension).
    """
    cc = get_compiler(compiler, **compiler_attrs)
    efile, ext = os.path.splitext(cfile)
    cpreargs = lpreargs = []
    if sys.platform == 'darwin':
        # use appropriate arch for compiler
        if platform.architecture()[0]=='32bit':
            if platform.processor() == 'powerpc':
                cpu = 'ppc'
            else:
                cpu = 'i386'
            cpreargs = ['-arch', cpu]
            lpreargs = ['-arch', cpu, '-undefined', 'dynamic_lookup']
        else:
            # allow for missing UB arch, since it will still work:
            lpreargs = ['-undefined', 'dynamic_lookup']
    if sys.platform == 'sunos5':
        # Solaris needs an explicit data-model flag at link time
        if platform.architecture()[0]=='32bit':
            lpreargs = ['-m32']
        else:
            lpreargs = ['-m64']
    extra = compiler_attrs.get('extra_compile_args', [])
    # the test program requires at least C++11
    extra += ['--std=c++11']
    objs = cc.compile([cfile], extra_preargs=cpreargs, extra_postargs=extra)
    cc.link_executable(objs, efile, extra_preargs=lpreargs)
    return efile
def compile_and_run(basedir, src, compiler=None, **compiler_attrs):
    """Copy *src* into *basedir*, compile and run it, then clean up.

    Returns a ``(returncode, stdout, stderr)`` tuple from executing the
    compiled program.  *basedir* is removed again even when compilation
    or execution fails.
    """
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    cfile = pjoin(basedir, os.path.basename(src))
    shutil.copy(src, cfile)
    try:
        cc = get_compiler(compiler, **compiler_attrs)
        program = test_compilation(cfile, compiler=cc)
        patch_lib_paths(program, cc.library_dirs)
        proc = Popen(program, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        # Popen hands back bytes; callers expect text (py3)
        out = out.decode()
        err = err.decode()
    finally:
        # always drop the scratch directory, even on failure
        shutil.rmtree(basedir)
    return proc.returncode, out, err
def test_build():
    """Run a throwaway build of the version-detection program against
    libcapnp and return the detected properties dict."""
    scratch = tempfile.mkdtemp()
    try:
        detected = detect_version(scratch)
    finally:
        erase_dir(scratch)
    return detected
def erase_dir(dir):
    """Best-effort recursive removal of *dir*; failures are ignored."""
    try:
        shutil.rmtree(dir)
    except Exception:
        # Deliberate swallow: cleaning up a scratch directory must never
        # abort the build (e.g. locked files on Windows).
        pass
|
capnproto/pycapnp | buildutils/constants.py | cython_enums | python | def cython_enums():
lines = []
for name in all_names:
if no_prefix(name):
lines.append('enum: ZMQ_{0} "{0}"'.format(name))
else:
lines.append('enum: ZMQ_{0}'.format(name))
return dict(ZMQ_ENUMS='\n '.join(lines)) | generate `enum: ZMQ_CONST` block for constant_enums.pxi | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/constants.py#L33-L42 | null | """
script for generating files that involve repetitive updates for zmq constants.
Run this after updating utils/constant_names
Currently generates the following files from templates:
- constant_enums.pxi
- constants.pxi
- zmq_constants.h
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
from . import info
pjoin = os.path.join
root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, pjoin(root, 'zmq', 'utils'))
from constant_names import all_names, no_prefix
ifndef_t = """#ifndef {0}
#define {0} (_PYZMQ_UNDEFINED)
#endif
"""
def cython_enums():
    """generate the `enum: ZMQ_CONST` block for constant_enums.pxi"""
    entries = []
    for name in all_names:
        # un-prefixed constants need an explicit C name alias
        template = 'enum: ZMQ_{0} "{0}"' if no_prefix(name) else 'enum: ZMQ_{0}'
        entries.append(template.format(name))
    return dict(ZMQ_ENUMS='\n    '.join(entries))
def ifndefs():
    """generate the `#ifndef ZMQ_CONST` block for zmq_constants.h"""
    guarded = ['#define _PYZMQ_UNDEFINED (-9999)']
    for name in all_names:
        symbol = name if no_prefix(name) else 'ZMQ_%s' % name
        guarded.append(ifndef_t.format(symbol))
    return dict(ZMQ_IFNDEFS='\n'.join(guarded))
def constants_pyx():
    """generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
    exported = []
    assignments = []
    for name in all_names:
        if name == "NULL":
            # NULL clashes with Cython's builtin NULL, so assign via globals()
            assignments.append("globals()['NULL'] = ZMQ_NULL")
        else:
            assignments.append('{0} = ZMQ_{0}'.format(name))
        exported.append('    "{0}",'.format(name))
    return dict(ASSIGNMENTS='\n'.join(assignments), ALL='\n'.join(exported))
def generate_file(fname, ns_func, dest_dir="."):
    """Render the template named *fname* with the namespace dict produced
    by *ns_func* and write the result into *dest_dir*."""
    template_path = pjoin(root, 'buildutils', 'templates', '%s' % fname)
    with open(template_path, 'r') as f:
        template = f.read()
    rendered = template.format(**ns_func())
    dest = pjoin(dest_dir, fname)
    info("generating %s from template" % dest)
    with open(dest, 'w') as f:
        f.write(rendered)
def render_constants():
    """render generated constant files from templates"""
    # One call per generated artifact; each pairs a template with the
    # namespace function that fills it in.
    generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython'))
    generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython'))
    generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils'))
if __name__ == '__main__':
    render_constants()
|
capnproto/pycapnp | buildutils/constants.py | ifndefs | python | def ifndefs():
lines = ['#define _PYZMQ_UNDEFINED (-9999)']
for name in all_names:
if not no_prefix(name):
name = 'ZMQ_%s' % name
lines.append(ifndef_t.format(name))
return dict(ZMQ_IFNDEFS='\n'.join(lines)) | generate `#ifndef ZMQ_CONST` block for zmq_constants.h | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/constants.py#L44-L51 | null | """
script for generating files that involve repetitive updates for zmq constants.
Run this after updating utils/constant_names
Currently generates the following files from templates:
- constant_enums.pxi
- constants.pxi
- zmq_constants.h
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
from . import info
pjoin = os.path.join
root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, pjoin(root, 'zmq', 'utils'))
from constant_names import all_names, no_prefix
ifndef_t = """#ifndef {0}
#define {0} (_PYZMQ_UNDEFINED)
#endif
"""
def cython_enums():
"""generate `enum: ZMQ_CONST` block for constant_enums.pxi"""
lines = []
for name in all_names:
if no_prefix(name):
lines.append('enum: ZMQ_{0} "{0}"'.format(name))
else:
lines.append('enum: ZMQ_{0}'.format(name))
return dict(ZMQ_ENUMS='\n '.join(lines))
def constants_pyx():
"""generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
all_lines = []
assign_lines = []
for name in all_names:
if name == "NULL":
# avoid conflict with NULL in Cython
assign_lines.append("globals()['NULL'] = ZMQ_NULL")
else:
assign_lines.append('{0} = ZMQ_{0}'.format(name))
all_lines.append(' "{0}",'.format(name))
return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines))
def generate_file(fname, ns_func, dest_dir="."):
"""generate a constants file from its template"""
with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f:
tpl = f.read()
out = tpl.format(**ns_func())
dest = pjoin(dest_dir, fname)
info("generating %s from template" % dest)
with open(dest, 'w') as f:
f.write(out)
def render_constants():
"""render generated constant files from templates"""
generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils'))
if __name__ == '__main__':
render_constants()
|
capnproto/pycapnp | buildutils/constants.py | constants_pyx | python | def constants_pyx():
all_lines = []
assign_lines = []
for name in all_names:
if name == "NULL":
# avoid conflict with NULL in Cython
assign_lines.append("globals()['NULL'] = ZMQ_NULL")
else:
assign_lines.append('{0} = ZMQ_{0}'.format(name))
all_lines.append(' "{0}",'.format(name))
return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines)) | generate CONST = ZMQ_CONST and __all__ for constants.pxi | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/constants.py#L53-L64 | null | """
script for generating files that involve repetitive updates for zmq constants.
Run this after updating utils/constant_names
Currently generates the following files from templates:
- constant_enums.pxi
- constants.pxi
- zmq_constants.h
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
from . import info
pjoin = os.path.join
root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, pjoin(root, 'zmq', 'utils'))
from constant_names import all_names, no_prefix
ifndef_t = """#ifndef {0}
#define {0} (_PYZMQ_UNDEFINED)
#endif
"""
def cython_enums():
"""generate `enum: ZMQ_CONST` block for constant_enums.pxi"""
lines = []
for name in all_names:
if no_prefix(name):
lines.append('enum: ZMQ_{0} "{0}"'.format(name))
else:
lines.append('enum: ZMQ_{0}'.format(name))
return dict(ZMQ_ENUMS='\n '.join(lines))
def ifndefs():
"""generate `#ifndef ZMQ_CONST` block for zmq_constants.h"""
lines = ['#define _PYZMQ_UNDEFINED (-9999)']
for name in all_names:
if not no_prefix(name):
name = 'ZMQ_%s' % name
lines.append(ifndef_t.format(name))
return dict(ZMQ_IFNDEFS='\n'.join(lines))
def generate_file(fname, ns_func, dest_dir="."):
"""generate a constants file from its template"""
with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f:
tpl = f.read()
out = tpl.format(**ns_func())
dest = pjoin(dest_dir, fname)
info("generating %s from template" % dest)
with open(dest, 'w') as f:
f.write(out)
def render_constants():
"""render generated constant files from templates"""
generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils'))
if __name__ == '__main__':
render_constants()
|
capnproto/pycapnp | buildutils/constants.py | generate_file | python | def generate_file(fname, ns_func, dest_dir="."):
with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f:
tpl = f.read()
out = tpl.format(**ns_func())
dest = pjoin(dest_dir, fname)
info("generating %s from template" % dest)
with open(dest, 'w') as f:
f.write(out) | generate a constants file from its template | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/constants.py#L66-L74 | [
"def info(msg):\n logger.info(msg)\n",
"def cython_enums():\n \"\"\"generate `enum: ZMQ_CONST` block for constant_enums.pxi\"\"\"\n lines = []\n for name in all_names:\n if no_prefix(name):\n lines.append('enum: ZMQ_{0} \"{0}\"'.format(name))\n else:\n lines.append(... | """
script for generating files that involve repetitive updates for zmq constants.
Run this after updating utils/constant_names
Currently generates the following files from templates:
- constant_enums.pxi
- constants.pxi
- zmq_constants.h
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
from . import info
pjoin = os.path.join
root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, pjoin(root, 'zmq', 'utils'))
from constant_names import all_names, no_prefix
ifndef_t = """#ifndef {0}
#define {0} (_PYZMQ_UNDEFINED)
#endif
"""
def cython_enums():
"""generate `enum: ZMQ_CONST` block for constant_enums.pxi"""
lines = []
for name in all_names:
if no_prefix(name):
lines.append('enum: ZMQ_{0} "{0}"'.format(name))
else:
lines.append('enum: ZMQ_{0}'.format(name))
return dict(ZMQ_ENUMS='\n '.join(lines))
def ifndefs():
"""generate `#ifndef ZMQ_CONST` block for zmq_constants.h"""
lines = ['#define _PYZMQ_UNDEFINED (-9999)']
for name in all_names:
if not no_prefix(name):
name = 'ZMQ_%s' % name
lines.append(ifndef_t.format(name))
return dict(ZMQ_IFNDEFS='\n'.join(lines))
def constants_pyx():
"""generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
all_lines = []
assign_lines = []
for name in all_names:
if name == "NULL":
# avoid conflict with NULL in Cython
assign_lines.append("globals()['NULL'] = ZMQ_NULL")
else:
assign_lines.append('{0} = ZMQ_{0}'.format(name))
all_lines.append(' "{0}",'.format(name))
return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines))
def render_constants():
"""render generated constant files from templates"""
generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils'))
if __name__ == '__main__':
render_constants()
|
capnproto/pycapnp | buildutils/constants.py | render_constants | python | def render_constants():
generate_file("constant_enums.pxi", cython_enums, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("constants.pxi", constants_pyx, pjoin(root, 'zmq', 'backend', 'cython'))
generate_file("zmq_constants.h", ifndefs, pjoin(root, 'zmq', 'utils')) | render generated constant files from templates | train | https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/constants.py#L76-L80 | [
"def generate_file(fname, ns_func, dest_dir=\".\"):\n \"\"\"generate a constants file from its template\"\"\"\n with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f:\n tpl = f.read()\n out = tpl.format(**ns_func())\n dest = pjoin(dest_dir, fname)\n info(\"generating %s fro... | """
script for generating files that involve repetitive updates for zmq constants.
Run this after updating utils/constant_names
Currently generates the following files from templates:
- constant_enums.pxi
- constants.pxi
- zmq_constants.h
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
from . import info
pjoin = os.path.join
root = os.path.abspath(pjoin(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, pjoin(root, 'zmq', 'utils'))
from constant_names import all_names, no_prefix
ifndef_t = """#ifndef {0}
#define {0} (_PYZMQ_UNDEFINED)
#endif
"""
def cython_enums():
"""generate `enum: ZMQ_CONST` block for constant_enums.pxi"""
lines = []
for name in all_names:
if no_prefix(name):
lines.append('enum: ZMQ_{0} "{0}"'.format(name))
else:
lines.append('enum: ZMQ_{0}'.format(name))
return dict(ZMQ_ENUMS='\n '.join(lines))
def ifndefs():
"""generate `#ifndef ZMQ_CONST` block for zmq_constants.h"""
lines = ['#define _PYZMQ_UNDEFINED (-9999)']
for name in all_names:
if not no_prefix(name):
name = 'ZMQ_%s' % name
lines.append(ifndef_t.format(name))
return dict(ZMQ_IFNDEFS='\n'.join(lines))
def constants_pyx():
"""generate CONST = ZMQ_CONST and __all__ for constants.pxi"""
all_lines = []
assign_lines = []
for name in all_names:
if name == "NULL":
# avoid conflict with NULL in Cython
assign_lines.append("globals()['NULL'] = ZMQ_NULL")
else:
assign_lines.append('{0} = ZMQ_{0}'.format(name))
all_lines.append(' "{0}",'.format(name))
return dict(ASSIGNMENTS='\n'.join(assign_lines), ALL='\n'.join(all_lines))
def generate_file(fname, ns_func, dest_dir="."):
"""generate a constants file from its template"""
with open(pjoin(root, 'buildutils', 'templates', '%s' % fname), 'r') as f:
tpl = f.read()
out = tpl.format(**ns_func())
dest = pjoin(dest_dir, fname)
info("generating %s from template" % dest)
with open(dest, 'w') as f:
f.write(out)
if __name__ == '__main__':
render_constants()
|
mikusjelly/apkutils | apkutils/axml/chunk.py | StringPoolChunk.skipNullPadding | python | def skipNullPadding(self, buff):
'''
不断地寻找 CHUNK_STRINGPOOL_TYPE,目前暂时没有遇到这种样本。
'''
def readNext(buff, first_run=True):
datas = unpack('<i', buff.read(4))
header = datas[0]
if header == CHUNK_NULL_TYPE and first_run:
print("Skipping null padding in StringPoolChunk header")
header = readNext(buff, first_run=False)
elif header != CHUNK_STRINGPOOL_TYPE:
print("Invalid StringPoolChunk header")
return header
header = readNext(buff)
return header >> 8, header & 0xFF | 不断地寻找 CHUNK_STRINGPOOL_TYPE,目前暂时没有遇到这种样本。 | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/axml/chunk.py#L79-L96 | [
"def readNext(buff, first_run=True):\n datas = unpack('<i', buff.read(4))\n header = datas[0]\n\n if header == CHUNK_NULL_TYPE and first_run:\n print(\"Skipping null padding in StringPoolChunk header\")\n header = readNext(buff, first_run=False)\n elif header != CHUNK_STRINGPOOL_TYPE:\n ... | class StringPoolChunk(object):
'''
解析String Pool Chunk
'''
    def __init__(self, buff):
        """Parse a string-pool chunk header plus its offset arrays and raw
        pools from *buff* (a cursor-style byte reader)."""
        self.size_of_buff = buff.size()
        self.start = buff.get_idx()
        # decoded-string cache, filled lazily by getString()
        self._cache = {}
        self.header_size, self.header = self.skipNullPadding(buff)
        # chunk size
        self.chunkSize = unpack('<i', buff.read(4))[0]
        # number of strings
        self.stringCount = unpack('<i', buff.read(4))[0]
        # number of styles
        self.styleCount = unpack('<i', buff.read(4))[0]
        # string-format flags
        self.flags = unpack('<i', buff.read(4))[0]
        # strings come in one of two encodings: 16-bit units or UTF-8
        self.m_isUTF8 = (self.flags & UTF8_FLAG) != 0
        # start offset of the string data
        self.stringsStart = unpack('<i', buff.read(4))[0]
        # NOTE:
        # 1. When parsing a manifest this value is expected to be 0.
        # 2. It must not exceed the file size (developers commonly abuse
        #    this field to break parsing tools), hence the clamp below.
        self.stylesStart = unpack('<i', buff.read(4))[0]
        if self.stylesStart > buff.size():
            self.stylesStart = 0
        # string offset array
        self.m_stringIndices = []
        # style offset array
        self.m_styleIndices = []
        # raw string pool
        self.m_charbuff = ""
        # style span pool
        self.m_styles = []
        for _ in range(0, self.stringCount):
            tmp = buff.read(4)
            self.m_stringIndices.append(unpack('<i', tmp)[0])
        for _ in range(0, self.styleCount):
            tmp = buff.read(4)
            self.m_styleIndices.append(unpack('<i', tmp)[0])
        # string data runs up to stylesStart (or to the chunk end when no
        # styles are present); the pool itself is 4-byte aligned
        size = self.chunkSize - self.stringsStart
        if self.stylesStart != 0:
            size = self.stylesStart - self.stringsStart
        # raw string-pool bytes
        self.m_charbuff = buff.read(size)
        if self.stylesStart != 0:
            size = self.chunkSize - self.stylesStart
            for _ in range(0, int(size / 4) - 1):
                tmp = buff.read(4)
                self.m_styles.append(unpack('<i', tmp)[0])
def getString(self, idx):
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringIndices or idx >= len(
self.m_stringIndices):
return ""
offset = self.m_stringIndices[idx]
if self.m_isUTF8:
self._cache[idx] = self.decode8(offset)
else:
self._cache[idx] = self.decode16(offset)
return self._cache[idx]
    def getStyle(self, idx):
        """Return the raw style word at *idx* (no bounds checking)."""
        return self.m_styles[idx]
def decode8(self, offset):
str_len, skip = self.decodeLength(offset, 1)
offset += skip
encoded_bytes, skip = self.decodeLength(offset, 1)
offset += skip
data = self.m_charbuff[offset: offset + encoded_bytes]
return self.decode_bytes(data, 'utf-8', str_len)
def decode16(self, offset):
str_len, skip = self.decodeLength(offset, 2)
offset += skip
encoded_bytes = str_len * 2
data = self.m_charbuff[offset: offset + encoded_bytes]
return self.decode_bytes(data, 'utf-16', str_len)
def decode_bytes(self, data, encoding, str_len):
string = data.decode(encoding, 'replace')
if len(string) != str_len:
raise Exception("invalid decoded string length")
return string
def decodeLength(self, offset, sizeof_char):
length = self.m_charbuff[offset]
sizeof_2chars = sizeof_char << 1
fmt_chr = 'B' if sizeof_char == 1 else 'H'
fmt = "<2" + fmt_chr
length1, length2 = unpack(
fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
highbit = 0x80 << (8 * (sizeof_char - 1))
if (length & highbit) != 0:
return ((length1 & ~highbit) << (8 * sizeof_char)) | length2, sizeof_2chars
return length1, sizeof_char
    def show(self, flag=False):
        """Print the chunk header fields; when *flag* is true, also dump
        every string in the pool (index, repr)."""
        print("String Pool Chunk:")
        print(" - start:", self.start)
        print(" - header Size:", self.header_size)
        print(" - chunkSize:", self.chunkSize)
        print(" - stringCount:", self.stringCount)
        print(" - styleCount:", self.styleCount)
        print(" - stringsStart:", self.stringsStart)
        print(" - stylesStart:", self.stylesStart)
        print(" - flags:", self.flags)
        print(" - size_of_buff:", self.size_of_buff)
        if not flag:
            return
        for i in range(0, len(self.m_stringIndices)):
            print((i, repr(self.getString(i))))
|
mikusjelly/apkutils | apkutils/apkfile.py | is_zipfile | python | def is_zipfile(filename):
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result | Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L182-L196 | [
"def _check_zipfile(fp):\n try:\n if _EndRecData(fp):\n return True # file has correct magic number\n except OSError:\n pass\n return False\n"
] | """
from https://github.com/python/cpython/tree/3.6/Lib/zipfile.py
Read and write APK files.
XXX references to utf-8 need further investigation.
"""
import binascii
import importlib.util
import io
import os
import re
import shutil
import stat
import struct
import sys
import time
try:
import threading
except ImportError:
import dummy_threading as threading
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
    """Raised when a file is not a valid ZIP archive or its structure is
    corrupt (bad magic numbers, truncated headers, mismatched names)."""
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile that would require ZIP64 extensions
    (sizes beyond ZIP64_LIMIT) while those extensions are disabled.
    """
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number,
# and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
    """Return True when *fp* holds a readable "end of central directory"
    record (i.e. it looks like a ZIP file), False otherwise."""
    try:
        endrec = _EndRecData(fp)
    except OSError:
        return False
    return bool(endrec)
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec.

    *offset* is a negative offset from the end of the file pointing at the
    classic end-of-central-directory record already parsed into *endrec*.
    Returns *endrec*, updated in place with the 64-bit counts/offsets when a
    valid ZIP64 locator and record are found; otherwise returns it unchanged.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except OSError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec
    data = fpin.read(sizeEndCentDir64Locator)
    if len(data) != sizeEndCentDir64Locator:
        return endrec
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No ZIP64 locator present; the plain end record stands.
        return endrec
    if diskno != 0 or disks != 1:
        raise BadZipFile("zipfiles that span multiple disks are not supported")
    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    if len(data) != sizeEndCentDir64:
        return endrec
    sig, sz, create_version, read_version, disk_num, disk_dir, \
        dircount, dircount2, dirsize, diroffset = \
        struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec
    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.
    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""
    # Determine file size.
    fpin.seek(0, 2)
    filesize = fpin.tell()
    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except OSError:
        # File too small to contain an end-of-central-directory record.
        return None
    data = fpin.read()
    if (len(data) == sizeEndCentDir and
        data[0:4] == stringEndArchive and
        data[-2:] == b"\000\000"):
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec = list(endrec)
        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)
    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start + sizeEndCentDir]
        if len(recData) != sizeEndCentDir:
            # Zip file is corrupted.
            return None
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE]  # as claimed by the zip file
        comment = data[start + sizeEndCentDir:start +
                       sizeEndCentDir + commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)
    # Unable to find a valid end of central directory structure
    return None
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""
    __slots__ = (
        'orig_filename',
        'filename',
        'date_time',
        'compress_type',
        'comment',
        'extra',
        'create_system',
        'create_version',
        'extract_version',
        'reserved',
        'flag_bits',
        'volume',
        'internal_attr',
        'external_attr',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
        '_raw_time',
    )

    def __init__(self, filename="NoName", date_time=(1980, 1, 1, 0, 0, 0)):
        """Initialize the entry for *filename*.

        date_time is (year, month, day, hour, minute, second); years before
        1980 cannot be represented in the ZIP (MS-DOS) date format.
        """
        self.orig_filename = filename  # Original file name in archive
        # Terminate the file name at the first null byte. Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")
        self.filename = filename  # Normalized file name
        self.date_time = date_time  # year, month, day, hour, min, sec
        if date_time[0] < 1980:
            raise ValueError('ZIP does not support timestamps before 1980')
        # Standard values:
        self.compress_type = ZIP_STORED  # Type of compression for the file
        self.comment = b""  # Comment for each file
        self.extra = b""  # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0  # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3  # System which created ZIP archive
        self.create_version = DEFAULT_VERSION  # Version which created ZIP archive
        self.extract_version = DEFAULT_VERSION  # Version needed to extract archive
        self.reserved = 0  # Must be zero
        self.flag_bits = 0  # ZIP flag bits
        self.volume = 0  # Volume number of file header
        self.internal_attr = 0  # Internal attributes
        self.external_attr = 0  # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset     Byte offset to the file header
        # CRC               CRC-32 of the uncompressed file
        # compress_size     Size of the compressed file
        # file_size         Size of the uncompressed file

    def __repr__(self):
        result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
        if self.compress_type != ZIP_STORED:
            result.append(' compress_type=%s' %
                          compressor_names.get(self.compress_type,
                                               self.compress_type))
        hi = self.external_attr >> 16
        lo = self.external_attr & 0xFFFF
        if hi:
            result.append(' filemode=%r' % stat.filemode(hi))
        if lo:
            result.append(' external_attr=%#x' % lo)
        isdir = self.filename[-1:] == '/'
        if not isdir or self.file_size:
            result.append(' file_size=%r' % self.file_size)
        if ((not isdir or self.compress_size) and
            (self.compress_type != ZIP_STORED or
             self.file_size != self.compress_size)):
            result.append(' compress_size=%r' % self.compress_size)
        result.append('>')
        return ''.join(result)

    def FileHeader(self, zip64=None):
        """Return the per-file (local) header as a bytes string.

        When *zip64* is None, the ZIP64 extra field is emitted automatically
        for sizes beyond ZIP64_LIMIT; raises LargeZipFile when ZIP64 would be
        required but was explicitly disabled.
        """
        dt = self.date_time
        # Pack date/time into the 16-bit MS-DOS fields used by the ZIP format.
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size
        extra = self.extra
        min_version = 0
        if zip64 is None:
            zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
        if zip64:
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                                        1, struct.calcsize(fmt) - 4, file_size, compress_size)
        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            if not zip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            file_size = 0xffffffff
            compress_size = 0xffffffff
            min_version = ZIP64_VERSION
        if self.compress_type == ZIP_BZIP2:
            min_version = max(BZIP2_VERSION, min_version)
        elif self.compress_type == ZIP_LZMA:
            min_version = max(LZMA_VERSION, min_version)
        self.extract_version = max(min_version, self.extract_version)
        self.create_version = max(min_version, self.create_version)
        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                             self.extract_version, self.reserved, flag_bits,
                             self.compress_type, dostime, dosdate, CRC,
                             compress_size, file_size,
                             len(filename), len(extra))
        return header + filename + extra

    def _encodeFilenameFlags(self):
        # Encode the filename as ASCII when possible; otherwise use UTF-8 and
        # set the language-encoding flag bit (0x800).
        try:
            return self.filename.encode('ascii'), self.flag_bits
        except UnicodeEncodeError:
            return self.filename.encode('utf-8'), self.flag_bits | 0x800

    def _decodeExtra(self):
        """Scan the extra field and apply the ZIP64 record (tag 1) if present.

        Replaces 0xffffffff sentinel values of file_size, compress_size and
        header_offset with the 64-bit values from the ZIP64 extension.

        Fix: the loop advance was previously inside ``if tp == 1``, so an
        extra field starting with any other tag caused an infinite loop; it
        now advances for every entry, skipping unknown tags.
        """
        extra = self.extra
        unpack = struct.unpack
        while len(extra) >= 4:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError("Corrupt extra field %s" % (ln,))
                idx = 0
                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffff, 0xffffffff):
                    self.file_size = counts[idx]
                    idx += 1
                if self.compress_size == 0xFFFFFFFF:
                    self.compress_size = counts[idx]
                    idx += 1
                if self.header_offset == 0xffffffff:
                    self.header_offset = counts[idx]
                    idx += 1
            # Advance past this entry regardless of its tag.
            extra = extra[ln + 4:]
class _ZipDecrypter:
    """Class to handle decryption of files stored within a ZIP archive.
    ZIP supports a password-based form of encryption. Even though known
    plaintext attacks have been found against it, it is still useful
    to be able to get data out of such a file.
    Usage:
        zd = _ZipDecrypter(mypwd)
        plain_char = zd(cypher_char)
        plain_text = map(zd, cypher_text)
    """
    def _GenerateCRCTable():
        """Generate a CRC-32 table.
        ZIP encryption uses the CRC32 one-byte primitive for scrambling some
        internal keys. We noticed that a direct implementation is faster than
        relying on binascii.crc32().
        """
        poly = 0xedb88320
        table = [0] * 256
        for i in range(256):
            crc = i
            for j in range(8):
                if crc & 1:
                    crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
                else:
                    crc = ((crc >> 1) & 0x7FFFFFFF)
            table[i] = crc
        return table
    # Shared CRC table, built lazily on first instantiation.
    crctable = None
    def _crc32(self, ch, crc):
        """Compute the CRC32 primitive on one byte."""
        return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
    def __init__(self, pwd):
        # pwd is a bytes password; each byte is folded into the three keys.
        if _ZipDecrypter.crctable is None:
            _ZipDecrypter.crctable = _ZipDecrypter._GenerateCRCTable()
        # Fixed key initialization constants of the traditional PKZIP cipher.
        self.key0 = 305419896
        self.key1 = 591751049
        self.key2 = 878082192
        for p in pwd:
            self._UpdateKeys(p)
    def _UpdateKeys(self, c):
        # Mix one plaintext byte into the three rolling keys (masks keep the
        # arithmetic within 32 bits).
        self.key0 = self._crc32(c, self.key0)
        self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
        self.key1 = (self.key1 * 134775813 + 1) & 4294967295
        self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
    def __call__(self, c):
        """Decrypt a single character (given and returned as an int)."""
        assert isinstance(c, int)
        k = self.key2 | 2
        c = c ^ (((k * (k ^ 1)) >> 8) & 255)
        self._UpdateKeys(c)
        return c
class LZMACompressor:
    """Raw LZMA1 compressor producing the payload of a ZIP 'LZMA' member.

    The first bytes emitted are a small header (two version bytes plus the
    length-prefixed filter properties) followed by the raw LZMA1 stream,
    matching what the ZIP LZMA compression method expects.
    """

    def __init__(self):
        self._comp = None

    def _init(self):
        # Build the raw LZMA1 compressor and return the ZIP LZMA header.
        props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
        self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
            lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
        ])
        return struct.pack('<BBH', 9, 4, len(props)) + props

    def compress(self, data):
        """Compress *data*; the first call is prefixed with the LZMA header."""
        header = b'' if self._comp is not None else self._init()
        return header + self._comp.compress(data)

    def flush(self):
        """Finish the stream (emitting the header first if nothing was fed)."""
        header = b'' if self._comp is not None else self._init()
        return header + self._comp.flush()
class LZMADecompressor:
    """Raw LZMA1 decompressor for the payload of a ZIP 'LZMA' member.

    Buffers input until the ZIP LZMA header (two version bytes plus the
    length-prefixed filter properties) is complete, then streams the rest
    through a raw lzma decompressor. ``eof`` mirrors the inner stream state.
    """

    def __init__(self):
        self._decomp = None
        self._unconsumed = b''
        self.eof = False

    def decompress(self, data):
        """Feed *data* and return any decompressed bytes available so far."""
        if self._decomp is None:
            # Still assembling the header: 4 fixed bytes + psize property bytes.
            self._unconsumed += data
            if len(self._unconsumed) <= 4:
                return b''
            psize, = struct.unpack('<H', self._unconsumed[2:4])
            if len(self._unconsumed) <= 4 + psize:
                return b''
            filt = lzma._decode_filter_properties(
                lzma.FILTER_LZMA1, self._unconsumed[4:4 + psize])
            self._decomp = lzma.LZMADecompressor(
                lzma.FORMAT_RAW, filters=[filt])
            data = self._unconsumed[4 + psize:]
            del self._unconsumed
        result = self._decomp.decompress(data)
        self.eof = self._decomp.eof
        return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
    """Raise RuntimeError if *compression* is unsupported or its backing
    module failed to import (zlib/bz2/lzma are optional at import time)."""
    if compression == ZIP_STORED:
        return
    if compression == ZIP_DEFLATED:
        modname, present = "zlib", bool(zlib)
    elif compression == ZIP_BZIP2:
        modname, present = "bz2", bool(bz2)
    elif compression == ZIP_LZMA:
        modname, present = "lzma", bool(lzma)
    else:
        raise RuntimeError("That compression method is not supported")
    if not present:
        raise RuntimeError(
            "Compression requires the (missing) %s module" % modname)
def _get_compressor(compress_type):
    """Return a fresh compressor object for *compress_type*, or None when the
    data is to be stored uncompressed."""
    if compress_type == ZIP_BZIP2:
        return bz2.BZ2Compressor()
    if compress_type == ZIP_LZMA:
        return LZMACompressor()
    if compress_type == ZIP_DEFLATED:
        # Negative wbits produce a raw deflate stream (no zlib header).
        return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                zlib.DEFLATED, -15)
    return None
def _get_decompressor(compress_type):
    """Return a decompressor object for *compress_type* (None for stored
    data); raise NotImplementedError for recognized-but-unsupported methods."""
    if compress_type == ZIP_STORED:
        return None
    if compress_type == ZIP_DEFLATED:
        # Negative wbits: raw deflate stream, as used inside ZIP members.
        return zlib.decompressobj(-15)
    if compress_type == ZIP_BZIP2:
        return bz2.BZ2Decompressor()
    if compress_type == ZIP_LZMA:
        return LZMADecompressor()
    descr = compressor_names.get(compress_type)
    if descr:
        raise NotImplementedError(
            "compression type %d (%s)" % (compress_type, descr))
    raise NotImplementedError("compression type %d" % (compress_type,))
class _SharedFile:
def __init__(self, file, pos, close, lock):
self._file = file
self._pos = pos
self._close = close
self._lock = lock
def read(self, n=-1):
with self._lock:
self._file.seek(self._pos)
data = self._file.read(n)
self._pos = self._file.tell()
return data
def close(self):
if self._file is not None:
fileobj = self._file
self._file = None
self._close(fileobj)
# Provide the tell method for unseekable stream
class _Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
def close(self):
self.fp.close()
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
    Is returned by ZipFile.open().
    """
    # Max size supported by decompressor.
    # NOTE(review): this binds as 1 << (31 - 1) == 2**30 due to operator
    # precedence; kept as-is to match the stdlib zipfile implementation.
    MAX_N = 1 << 31 - 1
    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096
    # Search for universal newlines or line chunks.
    PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
    def __init__(self, fileobj, mode, zipinfo, decrypter=None,
                 close_fileobj=False):
        # NOTE(review): *decrypter* is accepted for API compatibility but is
        # never used by this trimmed-down APK reader.
        self._fileobj = fileobj
        self._close_fileobj = close_fileobj
        self._compress_type = zipinfo.compress_type
        self._compress_left = zipinfo.compress_size  # compressed bytes left to read
        self._left = zipinfo.file_size  # uncompressed bytes left to emit
        self._decompressor = _get_decompressor(self._compress_type)
        self._eof = False
        self._readbuffer = b''  # already-decompressed bytes not yet consumed
        self._offset = 0  # read position inside _readbuffer
        self._universal = 'U' in mode
        self.newlines = None
        self.mode = mode
        self.name = zipinfo.filename
    def __repr__(self):
        result = ['<%s.%s' % (self.__class__.__module__,
                              self.__class__.__qualname__)]
        if not self.closed:
            result.append(' name=%r mode=%r' % (self.name, self.mode))
            if self._compress_type != ZIP_STORED:
                result.append(' compress_type=%s' %
                              compressor_names.get(self._compress_type,
                                                   self._compress_type))
        else:
            result.append(' [closed]')
        result.append('>')
        return ''.join(result)
    def readline(self, limit=-1):
        """Read and return a line from the stream.
        If limit is specified, at most limit bytes will be read.
        """
        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find(b'\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line
        if not self._universal:
            # Non-universal mode with a limit: defer to the buffered base class.
            return io.BufferedIOBase.readline(self, limit)
        line = b''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if readahead == b'':
                return line
            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + b'\n'
            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]
            self._offset += len(chunk)
            line += chunk
        return line
    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            if len(chunk) > self._offset:
                # Prepend the freshly read chunk back onto the buffer.
                self._readbuffer = chunk + self._readbuffer[self._offset:]
                self._offset = 0
            else:
                self._offset -= len(chunk)
        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]
    def readable(self):
        return True
    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and
        returned until EOF is reached.
        """
        if n is None or n < 0:
            # Drain the buffer, then decompress everything that remains.
            buf = self._readbuffer[self._offset:]
            self._readbuffer = b''
            self._offset = 0
            while not self._eof:
                buf += self._read1(self.MAX_N)
            return buf
        end = n + self._offset
        if end < len(self._readbuffer):
            # Fully satisfiable from the buffer.
            buf = self._readbuffer[self._offset:end]
            self._offset = end
            return buf
        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        while n > 0 and not self._eof:
            data = self._read1(n)
            if n < len(data):
                # Keep the surplus for the next call.
                self._readbuffer = data
                self._offset = n
                buf += data[:n]
                break
            buf += data
            n -= len(data)
        return buf
    def _update_crc(self, newdata):
        # Update the CRC using the given data.
        # NOTE(review): self._expected_crc and self._running_crc are never
        # initialized in __init__, so calling this would raise AttributeError.
        # That is harmless here: the only call site (in _read1) is commented
        # out. Android does not verify the CRC, and some APKs deliberately
        # wipe the CRC field, so CRC validation is skipped when parsing.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        # if self._eof and self._running_crc != self._expected_crc:
        #     raise BadZipFile("Bad CRC-32 for file %r" % self.name)
    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""
        if n is None or n < 0:
            buf = self._readbuffer[self._offset:]
            self._readbuffer = b''
            self._offset = 0
            while not self._eof:
                data = self._read1(self.MAX_N)
                if data:
                    buf += data
                    break
            return buf
        end = n + self._offset
        if end < len(self._readbuffer):
            buf = self._readbuffer[self._offset:end]
            self._offset = end
            return buf
        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        if n > 0:
            while not self._eof:
                data = self._read1(n)
                if n < len(data):
                    self._readbuffer = data
                    self._offset = n
                    buf += data[:n]
                    break
                if data:
                    buf += data
                    break
        return buf
    def _read1(self, n):
        # Read up to n compressed bytes with at most one read() system call,
        # decrypt and decompress them.
        if self._eof or n <= 0:
            return b''
        # Read from file.
        if self._compress_type == ZIP_DEFLATED:
            # Handle unconsumed data.
            data = self._decompressor.unconsumed_tail
            if n > len(data):
                data += self._read2(n - len(data))
        else:
            data = self._read2(n)
        if self._compress_type == ZIP_STORED:
            self._eof = self._compress_left <= 0
        elif self._compress_type == ZIP_DEFLATED:
            n = max(n, self.MIN_READ_SIZE)
            data = self._decompressor.decompress(data, n)
            self._eof = (self._decompressor.eof or
                         self._compress_left <= 0 and
                         not self._decompressor.unconsumed_tail)
            if self._eof:
                data += self._decompressor.flush()
        else:
            data = self._decompressor.decompress(data)
            self._eof = self._decompressor.eof or self._compress_left <= 0
        # Never emit more than the declared uncompressed size.
        data = data[:self._left]
        self._left -= len(data)
        if self._left <= 0:
            self._eof = True
        # CRC validation intentionally disabled (see _update_crc).
        # self._update_crc(data)
        return data
    def _read2(self, n):
        # Read up to n raw compressed bytes from the underlying file.
        if self._compress_left <= 0:
            return b''
        n = max(n, self.MIN_READ_SIZE)
        n = min(n, self._compress_left)
        data = self._fileobj.read(n)
        self._compress_left -= len(data)
        if not data:
            # Underlying file ended before the declared compressed size.
            raise EOFError
        return data
    def close(self):
        try:
            if self._close_fileobj:
                self._fileobj.close()
        finally:
            super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
    def open(self, name, mode="r", pwd=None):
        """Return a readable file-like object (ZipExtFile) for member 'name'.

        Args:
            name: member name, or a ZipInfo instance.
            mode: must be "r" ("U"/"rU" are accepted but deprecated).
            pwd: optional password bytes for encrypted members.

        Raises:
            RuntimeError: bad mode or archive already closed.
            BadZipFile: truncated/corrupt local header, or the header
                filename disagrees with the central directory.
        """
        if mode not in ("r", "U", "rU"):
            raise RuntimeError('open() requires mode "r", "U", or "rU"')
        if 'U' in mode:
            import warnings
            warnings.warn("'U' mode is deprecated",
                          DeprecationWarning, 2)
        if pwd and not isinstance(pwd, bytes):
            raise TypeError("pwd: expected bytes, got %s" % type(pwd))
        if not self.fp:
            raise RuntimeError(
                "Attempt to read ZIP archive that was already closed")
        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        else:
            # Get info object for name
            zinfo = self.getinfo(name)
        self._fileRefCnt += 1
        zef_file = _SharedFile(
            self.fp, zinfo.header_offset, self._fpclose, self._lock)
        try:
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipFile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            # print(fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipFile("Bad magic number for file header")
            # NOTE: the filename length in the local header may have been
            # tampered with (some APKs do this to break unzip tools).
            len_fname = fheader[_FH_FILENAME_LENGTH]
            if len_fname > 256:
                # Auto-correct the filename length from the central
                # directory entry; extraction is still not guaranteed
                # to succeed.
                len_fname = len(zinfo.orig_filename)
            fname = zef_file.read(len_fname)
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
            # zinfo.flag_bits ^= zinfo.flag_bits % 2
            if zinfo.flag_bits & 0x20:
                # Zip 2.7: compressed patched data
                raise NotImplementedError(
                    "compressed patched data (flag bit 5)")
            if zinfo.flag_bits & 0x40:
                # strong encryption
                raise NotImplementedError("strong encryption (flag bit 6)")
            if zinfo.flag_bits & 0x800:
                # UTF-8 filename
                fname_str = fname.decode("utf-8")
            else:
                fname_str = fname.decode("cp437")
            if fname_str != zinfo.orig_filename:
                raise BadZipFile(
                    'File name in directory %r and header %r differ.'
                    % (zinfo.orig_filename, fname))
            return ZipExtFile(zef_file, mode, zinfo, None, True)
        except Exception as ignore:
            # Release our reference to the shared file before re-raising.
            zef_file.close()
            raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
        file on the path targetpath.

        Returns the normalized on-disk path that was written (or the
        directory that was created, for directory entries).
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)
        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        invalid_path_parts = ('', os.path.curdir, os.path.pardir)
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                                   if x not in invalid_path_parts)
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            arcname = self._sanitize_windows_name(arcname, os.path.sep)
        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)
        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)
        # Directory entries end in '/': just create the directory.
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath
        with self.open(member, pwd=pwd) as source, \
                open(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)
        return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname.

        Args:
            filename: path of the file (or directory) on disk.
            arcname: archive member name; defaults to *filename* with the
                drive prefix and leading separators stripped.
            compress_type: optional compression method that overrides
                self.compression for this member.
        """
        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")
        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16  # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type
        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        with self._lock:
            if self._seekable:
                self.fp.seek(self.start_dir)
            zinfo.header_offset = self.fp.tell()  # Start of header bytes
            if zinfo.compress_type == ZIP_LZMA:
                # Compressed data includes an end-of-stream (EOS) marker
                zinfo.flag_bits |= 0x02
            self._writecheck(zinfo)
            self._didModify = True
            if isdir:
                # Directory entry: header only, no data.
                zinfo.file_size = 0
                zinfo.compress_size = 0
                zinfo.CRC = 0
                zinfo.external_attr |= 0x10  # MS-DOS directory flag
                self.filelist.append(zinfo)
                self.NameToInfo[zinfo.filename] = zinfo
                self.fp.write(zinfo.FileHeader(False))
                self.start_dir = self.fp.tell()
                return
            cmpr = _get_compressor(zinfo.compress_type)
            if not self._seekable:
                # Non-seekable output: CRC/sizes go in a data descriptor
                # written after the member data (flag bit 3).
                zinfo.flag_bits |= 0x08
            with open(filename, "rb") as fp:
                # Must overwrite CRC and sizes with correct data later
                zinfo.CRC = CRC = 0
                zinfo.compress_size = compress_size = 0
                # Compressed size can be larger than uncompressed size
                zip64 = self._allowZip64 and \
                    zinfo.file_size * 1.05 > ZIP64_LIMIT
                self.fp.write(zinfo.FileHeader(zip64))
                file_size = 0
                while 1:
                    buf = fp.read(1024 * 8)
                    if not buf:
                        break
                    file_size = file_size + len(buf)
                    CRC = crc32(buf, CRC) & 0xffffffff
                    if cmpr:
                        buf = cmpr.compress(buf)
                        compress_size = compress_size + len(buf)
                    self.fp.write(buf)
            if cmpr:
                buf = cmpr.flush()
                compress_size = compress_size + len(buf)
                self.fp.write(buf)
                zinfo.compress_size = compress_size
            else:
                zinfo.compress_size = file_size
            zinfo.CRC = CRC
            zinfo.file_size = file_size
            if zinfo.flag_bits & 0x08:
                # Write CRC and file sizes after the file data
                fmt = '<LQQ' if zip64 else '<LLL'
                self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
                                          zinfo.file_size))
                self.start_dir = self.fp.tell()
            else:
                if not zip64 and self._allowZip64:
                    if file_size > ZIP64_LIMIT:
                        raise RuntimeError(
                            'File size has increased during compressing')
                    if compress_size > ZIP64_LIMIT:
                        raise RuntimeError(
                            'Compressed size larger than uncompressed size')
                # Seek backwards and write file header (which will now include
                # correct CRC and file sizes)
                self.start_dir = self.fp.tell()  # Preserve current position in file
                self.fp.seek(zinfo.header_offset)
                self.fp.write(zinfo.FileHeader(zip64))
                self.fp.seek(self.start_dir)
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
    def _write_end_record(self):
        """Write the central directory, plus the end-of-archive record(s)
        (including ZIP64 records when the limits require them)."""
        for zinfo in self.filelist:  # write central directory
            dt = zinfo.date_time
            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
            extra = []
            if zinfo.file_size > ZIP64_LIMIT \
                    or zinfo.compress_size > ZIP64_LIMIT:
                # Real sizes go in the ZIP64 extra field; the 32-bit
                # fields carry the 0xffffffff sentinel.
                extra.append(zinfo.file_size)
                extra.append(zinfo.compress_size)
                file_size = 0xffffffff
                compress_size = 0xffffffff
            else:
                file_size = zinfo.file_size
                compress_size = zinfo.compress_size
            if zinfo.header_offset > ZIP64_LIMIT:
                extra.append(zinfo.header_offset)
                header_offset = 0xffffffff
            else:
                header_offset = zinfo.header_offset
            extra_data = zinfo.extra
            min_version = 0
            if extra:
                # Append a ZIP64 field to the extra's
                extra_data = struct.pack(
                    '<HH' + 'Q' * len(extra),
                    1, 8 * len(extra), *extra) + extra_data
                min_version = ZIP64_VERSION
            if zinfo.compress_type == ZIP_BZIP2:
                min_version = max(BZIP2_VERSION, min_version)
            elif zinfo.compress_type == ZIP_LZMA:
                min_version = max(LZMA_VERSION, min_version)
            extract_version = max(min_version, zinfo.extract_version)
            create_version = max(min_version, zinfo.create_version)
            try:
                filename, flag_bits = zinfo._encodeFilenameFlags()
                centdir = struct.pack(structCentralDir,
                                      stringCentralDir, create_version,
                                      zinfo.create_system, extract_version, zinfo.reserved,
                                      flag_bits, zinfo.compress_type, dostime, dosdate,
                                      zinfo.CRC, compress_size, file_size,
                                      len(filename), len(
                                          extra_data), len(zinfo.comment),
                                      0, zinfo.internal_attr, zinfo.external_attr,
                                      header_offset)
            except DeprecationWarning:
                # Dump the record that failed to pack before re-raising.
                print((structCentralDir, stringCentralDir, create_version,
                       zinfo.create_system, extract_version, zinfo.reserved,
                       zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                       zinfo.CRC, compress_size, file_size,
                       len(zinfo.filename), len(
                           extra_data), len(zinfo.comment),
                       0, zinfo.internal_attr, zinfo.external_attr,
                       header_offset), file=sys.stderr)
                raise
            self.fp.write(centdir)
            self.fp.write(filename)
            self.fp.write(extra_data)
            self.fp.write(zinfo.comment)
        pos2 = self.fp.tell()
        # Write end-of-zip-archive record
        centDirCount = len(self.filelist)
        centDirSize = pos2 - self.start_dir
        centDirOffset = self.start_dir
        requires_zip64 = None
        if centDirCount > ZIP_FILECOUNT_LIMIT:
            requires_zip64 = "Files count"
        elif centDirOffset > ZIP64_LIMIT:
            requires_zip64 = "Central directory offset"
        elif centDirSize > ZIP64_LIMIT:
            requires_zip64 = "Central directory size"
        if requires_zip64:
            # Need to write the ZIP64 end-of-archive records
            if not self._allowZip64:
                raise LargeZipFile(requires_zip64 +
                                   " would require ZIP64 extensions")
            zip64endrec = struct.pack(
                structEndArchive64, stringEndArchive64,
                44, 45, 45, 0, 0, centDirCount, centDirCount,
                centDirSize, centDirOffset)
            self.fp.write(zip64endrec)
            zip64locrec = struct.pack(
                structEndArchive64Locator,
                stringEndArchive64Locator, 0, pos2, 1)
            self.fp.write(zip64locrec)
            # Clamp the classic record's fields to their field widths.
            centDirCount = min(centDirCount, 0xFFFF)
            centDirSize = min(centDirSize, 0xFFFFFFFF)
            centDirOffset = min(centDirOffset, 0xFFFFFFFF)
        endrec = struct.pack(structEndArchive, stringEndArchive,
                             0, 0, centDirCount, centDirCount,
                             centDirSize, centDirOffset, len(self._comment))
        self.fp.write(endrec)
        self.fp.write(self._comment)
        self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""
    def __init__(self, file, mode="r", compression=ZIP_STORED,
                 allowZip64=True, optimize=-1):
        ZipFile.__init__(self, file, mode=mode, compression=compression,
                         allowZip64=allowZip64)
        # Bytecode optimization level used by _get_codename:
        # -1 = legacy mode (use whatever .pyc is already present).
        self._optimize = optimize
    def writepy(self, pathname, basename="", filterfunc=None):
        """Add all files from "pathname" to the ZIP archive.
        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive. If pathname is a plain
        directory, listdir *.py and enter all modules. Else, pathname
        must be a Python *.py file and the module will be put into the
        archive. Added modules are always module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        If filterfunc(pathname) is given, it is called with every argument.
        When it is False, the file or directory is skipped.
        """
        if filterfunc and not filterfunc(pathname):
            if self.debug:
                label = 'path' if os.path.isdir(pathname) else 'file'
                print('%s "%s" skipped by filterfunc' % (label, pathname))
            return
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print("Adding package in", pathname, "as", basename)
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print("Adding", arcname)
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename,
                                         filterfunc=filterfunc)  # Recursive call
                    elif ext == ".py":
                        if filterfunc and not filterfunc(path):
                            if self.debug:
                                print('file "%s" skipped by filterfunc' % path)
                            continue
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print("Adding files from directory", pathname)
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        if filterfunc and not filterfunc(path):
                            if self.debug:
                                print('file "%s" skipped by filterfunc' % path)
                            continue
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
        else:
            if pathname[-3:] != ".py":
                raise RuntimeError(
                    'Files added with writepy() must end with ".py"')
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print("Adding file", arcname)
            self.write(fname, arcname)
    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.
        Given a module name path, return the correct file path and
        archive name, compiling if necessary. For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        def _compile(file, optimize=-1):
            # Byte-compile *file*; returns False (and prints the error)
            # on a compilation failure instead of raising.
            import py_compile
            if self.debug:
                print("Compiling", file)
            try:
                py_compile.compile(file, doraise=True, optimize=optimize)
            except py_compile.PyCompileError as err:
                print(err.msg)
                return False
            return True
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        # PEP 3147 cache locations for each optimization level.
        pycache_opt0 = importlib.util.cache_from_source(
            file_py, optimization='')
        pycache_opt1 = importlib.util.cache_from_source(
            file_py, optimization=1)
        pycache_opt2 = importlib.util.cache_from_source(
            file_py, optimization=2)
        if self._optimize == -1:
            # legacy mode: use whatever file is present
            if (os.path.isfile(file_pyc) and
                    os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
                # Use .pyc file.
                arcname = fname = file_pyc
            elif (os.path.isfile(pycache_opt0) and
                  os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt0
                arcname = file_pyc
            elif (os.path.isfile(pycache_opt1) and
                  os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt1
                arcname = file_pyc
            elif (os.path.isfile(pycache_opt2) and
                  os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
                # Use the __pycache__/*.pyc file, but write it to the legacy pyc
                # file name in the archive.
                fname = pycache_opt2
                arcname = file_pyc
            else:
                # Compile py into PEP 3147 pyc file.
                if _compile(file_py):
                    if sys.flags.optimize == 0:
                        fname = pycache_opt0
                    elif sys.flags.optimize == 1:
                        fname = pycache_opt1
                    else:
                        fname = pycache_opt2
                    arcname = file_pyc
                else:
                    fname = arcname = file_py
        else:
            # new mode: use given optimization level
            if self._optimize == 0:
                fname = pycache_opt0
                arcname = file_pyc
            else:
                arcname = file_pyc
                if self._optimize == 1:
                    fname = pycache_opt1
                elif self._optimize == 2:
                    fname = pycache_opt2
                else:
                    msg = "invalid value for 'optimize': {!r}".format(
                        self._optimize)
                    raise ValueError(msg)
            # Recompile when the cached file is missing or stale; fall
            # back to shipping the .py source if compilation fails.
            if not (os.path.isfile(fname) and
                    os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
                if not _compile(file_py, optimize=self._optimize):
                    fname = arcname = file_py
        archivename = os.path.split(arcname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args=None):
    """Command-line entry point: -l list, -t test, -e extract, -c create.

    *args* defaults to sys.argv[1:].  Exits with status 1 on bad usage.
    """
    import textwrap
    USAGE = textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip        # Show listing of a zipfile
            zipfile.py -t zipfile.zip        # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    if args is None:
        args = sys.argv[1:]
    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print(USAGE)
        sys.exit(1)
    if args[0] == '-l':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        with ZipFile(args[1], 'r') as zf:
            zf.printdir()
    elif args[0] == '-t':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        with ZipFile(args[1], 'r') as zf:
            badfile = zf.testzip()
        if badfile:
            print(
                "The following enclosed file is corrupted: {!r}".format(badfile))
        print("Done testing")
    elif args[0] == '-e':
        if len(args) != 3:
            print(USAGE)
            sys.exit(1)
        with ZipFile(args[1], 'r') as zf:
            zf.extractall(args[2])
    elif args[0] == '-c':
        if len(args) < 3:
            print(USAGE)
            sys.exit(1)
        def addToZip(zf, path, zippath):
            # Recursively add a file or directory tree under *zippath*.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                if zippath:
                    zf.write(path, zippath)
                for nm in os.listdir(path):
                    addToZip(zf,
                             os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore
        with ZipFile(args[1], 'w') as zf:
            for path in args[2:]:
                zippath = os.path.basename(path)
                if not zippath:
                    zippath = os.path.basename(os.path.dirname(path))
                if zippath in ('', os.curdir, os.pardir):
                    zippath = ''
                addToZip(zf, path, zippath)
if __name__ == "__main__":
    # Run the small list/test/extract/create CLI when executed as a script.
    main()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipInfo.FileHeader | python | def FileHeader(self, zip64=None):
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt) - 4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra | Return the per-file header as a string. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L393-L437 | [
"def _encodeFilenameFlags(self):\n try:\n return self.filename.encode('ascii'), self.flag_bits\n except UnicodeEncodeError:\n return self.filename.encode('utf-8'), self.flag_bits | 0x800\n"
] | class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980, 1, 1, 0, 0, 0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def __repr__(self):
result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
if self.compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self.compress_type,
self.compress_type))
hi = self.external_attr >> 16
lo = self.external_attr & 0xFFFF
if hi:
result.append(' filemode=%r' % stat.filemode(hi))
if lo:
result.append(' external_attr=%#x' % lo)
isdir = self.filename[-1:] == '/'
if not isdir or self.file_size:
result.append(' file_size=%r' % self.file_size)
if ((not isdir or self.compress_size) and
(self.compress_type != ZIP_STORED or
self.file_size != self.compress_size)):
result.append(' compress_size=%r' % self.compress_size)
result.append('>')
return ''.join(result)
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s" % (ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
# old = self.header_offset
self.header_offset = counts[idx]
idx += 1
extra = extra[ln + 4:]
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipExtFile.readline | python | def readline(self, limit=-1):
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line | Read and return a line from the stream.
If limit is specified, at most limit bytes will be read. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L758-L806 | [
"def peek(self, n=1):\n \"\"\"Returns buffered bytes without advancing the position.\"\"\"\n if n > len(self._readbuffer) - self._offset:\n chunk = self.read(n)\n if len(chunk) > self._offset:\n self._readbuffer = chunk + self._readbuffer[self._offset:]\n self._offset = 0\n... | class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
self.mode = mode
self.name = zipinfo.filename
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
# 因为Android并不验证CRC
# 部分APK把CRC抹掉,解析的时候,不验证CRC
# if self._eof and self._running_crc != self._expected_crc:
# raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
# Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
# self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipExtFile.peek | python | def peek(self, n=1):
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512] | Returns buffered bytes without advancing the position. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L808-L819 | [
"def read(self, n=-1):\n \"\"\"Read and return up to n bytes.\n If the argument is omitted, None, or negative, data is read and returned until EOF is reached..\n \"\"\"\n if n is None or n < 0:\n buf = self._readbuffer[self._offset:]\n self._readbuffer = b''\n self._offset = 0\n ... | class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
self.mode = mode
self.name = zipinfo.filename
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
# 因为Android并不验证CRC
# 部分APK把CRC抹掉,解析的时候,不验证CRC
# if self._eof and self._running_crc != self._expected_crc:
# raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
# Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
# self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipExtFile.read | python | def read(self, n=-1):
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf | Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L824-L855 | [
"def _read1(self, n):\n # Read up to n compressed bytes with at most one read() system call,\n # decrypt and decompress them.\n if self._eof or n <= 0:\n return b''\n\n # Read from file.\n if self._compress_type == ZIP_DEFLATED:\n # Handle unconsumed data.\n data = self._decompre... | class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
self.mode = mode
self.name = zipinfo.filename
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
# 因为Android并不验证CRC
# 部分APK把CRC抹掉,解析的时候,不验证CRC
# if self._eof and self._running_crc != self._expected_crc:
# raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
# Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
# self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile._RealGetContents | python | def _RealGetContents(self):
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH] | Read in the table of contents for the ZIP file. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1088-L1162 | [
"def _EndRecData(fpin):\n \"\"\"Return data from the \"End of Central Directory\" record, or None.\n\n The data is a list of the nine items in the ZIP \"End of central dir\"\n record followed by a tenth item, the file seek offset of this record.\"\"\"\n\n # Determine file size。\n fpin.seek(0, 2)\n ... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
    def _write_end_record(self):
        """Serialize the central directory followed by the end-of-archive
        record(s), emitting ZIP64 variants whenever a size, offset or
        entry count exceeds the classic 32-bit/16-bit field limits.

        Called from close() under self._lock, with self.fp positioned at
        self.start_dir (just past the last file's data).
        """
        for zinfo in self.filelist: # write central directory
            dt = zinfo.date_time
            # Encode modification date/time in the 16-bit MS-DOS format
            # (years offset from 1980, seconds at 2-second resolution).
            dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
            dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
            extra = []
            if zinfo.file_size > ZIP64_LIMIT \
                    or zinfo.compress_size > ZIP64_LIMIT:
                # Sizes overflow 32 bits: real values go in the ZIP64
                # extra field, fixed fields get the 0xffffffff sentinel.
                extra.append(zinfo.file_size)
                extra.append(zinfo.compress_size)
                file_size = 0xffffffff
                compress_size = 0xffffffff
            else:
                file_size = zinfo.file_size
                compress_size = zinfo.compress_size
            if zinfo.header_offset > ZIP64_LIMIT:
                extra.append(zinfo.header_offset)
                header_offset = 0xffffffff
            else:
                header_offset = zinfo.header_offset
            extra_data = zinfo.extra
            min_version = 0
            if extra:
                # Append a ZIP64 field to the extra's
                # (header id 1, 8 bytes per overflowed value).
                extra_data = struct.pack(
                    '<HH' + 'Q' * len(extra),
                    1, 8 * len(extra), *extra) + extra_data
                min_version = ZIP64_VERSION
            # Bump the minimum extractor version for the compression
            # methods that require a newer unzip implementation.
            if zinfo.compress_type == ZIP_BZIP2:
                min_version = max(BZIP2_VERSION, min_version)
            elif zinfo.compress_type == ZIP_LZMA:
                min_version = max(LZMA_VERSION, min_version)
            extract_version = max(min_version, zinfo.extract_version)
            create_version = max(min_version, zinfo.create_version)
            try:
                filename, flag_bits = zinfo._encodeFilenameFlags()
                centdir = struct.pack(structCentralDir,
                    stringCentralDir, create_version,
                    zinfo.create_system, extract_version, zinfo.reserved,
                    flag_bits, zinfo.compress_type, dostime, dosdate,
                    zinfo.CRC, compress_size, file_size,
                    len(filename), len(
                        extra_data), len(zinfo.comment),
                    0, zinfo.internal_attr, zinfo.external_attr,
                    header_offset)
            except DeprecationWarning:
                # struct used to raise DeprecationWarning for out-of-range
                # values; dump the offending record before re-raising.
                print((structCentralDir, stringCentralDir, create_version,
                    zinfo.create_system, extract_version, zinfo.reserved,
                    zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                    zinfo.CRC, compress_size, file_size,
                    len(zinfo.filename), len(
                        extra_data), len(zinfo.comment),
                    0, zinfo.internal_attr, zinfo.external_attr,
                    header_offset), file=sys.stderr)
                raise
            self.fp.write(centdir)
            self.fp.write(filename)
            self.fp.write(extra_data)
            self.fp.write(zinfo.comment)
        pos2 = self.fp.tell()
        # Write end-of-zip-archive record
        centDirCount = len(self.filelist)
        centDirSize = pos2 - self.start_dir
        centDirOffset = self.start_dir
        requires_zip64 = None
        if centDirCount > ZIP_FILECOUNT_LIMIT:
            requires_zip64 = "Files count"
        elif centDirOffset > ZIP64_LIMIT:
            requires_zip64 = "Central directory offset"
        elif centDirSize > ZIP64_LIMIT:
            requires_zip64 = "Central directory size"
        if requires_zip64:
            # Need to write the ZIP64 end-of-archive records
            if not self._allowZip64:
                raise LargeZipFile(requires_zip64 +
                                   " would require ZIP64 extensions")
            zip64endrec = struct.pack(
                structEndArchive64, stringEndArchive64,
                44, 45, 45, 0, 0, centDirCount, centDirCount,
                centDirSize, centDirOffset)
            self.fp.write(zip64endrec)
            zip64locrec = struct.pack(
                structEndArchive64Locator,
                stringEndArchive64Locator, 0, pos2, 1)
            self.fp.write(zip64locrec)
            # Clamp the classic fields; readers fall back to the ZIP64
            # records above when they see these sentinel values.
            centDirCount = min(centDirCount, 0xFFFF)
            centDirSize = min(centDirSize, 0xFFFFFFFF)
            centDirOffset = min(centDirOffset, 0xFFFFFFFF)
        endrec = struct.pack(structEndArchive, stringEndArchive,
                             0, 0, centDirCount, centDirCount,
                             centDirSize, centDirOffset, len(self._comment))
        self.fp.write(endrec)
        self.fp.write(self._comment)
        self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile.setpassword | python | def setpassword(self, pwd):
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None | Set default password for encrypted files. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1204-L1211 | null | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile.read | python | def read(self, name, pwd=None):
with self.open(name, "r", pwd) as fp:
return fp.read() | Return file bytes (as a string) for name. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1231-L1234 | [
"def open(self, name, mode=\"r\", pwd=None):\n \"\"\"Return file-like object for 'name'.\"\"\"\n if mode not in (\"r\", \"U\", \"rU\"):\n raise RuntimeError('open() requires mode \"r\", \"U\", or \"rU\"')\n if 'U' in mode:\n import warnings\n warnings.warn(\"'U' mode is deprecated\",\n... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile.open | python | def open(self, name, mode="r", pwd=None):
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise | Return file-like object for 'name'. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1236-L1312 | [
"def read(self, n=-1):\n with self._lock:\n self._file.seek(self._pos)\n data = self._file.read(n)\n self._pos = self._file.tell()\n return data\n",
"def close(self):\n if self._file is not None:\n fileobj = self._file\n self._file = None\n self._close(fileob... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile._sanitize_windows_name | python | def _sanitize_windows_name(cls, arcname, pathsep):
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname | Replace bad characters and remove trailing dots from parts. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1341-L1353 | null | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile._extract_member | python | def _extract_member(self, member, targetpath, pwd):
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath | Extract the ZipInfo object 'member' to a physical
file on the path targetpath. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1355-L1392 | [
"def open(self, name, mode=\"r\", pwd=None):\n \"\"\"Return file-like object for 'name'.\"\"\"\n if mode not in (\"r\", \"U\", \"rU\"):\n raise RuntimeError('open() requires mode \"r\", \"U\", or \"rU\"')\n if 'U' in mode:\n import warnings\n warnings.warn(\"'U' mode is deprecated\",\n... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile._writecheck | python | def _writecheck(self, zinfo):
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions") | Check for errors before writing a file to the archive. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1394-L1415 | [
"def _check_compression(compression):\n if compression == ZIP_STORED:\n pass\n elif compression == ZIP_DEFLATED:\n if not zlib:\n raise RuntimeError(\n \"Compression requires the (missing) zlib module\")\n elif compression == ZIP_BZIP2:\n if not bz2:\n ... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile.write | python | def write(self, filename, arcname=None, compress_type=None):
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo | Put the bytes from filename into the archive under the name
arcname. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1417-L1519 | [
"def _get_compressor(compress_type):\n if compress_type == ZIP_DEFLATED:\n return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,\n zlib.DEFLATED, -15)\n elif compress_type == ZIP_BZIP2:\n return bz2.BZ2Compressor()\n elif compress_type == ZIP_LZMA:\n return... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | ZipFile.close | python | def close(self):
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp) | Close the file, and for mode 'w', 'x' and 'a' write the ending
records. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1588-L1603 | [
"def _write_end_record(self):\n for zinfo in self.filelist: # write central directory\n dt = zinfo.date_time\n dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]\n dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)\n extra = []\n if zinfo.file_size > ZIP64_LIMIT \\\n ... | class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise RuntimeError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r': 'rb', 'w': 'w+b', 'x': 'x+b', 'a': 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise RuntimeError("Mode must be 'r', 'w', 'x', or 'a'")
except Exception as ignore:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# ---> APK文件只有一个,不可能存在额外数据。
# "concat" is zero, unless zip was concatenated to another file
# concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
# if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# # If Zip64 extension structures are present, account for them
# concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
concat = 0
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)
x._decodeExtra()
# x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = total + sizeCentralDir + \
centdir[_CD_FILENAME_LENGTH] + \
centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if 'U' in mode:
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
self._fileRefCnt += 1
zef_file = _SharedFile(
self.fp, zinfo.header_offset, self._fpclose, self._lock)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
# print(fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
# 注意:头部的文件长度可能会被修改
len_fname = fheader[_FH_FILENAME_LENGTH]
if len_fname > 256:
# 自动修正文件名长度,但是,不能保证解压成功
len_fname = len(zinfo.orig_filename)
fname = zef_file.read(len_fname)
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
# zinfo.flag_bits ^= zinfo.flag_bits % 2
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError(
"compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
return ZipExtFile(zef_file, mode, zinfo, None, True)
except Exception as ignore:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise RuntimeError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
return
cmpr = _get_compressor(zinfo.compress_type)
if not self._seekable:
zinfo.flag_bits |= 0x08
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.start_dir = self.fp.tell()
else:
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
self.start_dir = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(self.start_dir)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.start_dir = self.fp.tell()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q' * len(extra),
1, 8 * len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(
extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
|
mikusjelly/apkutils | apkutils/apkfile.py | PyZipFile._get_codename | python | def _get_codename(self, pathname, basename):
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
pycache_opt0 = importlib.util.cache_from_source(
file_py, optimization='')
pycache_opt1 = importlib.util.cache_from_source(
file_py, optimization=1)
pycache_opt2 = importlib.util.cache_from_source(
file_py, optimization=2)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_opt0) and
os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt0
arcname = file_pyc
elif (os.path.isfile(pycache_opt1) and
os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt1
arcname = file_pyc
elif (os.path.isfile(pycache_opt2) and
os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt2
arcname = file_pyc
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
if sys.flags.optimize == 0:
fname = pycache_opt0
elif sys.flags.optimize == 1:
fname = pycache_opt1
else:
fname = pycache_opt2
arcname = file_pyc
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_opt0
arcname = file_pyc
else:
arcname = file_pyc
if self._optimize == 1:
fname = pycache_opt1
elif self._optimize == 2:
fname = pycache_opt2
else:
msg = "invalid value for 'optimize': {!r}".format(
self._optimize)
raise ValueError(msg)
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename) | Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string). | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1805-L1889 | null | class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=True, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename="", filterfunc=None):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyc.
This method will compile the module.py into module.pyc if
necessary.
If filterfunc(pathname) is given, it is called with every argument.
When it is False, the file or directory is skipped.
"""
if filterfunc and not filterfunc(pathname):
if self.debug:
label = 'path' if os.path.isdir(pathname) else 'file'
print('%s "%s" skipped by filterfunc' % (label, pathname))
return
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename,
filterfunc=filterfunc) # Recursive call
elif ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file "%s" skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file "%s" skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
|
mikusjelly/apkutils | apkutils/__init__.py | APK.pretty_print | python | def pretty_print(node):
for pre, _, node in RenderTree(node):
print('{}{}'.format(pre, node.name)) | 漂亮地打印一个节点
Args:
node (TYPE): Description | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/__init__.py#L113-L120 | null | class APK:
    def __init__(self, apk_path):
        """Lazily-initialised APK wrapper.

        Every attribute starts empty and is populated on demand by the
        matching ``get_*`` / ``_init_*`` accessor pair.

        Args:
            apk_path: path to the APK (zip) file on disk.
        """
        self.apk_path = apk_path
        self.dex_files = None        # parsed DexFile objects
        self.children = None         # zip entry metadata (name/type/time/crc)
        self.manifest = None         # manifest parsed into a dict
        self.org_manifest = None     # manifest as decoded XML text
        self.strings = None          # dex strings, hex-encoded
        self.org_strings = None      # dex strings, raw bytes
        self.opcodes = None          # per-method opcode hex streams
        self.certs = {}              # digest algorithm -> certificate digests
        self.arsc = None             # parsed resources.arsc
        self.strings_refx = None     # class -> method -> referenced strings
        self.app_icon = None         # launcher icon path inside the archive
        self.methods = None          # sorted "cls/method" paths
        self.trees = None  # code-structure signature dict (shape hash -> nodes)
        self.application = None      # android:name of <application>
        self.main_activity = None    # activity with action.MAIN
        self.mini_mani = None        # manifest flattened to one line
        self.classes = None          # sorted dex class names
@staticmethod
def serialize_xml(org_xml):
if not org_xml:
return None
_xml = re.sub(r'\n', ' ', org_xml)
_xml = re.sub(r'"\s+?>', '">', _xml)
_xml = re.sub(r'>\s+?<', '><', _xml)
return _xml
def get_mini_mani(self):
if not self.mini_mani:
self.mini_mani = self.serialize_xml(self.get_org_manifest())
return self.mini_mani
def get_main_activity(self):
if not self.main_activity:
self._init_main_activity()
return self.main_activity
def _init_main_activity(self):
mani = self.get_mini_mani()
ptn = r'<activity android:name="([^"]*?)"[^<>]*?>.*?<action android:name="android.intent.action.MAIN">.*?</activity>'
result = re.search(ptn, mani)
if result:
self.main_activity = result.groups()[0]
def get_application(self):
if not self.application:
self._init_application()
return self.application
def _init_application(self):
mani = self.get_mini_mani()
if not mani:
return
ptn = r'<application[^<>]*?:name="([^<>"]*?)"[^<>]*?>'
result = re.search(ptn, mani)
if result:
self.application = result.groups()[0]
def get_app_icon(self):
if self.app_icon:
return self.app_icon
self._init_app_icon()
return self.app_icon
    def _init_app_icon(self):
        """Resolve the launcher icon's path inside the archive.

        Reads the icon resource id from the manifest, looks it up in the
        public resources of resources.arsc, then matches the resource
        type/name against the archive file list.

        NOTE(review): assumes the manifest declares ``android:icon`` —
        ``result`` would be None otherwise and ``.groups()`` would raise;
        TODO confirm callers only use this on well-formed APKs.
        """
        files = self.get_files()
        result = re.search(r':icon="@(.*?)"', self.get_org_manifest())
        # Resource ids in the public-resource dump are '0x...'-prefixed.
        ids = '0x' + result.groups()[0].lower()
        try:
            with apkfile.ZipFile(self.apk_path, 'r') as z:
                data = z.read('resources.arsc')
                self.arscobj = ARSCParser(data)
                self.package = self.arscobj.get_packages_names()[0]
                datas = xmltodict.parse(
                    self.arscobj.get_public_resources(self.package))
                for item in datas['resources']['public']:
                    if ids != item['@id']:
                        continue
                    # Match the archive entry whose path embeds both the
                    # resource type (e.g. 'mipmap') and the resource name.
                    for f in files:
                        name = f['name']
                        if item['@type'] in name and item['@name'] in name:
                            self.app_icon = name
        except Exception as ex:
            raise ex
def get_trees(self, height=2, limit=5000):
if self.trees is None:
self._init_trees(height, limit)
return self.trees
@staticmethod
def _init_trees(self, height, limit):
if self.methods is None:
self._init_methods(limit)
if not self.methods:
return
root = Node('root')
r = Resolver(pathattr='name')
def find_node(path):
"""查找节点
Args:
root (TYPE): Description
path (TYPE): Description
Returns:
TYPE: Description
"""
try:
return r.glob(root, path)[0]
except Exception:
return None
def to_nodes(mtd):
"""把一个方法,转化成节点
Args:
root (TYPE): 根节点
mtd (TYPE): Description
Returns:
TYPE: Node
"""
current = root
node_path = '/root'
for item in mtd.split('/'):
node_path = node_path + '/' + item
tnode = find_node(node_path)
if tnode:
current = tnode
else:
current = Node(item, parent=current)
count = 0
# TODO 节点插入的顺序,决定了树的遍历顺序,及其计算结果
# 假设2个结构一样,但是,因为名字顺序不一样,导致插入顺序不一致
# 有可能导致一样的结构不一样的结果。
for mtd in self.methods:
count += 1
to_nodes(mtd)
def serialize_node(root_node):
snum = ''
for pre, _, node in RenderTree(root_node):
snum = snum + str(node.height)
return snum
self.trees = {}
for pre, _, node in RenderTree(root):
if node.height > height:
key = hash.hash(serialize_node(node), 'md5')
if key in self.trees:
self.trees[key].append(node)
else:
self.trees[key] = [node]
def get_classes(self):
if self.classes is None:
self._init_classes()
return self.classes
def _init_classes(self):
classes = set()
if not self.dex_files:
self._init_dex_files()
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
classes.add(dexClass.name)
self.classes = sorted(classes)
def get_methods(self, limit=10000):
"""获取所有方法路径 com/a/b/mtd_name
Returns:
TYPE: set
"""
if self.methods is None:
self._init_methods(limit)
return self.methods
    def _init_methods(self, limit=10000):
        """Populate self.methods with sorted 'class/method' paths.

        Some APKs contain huge numbers of methods, which makes this quite
        slow, so a cap is applied.

        Args:
            limit (int, optional): method-count cap; when the total number
                of method ids across all dex files exceeds it,
                self.methods is left as None.
        """
        methods = set()
        if not self.dex_files:
            self._init_dex_files()
        count = 0
        for dex_file in self.dex_files:
            count += dex_file.method_ids.size
        if limit < count:
            # Too many methods: bail out, leaving self.methods as None.
            return
        for dex_file in self.dex_files:
            for dexClass in dex_file.classes:
                try:
                    dexClass.parseData()
                except IndexError:
                    # Corrupt/unparseable class: skip it.
                    continue
                for method in dexClass.data.methods:
                    clsname = method.id.cname.decode()
                    mtdname = method.id.name.decode()
                    methods.add(clsname + '/' + mtdname)
        self.methods = sorted(methods)
def _init_strings_refx(self):
if not self.dex_files:
self._init_dex_files()
self.strings_refx = {}
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
if not method.code:
continue
for bc in method.code.bytecode:
# 1A const-string
# 1B const-string-jumbo
if bc.opcode not in {26, 27}:
continue
clsname = method.id.cname.decode()
mtdname = method.id.name.decode()
dexstr = dex_file.string(bc.args[1])
if clsname in self.strings_refx:
if mtdname in self.strings_refx[clsname]:
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname] = {}
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
def get_strings_refx(self):
"""获取字符串索引,即字符串被那些类、方法使用了。
:return: 字符串索引
:rtype: [dict]
"""
if self.strings_refx is None:
self._init_strings_refx()
return self.strings_refx
def get_dex_files(self):
if not self.dex_files:
self._init_dex_files()
return self.dex_files
def _init_dex_files(self):
self.dex_files = []
try:
with apkfile.ZipFile(self.apk_path, 'r') as z:
for name in z.namelist():
data = z.read(name)
if name.startswith('classes') and name.endswith('.dex') \
and Magic(data).get_type() == 'dex':
dex_file = DexFile(data)
self.dex_files.append(dex_file)
except Exception as ex:
raise ex
def get_strings(self):
if not self.strings:
self._init_strings()
return self.strings
def get_org_strings(self):
if not self.org_strings:
self._init_strings()
return self.org_strings
def _init_strings(self):
if not self.dex_files:
self._init_dex_files()
str_set = set()
org_str_set = set()
for dex_file in self.dex_files:
for i in range(dex_file.string_ids.size):
ostr = dex_file.string(i)
org_str_set.add(ostr)
str_set.add(binascii.hexlify(ostr).decode())
self.strings = list(str_set)
self.org_strings = list(org_str_set)
def get_files(self):
if not self.children:
self._init_children()
return self.children
def _init_children(self):
self.children = []
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
try:
data = zf.read(name)
mine = Magic(data).get_type()
info = zf.getinfo(name)
except Exception as ex:
print(name, ex)
continue
item = {}
item["name"] = name
item["type"] = mine
item["time"] = "%d%02d%02d%02d%02d%02d" % info.date_time
crc = str(hex(info.CRC)).upper()[2:]
crc = '0' * (8 - len(crc)) + crc
item["crc"] = crc
# item["sha1"] = ""
self.children.append(item)
except Exception as e:
raise e
def get_org_manifest(self):
if not self.org_manifest:
self._init_manifest()
return self.org_manifest
def _init_org_manifest(self):
ANDROID_MANIFEST = "AndroidManifest.xml"
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ANDROID_MANIFEST in zf.namelist():
data = zf.read(ANDROID_MANIFEST)
try:
axml = AXML(data)
if axml.is_valid:
self.org_manifest = axml.get_xml()
except Exception as e:
raise e
except Exception as e:
raise e
def get_manifest(self):
if not self.manifest:
self._init_manifest()
return self.manifest
def _init_manifest(self):
if not self.org_manifest:
self._init_org_manifest()
if self.org_manifest:
try:
self.manifest = xmltodict.parse(
self.org_manifest, False)['manifest']
except xml.parsers.expat.ExpatError as e:
pass
except Exception as e:
raise e
def _init_arsc(self):
ARSC_NAME = 'resources.arsc'
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ARSC_NAME in zf.namelist():
data = zf.read(ARSC_NAME)
self.arsc = ARSCParser(data)
except Exception as e:
raise e
def get_arsc(self):
if not self.arsc:
self._init_arsc()
return self.arsc
def get_certs(self, digestalgo='md5'):
if digestalgo not in self.certs:
self._init_certs(digestalgo)
return self.certs[digestalgo]
def _init_certs(self, digestalgo):
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
if name.startswith('META-INF/') and name.endswith(('.DSA', '.RSA')):
data = zf.read(name)
mine = Magic(data).get_type()
if mine != 'txt':
from apkutils.cert import Certificate
cert = Certificate(data, digestalgo=digestalgo)
self.certs[digestalgo] = cert.get()
except Exception as e:
raise e
def get_opcodes(self):
if not self.dex_files:
self._init_opcodes()
return self.opcodes
def _init_opcodes(self):
if not self.dex_files:
self._init_dex_files()
self.opcodes = []
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
opcodes = ""
if method.code:
for bc in method.code.bytecode:
opcode = str(hex(bc.opcode)).upper()[2:]
if len(opcode) == 2:
opcodes = opcodes + opcode
else:
opcodes = opcodes + "0" + opcode
proto = self.get_proto_string(
method.id.return_type, method.id.param_types)
item = {}
item['super_class'] = dexClass.super.decode()
item['class_name'] = method.id.cname.decode()
item['method_name'] = method.id.name.decode()
item['method_desc'] = method.id.desc.decode()
item['proto'] = proto
item['opcodes'] = opcodes
self.opcodes.append(item)
@staticmethod
def get_proto_string(return_type, param_types):
proto = return_type.decode()
if len(proto) > 1:
proto = 'L'
for item in param_types:
param_type = item.decode()
proto += 'L' if len(param_type) > 1 else param_type
return proto
|
mikusjelly/apkutils | apkutils/__init__.py | APK.get_methods | python | def get_methods(self, limit=10000):
if self.methods is None:
self._init_methods(limit)
return self.methods | 获取所有方法路径 com/a/b/mtd_name
Returns:
TYPE: set | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/__init__.py#L204-L212 | [
"def _init_methods(self, limit=10000):\n \"\"\"初始化方法\n\n 某些APK可能存在大量的方法,可能会相当耗时,根据情况加限制\n\n Args:\n limit (int, optional): 方法数量限制,超过该值,则不获取方法\n\n Returns:\n TYPE: 方法集合\n \"\"\"\n methods = set()\n if not self.dex_files:\n self._init_dex_files()\n\n count = 0\n for dex... | class APK:
def __init__(self, apk_path):
self.apk_path = apk_path
self.dex_files = None
self.children = None
self.manifest = None
self.org_manifest = None
self.strings = None
self.org_strings = None
self.opcodes = None
self.certs = {}
self.arsc = None
self.strings_refx = None
self.app_icon = None
self.methods = None
self.trees = None # 代码结构序列字典
self.application = None
self.main_activity = None
self.mini_mani = None
self.classes = None
@staticmethod
def serialize_xml(org_xml):
if not org_xml:
return None
_xml = re.sub(r'\n', ' ', org_xml)
_xml = re.sub(r'"\s+?>', '">', _xml)
_xml = re.sub(r'>\s+?<', '><', _xml)
return _xml
def get_mini_mani(self):
if not self.mini_mani:
self.mini_mani = self.serialize_xml(self.get_org_manifest())
return self.mini_mani
def get_main_activity(self):
if not self.main_activity:
self._init_main_activity()
return self.main_activity
def _init_main_activity(self):
mani = self.get_mini_mani()
ptn = r'<activity android:name="([^"]*?)"[^<>]*?>.*?<action android:name="android.intent.action.MAIN">.*?</activity>'
result = re.search(ptn, mani)
if result:
self.main_activity = result.groups()[0]
def get_application(self):
if not self.application:
self._init_application()
return self.application
def _init_application(self):
mani = self.get_mini_mani()
if not mani:
return
ptn = r'<application[^<>]*?:name="([^<>"]*?)"[^<>]*?>'
result = re.search(ptn, mani)
if result:
self.application = result.groups()[0]
def get_app_icon(self):
if self.app_icon:
return self.app_icon
self._init_app_icon()
return self.app_icon
def _init_app_icon(self):
files = self.get_files()
result = re.search(r':icon="@(.*?)"', self.get_org_manifest())
ids = '0x' + result.groups()[0].lower()
try:
with apkfile.ZipFile(self.apk_path, 'r') as z:
data = z.read('resources.arsc')
self.arscobj = ARSCParser(data)
self.package = self.arscobj.get_packages_names()[0]
datas = xmltodict.parse(
self.arscobj.get_public_resources(self.package))
for item in datas['resources']['public']:
if ids != item['@id']:
continue
for f in files:
name = f['name']
if item['@type'] in name and item['@name'] in name:
self.app_icon = name
except Exception as ex:
raise ex
def get_trees(self, height=2, limit=5000):
if self.trees is None:
self._init_trees(height, limit)
return self.trees
    @staticmethod
    def pretty_print(node):
        """Pretty-print a tree node and its descendants to stdout.

        Args:
            node: an anytree node; each output line is the branch prefix
                followed by the node name.
        """
        for pre, _, node in RenderTree(node):
            print('{}{}'.format(pre, node.name))
def _init_trees(self, height, limit):
if self.methods is None:
self._init_methods(limit)
if not self.methods:
return
root = Node('root')
r = Resolver(pathattr='name')
def find_node(path):
"""查找节点
Args:
root (TYPE): Description
path (TYPE): Description
Returns:
TYPE: Description
"""
try:
return r.glob(root, path)[0]
except Exception:
return None
def to_nodes(mtd):
"""把一个方法,转化成节点
Args:
root (TYPE): 根节点
mtd (TYPE): Description
Returns:
TYPE: Node
"""
current = root
node_path = '/root'
for item in mtd.split('/'):
node_path = node_path + '/' + item
tnode = find_node(node_path)
if tnode:
current = tnode
else:
current = Node(item, parent=current)
count = 0
# TODO 节点插入的顺序,决定了树的遍历顺序,及其计算结果
# 假设2个结构一样,但是,因为名字顺序不一样,导致插入顺序不一致
# 有可能导致一样的结构不一样的结果。
for mtd in self.methods:
count += 1
to_nodes(mtd)
def serialize_node(root_node):
snum = ''
for pre, _, node in RenderTree(root_node):
snum = snum + str(node.height)
return snum
self.trees = {}
for pre, _, node in RenderTree(root):
if node.height > height:
key = hash.hash(serialize_node(node), 'md5')
if key in self.trees:
self.trees[key].append(node)
else:
self.trees[key] = [node]
def get_classes(self):
if self.classes is None:
self._init_classes()
return self.classes
def _init_classes(self):
classes = set()
if not self.dex_files:
self._init_dex_files()
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
classes.add(dexClass.name)
self.classes = sorted(classes)
def _init_methods(self, limit=10000):
"""初始化方法
某些APK可能存在大量的方法,可能会相当耗时,根据情况加限制
Args:
limit (int, optional): 方法数量限制,超过该值,则不获取方法
Returns:
TYPE: 方法集合
"""
methods = set()
if not self.dex_files:
self._init_dex_files()
count = 0
for dex_file in self.dex_files:
count += dex_file.method_ids.size
if limit < count:
return
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
clsname = method.id.cname.decode()
mtdname = method.id.name.decode()
methods.add(clsname + '/' + mtdname)
self.methods = sorted(methods)
def _init_strings_refx(self):
if not self.dex_files:
self._init_dex_files()
self.strings_refx = {}
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
if not method.code:
continue
for bc in method.code.bytecode:
# 1A const-string
# 1B const-string-jumbo
if bc.opcode not in {26, 27}:
continue
clsname = method.id.cname.decode()
mtdname = method.id.name.decode()
dexstr = dex_file.string(bc.args[1])
if clsname in self.strings_refx:
if mtdname in self.strings_refx[clsname]:
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname] = {}
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
def get_strings_refx(self):
"""获取字符串索引,即字符串被那些类、方法使用了。
:return: 字符串索引
:rtype: [dict]
"""
if self.strings_refx is None:
self._init_strings_refx()
return self.strings_refx
def get_dex_files(self):
if not self.dex_files:
self._init_dex_files()
return self.dex_files
def _init_dex_files(self):
self.dex_files = []
try:
with apkfile.ZipFile(self.apk_path, 'r') as z:
for name in z.namelist():
data = z.read(name)
if name.startswith('classes') and name.endswith('.dex') \
and Magic(data).get_type() == 'dex':
dex_file = DexFile(data)
self.dex_files.append(dex_file)
except Exception as ex:
raise ex
def get_strings(self):
if not self.strings:
self._init_strings()
return self.strings
def get_org_strings(self):
if not self.org_strings:
self._init_strings()
return self.org_strings
def _init_strings(self):
if not self.dex_files:
self._init_dex_files()
str_set = set()
org_str_set = set()
for dex_file in self.dex_files:
for i in range(dex_file.string_ids.size):
ostr = dex_file.string(i)
org_str_set.add(ostr)
str_set.add(binascii.hexlify(ostr).decode())
self.strings = list(str_set)
self.org_strings = list(org_str_set)
def get_files(self):
if not self.children:
self._init_children()
return self.children
def _init_children(self):
self.children = []
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
try:
data = zf.read(name)
mine = Magic(data).get_type()
info = zf.getinfo(name)
except Exception as ex:
print(name, ex)
continue
item = {}
item["name"] = name
item["type"] = mine
item["time"] = "%d%02d%02d%02d%02d%02d" % info.date_time
crc = str(hex(info.CRC)).upper()[2:]
crc = '0' * (8 - len(crc)) + crc
item["crc"] = crc
# item["sha1"] = ""
self.children.append(item)
except Exception as e:
raise e
def get_org_manifest(self):
if not self.org_manifest:
self._init_manifest()
return self.org_manifest
def _init_org_manifest(self):
ANDROID_MANIFEST = "AndroidManifest.xml"
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ANDROID_MANIFEST in zf.namelist():
data = zf.read(ANDROID_MANIFEST)
try:
axml = AXML(data)
if axml.is_valid:
self.org_manifest = axml.get_xml()
except Exception as e:
raise e
except Exception as e:
raise e
def get_manifest(self):
if not self.manifest:
self._init_manifest()
return self.manifest
def _init_manifest(self):
if not self.org_manifest:
self._init_org_manifest()
if self.org_manifest:
try:
self.manifest = xmltodict.parse(
self.org_manifest, False)['manifest']
except xml.parsers.expat.ExpatError as e:
pass
except Exception as e:
raise e
def _init_arsc(self):
ARSC_NAME = 'resources.arsc'
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ARSC_NAME in zf.namelist():
data = zf.read(ARSC_NAME)
self.arsc = ARSCParser(data)
except Exception as e:
raise e
def get_arsc(self):
if not self.arsc:
self._init_arsc()
return self.arsc
def get_certs(self, digestalgo='md5'):
if digestalgo not in self.certs:
self._init_certs(digestalgo)
return self.certs[digestalgo]
def _init_certs(self, digestalgo):
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
if name.startswith('META-INF/') and name.endswith(('.DSA', '.RSA')):
data = zf.read(name)
mine = Magic(data).get_type()
if mine != 'txt':
from apkutils.cert import Certificate
cert = Certificate(data, digestalgo=digestalgo)
self.certs[digestalgo] = cert.get()
except Exception as e:
raise e
def get_opcodes(self):
if not self.dex_files:
self._init_opcodes()
return self.opcodes
def _init_opcodes(self):
if not self.dex_files:
self._init_dex_files()
self.opcodes = []
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
opcodes = ""
if method.code:
for bc in method.code.bytecode:
opcode = str(hex(bc.opcode)).upper()[2:]
if len(opcode) == 2:
opcodes = opcodes + opcode
else:
opcodes = opcodes + "0" + opcode
proto = self.get_proto_string(
method.id.return_type, method.id.param_types)
item = {}
item['super_class'] = dexClass.super.decode()
item['class_name'] = method.id.cname.decode()
item['method_name'] = method.id.name.decode()
item['method_desc'] = method.id.desc.decode()
item['proto'] = proto
item['opcodes'] = opcodes
self.opcodes.append(item)
@staticmethod
def get_proto_string(return_type, param_types):
proto = return_type.decode()
if len(proto) > 1:
proto = 'L'
for item in param_types:
param_type = item.decode()
proto += 'L' if len(param_type) > 1 else param_type
return proto
|
mikusjelly/apkutils | apkutils/__init__.py | APK._init_methods | python | def _init_methods(self, limit=10000):
methods = set()
if not self.dex_files:
self._init_dex_files()
count = 0
for dex_file in self.dex_files:
count += dex_file.method_ids.size
if limit < count:
return
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
clsname = method.id.cname.decode()
mtdname = method.id.name.decode()
methods.add(clsname + '/' + mtdname)
self.methods = sorted(methods) | 初始化方法
某些APK可能存在大量的方法,可能会相当耗时,根据情况加限制
Args:
limit (int, optional): 方法数量限制,超过该值,则不获取方法
Returns:
TYPE: 方法集合 | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/__init__.py#L214-L246 | [
"def _init_dex_files(self):\n self.dex_files = []\n try:\n with apkfile.ZipFile(self.apk_path, 'r') as z:\n for name in z.namelist():\n data = z.read(name)\n if name.startswith('classes') and name.endswith('.dex') \\\n and Magic(data).get_... | class APK:
def __init__(self, apk_path):
self.apk_path = apk_path
self.dex_files = None
self.children = None
self.manifest = None
self.org_manifest = None
self.strings = None
self.org_strings = None
self.opcodes = None
self.certs = {}
self.arsc = None
self.strings_refx = None
self.app_icon = None
self.methods = None
self.trees = None # 代码结构序列字典
self.application = None
self.main_activity = None
self.mini_mani = None
self.classes = None
@staticmethod
def serialize_xml(org_xml):
if not org_xml:
return None
_xml = re.sub(r'\n', ' ', org_xml)
_xml = re.sub(r'"\s+?>', '">', _xml)
_xml = re.sub(r'>\s+?<', '><', _xml)
return _xml
def get_mini_mani(self):
if not self.mini_mani:
self.mini_mani = self.serialize_xml(self.get_org_manifest())
return self.mini_mani
def get_main_activity(self):
if not self.main_activity:
self._init_main_activity()
return self.main_activity
def _init_main_activity(self):
mani = self.get_mini_mani()
ptn = r'<activity android:name="([^"]*?)"[^<>]*?>.*?<action android:name="android.intent.action.MAIN">.*?</activity>'
result = re.search(ptn, mani)
if result:
self.main_activity = result.groups()[0]
def get_application(self):
if not self.application:
self._init_application()
return self.application
def _init_application(self):
mani = self.get_mini_mani()
if not mani:
return
ptn = r'<application[^<>]*?:name="([^<>"]*?)"[^<>]*?>'
result = re.search(ptn, mani)
if result:
self.application = result.groups()[0]
def get_app_icon(self):
if self.app_icon:
return self.app_icon
self._init_app_icon()
return self.app_icon
def _init_app_icon(self):
files = self.get_files()
result = re.search(r':icon="@(.*?)"', self.get_org_manifest())
ids = '0x' + result.groups()[0].lower()
try:
with apkfile.ZipFile(self.apk_path, 'r') as z:
data = z.read('resources.arsc')
self.arscobj = ARSCParser(data)
self.package = self.arscobj.get_packages_names()[0]
datas = xmltodict.parse(
self.arscobj.get_public_resources(self.package))
for item in datas['resources']['public']:
if ids != item['@id']:
continue
for f in files:
name = f['name']
if item['@type'] in name and item['@name'] in name:
self.app_icon = name
except Exception as ex:
raise ex
def get_trees(self, height=2, limit=5000):
if self.trees is None:
self._init_trees(height, limit)
return self.trees
@staticmethod
def pretty_print(node):
"""漂亮地打印一个节点
Args:
node (TYPE): Description
"""
for pre, _, node in RenderTree(node):
print('{}{}'.format(pre, node.name))
def _init_trees(self, height, limit):
if self.methods is None:
self._init_methods(limit)
if not self.methods:
return
root = Node('root')
r = Resolver(pathattr='name')
def find_node(path):
"""查找节点
Args:
root (TYPE): Description
path (TYPE): Description
Returns:
TYPE: Description
"""
try:
return r.glob(root, path)[0]
except Exception:
return None
def to_nodes(mtd):
"""把一个方法,转化成节点
Args:
root (TYPE): 根节点
mtd (TYPE): Description
Returns:
TYPE: Node
"""
current = root
node_path = '/root'
for item in mtd.split('/'):
node_path = node_path + '/' + item
tnode = find_node(node_path)
if tnode:
current = tnode
else:
current = Node(item, parent=current)
count = 0
# TODO 节点插入的顺序,决定了树的遍历顺序,及其计算结果
# 假设2个结构一样,但是,因为名字顺序不一样,导致插入顺序不一致
# 有可能导致一样的结构不一样的结果。
for mtd in self.methods:
count += 1
to_nodes(mtd)
def serialize_node(root_node):
snum = ''
for pre, _, node in RenderTree(root_node):
snum = snum + str(node.height)
return snum
self.trees = {}
for pre, _, node in RenderTree(root):
if node.height > height:
key = hash.hash(serialize_node(node), 'md5')
if key in self.trees:
self.trees[key].append(node)
else:
self.trees[key] = [node]
def get_classes(self):
if self.classes is None:
self._init_classes()
return self.classes
def _init_classes(self):
classes = set()
if not self.dex_files:
self._init_dex_files()
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
classes.add(dexClass.name)
self.classes = sorted(classes)
def get_methods(self, limit=10000):
"""获取所有方法路径 com/a/b/mtd_name
Returns:
TYPE: set
"""
if self.methods is None:
self._init_methods(limit)
return self.methods
def _init_strings_refx(self):
if not self.dex_files:
self._init_dex_files()
self.strings_refx = {}
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
if not method.code:
continue
for bc in method.code.bytecode:
# 1A const-string
# 1B const-string-jumbo
if bc.opcode not in {26, 27}:
continue
clsname = method.id.cname.decode()
mtdname = method.id.name.decode()
dexstr = dex_file.string(bc.args[1])
if clsname in self.strings_refx:
if mtdname in self.strings_refx[clsname]:
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
else:
self.strings_refx[clsname] = {}
self.strings_refx[clsname][mtdname] = set()
self.strings_refx[clsname][mtdname].add(dexstr)
def get_strings_refx(self):
"""获取字符串索引,即字符串被那些类、方法使用了。
:return: 字符串索引
:rtype: [dict]
"""
if self.strings_refx is None:
self._init_strings_refx()
return self.strings_refx
def get_dex_files(self):
if not self.dex_files:
self._init_dex_files()
return self.dex_files
def _init_dex_files(self):
self.dex_files = []
try:
with apkfile.ZipFile(self.apk_path, 'r') as z:
for name in z.namelist():
data = z.read(name)
if name.startswith('classes') and name.endswith('.dex') \
and Magic(data).get_type() == 'dex':
dex_file = DexFile(data)
self.dex_files.append(dex_file)
except Exception as ex:
raise ex
def get_strings(self):
if not self.strings:
self._init_strings()
return self.strings
def get_org_strings(self):
if not self.org_strings:
self._init_strings()
return self.org_strings
def _init_strings(self):
if not self.dex_files:
self._init_dex_files()
str_set = set()
org_str_set = set()
for dex_file in self.dex_files:
for i in range(dex_file.string_ids.size):
ostr = dex_file.string(i)
org_str_set.add(ostr)
str_set.add(binascii.hexlify(ostr).decode())
self.strings = list(str_set)
self.org_strings = list(org_str_set)
def get_files(self):
if not self.children:
self._init_children()
return self.children
def _init_children(self):
self.children = []
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
try:
data = zf.read(name)
mine = Magic(data).get_type()
info = zf.getinfo(name)
except Exception as ex:
print(name, ex)
continue
item = {}
item["name"] = name
item["type"] = mine
item["time"] = "%d%02d%02d%02d%02d%02d" % info.date_time
crc = str(hex(info.CRC)).upper()[2:]
crc = '0' * (8 - len(crc)) + crc
item["crc"] = crc
# item["sha1"] = ""
self.children.append(item)
except Exception as e:
raise e
def get_org_manifest(self):
if not self.org_manifest:
self._init_manifest()
return self.org_manifest
def _init_org_manifest(self):
ANDROID_MANIFEST = "AndroidManifest.xml"
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ANDROID_MANIFEST in zf.namelist():
data = zf.read(ANDROID_MANIFEST)
try:
axml = AXML(data)
if axml.is_valid:
self.org_manifest = axml.get_xml()
except Exception as e:
raise e
except Exception as e:
raise e
def get_manifest(self):
if not self.manifest:
self._init_manifest()
return self.manifest
def _init_manifest(self):
if not self.org_manifest:
self._init_org_manifest()
if self.org_manifest:
try:
self.manifest = xmltodict.parse(
self.org_manifest, False)['manifest']
except xml.parsers.expat.ExpatError as e:
pass
except Exception as e:
raise e
def _init_arsc(self):
ARSC_NAME = 'resources.arsc'
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
if ARSC_NAME in zf.namelist():
data = zf.read(ARSC_NAME)
self.arsc = ARSCParser(data)
except Exception as e:
raise e
def get_arsc(self):
if not self.arsc:
self._init_arsc()
return self.arsc
def get_certs(self, digestalgo='md5'):
if digestalgo not in self.certs:
self._init_certs(digestalgo)
return self.certs[digestalgo]
def _init_certs(self, digestalgo):
try:
with apkfile.ZipFile(self.apk_path, mode="r") as zf:
for name in zf.namelist():
if name.startswith('META-INF/') and name.endswith(('.DSA', '.RSA')):
data = zf.read(name)
mine = Magic(data).get_type()
if mine != 'txt':
from apkutils.cert import Certificate
cert = Certificate(data, digestalgo=digestalgo)
self.certs[digestalgo] = cert.get()
except Exception as e:
raise e
def get_opcodes(self):
if not self.dex_files:
self._init_opcodes()
return self.opcodes
def _init_opcodes(self):
if not self.dex_files:
self._init_dex_files()
self.opcodes = []
for dex_file in self.dex_files:
for dexClass in dex_file.classes:
try:
dexClass.parseData()
except IndexError:
continue
for method in dexClass.data.methods:
opcodes = ""
if method.code:
for bc in method.code.bytecode:
opcode = str(hex(bc.opcode)).upper()[2:]
if len(opcode) == 2:
opcodes = opcodes + opcode
else:
opcodes = opcodes + "0" + opcode
proto = self.get_proto_string(
method.id.return_type, method.id.param_types)
item = {}
item['super_class'] = dexClass.super.decode()
item['class_name'] = method.id.cname.decode()
item['method_name'] = method.id.name.decode()
item['method_desc'] = method.id.desc.decode()
item['proto'] = proto
item['opcodes'] = opcodes
self.opcodes.append(item)
@staticmethod
def get_proto_string(return_type, param_types):
proto = return_type.decode()
if len(proto) > 1:
proto = 'L'
for item in param_types:
param_type = item.decode()
proto += 'L' if len(param_type) > 1 else param_type
return proto
|
mikusjelly/apkutils | apkutils/elf/elfparser.py | get_dynsym_datas | python | def get_dynsym_datas(elf_data, elf_file, skip_import=True):
"""
获取符号/方法的相关信息(符号名、符号数据)。
"""
f = elf_data
dynsym_datas = []
symbol_table = elf_file.get_section_by_name('.dynsym')
if symbol_table:
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
f.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
f.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in f.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
item = {}
item["name"] = symbol.name
item["data"] = symbol_hexs
dynsym_datas.append(item)
return dynsym_datas | 获取符号/方法的相关信息(符号名、符号数据)。 | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L126-L158 | null | import zipfile
import io
import binascii
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import byte2int
from cigam import Magic
class ELF():
    """Thin pyelftools wrapper for inspecting native libraries.

    Bug fixes relative to the original:
    * ``get_rodata_strings`` called the undefined module-level name
      ``display_string_dump(self.elf_file, '.rodata')`` (the method below
      takes only a section spec) — it now calls the instance method.
    * ``display_string_dump`` called the undefined module-level name
      ``_section_from_spec(self.elf_file, spec)`` — it now calls the
      instance method.
    """

    def __init__(self, file_path):
        if Magic(file_path).get_type() != 'elf':
            # NOTE(review): a non-ELF path leaves elf_data/elf_file unset,
            # so later method calls raise AttributeError (unchanged).
            return
        self.elf_data = open(file_path, 'rb')
        self.elf_file = ELFFile(self.elf_data)

    def close(self):
        """Release the underlying file handle."""
        self.elf_data.close()

    def get_dynsym_datas(self, skip_import=True):
        """Return (name, hex-address, hex-bytes) for .dynsym functions.

        Imported (size-0) symbols are skipped when *skip_import* is true;
        at most 80 bytes are dumped per symbol.
        """
        dynsym_datas = []
        symbol_table = self.elf_file.get_section_by_name('.dynsym')
        for symbol in symbol_table.iter_symbols():
            if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
                continue
            self.elf_data.seek(0)
            symbol_addr = symbol.entry.st_value & 0xFFFE
            self.elf_data.seek(symbol_addr)
            symbol_hexs = ''
            size = min(symbol.entry.st_size, 80)
            for x in self.elf_data.read(size):
                op = str(hex(x)).upper()[2:]
                if len(op) == 1:
                    op = '0' + op
                symbol_hexs = symbol_hexs + op
            dynsym_datas.append(
                (symbol.name, hex(symbol_addr), symbol_hexs))
        return dynsym_datas

    def get_rodata_strings(self):
        """Return the printable strings of .rodata, hex-encoded, or None
        (printing a message) on ELF errors.
        """
        try:
            return self.display_string_dump('.rodata')
        except ELFError as ex:
            print('ELF error: %s\n' % ex)

    def display_string_dump(self, section_spec):
        """Return hex-encoded printable-ASCII runs of a section.

        *section_spec* is either a section number or a name; returns None
        (printing a message) when the section does not exist.
        """
        section = self._section_from_spec(section_spec)
        if section is None:
            print("Section '%s' does not exist in the file!" % section_spec)
            return None
        data = section.data()
        dataptr = 0
        strs = []
        while dataptr < len(data):
            # Skip non-printable bytes.
            while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
                dataptr += 1
            if dataptr >= len(data):
                break
            # Consume up to the next NUL terminator.
            endptr = dataptr
            while endptr < len(data) and byte2int(data[endptr]) != 0:
                endptr += 1
            strs.append(binascii.b2a_hex(
                data[dataptr:endptr]).decode().upper())
            dataptr = endptr
        return strs

    def _section_from_spec(self, spec):
        """Return the section named/numbered *spec*, or None."""
        try:
            num = int(spec)
            if num < self.elf_file.num_sections():
                return self.elf_file.get_section(num)
            return None
        except ValueError:
            # Not a number — treat it as a section name.
            return self.elf_file.get_section_by_name(spec)
def get_elf_files(apk_path):
    """Extract every ELF member from an APK (zip) archive.

    Args:
        apk_path: path to the APK file.

    Returns:
        list of (member_name, BytesIO, ELFFile) tuples; empty when apk_path
        is not a zip archive or contains no ELF members.
    """
    files = list()
    if not zipfile.is_zipfile(apk_path):
        return files
    # Fix: the original wrapped this in `except Exception as ex: raise ex`,
    # a useless catch-and-re-raise; errors now propagate naturally.
    with zipfile.ZipFile(apk_path, mode="r") as zf:
        for name in zf.namelist():
            try:
                data = zf.read(name)
                # Sniff the member's signature; only ELF payloads are kept.
                if Magic(data).get_type() == 'elf':
                    elf_data = io.BytesIO(data)
                    files.append((name, elf_data, ELFFile(elf_data)))
            except Exception:  # best-effort: skip unreadable/corrupt members
                continue
    return files
def get_dynsym_datas(elf_data, elf_file, skip_import=True):
    """Collect name/data records for the STT_FUNC symbols in .dynsym.

    For each function symbol, up to 80 bytes are read from ``elf_data`` at
    the (masked) symbol address and rendered as an upper-case hex string.

    Returns a list of ``{'name': ..., 'data': ...}`` dicts; empty when the
    file has no .dynsym section.
    """
    records = []
    section = elf_file.get_section_by_name('.dynsym')
    if not section:
        return records
    for sym in section.iter_symbols():
        entry = sym.entry
        # (skip_import and st_size == 0) treats zero-size symbols as
        # imports; anything that is not STT_FUNC is skipped as well.
        if skip_import and entry.st_size == 0 or entry.st_info.type != 'STT_FUNC':
            continue
        elf_data.seek(0)
        # NOTE(review): & 0xFFFE clears bit 0 but also truncates the address
        # to 16 bits -- kept as-is to preserve the original behaviour.
        addr = entry.st_value & 0xFFFE
        elf_data.seek(addr)
        chunk = elf_data.read(min(entry.st_size, 80))
        hex_dump = ''.join('{:02X}'.format(byte) for byte in chunk)
        records.append({'name': sym.name, 'data': hex_dump})
    return records
def get_rodata_strings(elf_file):
    """Return the printable strings of the .rodata section, hex-encoded
    (hex avoids character-encoding issues with raw bytes).

    On a pyelftools ELFError the message is written to stderr and the
    process exits with status 1.
    """
    try:
        strings = display_string_dump(elf_file, '.rodata')
    except ELFError as err:
        import sys
        sys.stderr.write('ELF error: %s\n' % err)
        sys.exit(1)
    else:
        return strings
def display_string_dump(elf_file, section_spec):
    """Extract the NUL-terminated printable strings of a section.

    ``section_spec`` may be a section number or a section name.  Each
    string is returned hex-encoded in upper case; returns None (after
    printing a message) when the section does not exist.
    """
    section = _section_from_spec(elf_file, section_spec)
    if section is None:
        print("Section '%s' does not exist in the file!" % section_spec)
        return None
    raw = section.data()
    total = len(raw)
    result = []
    pos = 0
    while pos < total:
        # Advance to the first printable ASCII byte.
        while pos < total and not 32 <= byte2int(raw[pos]) <= 127:
            pos += 1
        if pos >= total:
            break
        end = pos
        # The string runs until the terminating NUL byte.
        while end < total and byte2int(raw[end]) != 0:
            end += 1
        result.append(binascii.b2a_hex(raw[pos:end]).decode().upper())
        pos = end
    return result
def _section_from_spec(elf_file, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
if isinstance(spec, int):
num = int(spec)
if num < elf_file.num_sections():
return elf_file.get_section(num)
# Not a number. Must be a name then
if isinstance(spec, str):
try:
return elf_file.get_section_by_name(spec)
except AttributeError:
return None
|
mikusjelly/apkutils | apkutils/elf/elfparser.py | get_rodata_strings | python | def get_rodata_strings(elf_file):
"""
获取字符串列表,以hex格式表示,避免字符编码问题。
"""
try:
return display_string_dump(elf_file, '.rodata')
except ELFError as ex:
import sys
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1) | 获取字符串列表,以hex格式表示,避免字符编码问题。 | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L161-L170 | [
"def display_string_dump(elf_file, section_spec):\n \"\"\" Display a strings dump of a section. section_spec is either a\n section number or a name.\n \"\"\"\n section = _section_from_spec(elf_file, section_spec)\n if section is None:\n print(\"Section '%s' does not exist in the file!\" % ... | import zipfile
import io
import binascii
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import byte2int
from cigam import Magic
class ELF():
def __init__(self, file_path):
if Magic(file_path).get_type() != 'elf':
return
self.elf_data = open(file_path, 'rb')
self.elf_file = ELFFile(self.elf_data)
def close(self):
self.elf_data.close()
def get_dynsym_datas(self, skip_import=True):
dynsym_datas = []
symbol_table = self.elf_file.get_section_by_name('.dynsym')
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
self.elf_data.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
self.elf_data.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in self.elf_data.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
dynsym_datas.append(
(symbol.name, hex(symbol_addr), symbol_hexs))
return dynsym_datas
def get_rodata_strings(self):
try:
return display_string_dump(self.elf_file, '.rodata')
except ELFError as ex:
print('ELF error: %s\n' % ex)
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec)
def get_elf_files(apk_path):
files = list()
if zipfile.is_zipfile(apk_path):
try:
with zipfile.ZipFile(apk_path, mode="r") as zf:
for name in zf.namelist():
try:
data = zf.read(name)
mime = Magic(data).get_type()
if mime == 'elf':
elf_data = io.BytesIO(data)
elf_file = ELFFile(elf_data)
files.append((name, elf_data, elf_file))
except Exception as ex:
continue
except Exception as ex:
raise ex
return files
def get_dynsym_datas(elf_data, elf_file, skip_import=True):
"""
获取符号/方法的相关信息(符号名、符号数据)。
"""
f = elf_data
dynsym_datas = []
symbol_table = elf_file.get_section_by_name('.dynsym')
if symbol_table:
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
f.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
f.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in f.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
item = {}
item["name"] = symbol.name
item["data"] = symbol_hexs
dynsym_datas.append(item)
return dynsym_datas
def get_rodata_strings(elf_file):
"""
获取字符串列表,以hex格式表示,避免字符编码问题。
"""
try:
return display_string_dump(elf_file, '.rodata')
except ELFError as ex:
import sys
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
def display_string_dump(elf_file, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while (dataptr < len(data) and not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(elf_file, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
if isinstance(spec, int):
num = int(spec)
if num < elf_file.num_sections():
return elf_file.get_section(num)
# Not a number. Must be a name then
if isinstance(spec, str):
try:
return elf_file.get_section_by_name(spec)
except AttributeError:
return None
|
mikusjelly/apkutils | apkutils/elf/elfparser.py | _section_from_spec | python | def _section_from_spec(elf_file, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
if isinstance(spec, int):
num = int(spec)
if num < elf_file.num_sections():
return elf_file.get_section(num)
# Not a number. Must be a name then
if isinstance(spec, str):
try:
return elf_file.get_section_by_name(spec)
except AttributeError:
return None | Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L203-L218 | null | import zipfile
import io
import binascii
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import byte2int
from cigam import Magic
class ELF():
def __init__(self, file_path):
if Magic(file_path).get_type() != 'elf':
return
self.elf_data = open(file_path, 'rb')
self.elf_file = ELFFile(self.elf_data)
def close(self):
self.elf_data.close()
def get_dynsym_datas(self, skip_import=True):
dynsym_datas = []
symbol_table = self.elf_file.get_section_by_name('.dynsym')
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
self.elf_data.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
self.elf_data.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in self.elf_data.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
dynsym_datas.append(
(symbol.name, hex(symbol_addr), symbol_hexs))
return dynsym_datas
def get_rodata_strings(self):
try:
return display_string_dump(self.elf_file, '.rodata')
except ELFError as ex:
print('ELF error: %s\n' % ex)
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec)
def get_elf_files(apk_path):
files = list()
if zipfile.is_zipfile(apk_path):
try:
with zipfile.ZipFile(apk_path, mode="r") as zf:
for name in zf.namelist():
try:
data = zf.read(name)
mime = Magic(data).get_type()
if mime == 'elf':
elf_data = io.BytesIO(data)
elf_file = ELFFile(elf_data)
files.append((name, elf_data, elf_file))
except Exception as ex:
continue
except Exception as ex:
raise ex
return files
def get_dynsym_datas(elf_data, elf_file, skip_import=True):
"""
获取符号/方法的相关信息(符号名、符号数据)。
"""
f = elf_data
dynsym_datas = []
symbol_table = elf_file.get_section_by_name('.dynsym')
if symbol_table:
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
f.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
f.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in f.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
item = {}
item["name"] = symbol.name
item["data"] = symbol_hexs
dynsym_datas.append(item)
return dynsym_datas
def get_rodata_strings(elf_file):
"""
获取字符串列表,以hex格式表示,避免字符编码问题。
"""
try:
return display_string_dump(elf_file, '.rodata')
except ELFError as ex:
import sys
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
def display_string_dump(elf_file, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while (dataptr < len(data) and not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(elf_file, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
if isinstance(spec, int):
num = int(spec)
if num < elf_file.num_sections():
return elf_file.get_section(num)
# Not a number. Must be a name then
if isinstance(spec, str):
try:
return elf_file.get_section_by_name(spec)
except AttributeError:
return None
|
mikusjelly/apkutils | apkutils/elf/elfparser.py | ELF.display_string_dump | python | def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs | Display a strings dump of a section. section_spec is either a
section number or a name. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L57-L85 | [
"def _section_from_spec(elf_file, spec):\n '''\n Retrieve a section given a \"spec\" (either number or name).\n Return None if no such section exists in the file.\n '''\n if isinstance(spec, int):\n num = int(spec)\n if num < elf_file.num_sections():\n return elf_file... | class ELF():
def __init__(self, file_path):
if Magic(file_path).get_type() != 'elf':
return
self.elf_data = open(file_path, 'rb')
self.elf_file = ELFFile(self.elf_data)
def close(self):
self.elf_data.close()
def get_dynsym_datas(self, skip_import=True):
dynsym_datas = []
symbol_table = self.elf_file.get_section_by_name('.dynsym')
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
self.elf_data.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
self.elf_data.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in self.elf_data.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
dynsym_datas.append(
(symbol.name, hex(symbol_addr), symbol_hexs))
return dynsym_datas
def get_rodata_strings(self):
try:
return display_string_dump(self.elf_file, '.rodata')
except ELFError as ex:
print('ELF error: %s\n' % ex)
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec)
|
mikusjelly/apkutils | apkutils/elf/elfparser.py | ELF._section_from_spec | python | def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec) | Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file. | train | https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L87-L100 | null | class ELF():
def __init__(self, file_path):
if Magic(file_path).get_type() != 'elf':
return
self.elf_data = open(file_path, 'rb')
self.elf_file = ELFFile(self.elf_data)
def close(self):
self.elf_data.close()
def get_dynsym_datas(self, skip_import=True):
dynsym_datas = []
symbol_table = self.elf_file.get_section_by_name('.dynsym')
for symbol in symbol_table.iter_symbols():
if skip_import and symbol.entry.st_size == 0 or symbol.entry.st_info.type != 'STT_FUNC':
continue
self.elf_data.seek(0)
symbol_addr = symbol.entry.st_value & 0xFFFE
self.elf_data.seek(symbol_addr)
symbol_hexs = ''
size = symbol.entry.st_size
if symbol.entry.st_size > 80:
size = 80
for x in self.elf_data.read(size):
op = str(hex(x)).upper()[2:]
if len(op) == 1:
op = '0' + op
symbol_hexs = symbol_hexs + op
dynsym_datas.append(
(symbol.name, hex(symbol_addr), symbol_hexs))
return dynsym_datas
def get_rodata_strings(self):
try:
return display_string_dump(self.elf_file, '.rodata')
except ELFError as ex:
print('ELF error: %s\n' % ex)
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
def _section_from_spec(self, spec):
'''
Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
'''
try:
num = int(spec)
if num < self.elf_file.num_sections():
return self.elf_file.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elf_file.get_section_by_name(spec)
|
konstantint/PassportEye | passporteye/mrz/text.py | MRZ._guess_type | python | def _guess_type(mrz_lines):
try:
if len(mrz_lines) == 3:
return 'TD1'
elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40:
return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2'
elif len(mrz_lines) == 2:
return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3'
else:
return None
except Exception: #pylint: disable=broad-except
return None | Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None.
The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V'
>>> MRZ._guess_type([]) is None
True
>>> MRZ._guess_type([1]) is None
True
>>> MRZ._guess_type([1,2]) is None # No len() for numbers
True
>>> MRZ._guess_type(['a','b']) # This way passes
'TD2'
>>> MRZ._guess_type(['*'*40, '*'*40])
'TD3'
>>> MRZ._guess_type([1,2,3])
'TD1'
>>> MRZ._guess_type(['V'*40, '*'*40])
'MRVA'
>>> MRZ._guess_type(['V'*36, '*'*36])
'MRVB' | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/text.py#L129-L160 | null | class MRZ(object):
"""
A simple parser for a Type1 or Type3 Machine-readable zone strings from identification documents.
See:
- https://en.wikipedia.org/wiki/Machine-readable_passport
- http://www.icao.int/publications/pages/publication.aspx?docnum=9303
Usage:
Represent the MRZ as a list of 2 or 3 lines, create an instance of this class,
and read off the various fields filled by the parser.
The first field you should check is .mrz_type.
It is either None (no parsing done at all) or 'TD1', 'TD2', 'TD3', 'MRVA' or 'MRVB' depending on the type of the MRZ.
The next one is 'valid'. If this is true, you may be pretty sure the parsing was successful and
all the checksum digits passed the test as well. Sometimes the validity check may fail for some trivial reason
(e.g. nonstandard document type character or one of the checksums wrong while others corect) -
for this reason there is a field `valid_score`, which is an integer between 0 (nothing is valid) to 100
(all checksums, line lengths and miscellaneous checks passed).
Otherwise at least some of the checks failed, the meaning of which is up to you to interpret.
When given invalid data, the algorithm attempts to do some trivial data clean-up: drop whitespaces from lines,
and extend short lines with filler characters <, after which the fields are extracted from the lines as if
they were valid.
The parsing computes three validation indicators:
valid_check_digits - a list of booleans indicating which of the "check digits" in the MRZ were valid.
TD1/TD2 has four check digits, TD3 - five, MRVA/B - three.
The separate booleans are also available as valid_number, valid_date_of_birth, valid_expiration_date, valid_composite
and valid_personal_number (TD3 only).
valid_line_lengths - a list of booleans, indicating which of the lines (3 in TD1, 2 in TD2/TD3) had the expected length.
valid_misc - a list of booleans, indicating various additional validity checks (unspecified, see code).
The valid_score field counts the "validity score" according to the flags above and is an int between 0 and 100.
When all validation passes, the valid field is set to True as well.
However, you may attempt reading fields from a "not completely valid" MRZ as well sometimes.
The reported fields are: type, country, number, date_of_birth, sex, expiration_date, nationality, names, surname
TD1 MRZ also has fields optional1 and optional2. TD2 MRZ has optional1, TD3 MRZ has personal_number.
MRVA and MRVB are the same as TD3 except personal_number and check_composite (which are not present)
The field aux is a dictionary of additional data that may be associated with MRZ by OCR code,
e.g. aux['roi'], aux['box'] or aux['text'] may be used to carry around the part of the image that was used
to extract the information, aux['method'] to mark the method used, etc.
# Valid ID card (TD1)
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<4', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<'])
>>> assert m.mrz_type == 'TD1' and m.valid and m.valid_score == 100
>>> assert m.type == 'ID' and m.country == 'AUT' and m.number == '10000999<'
>>> assert m.date_of_birth == '710909' and m.sex == 'F' and m.expiration_date == '111231' and m.nationality == 'AUT'
>>> assert m.names == 'ISOLDE' and m.surname == 'MUSTERFRAU'
>>> assert m.check_number == '6' and m.check_date_of_birth == '4' and m.check_expiration_date == '5' and m.check_composite == '4'
>>> assert m.optional1 == '<<<<<<<<<<<<<<<' and m.optional2 == '<<<<<<<<<<<'
# Valid TD2
>>> m = MRZ(['I<UTOERIKSSON<<ANNA<MARIA<<<<<<<<<<<', 'D231458907UTO7408122F1204159<<<<<<<6'])
>>> assert m.mrz_type == 'TD2' and m.valid and m.valid_score == 100
>>> assert m.type == 'I<' and m.country == 'UTO' and m.number == 'D23145890'
>>> assert m.date_of_birth == '740812' and m.sex == 'F' and m.expiration_date == '120415' and m.nationality == 'UTO'
>>> assert m.names == 'ANNA MARIA' and m.surname == 'ERIKSSON'
>>> assert m.check_number == '7' and m.check_date_of_birth == '2' and m.check_expiration_date == '9' and m.check_composite == '6'
# Valid Visa
>>> m = MRZ(['VIUSATRAVELER<<HAPPYPERSON<<<<<<<<<<<<<<<<<<', '555123ABC6GBR6502056F04122361FLNDDDAM5803085'])
>>> assert m.mrz_type == 'MRVA' and m.valid and m.valid_score == 100
>>> assert m.type == 'VI' and m.country == 'USA' and m.number == '555123ABC'
>>> assert m.date_of_birth == '650205' and m.sex == 'F' and m.expiration_date == '041223' and m.nationality == 'GBR'
>>> assert m.names == 'HAPPYPERSON' and m.surname == 'TRAVELER'
>>> assert m.check_number == '6' and m.check_date_of_birth == '6' and m.check_expiration_date == '6'
# Valid passport (TD3)
>>> m = MRZ(['P<POLKOWALSKA<KWIATKOWSKA<<JOANNA<<<<<<<<<<<', 'AA00000000POL6002084F1412314<<<<<<<<<<<<<<<4'])
>>> assert m.mrz_type == 'TD3' and m.valid and m.valid_score == 100
>>> assert m.type == 'P<' and m.country == 'POL' and m.number == 'AA0000000' and m.personal_number == '<<<<<<<<<<<<<<'
>>> assert m.date_of_birth == '600208' and m.sex == 'F' and m.expiration_date == '141231' and m.nationality == 'POL'
>>> assert m.names == 'JOANNA' and m.surname == 'KOWALSKA KWIATKOWSKA'
>>> assert m.check_number == '0' and m.check_date_of_birth == '4' and m.check_expiration_date == '4' and m.check_personal_number == '<' and m.check_composite == '4'
# Invalid examples
>>> assert MRZ([]).mrz_type is None
>>> assert MRZ([1,2,3,4]).mrz_type is None
>>> assert MRZ([1,2,3]).mrz_type is None
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<'])
>>> assert m.mrz_type == 'TD1' and not m.valid and m.valid_score < 100
>>> assert m.valid_check_digits == [True, True, True, False]
>>> assert m.type == 'ID' and m.country == 'AUT' and m.number == '10000999<'
# The utility from_ocr function will convert a single newline-separated string obtained as OCR output
# into MRZ lines, doing some basic cleanup inbetween (removing empty lines and lines that are too short,
# removing spaces, converting mismatched characters, etc), and then attempt the parsing.
>>> m = MRZ.from_ocr('\\n\\n this line useless \\n IDAUT10000999<6 <<<<<<<<< <<<<<< \\n 7IO9O94FIi iz3iSAUT<<<<<<<<<<<4 \\n MUSTERFRA U<<ISOLDE<<< <<<<<<<<<')
>>> assert m.valid and m.names == 'ISOLDE' and m.surname == 'MUSTERFRAU'
"""
def __init__(self, mrz_lines):
    """
    Parse a TD1/TD2/TD3/MRVA/MRVB MRZ from a single newline-separated string or a list of strings.

    :param mrz_lines: either a single string with newlines, or a list of 2 or 3 strings, representing the lines of an MRZ.

    All parsed fields (mrz_type, valid, valid_score, ...) are set as
    instance attributes by _parse; on any failure mrz_type is None and
    valid is False rather than raising.
    """
    self._parse(mrz_lines)
    # Free-form bag for OCR metadata (e.g. 'roi', 'text', 'method') that
    # callers may attach to this MRZ.
    self.aux = {}
@staticmethod
def from_ocr(mrz_ocr_string):
    """Build an MRZ from raw OCR output: the text is first cleaned up via
    MRZOCRCleaner.apply (drops junk lines, fixes common misreads), then
    parsed as usual."""
    cleaned_lines = MRZOCRCleaner.apply(mrz_ocr_string)
    return MRZ(cleaned_lines)
def __repr__(self):
if self.valid:
return "MRZ({0}[valid], {1}, {2}, {3}, {4}, {5})".format(self.mrz_type, self.number, self.names, self.surname, self.sex, self.date_of_birth)
elif self.valid_score > 0:
return "MRZ({0}[{1}], {2}, {3}, {4}, {5}, {6})".format(self.mrz_type, self.valid_score, self.number, self.names, self.surname, self.sex, self.date_of_birth)
else:
return "MRZ(invalid)"
def _parse(self, mrz_lines):
    """Dispatch parsing to the format-specific _parse_* method.

    The format is guessed from line count/lengths by _guess_type.  Sets
    self.mrz_type, self.valid and self.valid_score; any exception while
    parsing resets all three (mrz_type=None, valid=False, score=0).

    Fix: the original carried a stray ``@staticmethod`` decorator (left
    over from an extracted sibling) on this instance method, which would
    make ``self._parse(mrz_lines)`` in __init__ raise TypeError; removed.
    """
    self.mrz_type = MRZ._guess_type(mrz_lines)
    try:
        if self.mrz_type == 'TD1':
            self.valid = self._parse_td1(*mrz_lines)
        elif self.mrz_type == 'TD2':
            self.valid = self._parse_td2(*mrz_lines)
        elif self.mrz_type == 'TD3':
            self.valid = self._parse_td3(*mrz_lines)
        elif self.mrz_type == 'MRVA':
            # MRVA/MRVB share one parser; only the line length differs.
            self.valid = self._parse_mrv(*mrz_lines, length=44)
        elif self.mrz_type == 'MRVB':
            self.valid = self._parse_mrv(*mrz_lines, length=36)
        else:
            self.valid = False
            self.valid_score = 0
    except Exception:  # pylint: disable=broad-except
        # Parsing is best-effort: any failure yields an "invalid" MRZ
        # rather than propagating to the caller.
        self.mrz_type = None
        self.valid = False
        self.valid_score = 0
def to_dict(self):
    """Converts this object to an (ordered) dictionary of field-value pairs.
    >>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<']).to_dict()
    >>> assert m['type'] == 'ID' and m['country'] == 'AUT' and m['number'] == '10000999<'
    >>> assert m['valid_number'] and m['valid_date_of_birth'] and m['valid_expiration_date'] and not m['valid_composite']
    """
    result = OrderedDict()
    result['mrz_type'] = self.mrz_type
    result['valid_score'] = self.valid_score
    # Detailed fields exist only when parsing recognized an MRZ format.
    if self.mrz_type is not None:
        result['type'] = self.type
        result['country'] = self.country
        result['number'] = self.number
        result['date_of_birth'] = self.date_of_birth
        result['expiration_date'] = self.expiration_date
        result['nationality'] = self.nationality
        result['sex'] = self.sex
        result['names'] = self.names
        result['surname'] = self.surname
        # Optional/personal-number fields depend on the document format:
        # TD1 has two optional fields, TD2/MRVA/MRVB one, TD3 a personal number.
        if self.mrz_type == 'TD1':
            result['optional1'] = self.optional1
            result['optional2'] = self.optional2
        elif self.mrz_type in ['TD2', 'MRVA', 'MRVB']:
            result['optional1'] = self.optional1
        else:
            result['personal_number'] = self.personal_number
        result['check_number'] = self.check_number
        result['check_date_of_birth'] = self.check_date_of_birth
        result['check_expiration_date'] = self.check_expiration_date
        # Visas (MRVA/MRVB) have no composite check digit; only TD3 has a
        # personal-number check digit.
        if self.mrz_type not in ['MRVA', 'MRVB']:
            result['check_composite'] = self.check_composite
        if self.mrz_type == 'TD3':
            result['check_personal_number'] = self.check_personal_number
        # Per-digit validity flags, unpacked from valid_check_digits.
        result['valid_number'] = self.valid_check_digits[0]
        result['valid_date_of_birth'] = self.valid_check_digits[1]
        result['valid_expiration_date'] = self.valid_check_digits[2]
        if self.mrz_type not in ['MRVA', 'MRVB']:
            result['valid_composite'] = self.valid_check_digits[3]
        if self.mrz_type == 'TD3':
            result['valid_personal_number'] = self.valid_check_digits[4]
    # Propagate the OCR method tag when the caller recorded one in aux.
    if 'method' in self.aux:
        result['method'] = self.aux['method']
    return result
def _parse_td1(self, a, b, c):
    """Parse the three 30-character lines of a TD1-sized (ID card) MRZ.

    Short lines are right-padded with the '<' filler before slicing; the
    raw line lengths still count towards the validity score.  Returns
    True iff every check passes (valid_score == 100).
    """
    # Remember raw lengths: they feed valid_line_lengths below.
    len_a, len_b, len_c = len(a), len(b), len(c)
    # Pad short lines so the fixed-offset slicing below cannot fail.
    if len(a) < 30:
        a = a + '<'*(30 - len(a))
    if len(b) < 30:
        b = b + '<'*(30 - len(b))
    if len(c) < 30:
        c = c + '<'*(30 - len(c))
    # Line 1: document type, issuing country, document number + check digit.
    self.type = a[0:2]
    self.country = a[2:5]
    self.number = a[5:14]
    self.check_number = a[14]
    self.optional1 = a[15:30]
    # Line 2: birth date, sex, expiry date, nationality, composite check digit.
    self.date_of_birth = b[0:6]
    self.check_date_of_birth = b[6]
    self.sex = b[7]
    self.expiration_date = b[8:14]
    self.check_expiration_date = b[14]
    self.nationality = b[15:18]
    self.optional2 = b[18:29]
    self.check_composite = b[29]
    # Line 3: 'SURNAME<<GIVEN<NAMES'; a missing '<<' leaves names empty.
    surname_names = c.split('<<', 1)
    if len(surname_names) < 2:
        surname_names += ['']
    self.surname, self.names = surname_names
    self.names = self.names.replace('<', ' ').strip()
    self.surname = self.surname.replace('<', ' ').strip()
    # Check digits: number, birth date, expiry date (dates must also be real
    # calendar dates), plus the composite digit over the data of lines 1-2.
    self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
                               MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
                               MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date),
                               MRZCheckDigit.compute(a[5:30] + b[0:7] + b[8:15] + b[18:29]) == self.check_composite]
    self.valid_line_lengths = [len_a == 30, len_b == 30, len_c == 30]
    self.valid_misc = [a[0] in 'IAC']  # TD1 documents start with I, A or C
    # Score: each check digit weighs 10, each length/misc check weighs 1,
    # plus 1 so a fully valid MRZ reaches exactly 100.
    self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) + 1
    self.valid_score = 100*self.valid_score//(40+3+1+1)
    self.valid_number, self.valid_date_of_birth, self.valid_expiration_date, self.valid_composite = self.valid_check_digits
    return self.valid_score == 100
def _parse_td2(self, a, b):
    """Parse the two 36-character lines of a TD2-sized MRZ.

    Short lines are extended with the '<' filler before extraction; the
    raw line lengths still contribute to the validity score.  Returns
    True iff every check passes (valid_score == 100).
    """
    raw_len_a, raw_len_b = len(a), len(b)
    # Extend short lines with filler so fixed slices are always defined.
    a = a.ljust(36, '<')
    b = b.ljust(36, '<')
    self.type = a[0:2]
    self.country = a[2:5]
    # Name field: 'SURNAME<<GIVEN<NAMES'; no '<<' means an empty names part.
    surname_part, _sep, names_part = a[5:36].partition('<<')
    self.surname = surname_part.replace('<', ' ').strip()
    self.names = names_part.replace('<', ' ').strip()
    self.number = b[0:9]
    self.check_number = b[9]
    self.nationality = b[10:13]
    self.date_of_birth = b[13:19]
    self.check_date_of_birth = b[19]
    self.sex = b[20]
    self.expiration_date = b[21:27]
    self.check_expiration_date = b[27]
    self.optional1 = b[28:35]
    self.check_composite = b[35]

    def _digit_ok(value, digit):
        # A field is valid when its computed check digit matches the MRZ.
        return MRZCheckDigit.compute(value) == digit

    self.valid_check_digits = [
        _digit_ok(self.number, self.check_number),
        _digit_ok(self.date_of_birth, self.check_date_of_birth) and MRZ._check_date(self.date_of_birth),
        _digit_ok(self.expiration_date, self.check_expiration_date) and MRZ._check_date(self.expiration_date),
        _digit_ok(b[0:10] + b[13:20] + b[21:35], self.check_composite),
    ]
    self.valid_line_lengths = [raw_len_a == 36, raw_len_b == 36]
    self.valid_misc = [a[0] in 'ACI']
    # 10 points per check digit, 1 per length/misc check, +1 => 100 max.
    points = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) + 1
    self.valid_score = 100*points//(40 + 2 + 1 + 1)
    (self.valid_number, self.valid_date_of_birth,
     self.valid_expiration_date, self.valid_composite) = self.valid_check_digits
    return self.valid_score == 100
def _parse_td3(self, a, b):
    """Parse the two 44-character lines of a TD3-sized (passport) MRZ.

    Short lines are right-padded with the '<' filler before slicing; the
    raw line lengths still count towards the validity score.  Returns
    True iff every check passes (valid_score == 100).
    """
    # Remember raw lengths: they feed valid_line_lengths below.
    len_a, len_b = len(a), len(b)
    # Pad short lines so the fixed-offset slicing below cannot fail.
    if len(a) < 44:
        a = a + '<'*(44 - len(a))
    if len(b) < 44:
        b = b + '<'*(44 - len(b))
    self.type = a[0:2]
    self.country = a[2:5]
    # Name field: 'SURNAME<<GIVEN<NAMES'; a missing '<<' leaves names empty.
    surname_names = a[5:44].split('<<', 1)
    if len(surname_names) < 2:
        surname_names += ['']
    self.surname, self.names = surname_names
    self.names = self.names.replace('<', ' ').strip()
    self.surname = self.surname.replace('<', ' ').strip()
    # Line 2 layout: number(+check), nationality, birth date(+check), sex,
    # expiry date(+check), personal number(+check), composite check digit.
    self.number = b[0:9]
    self.check_number = b[9]
    self.nationality = b[10:13]
    self.date_of_birth = b[13:19]
    self.check_date_of_birth = b[19]
    self.sex = b[20]
    self.expiration_date = b[21:27]
    self.check_expiration_date = b[27]
    self.personal_number = b[28:42]
    self.check_personal_number = b[42]
    self.check_composite = b[43]
    # Five check digits: number, birth date, expiry date (both dates must
    # also be real calendar dates), composite, and the optional personal
    # number (an all-filler personal number with check '<' or '0' passes).
    self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
                               MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
                               MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date),
                               MRZCheckDigit.compute(b[0:10] + b[13:20] + b[21:43]) == self.check_composite,
                               ((self.check_personal_number == '<' or self.check_personal_number == '0') and self.personal_number == '<<<<<<<<<<<<<<') # PN is optional
                               or MRZCheckDigit.compute(self.personal_number) == self.check_personal_number]
    self.valid_line_lengths = [len_a == 44, len_b == 44]
    self.valid_misc = [a[0] in 'P']  # passports start with 'P'
    # Score: each check digit weighs 10, each length/misc check weighs 1,
    # plus 1 so a fully valid passport reaches exactly 100.
    self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) +1
    self.valid_score = 100*self.valid_score//(50+2+1+1)
    self.valid_number, self.valid_date_of_birth, self.valid_expiration_date, self.valid_personal_number, self.valid_composite = self.valid_check_digits
    return self.valid_score == 100
@staticmethod
def _check_date(ymd):
try:
datetime.strptime(ymd, '%y%m%d')
return True
except ValueError:
return False
def _parse_mrv(self, a, b, length=44):
    """Parse a two-line machine-readable visa (MRV-A: 44 chars, MRV-B: 36).

    :param a: first MRZ line ('V' + issuing country + name field).
    :param b: second MRZ line (number, nationality, dates, optional data).
    :param length: nominal line length, 44 for MRV-A and 36 for MRV-B.
    :return: True iff every check passes (valid_score == 100).
    """
    # Remember raw lengths: they feed valid_line_lengths below.
    len_a, len_b = len(a), len(b)
    # Pad short lines with the filler character up to the *nominal* length.
    # (Bug fix: this used to pad to 44 unconditionally, over-padding MRV-B
    # lines; harmless for the slices below, but wrong for length == 36.)
    if len(a) < length:
        a = a + '<'*(length - len(a))
    if len(b) < length:
        b = b + '<'*(length - len(b))
    self.type = a[0:2]
    self.country = a[2:5]
    # Name field: 'SURNAME<<GIVEN<NAMES'; a missing '<<' leaves names empty.
    surname_names = a[5:length].split('<<', 1)
    if len(surname_names) < 2:
        surname_names += ['']
    self.surname, self.names = surname_names
    self.names = self.names.replace('<', ' ').strip()
    self.surname = self.surname.replace('<', ' ').strip()
    self.number = b[0:9]
    self.check_number = b[9]
    self.nationality = b[10:13]
    self.date_of_birth = b[13:19]
    self.check_date_of_birth = b[19]
    self.sex = b[20]
    self.expiration_date = b[21:27]
    self.check_expiration_date = b[27]
    self.optional1 = b[28:length]
    # Check digits for number, birth date and expiry date.  Like the TD
    # parsers, also require the dates to be real calendar dates (keeps
    # validation consistent across document types).
    self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
                               MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
                               MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date)]
    self.valid_line_lengths = [len_a == length, len_b == length]
    self.valid_misc = [a[0] == 'V']  # visas start with 'V'
    # Score: each check digit weighs 10, each length/misc check weighs 1,
    # plus 1 so a fully valid visa reaches exactly 100.
    self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) + 1
    self.valid_score = 100*self.valid_score//(30+2+1+1)
    self.valid_number, self.valid_date_of_birth, self.valid_expiration_date = self.valid_check_digits
    return self.valid_score == 100
|
konstantint/PassportEye | passporteye/mrz/text.py | MRZ.to_dict | python | def to_dict(self):
result = OrderedDict()
result['mrz_type'] = self.mrz_type
result['valid_score'] = self.valid_score
if self.mrz_type is not None:
result['type'] = self.type
result['country'] = self.country
result['number'] = self.number
result['date_of_birth'] = self.date_of_birth
result['expiration_date'] = self.expiration_date
result['nationality'] = self.nationality
result['sex'] = self.sex
result['names'] = self.names
result['surname'] = self.surname
if self.mrz_type == 'TD1':
result['optional1'] = self.optional1
result['optional2'] = self.optional2
elif self.mrz_type in ['TD2', 'MRVA', 'MRVB']:
result['optional1'] = self.optional1
else:
result['personal_number'] = self.personal_number
result['check_number'] = self.check_number
result['check_date_of_birth'] = self.check_date_of_birth
result['check_expiration_date'] = self.check_expiration_date
if self.mrz_type not in ['MRVA', 'MRVB']:
result['check_composite'] = self.check_composite
if self.mrz_type == 'TD3':
result['check_personal_number'] = self.check_personal_number
result['valid_number'] = self.valid_check_digits[0]
result['valid_date_of_birth'] = self.valid_check_digits[1]
result['valid_expiration_date'] = self.valid_check_digits[2]
if self.mrz_type not in ['MRVA', 'MRVB']:
result['valid_composite'] = self.valid_check_digits[3]
if self.mrz_type == 'TD3':
result['valid_personal_number'] = self.valid_check_digits[4]
if 'method' in self.aux:
result['method'] = self.aux['method']
return result | Converts this object to an (ordered) dictionary of field-value pairs.
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<']).to_dict()
>>> assert m['type'] == 'ID' and m['country'] == 'AUT' and m['number'] == '10000999<'
>>> assert m['valid_number'] and m['valid_date_of_birth'] and m['valid_expiration_date'] and not m['valid_composite'] | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/text.py#L183-L227 | null | class MRZ(object):
"""
A simple parser for a Type1 or Type3 Machine-readable zone strings from identification documents.
See:
- https://en.wikipedia.org/wiki/Machine-readable_passport
- http://www.icao.int/publications/pages/publication.aspx?docnum=9303
Usage:
Represent the MRZ as a list of 2 or 3 lines, create an instance of this class,
and read off the various fields filled by the parser.
The first field you should check is .mrz_type.
It is either None (no parsing done at all) or 'TD1', 'TD2', 'TD3', 'MRVA' or 'MRVB' depending on the type of the MRZ.
The next one is 'valid'. If this is true, you may be pretty sure the parsing was successful and
all the checksum digits passed the test as well. Sometimes the validity check may fail for some trivial reason
(e.g. nonstandard document type character or one of the checksums wrong while others corect) -
for this reason there is a field `valid_score`, which is an integer between 0 (nothing is valid) to 100
(all checksums, line lengths and miscellaneous checks passed).
Otherwise at least some of the checks failed, the meaning of which is up to you to interpret.
When given invalid data, the algorithm attempts to do some trivial data clean-up: drop whitespaces from lines,
and extend short lines with filler characters <, after which the fields are extracted from the lines as if
they were valid.
The parsing computes three validation indicators:
valid_check_digits - a list of booleans indicating which of the "check digits" in the MRZ were valid.
TD1/TD2 has four check digits, TD3 - five, MRVA/B - three.
The separate booleans are also available as valid_number, valid_date_of_birth, valid_expiration_date, valid_composite
and valid_personal_number (TD3 only).
valid_line_lengths - a list of booleans, indicating which of the lines (3 in TD1, 2 in TD2/TD3) had the expected length.
valid_misc - a list of booleans, indicating various additional validity checks (unspecified, see code).
The valid_score field counts the "validity score" according to the flags above and is an int between 0 and 100.
When all validation passes, the valid field is set to True as well.
However, you may attempt reading fields from a "not completely valid" MRZ as well sometimes.
The reported fields are: type, country, number, date_of_birth, sex, expiration_date, nationality, names, surname
TD1 MRZ also has fields optional1 and optional2. TD2 MRZ has optional1, TD3 MRZ has personal_number.
MRVA and MRVB are the same as TD3 except personal_number and check_composite (which are not present)
The field aux is a dictionary of additional data that may be associated with MRZ by OCR code,
e.g. aux['roi'], aux['box'] or aux['text'] may be used to carry around the part of the image that was used
to extract the information, aux['method'] to mark the method used, etc.
# Valid ID card (TD1)
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<4', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<'])
>>> assert m.mrz_type == 'TD1' and m.valid and m.valid_score == 100
>>> assert m.type == 'ID' and m.country == 'AUT' and m.number == '10000999<'
>>> assert m.date_of_birth == '710909' and m.sex == 'F' and m.expiration_date == '111231' and m.nationality == 'AUT'
>>> assert m.names == 'ISOLDE' and m.surname == 'MUSTERFRAU'
>>> assert m.check_number == '6' and m.check_date_of_birth == '4' and m.check_expiration_date == '5' and m.check_composite == '4'
>>> assert m.optional1 == '<<<<<<<<<<<<<<<' and m.optional2 == '<<<<<<<<<<<'
# Valid TD2
>>> m = MRZ(['I<UTOERIKSSON<<ANNA<MARIA<<<<<<<<<<<', 'D231458907UTO7408122F1204159<<<<<<<6'])
>>> assert m.mrz_type == 'TD2' and m.valid and m.valid_score == 100
>>> assert m.type == 'I<' and m.country == 'UTO' and m.number == 'D23145890'
>>> assert m.date_of_birth == '740812' and m.sex == 'F' and m.expiration_date == '120415' and m.nationality == 'UTO'
>>> assert m.names == 'ANNA MARIA' and m.surname == 'ERIKSSON'
>>> assert m.check_number == '7' and m.check_date_of_birth == '2' and m.check_expiration_date == '9' and m.check_composite == '6'
# Valid Visa
>>> m = MRZ(['VIUSATRAVELER<<HAPPYPERSON<<<<<<<<<<<<<<<<<<', '555123ABC6GBR6502056F04122361FLNDDDAM5803085'])
>>> assert m.mrz_type == 'MRVA' and m.valid and m.valid_score == 100
>>> assert m.type == 'VI' and m.country == 'USA' and m.number == '555123ABC'
>>> assert m.date_of_birth == '650205' and m.sex == 'F' and m.expiration_date == '041223' and m.nationality == 'GBR'
>>> assert m.names == 'HAPPYPERSON' and m.surname == 'TRAVELER'
>>> assert m.check_number == '6' and m.check_date_of_birth == '6' and m.check_expiration_date == '6'
# Valid passport (TD3)
>>> m = MRZ(['P<POLKOWALSKA<KWIATKOWSKA<<JOANNA<<<<<<<<<<<', 'AA00000000POL6002084F1412314<<<<<<<<<<<<<<<4'])
>>> assert m.mrz_type == 'TD3' and m.valid and m.valid_score == 100
>>> assert m.type == 'P<' and m.country == 'POL' and m.number == 'AA0000000' and m.personal_number == '<<<<<<<<<<<<<<'
>>> assert m.date_of_birth == '600208' and m.sex == 'F' and m.expiration_date == '141231' and m.nationality == 'POL'
>>> assert m.names == 'JOANNA' and m.surname == 'KOWALSKA KWIATKOWSKA'
>>> assert m.check_number == '0' and m.check_date_of_birth == '4' and m.check_expiration_date == '4' and m.check_personal_number == '<' and m.check_composite == '4'
# Invalid examples
>>> assert MRZ([]).mrz_type is None
>>> assert MRZ([1,2,3,4]).mrz_type is None
>>> assert MRZ([1,2,3]).mrz_type is None
>>> m = MRZ(['IDAUT10000999<6<<<<<<<<<<<<<<<', '7109094F1112315AUT<<<<<<<<<<<6', 'MUSTERFRAU<<ISOLDE<<<<<<<<<<<<'])
>>> assert m.mrz_type == 'TD1' and not m.valid and m.valid_score < 100
>>> assert m.valid_check_digits == [True, True, True, False]
>>> assert m.type == 'ID' and m.country == 'AUT' and m.number == '10000999<'
# The utility from_ocr function will convert a single newline-separated string obtained as OCR output
# into MRZ lines, doing some basic cleanup inbetween (removing empty lines and lines that are too short,
# removing spaces, converting mismatched characters, etc), and then attempt the parsing.
>>> m = MRZ.from_ocr('\\n\\n this line useless \\n IDAUT10000999<6 <<<<<<<<< <<<<<< \\n 7IO9O94FIi iz3iSAUT<<<<<<<<<<<4 \\n MUSTERFRA U<<ISOLDE<<< <<<<<<<<<')
>>> assert m.valid and m.names == 'ISOLDE' and m.surname == 'MUSTERFRAU'
"""
def __init__(self, mrz_lines):
"""
Parse a TD1/TD2/TD3/MRVA/MRVB MRZ from a single newline-separated string or a list of strings.
:param mrz_lines: either a single string with newlines, or a list of 2 or 3 strings, representing the lines of an MRZ.
:return: self
"""
self._parse(mrz_lines)
self.aux = {}
@staticmethod
def from_ocr(mrz_ocr_string):
"""Given a single string which is output from an OCR routine, cleans it up using MRZ.ocr_cleanup and creates a MRZ object"""
return MRZ(MRZOCRCleaner.apply(mrz_ocr_string))
def __repr__(self):
if self.valid:
return "MRZ({0}[valid], {1}, {2}, {3}, {4}, {5})".format(self.mrz_type, self.number, self.names, self.surname, self.sex, self.date_of_birth)
elif self.valid_score > 0:
return "MRZ({0}[{1}], {2}, {3}, {4}, {5}, {6})".format(self.mrz_type, self.valid_score, self.number, self.names, self.surname, self.sex, self.date_of_birth)
else:
return "MRZ(invalid)"
@staticmethod
def _guess_type(mrz_lines):
"""Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None.
The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V'
>>> MRZ._guess_type([]) is None
True
>>> MRZ._guess_type([1]) is None
True
>>> MRZ._guess_type([1,2]) is None # No len() for numbers
True
>>> MRZ._guess_type(['a','b']) # This way passes
'TD2'
>>> MRZ._guess_type(['*'*40, '*'*40])
'TD3'
>>> MRZ._guess_type([1,2,3])
'TD1'
>>> MRZ._guess_type(['V'*40, '*'*40])
'MRVA'
>>> MRZ._guess_type(['V'*36, '*'*36])
'MRVB'
"""
try:
if len(mrz_lines) == 3:
return 'TD1'
elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40:
return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2'
elif len(mrz_lines) == 2:
return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3'
else:
return None
except Exception: #pylint: disable=broad-except
return None
def _parse(self, mrz_lines):
self.mrz_type = MRZ._guess_type(mrz_lines)
try:
if self.mrz_type == 'TD1':
self.valid = self._parse_td1(*mrz_lines)
elif self.mrz_type == 'TD2':
self.valid = self._parse_td2(*mrz_lines)
elif self.mrz_type == 'TD3':
self.valid = self._parse_td3(*mrz_lines)
elif self.mrz_type == 'MRVA':
self.valid = self._parse_mrv(*mrz_lines, length=44)
elif self.mrz_type == 'MRVB':
self.valid = self._parse_mrv(*mrz_lines, length=36)
else:
self.valid = False
self.valid_score = 0
except Exception: #pylint: disable=broad-except
self.mrz_type = None
self.valid = False
self.valid_score = 0
def _parse_td1(self, a, b, c):
len_a, len_b, len_c = len(a), len(b), len(c)
if len(a) < 30:
a = a + '<'*(30 - len(a))
if len(b) < 30:
b = b + '<'*(30 - len(b))
if len(c) < 30:
c = c + '<'*(30 - len(c))
self.type = a[0:2]
self.country = a[2:5]
self.number = a[5:14]
self.check_number = a[14]
self.optional1 = a[15:30]
self.date_of_birth = b[0:6]
self.check_date_of_birth = b[6]
self.sex = b[7]
self.expiration_date = b[8:14]
self.check_expiration_date = b[14]
self.nationality = b[15:18]
self.optional2 = b[18:29]
self.check_composite = b[29]
surname_names = c.split('<<', 1)
if len(surname_names) < 2:
surname_names += ['']
self.surname, self.names = surname_names
self.names = self.names.replace('<', ' ').strip()
self.surname = self.surname.replace('<', ' ').strip()
self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date),
MRZCheckDigit.compute(a[5:30] + b[0:7] + b[8:15] + b[18:29]) == self.check_composite]
self.valid_line_lengths = [len_a == 30, len_b == 30, len_c == 30]
self.valid_misc = [a[0] in 'IAC']
self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) + 1
self.valid_score = 100*self.valid_score//(40+3+1+1)
self.valid_number, self.valid_date_of_birth, self.valid_expiration_date, self.valid_composite = self.valid_check_digits
return self.valid_score == 100
def _parse_td2(self, a, b):
len_a, len_b = len(a), len(b)
if len(a) < 36:
a = a + '<'*(36 - len(a))
if len(b) < 36:
b = b + '<'*(36 - len(b))
self.type = a[0:2]
self.country = a[2:5]
surname_names = a[5:36].split('<<', 1)
if len(surname_names) < 2:
surname_names += ['']
self.surname, self.names = surname_names
self.names = self.names.replace('<', ' ').strip()
self.surname = self.surname.replace('<', ' ').strip()
self.number = b[0:9]
self.check_number = b[9]
self.nationality = b[10:13]
self.date_of_birth = b[13:19]
self.check_date_of_birth = b[19]
self.sex = b[20]
self.expiration_date = b[21:27]
self.check_expiration_date = b[27]
self.optional1 = b[28:35]
self.check_composite = b[35]
self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date),
MRZCheckDigit.compute(b[0:10] + b[13:20] + b[21:35]) == self.check_composite]
self.valid_line_lengths = [len_a == 36, len_b == 36]
self.valid_misc = [a[0] in 'ACI']
self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) +1
self.valid_score = 100*self.valid_score//(40+2+1+1)
self.valid_number, self.valid_date_of_birth, self.valid_expiration_date, self.valid_composite = self.valid_check_digits
return self.valid_score == 100
def _parse_td3(self, a, b):
len_a, len_b = len(a), len(b)
if len(a) < 44:
a = a + '<'*(44 - len(a))
if len(b) < 44:
b = b + '<'*(44 - len(b))
self.type = a[0:2]
self.country = a[2:5]
surname_names = a[5:44].split('<<', 1)
if len(surname_names) < 2:
surname_names += ['']
self.surname, self.names = surname_names
self.names = self.names.replace('<', ' ').strip()
self.surname = self.surname.replace('<', ' ').strip()
self.number = b[0:9]
self.check_number = b[9]
self.nationality = b[10:13]
self.date_of_birth = b[13:19]
self.check_date_of_birth = b[19]
self.sex = b[20]
self.expiration_date = b[21:27]
self.check_expiration_date = b[27]
self.personal_number = b[28:42]
self.check_personal_number = b[42]
self.check_composite = b[43]
self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth and MRZ._check_date(self.date_of_birth),
MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date and MRZ._check_date(self.expiration_date),
MRZCheckDigit.compute(b[0:10] + b[13:20] + b[21:43]) == self.check_composite,
((self.check_personal_number == '<' or self.check_personal_number == '0') and self.personal_number == '<<<<<<<<<<<<<<') # PN is optional
or MRZCheckDigit.compute(self.personal_number) == self.check_personal_number]
self.valid_line_lengths = [len_a == 44, len_b == 44]
self.valid_misc = [a[0] in 'P']
self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) +1
self.valid_score = 100*self.valid_score//(50+2+1+1)
self.valid_number, self.valid_date_of_birth, self.valid_expiration_date, self.valid_personal_number, self.valid_composite = self.valid_check_digits
return self.valid_score == 100
@staticmethod
def _check_date(ymd):
try:
datetime.strptime(ymd, '%y%m%d')
return True
except ValueError:
return False
def _parse_mrv(self, a, b, length=44):
len_a, len_b = len(a), len(b)
if len(a) < length:
a = a + '<'*(44 - len(a))
if len(b) < length:
b = b + '<'*(44 - len(b))
self.type = a[0:2]
self.country = a[2:5]
surname_names = a[5:length].split('<<', 1)
if len(surname_names) < 2:
surname_names += ['']
self.surname, self.names = surname_names
self.names = self.names.replace('<', ' ').strip()
self.surname = self.surname.replace('<', ' ').strip()
self.number = b[0:9]
self.check_number = b[9]
self.nationality = b[10:13]
self.date_of_birth = b[13:19]
self.check_date_of_birth = b[19]
self.sex = b[20]
self.expiration_date = b[21:27]
self.check_expiration_date = b[27]
self.optional1 = b[28:length]
self.valid_check_digits = [MRZCheckDigit.compute(self.number) == self.check_number,
MRZCheckDigit.compute(self.date_of_birth) == self.check_date_of_birth,
MRZCheckDigit.compute(self.expiration_date) == self.check_expiration_date]
self.valid_line_lengths = [len_a == length, len_b == length]
self.valid_misc = [a[0] == 'V']
self.valid_score = 10*sum(self.valid_check_digits) + sum(self.valid_line_lengths) + sum(self.valid_misc) + 1
self.valid_score = 100*self.valid_score//(30+2+1+1)
self.valid_number, self.valid_date_of_birth, self.valid_expiration_date = self.valid_check_digits
return self.valid_score == 100
|
konstantint/PassportEye | passporteye/util/pdf.py | extract_first_jpeg_in_pdf | python | def extract_first_jpeg_in_pdf(fstream):
    """Scan the PDF in *fstream* and return the raw bytes of the first
    embedded JPEG image found, or None if there is none."""
    parser = PDFParser(fstream)
    # pdfminer's py2 and py3 forks wire the parser and document together
    # differently: py2 passes the parser to the constructor, py3 links the
    # two objects explicitly.
    if PY2:
        document = PDFDocument(parser)
    else:
        document = PDFDocument()
        parser.set_document(document)
        document.set_parser(parser)
    document.initialize('')  # empty password
    rsrcmgr = PDFResourceManager()
    device = PDFPageAggregator(rsrcmgr)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    pages = PDFPage.create_pages(document) if PY2 else document.get_pages()
    for page in pages:
        interpreter.process_page(page)
        layout = device.result
        # Images live inside figure elements of the page layout.
        for el in layout:
            if isinstance(el, LTFigure):
                for im in el:
                    if isinstance(im, LTImage):
                        # Found one!
                        st = None  # NOTE(review): unused local; candidate for removal
                        try:
                            imdata = im.stream.get_data()
                        except:
                            # NOTE(review): bare 'except' also traps KeyboardInterrupt/
                            # SystemExit; narrowing to 'except Exception' would be safer.
                            # Failed to decode (seems to happen nearly always - there's probably a bug in PDFMiner), oh well...
                            imdata = im.stream.get_rawdata()
                        # Accept only streams starting with the JPEG/JFIF magic bytes.
                        if imdata is not None and imdata.startswith(b'\xff\xd8\xff\xe0'):
                            return imdata
    return None
Returns either None (if none found) or a string of data for the image.
There is no 100% guarantee for this code, yet it seems to work fine with most
scanner-produced images around.
More testing might be needed though.
Note that in principle there is no serious problem extracting PNGs or other image types from PDFs,
however at the moment I do not have enough test data to try this, and the one I have seems to be unsuitable
for PDFMiner.
:param fstream: Readable binary stream of the PDF
:return: binary stream, containing the whole contents of the JPEG image or None if extraction failed. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pdf.py#L23-L67 | null | '''
PassportEye::Util: PDF processing utilities.
Author: Konstantin Tretyakov
License: MIT
'''
import sys
PY2 = sys.version_info.major == 2
if PY2:
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
else:
from pdfminer.pdfparser import PDFParser, PDFDocument, PDFPage
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTFigure, LTImage
|
konstantint/PassportEye | passporteye/util/pipeline.py | Pipeline.add_component | python | def add_component(self, name, callable, provides=None, depends=None):
    """Register *callable* as the component *name*.

    *provides* and *depends* are lists of data keys; when omitted they are
    read from the callable's __provides__/__depends__ attributes.
    Raises if some key in *provides* already has a producer.
    """
    # Fall back to metadata attached to the callable itself.
    # (NOTE(review): the parameter name shadows the builtin 'callable';
    # renaming it would break keyword callers, so it is kept.)
    provides = provides or getattr(callable, '__provides__', [])
    depends = depends or getattr(callable, '__depends__', [])
    # Each data key may have at most one producing component.
    for p in provides:
        if p in self.whoprovides:
            raise Exception("There is already a component that provides %s" % p)
    self.provides[name] = provides
    self.depends[name] = depends
    self.components[name] = callable
    # Reverse index: data key -> producing component.
    for p in provides:
        self.whoprovides[p] = name
keys the component computes and what keys it requires to be present. If those are not given, the callable must
have fields __provides__ and __depends__. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L51-L66 | null | class Pipeline(object):
"""
The computation performed in order to extract the information from an image
is essentially a list of steps of various complexity, where each step uses
results of the previous steps and introduces its own results.
Although this is nothing more than a standard sequential program, it seems to be somewhat
more convenient sometimes to regard it as a "pipeline" consisting of pluggable "components",
where each component explicitly specifies what inputs it needs and what outputs it produces,
and the workflow engine wires up the inputs to the outputs.
This class offers provides a simple implementation of such a pipeline.
It keeps track of a dictionary of values that were already computed, a dictionary of
"components" which know how to compute other values, and routes item accesses to computations automatically.
>>> a = Pipeline()
>>> a.add_component('1', lambda: 1, ['a'], [])
>>> a.add_component('2', lambda: 2, ['b'], [])
>>> a.add_component('s,d', lambda x,y: (x+y, x-y), ['c', 'd'], ['a', 'b'])
>>> a.add_component('sd', lambda x,y: (x+y, x-y), ['e'], ['a', 'b'])
>>> a['c']
3
>>> a['d']
-1
>>> a['e']
(3, -1)
>>> a.replace_component('1', lambda: 2, ['a'], [])
>>> a['e']
(4, 0)
>>> a['d']
0
"""
def __init__(self):
self.data = dict() # Maps key -> data item.
self.components = dict() # Maps name -> component
self.provides = dict() # Component name -> provides list
self.depends = dict() # Component name -> depends list
self.whoprovides = dict() # key -> component name
self.data['__data__'] = self.data
self.data['__pipeline__'] = self
def remove_component(self, name):
"""Removes an existing component with a given name, invalidating all the values computed by
the previous component."""
if name not in self.components:
raise Exception("No component named %s" % name)
del self.components[name]
del self.depends[name]
for p in self.provides[name]:
del self.whoprovides[p]
self.invalidate(p)
del self.provides[name]
def replace_component(self, name, callable, provides=None, depends=None):
"""Changes an existing component with a given name, invalidating all the values computed by
the previous component and its successors."""
self.remove_component(name)
self.add_component(name, callable, provides, depends)
def invalidate(self, key):
"""Remove the given data item along with all items that depend on it in the graph."""
if key not in self.data:
return
del self.data[key]
# Find all components that used it and invalidate their results
for cname in self.components:
if key in self.depends[cname]:
for downstream_key in self.provides[cname]:
self.invalidate(downstream_key)
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, key):
self._compute(key)
return self.data[key]
def _compute(self, key):
if key not in self.data:
cname = self.whoprovides[key]
for d in self.depends[cname]:
self._compute(d)
inputs = [self.data[d] for d in self.depends[cname]]
results = self.components[cname](*inputs)
if len(self.provides[cname]) == 1:
self.data[self.provides[cname][0]] = results
else:
for k, v in zip(self.provides[cname], results):
self.data[k] = v
|
konstantint/PassportEye | passporteye/util/pipeline.py | Pipeline.remove_component | python | def remove_component(self, name):
    """Remove the component *name*, invalidating every value it provided
    (and, transitively, everything computed from those values)."""
    if name not in self.components:
        raise Exception("No component named %s" % name)
    del self.components[name]
    del self.depends[name]
    # Drop the reverse mapping and cascade-invalidate each produced key.
    for p in self.provides[name]:
        del self.whoprovides[p]
        self.invalidate(p)
    del self.provides[name]
the previous component. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L68-L78 | [
"def invalidate(self, key):\n \"\"\"Remove the given data item along with all items that depend on it in the graph.\"\"\"\n if key not in self.data:\n return\n del self.data[key]\n\n # Find all components that used it and invalidate their results\n for cname in self.components:\n if key... | class Pipeline(object):
"""
The computation performed in order to extract the information from an image
is essentially a list of steps of various complexity, where each step uses
results of the previous steps and introduces its own results.
Although this is nothing more than a standard sequential program, it seems to be somewhat
more convenient sometimes to regard it as a "pipeline" consisting of pluggable "components",
where each component explicitly specifies what inputs it needs and what outputs it produces,
and the workflow engine wires up the inputs to the outputs.
This class offers provides a simple implementation of such a pipeline.
It keeps track of a dictionary of values that were already computed, a dictionary of
"components" which know how to compute other values, and routes item accesses to computations automatically.
>>> a = Pipeline()
>>> a.add_component('1', lambda: 1, ['a'], [])
>>> a.add_component('2', lambda: 2, ['b'], [])
>>> a.add_component('s,d', lambda x,y: (x+y, x-y), ['c', 'd'], ['a', 'b'])
>>> a.add_component('sd', lambda x,y: (x+y, x-y), ['e'], ['a', 'b'])
>>> a['c']
3
>>> a['d']
-1
>>> a['e']
(3, -1)
>>> a.replace_component('1', lambda: 2, ['a'], [])
>>> a['e']
(4, 0)
>>> a['d']
0
"""
    def __init__(self):
        """Create an empty pipeline with no components and no computed values."""
        self.data = dict() # Maps key -> data item.
        self.components = dict() # Maps name -> component
        self.provides = dict() # Component name -> provides list
        self.depends = dict() # Component name -> depends list
        self.whoprovides = dict() # key -> component name
        # Expose the raw data dict and the pipeline itself under reserved keys,
        # so components may list '__data__' or '__pipeline__' in their depends.
        self.data['__data__'] = self.data
        self.data['__pipeline__'] = self
    def add_component(self, name, callable, provides=None, depends=None):
        """
        Add a given callable to a list of components. The provides and depends are lists of strings, specifying what
        keys the component computes and what keys it requires to be present. If those are not given, the callable must
        have fields __provides__ and __depends__.
        """
        provides = provides or getattr(callable, '__provides__', [])
        depends = depends or getattr(callable, '__depends__', [])
        # Each key may be produced by at most one component.
        for p in provides:
            if p in self.whoprovides:
                raise Exception("There is already a component that provides %s" % p)
        self.provides[name] = provides
        self.depends[name] = depends
        self.components[name] = callable
        # Reverse mapping used by _compute to locate the producer of a key.
        for p in provides:
            self.whoprovides[p] = name
    def replace_component(self, name, callable, provides=None, depends=None):
        """Changes an existing component with a given name, invalidating all the values computed by
        the previous component and its successors."""
        # Remove-then-add keeps the bookkeeping dicts consistent and triggers
        # invalidation of everything the old component produced.
        self.remove_component(name)
        self.add_component(name, callable, provides, depends)
    def invalidate(self, key):
        """Remove the given data item along with all items that depend on it in the graph."""
        if key not in self.data:
            return
        del self.data[key]
        # Find all components that used it and invalidate their results
        for cname in self.components:
            if key in self.depends[cname]:
                # Recursively drop everything downstream of the removed key.
                for downstream_key in self.provides[cname]:
                    self.invalidate(downstream_key)
    def __setitem__(self, key, value):
        # Manually injected values are stored directly; no invalidation happens here.
        self.data[key] = value
    def __getitem__(self, key):
        # Lazily compute the value (and its dependencies) on first access.
        self._compute(key)
        return self.data[key]
    def _compute(self, key):
        """Ensure `key` is present in self.data, running its producer component
        (and, recursively, the producers of its dependencies) if necessary."""
        if key not in self.data:
            cname = self.whoprovides[key]
            # Make all inputs of the producing component available first.
            for d in self.depends[cname]:
                self._compute(d)
            inputs = [self.data[d] for d in self.depends[cname]]
            results = self.components[cname](*inputs)
            if len(self.provides[cname]) == 1:
                # Single-output components return the value itself, not a 1-tuple.
                self.data[self.provides[cname][0]] = results
            else:
                for k, v in zip(self.provides[cname], results):
                    self.data[k] = v
|
konstantint/PassportEye | passporteye/util/pipeline.py | Pipeline.replace_component | python | def replace_component(self, name, callable, provides=None, depends=None):
self.remove_component(name)
self.add_component(name, callable, provides, depends) | Changes an existing component with a given name, invalidating all the values computed by
the previous component and its successors. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L80-L84 | [
"def add_component(self, name, callable, provides=None, depends=None):\n \"\"\"\n Add a given callable to a list of components. The provides and depends are lists of strings, specifying what\n keys the component computes and what keys it requires to be present. If those are not given, the callable must\n ... | class Pipeline(object):
"""
The computation performed in order to extract the information from an image
is essentially a list of steps of various complexity, where each step uses
results of the previous steps and introduces its own results.
Although this is nothing more than a standard sequential program, it seems to be somewhat
more convenient sometimes to regard it as a "pipeline" consisting of pluggable "components",
where each component explicitly specifies what inputs it needs and what outputs it produces,
and the workflow engine wires up the inputs to the outputs.
This class offers provides a simple implementation of such a pipeline.
It keeps track of a dictionary of values that were already computed, a dictionary of
"components" which know how to compute other values, and routes item accesses to computations automatically.
>>> a = Pipeline()
>>> a.add_component('1', lambda: 1, ['a'], [])
>>> a.add_component('2', lambda: 2, ['b'], [])
>>> a.add_component('s,d', lambda x,y: (x+y, x-y), ['c', 'd'], ['a', 'b'])
>>> a.add_component('sd', lambda x,y: (x+y, x-y), ['e'], ['a', 'b'])
>>> a['c']
3
>>> a['d']
-1
>>> a['e']
(3, -1)
>>> a.replace_component('1', lambda: 2, ['a'], [])
>>> a['e']
(4, 0)
>>> a['d']
0
"""
    def __init__(self):
        """Create an empty pipeline with no components and no computed values."""
        self.data = dict() # Maps key -> data item.
        self.components = dict() # Maps name -> component
        self.provides = dict() # Component name -> provides list
        self.depends = dict() # Component name -> depends list
        self.whoprovides = dict() # key -> component name
        # Expose the raw data dict and the pipeline itself under reserved keys,
        # so components may list '__data__' or '__pipeline__' in their depends.
        self.data['__data__'] = self.data
        self.data['__pipeline__'] = self
    def add_component(self, name, callable, provides=None, depends=None):
        """
        Add a given callable to a list of components. The provides and depends are lists of strings, specifying what
        keys the component computes and what keys it requires to be present. If those are not given, the callable must
        have fields __provides__ and __depends__.
        """
        provides = provides or getattr(callable, '__provides__', [])
        depends = depends or getattr(callable, '__depends__', [])
        # Each key may be produced by at most one component.
        for p in provides:
            if p in self.whoprovides:
                raise Exception("There is already a component that provides %s" % p)
        self.provides[name] = provides
        self.depends[name] = depends
        self.components[name] = callable
        # Reverse mapping used by _compute to locate the producer of a key.
        for p in provides:
            self.whoprovides[p] = name
    def remove_component(self, name):
        """Removes an existing component with a given name, invalidating all the values computed by
        the previous component."""
        if name not in self.components:
            raise Exception("No component named %s" % name)
        del self.components[name]
        del self.depends[name]
        for p in self.provides[name]:
            del self.whoprovides[p]
            # Drop the cached value (and its dependents) produced by the removed component.
            self.invalidate(p)
        del self.provides[name]
    def invalidate(self, key):
        """Remove the given data item along with all items that depend on it in the graph."""
        if key not in self.data:
            return
        del self.data[key]
        # Find all components that used it and invalidate their results
        for cname in self.components:
            if key in self.depends[cname]:
                # Recursively drop everything downstream of the removed key.
                for downstream_key in self.provides[cname]:
                    self.invalidate(downstream_key)
    def __setitem__(self, key, value):
        # Manually injected values are stored directly; no invalidation happens here.
        self.data[key] = value
    def __getitem__(self, key):
        # Lazily compute the value (and its dependencies) on first access.
        self._compute(key)
        return self.data[key]
    def _compute(self, key):
        """Ensure `key` is present in self.data, running its producer component
        (and, recursively, the producers of its dependencies) if necessary."""
        if key not in self.data:
            cname = self.whoprovides[key]
            # Make all inputs of the producing component available first.
            for d in self.depends[cname]:
                self._compute(d)
            inputs = [self.data[d] for d in self.depends[cname]]
            results = self.components[cname](*inputs)
            if len(self.provides[cname]) == 1:
                # Single-output components return the value itself, not a 1-tuple.
                self.data[self.provides[cname][0]] = results
            else:
                for k, v in zip(self.provides[cname], results):
                    self.data[k] = v
|
konstantint/PassportEye | passporteye/util/pipeline.py | Pipeline.invalidate | python | def invalidate(self, key):
if key not in self.data:
return
del self.data[key]
# Find all components that used it and invalidate their results
for cname in self.components:
if key in self.depends[cname]:
for downstream_key in self.provides[cname]:
self.invalidate(downstream_key) | Remove the given data item along with all items that depend on it in the graph. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L86-L96 | [
"def invalidate(self, key):\n \"\"\"Remove the given data item along with all items that depend on it in the graph.\"\"\"\n if key not in self.data:\n return\n del self.data[key]\n\n # Find all components that used it and invalidate their results\n for cname in self.components:\n if key... | class Pipeline(object):
"""
The computation performed in order to extract the information from an image
is essentially a list of steps of various complexity, where each step uses
results of the previous steps and introduces its own results.
Although this is nothing more than a standard sequential program, it seems to be somewhat
more convenient sometimes to regard it as a "pipeline" consisting of pluggable "components",
where each component explicitly specifies what inputs it needs and what outputs it produces,
and the workflow engine wires up the inputs to the outputs.
This class offers provides a simple implementation of such a pipeline.
It keeps track of a dictionary of values that were already computed, a dictionary of
"components" which know how to compute other values, and routes item accesses to computations automatically.
>>> a = Pipeline()
>>> a.add_component('1', lambda: 1, ['a'], [])
>>> a.add_component('2', lambda: 2, ['b'], [])
>>> a.add_component('s,d', lambda x,y: (x+y, x-y), ['c', 'd'], ['a', 'b'])
>>> a.add_component('sd', lambda x,y: (x+y, x-y), ['e'], ['a', 'b'])
>>> a['c']
3
>>> a['d']
-1
>>> a['e']
(3, -1)
>>> a.replace_component('1', lambda: 2, ['a'], [])
>>> a['e']
(4, 0)
>>> a['d']
0
"""
    def __init__(self):
        """Create an empty pipeline with no components and no computed values."""
        self.data = dict() # Maps key -> data item.
        self.components = dict() # Maps name -> component
        self.provides = dict() # Component name -> provides list
        self.depends = dict() # Component name -> depends list
        self.whoprovides = dict() # key -> component name
        # Expose the raw data dict and the pipeline itself under reserved keys,
        # so components may list '__data__' or '__pipeline__' in their depends.
        self.data['__data__'] = self.data
        self.data['__pipeline__'] = self
    def add_component(self, name, callable, provides=None, depends=None):
        """
        Add a given callable to a list of components. The provides and depends are lists of strings, specifying what
        keys the component computes and what keys it requires to be present. If those are not given, the callable must
        have fields __provides__ and __depends__.
        """
        provides = provides or getattr(callable, '__provides__', [])
        depends = depends or getattr(callable, '__depends__', [])
        # Each key may be produced by at most one component.
        for p in provides:
            if p in self.whoprovides:
                raise Exception("There is already a component that provides %s" % p)
        self.provides[name] = provides
        self.depends[name] = depends
        self.components[name] = callable
        # Reverse mapping used by _compute to locate the producer of a key.
        for p in provides:
            self.whoprovides[p] = name
    def remove_component(self, name):
        """Removes an existing component with a given name, invalidating all the values computed by
        the previous component."""
        if name not in self.components:
            raise Exception("No component named %s" % name)
        del self.components[name]
        del self.depends[name]
        for p in self.provides[name]:
            del self.whoprovides[p]
            # Drop the cached value (and its dependents) produced by the removed component.
            self.invalidate(p)
        del self.provides[name]
    def replace_component(self, name, callable, provides=None, depends=None):
        """Changes an existing component with a given name, invalidating all the values computed by
        the previous component and its successors."""
        # Remove-then-add keeps the bookkeeping dicts consistent and triggers
        # invalidation of everything the old component produced.
        self.remove_component(name)
        self.add_component(name, callable, provides, depends)
    def __setitem__(self, key, value):
        # Manually injected values are stored directly; no invalidation happens here.
        self.data[key] = value
    def __getitem__(self, key):
        # Lazily compute the value (and its dependencies) on first access.
        self._compute(key)
        return self.data[key]
    def _compute(self, key):
        """Ensure `key` is present in self.data, running its producer component
        (and, recursively, the producers of its dependencies) if necessary."""
        if key not in self.data:
            cname = self.whoprovides[key]
            # Make all inputs of the producing component available first.
            for d in self.depends[cname]:
                self._compute(d)
            inputs = [self.data[d] for d in self.depends[cname]]
            results = self.components[cname](*inputs)
            if len(self.provides[cname]) == 1:
                # Single-output components return the value itself, not a 1-tuple.
                self.data[self.provides[cname][0]] = results
            else:
                for k, v in zip(self.provides[cname], results):
                    self.data[k] = v
|
konstantint/PassportEye | passporteye/util/ocr.py | ocr | python | def ocr(img, mrz_mode=True, extra_cmdline_params=''):
input_file_name = '%s.bmp' % _tempnam()
output_file_name_base = '%s' % _tempnam()
output_file_name = "%s.txt" % output_file_name_base
try:
# Prevent annoying warning about lossy conversion to uint8
if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1:
img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999
img = img.astype(np.uint8)
imwrite(input_file_name, img)
if mrz_mode:
# NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist
config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><"
" -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params)
else:
config = "{}".format(extra_cmdline_params)
pytesseract.run_tesseract(input_file_name,
output_file_name_base,
'txt',
lang=None,
config=config)
if sys.version_info.major == 3:
f = open(output_file_name, encoding='utf-8')
else:
f = open(output_file_name)
try:
return f.read().strip()
finally:
f.close()
finally:
pytesseract.cleanup(input_file_name)
pytesseract.cleanup(output_file_name) | Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
:param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
:param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
"best known" configuration at the moment.
"--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
to work better than the new LSTM-based one. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/ocr.py#L16-L64 | [
"def _tempnam():\n '''TODO: Use the with(..) version for auto-deletion?'''\n tmpfile = tempfile.NamedTemporaryFile(prefix=\"tess_\")\n return tmpfile.name\n"
] | '''
PassportEye::Util: Interface between SKImage and the PyTesseract OCR
NB: You must have the "tesseract" tool present in your path for this to work.
Author: Konstantin Tretyakov
License: MIT
'''
import sys
import tempfile
import numpy as np
from imageio import imwrite
from pytesseract import pytesseract
def ocr(img, mrz_mode=True, extra_cmdline_params=''):
    """Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
    This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
    In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
    :param img: image as a numpy ndarray, as used throughout skimage.
    :param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
    When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
    :param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
    "best known" configuration at the moment.
    "--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
    to work better than the new LSTM-based one.
    :return: the text recognized by tesseract, stripped of surrounding whitespace.
    """
    input_file_name = '%s.bmp' % _tempnam()
    output_file_name_base = '%s' % _tempnam()
    # Tesseract appends '.txt' to the output base name it is given.
    output_file_name = "%s.txt" % output_file_name_base
    try:
        # Prevent annoying warning about lossy conversion to uint8
        if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1:
            # Rescale [0, 1] floats to the full 0..255 range before the uint8 cast.
            img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999
            img = img.astype(np.uint8)
        imwrite(input_file_name, img)
        if mrz_mode:
            # NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist
            config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><"
                      " -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params)
        else:
            config = "{}".format(extra_cmdline_params)
        pytesseract.run_tesseract(input_file_name,
                                  output_file_name_base,
                                  'txt',
                                  lang=None,
                                  config=config)
        # Tesseract writes UTF-8; on Python 3 the encoding must be given explicitly.
        if sys.version_info.major == 3:
            f = open(output_file_name, encoding='utf-8')
        else:
            f = open(output_file_name)
        try:
            return f.read().strip()
        finally:
            f.close()
    finally:
        # Always clean up both temporary files, even if tesseract failed.
        pytesseract.cleanup(input_file_name)
        pytesseract.cleanup(output_file_name)
def _tempnam():
    '''Return the name of a fresh temporary file prefixed "tess_".
    NOTE(review): the NamedTemporaryFile object is discarded on return; on CPython
    that closes and deletes the file immediately, leaving only the unique name
    for tesseract to (re)create. TODO: Use the with(..) version for auto-deletion?
    '''
    tmpfile = tempfile.NamedTemporaryFile(prefix="tess_")
    return tmpfile.name
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.approx_equal | python | def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol | Method mainly useful for testing | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L49-L52 | null | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
    @property
    def cx(self):
        # x coordinate of the box center.
        return self.center[0]
    @property
    def cy(self):
        # y coordinate of the box center.
        return self.center[1]
    @property
    def area(self):
        # Area of the (unrotated) rectangle.
        return self.width * self.height
    def rotated(self, rotation_center, angle):
        """Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
        >>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
        """
        # 2x2 rotation matrix; rot.T (applied below) rotates counterclockwise by `angle`.
        rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
        # NOTE(review): np.asfarray was removed in NumPy 2.0 -- np.asarray(..., dtype=float)
        # is the forward-compatible equivalent; confirm before upgrading NumPy.
        t = np.asfarray(rotation_center)
        new_c = np.dot(rot.T, (self.center - t)) + t
        # The box's own angle accumulates and is normalized into [0, 2*pi).
        return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
    def as_poly(self, margin_width=0, margin_height=0):
        """Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
        :param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
        :param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
        >>> RotatedBox([0, 0], 4, 2, 0).as_poly()
        array([[-2., -1.],
               [ 2., -1.],
               [ 2.,  1.],
               [-2.,  1.]])
        >>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
        array([[-0.707..., -2.121...],
               [ 2.121...,  0.707...],
               [ 0.707...,  2.121...],
               [-2.121..., -0.707...]])
        >>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
        array([[ 1., -2.],
               [ 1.,  2.],
               [-1.,  2.],
               [-1., -2.]])
        >>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
        array([[ 1., -2.],
               [ 1.,  2.],
               [-1.,  2.],
               [-1., -2.]])
        """
        # Half-extent vectors along the box's width direction (`angle`) and the
        # perpendicular height direction, inflated by the requested margins.
        v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
        v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
        c = np.array([self.cx, self.cy])
        # Corners: lower-left, lower-right, upper-right, upper-left (counterclockwise).
        return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
    def plot(self, mode='image', ax=None, **kwargs):
        """Visualize the box on a matplotlib plot.
        :param mode: How should the box coordinates and angle be interpreted.
            - mode `'image'` corresponds to the situation where x coordinate of the box
              denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
              and y coordinate of the box corresponds to the "column of an image",
              (ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
            - mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
        :param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
        :param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
        :return: The created Polygon object.
        """
        ax = ax or plt.gca()
        poly = self.as_poly()
        if mode == 'image':
            # Swap the columns so that box-x (image row) is plotted on the Y axis.
            poly = poly[:,[1,0]]
        # Caller-supplied kwargs win; these are only fallback defaults.
        kwargs.setdefault('fill', False)
        kwargs.setdefault('color', 'r')
        kwargs.setdefault('lw', 2)
        p = patches.Polygon(poly, **kwargs)
        ax.add_patch(p)
        return p
    def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
        """Extracts the contents of this box from a given image.
        For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
        Returns an image with dimensions height*scale x width*scale.
        Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
        and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
        :param img: a numpy ndarray suitable for image processing via skimage.
        :param scale: the RotatedBox is scaled by this value before performing the extraction.
            This is necessary when, for example, the location of a particular feature is determined using a smaller image,
            yet then the corresponding area needs to be extracted from the original, larger image.
            The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
        :param margin_width: The margin that should be added to the width dimension of the box from each side.
            This value is given wrt actual box dimensions (i.e. not scaled).
        :param margin_height: The margin that should be added to the height dimension of the box from each side.
        :return: a numpy ndarray, corresponding to the extracted region (aligned straight).
        TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
        """
        # Rotate so the box's angle becomes vertical (np.pi/2); skimage wants degrees.
        rotate_by = (np.pi/2 - self.angle)*180/np.pi
        img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
        # The resizeable transform will shift the resulting image somewhat wrt original coordinates.
        # When we cut out the box we will compensate for this shift.
        shift_c, shift_r = self._compensate_rotation_shift(img, scale)
        # Clamp the top/left edges at 0 so the margins never yield negative indices.
        r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
        r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
        c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
        c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
        return img_rotated[r1:r2, c1:c2]
    def _compensate_rotation_shift(self, img, scale):
        """This is an auxiliary method used by extract_from_image.
        It is needed due to particular specifics of the skimage.transform.rotate implementation.
        Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
        Thus when we need to cut out the box from the image, we need to account for this shift.
        We do this by repeating the computation from skimage.transform.rotate here.
        TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
        and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
        :return: (column_shift, row_shift) tuple of float offsets introduced by resize=True.
        """
        # Rebuild the same rotation-about-center transform that rotate() applies:
        # translate center to origin, rotate, translate back.
        ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
        tform1 = transform.SimilarityTransform(translation=ctr)
        tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
        tform3 = transform.SimilarityTransform(translation=-ctr)
        tform = tform3 + tform2 + tform1
        rows, cols = img.shape[0], img.shape[1]
        # Map the image corners through the inverse transform to find the
        # bounding box of the rotated image.
        corners = np.array([
            [0, 0],
            [0, rows - 1],
            [cols - 1, rows - 1],
            [cols - 1, 0]
        ])
        corners = tform.inverse(corners)
        minc = corners[:, 0].min()
        minr = corners[:, 1].min()
        maxc = corners[:, 0].max()
        maxr = corners[:, 1].max()
        # SKImage 0.11 version
        out_rows = maxr - minr + 1
        out_cols = maxc - minc + 1
        # fit output image in new shape
        return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.rotated | python | def rotated(self, rotation_center, angle):
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2)) | Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1) | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L54-L62 | null | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
    def __init__(self, center, width, height, angle, points=None):
        """Creates a new RotatedBox.
        :param points: This parameter may be used to indicate the set of points used to create the box.
        """
        # NOTE(review): np.asfarray was removed in NumPy 2.0;
        # np.asarray(center, dtype=float) is the forward-compatible equivalent.
        self.center = np.asfarray(center)
        self.width = width
        self.height = height
        self.angle = angle
        self.points = points
    def __repr__(self):
        # Mirrors the constructor arguments for easy debugging.
        return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
    @property
    def cx(self):
        # x coordinate of the box center.
        return self.center[0]
    @property
    def cy(self):
        # y coordinate of the box center.
        return self.center[1]
    @property
    def area(self):
        # Area of the (unrotated) rectangle.
        return self.width * self.height
    def approx_equal(self, center, width, height, angle, tol=1e-6):
        "Method mainly useful for testing"
        # Componentwise comparison within absolute tolerance `tol`.
        return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
               abs(self.height - height) < tol and abs(self.angle - angle) < tol
    def as_poly(self, margin_width=0, margin_height=0):
        """Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
        :param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
        :param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
        >>> RotatedBox([0, 0], 4, 2, 0).as_poly()
        array([[-2., -1.],
               [ 2., -1.],
               [ 2.,  1.],
               [-2.,  1.]])
        >>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
        array([[-0.707..., -2.121...],
               [ 2.121...,  0.707...],
               [ 0.707...,  2.121...],
               [-2.121..., -0.707...]])
        >>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
        array([[ 1., -2.],
               [ 1.,  2.],
               [-1.,  2.],
               [-1., -2.]])
        >>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
        array([[ 1., -2.],
               [ 1.,  2.],
               [-1.,  2.],
               [-1., -2.]])
        """
        # Half-extent vectors along the box's width direction (`angle`) and the
        # perpendicular height direction, inflated by the requested margins.
        v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
        v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
        c = np.array([self.cx, self.cy])
        # Corners: lower-left, lower-right, upper-right, upper-left (counterclockwise).
        return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
    def plot(self, mode='image', ax=None, **kwargs):
        """Visualize the box on a matplotlib plot.
        :param mode: How should the box coordinates and angle be interpreted.
            - mode `'image'` corresponds to the situation where x coordinate of the box
              denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
              and y coordinate of the box corresponds to the "column of an image",
              (ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
            - mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
        :param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
        :param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
        :return: The created Polygon object.
        """
        ax = ax or plt.gca()
        poly = self.as_poly()
        if mode == 'image':
            # Swap the columns so that box-x (image row) is plotted on the Y axis.
            poly = poly[:,[1,0]]
        # Caller-supplied kwargs win; these are only fallback defaults.
        kwargs.setdefault('fill', False)
        kwargs.setdefault('color', 'r')
        kwargs.setdefault('lw', 2)
        p = patches.Polygon(poly, **kwargs)
        ax.add_patch(p)
        return p
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
def _compensate_rotation_shift(self, img, scale):
"""This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
"""
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.as_poly | python | def as_poly(self, margin_width=0, margin_height=0):
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert]) | Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]]) | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L64-L94 | null | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
@property
def cx(self):
return self.center[0]
@property
def cy(self):
return self.center[1]
@property
def area(self):
return self.width * self.height
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
def plot(self, mode='image', ax=None, **kwargs):
"""Visualize the box on a matplotlib plot.
:param mode: How should the box coordinates and angle be interpreted.
- mode `'image'` corresponds to the situation where x coordinate of the box
denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
and y coordinate of the box corresponds to the "column of an image",
(ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
- mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
:param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
:param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
:return: The created Polygon object.
"""
ax = ax or plt.gca()
poly = self.as_poly()
if mode == 'image':
poly = poly[:,[1,0]]
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'r')
kwargs.setdefault('lw', 2)
p = patches.Polygon(poly, **kwargs)
ax.add_patch(p)
return p
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
def _compensate_rotation_shift(self, img, scale):
"""This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
"""
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
konstantint/PassportEye | passporteye/util/geometry.py | RotatedBox.plot | python | def plot(self, mode='image', ax=None, **kwargs):
ax = ax or plt.gca()
poly = self.as_poly()
if mode == 'image':
poly = poly[:,[1,0]]
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'r')
kwargs.setdefault('lw', 2)
p = patches.Polygon(poly, **kwargs)
ax.add_patch(p)
return p | Visualize the box on a matplotlib plot.
:param mode: How should the box coordinates and angle be interpreted.
- mode `'image'` corresponds to the situation where x coordinate of the box
denotes the "row of an image" (ie. the Y coordinate of the plot, arranged downwards)
and y coordinate of the box corresponds to the "column of an image",
(ie X coordinate of the plot). In other words, box's x goes downwards and y - rightwards.
- mode `'math'` corresponds to the "mathematics" situation where box's x and y correspond to the X and Y axes of the plot.
:param ax: the matplotlib axis to draw on. If unspecified, the current axis is used.
:param kwargs: arguments passed to the matplotlib's `Polygon` patch object. By default, fill is set to False, color to red and lw to 2.
:return: The created Polygon object. | train | https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L96-L117 | [
"def as_poly(self, margin_width=0, margin_height=0):\n \"\"\"Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.\n\n :param margin_width: The additional \"margin\" that will be added to the box along its width dimension (from b... | class RotatedBox(object):
"""
RotatedBox represents a rectangular box centered at (cx,cy) with dimensions width x height,
rotated by angle radians counterclockwise.
>>> RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]])
RotatedBox(cx=1.0, cy=0.5, width=2.0, height=1.0, angle=0.0)
"""
def __init__(self, center, width, height, angle, points=None):
"""Creates a new RotatedBox.
:param points: This parameter may be used to indicate the set of points used to create the box.
"""
self.center = np.asfarray(center)
self.width = width
self.height = height
self.angle = angle
self.points = points
def __repr__(self):
return "RotatedBox(cx={0}, cy={1}, width={2}, height={3}, angle={4})".format(self.cx, self.cy, self.width, self.height, self.angle)
@property
def cx(self):
return self.center[0]
@property
def cy(self):
return self.center[1]
@property
def area(self):
return self.width * self.height
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
def _compensate_rotation_shift(self, img, scale):
"""This is an auxiliary method used by extract_from_image.
It is needed due to particular specifics of the skimage.transform.rotate implementation.
Namely, when you use rotate(... , resize=True), the rotated image is rotated and shifted by certain amount.
Thus when we need to cut out the box from the image, we need to account for this shift.
We do this by repeating the computation from skimage.transform.rotate here.
TODO: This makes the code uncomfortably coupled to SKImage (e.g. this logic is appropriate for skimage 0.12.1, but not for 0.11,
and no one knows what happens in later versions). A solution would be to use skimage.transform.warp with custom settings, but we can think of it later.
"""
ctr = np.asarray([self.center[1]*scale, self.center[0]*scale])
tform1 = transform.SimilarityTransform(translation=ctr)
tform2 = transform.SimilarityTransform(rotation=np.pi/2 - self.angle)
tform3 = transform.SimilarityTransform(translation=-ctr)
tform = tform3 + tform2 + tform1
rows, cols = img.shape[0], img.shape[1]
corners = np.array([
[0, 0],
[0, rows - 1],
[cols - 1, rows - 1],
[cols - 1, 0]
])
corners = tform.inverse(corners)
minc = corners[:, 0].min()
minr = corners[:, 1].min()
maxc = corners[:, 0].max()
maxr = corners[:, 1].max()
# SKImage 0.11 version
out_rows = maxr - minr + 1
out_cols = maxc - minc + 1
# fit output image in new shape
return ((cols - out_cols) / 2., (rows - out_rows) / 2.)
@staticmethod
def from_points(points, box_type='bb'):
"""
Interpret a given point cloud as a RotatedBox, using PCA to determine the potential orientation (the longest component becomes width)
This is basically an approximate version of a min-area-rectangle algorithm.
TODO: Test whether using a true min-area-rectangle algorithm would be more precise or faster.
:param points: An n x 2 numpy array of coordinates.
:param box_type: The kind of method used to estimate the "box".
Possible values:
- `'bb'`, denoting the "bounding box" approach (min/max coordinates of the points correspond to box limits)
- `'mrz`, denoting a slightly modified technique, suited for MRZ zone detection from contour images.
Here the assumption is that the upper and lower bounds of the box are better estimated as the
10% and 90% quantile of the corresponding coordinates (rather than 0% and 100%, i.e. min and max).
This helps against accidental noise in the contour.
The `'mrz'` correction is only applied when there are at least 10 points in the set.
:returns: a RotatedBox, bounding the given set of points, oriented according to the principal components.
>>> RotatedBox.from_points([[0,0]])
RotatedBox(cx=0.0, cy=0.0, width=0.0, height=0.0, angle=0.0)
>>> assert RotatedBox.from_points([[0,0], [1,1], [2,2]]).approx_equal([1, 1], np.sqrt(8), 0, np.pi/4)
>>> assert RotatedBox.from_points([[0,0], [1,1], [0,1], [1,0]]).approx_equal([0.5, 0.5], 1, 1, 0.0) # The angle is rather arbitrary here
>>> assert RotatedBox.from_points([[0,0], [2,1], [0,1], [2,0]]).approx_equal([1, 0.5], 2, 1, 0)
>>> assert RotatedBox.from_points([[0,0], [2,4], [0,4], [2,0]]).approx_equal([1, 2], 4, 2, np.pi/2)
>>> assert RotatedBox.from_points([[0,0], [1,1.5], [2,0]]).approx_equal([1, 0.75], 2, 1.5, 0)
>>> assert RotatedBox.from_points([[0,0], [0,1], [1,1]]).approx_equal([0.25, 0.75], np.sqrt(2), np.sqrt(2)/2, np.pi/4)
"""
points = np.asfarray(points)
if points.shape[0] == 1:
return RotatedBox(points[0], width=0.0, height=0.0, angle=0.0, points=points)
m = PCA(2).fit(points)
# Find the angle
angle = (np.arctan2(m.components_[0,1], m.components_[0,0]) % np.pi)
if abs(angle - np.pi) < angle:
# Here the angle is always between -pi and pi
# If the principal component happened to be oriented so that the angle happens to be > pi/2 by absolute value,
# we flip the direction
angle = angle - np.pi if angle > 0 else angle + np.pi
points_transformed = m.transform(points)
ll = np.min(points_transformed, 0)
ur = np.max(points_transformed, 0)
wh = ur - ll
# Now compute and return the bounding box
if box_type == 'bb' or (box_type == 'mrz' and points.shape[0] < 10):
# We know that if we rotate the points around m.mean_, we get a box with bounds ur and ll
# The center of this box is (ur+ll)/2 + mean, which is not the same as the mean,
# hence to get the center of the original box we need to "unrotate" this box back.
return RotatedBox(np.dot(m.components_.T, (ll+ur)/2) + m.mean_, width=wh[0], height=wh[1], angle=angle, points=points)
elif box_type == 'mrz':
# When working with MRZ detection from contours, we may have minor "bumps" in the contour,
# that should be ignored at least along the long ("horizontal") side.
# To do that, we will use 10% and 90% quantiles as the bounds of the box instead of the max and min.
# We drop all points which lie beyond and simply repeat the estimation (now 'bb-style') without them.
h_coord = sorted(points_transformed[:,1])
n = len(h_coord)
bottom, top = h_coord[n/10], h_coord[n*9/10]
valid_points = np.logical_and(points_transformed[:,1]>=bottom, points_transformed[:,1]<=top)
rb = RotatedBox.from_points(points[valid_points, :], 'bb')
rb.points = points
return rb
else:
raise ValueError("Unknown parameter value: box_type=%s" % box_type)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.