function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def require_attr(node, attr):
"""
Checks for the existence of the named attribute on the given
node and raises an exception if it's not there. | markgw/jazzparser | [
5,
1,
5,
1,
1368367354
] |
def display_migration_details():
print "This migration script adds a UUID column to workflows" | mikel-egana-aranguren/SADI-Galaxy-Docker | [
1,
3,
1,
1,
1417087373
] |
def invert_mapping(mapping):
""" Invert a mapping dictionary
Parameters
----------
mapping: dict
Returns
-------
"""
inverted_mapping = defaultdict(list)
for key, value in mapping.items():
if isinstance(value, (list, set)):
for element in value:
inverted_mapping[element].append(key)
else:
inverted_mapping[value].append(key)
return inverted_mapping | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def get_annotation_to_item_map(list_of_items):
""" Find model items with overlapping annotations
Parameters
----------
item
list_of_items
Returns
-------
"""
annotation_to_item = defaultdict(list)
for item in list_of_items:
for annotation in item.annotation:
annotation_to_item[annotation].append(item)
return annotation_to_item | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def check_charge_balance(metabolites):
""" Check charge balance of the reaction """
# Check that charge is set for all metabolites
if not all(x.charge is not None for x in metabolites.keys()):
return None
else:
return sum([metabolite.charge * coefficient for metabolite, coefficient in iteritems(metabolites)]) | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def reaction_string(stoichiometry, use_metabolite_names=True):
"""Generate the reaction string """
attrib = "id"
if use_metabolite_names:
attrib = "name"
educts = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value < 0.]
products = [(str(abs(value)), getattr(key, attrib)) for key, value in iteritems(stoichiometry) if value > 0.]
return " + ".join([" ".join(x) for x in educts])+" --> "+" + ".join([" ".join(x) for x in products]) | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def reaction_balance(metabolites):
""" Check the balancing status of the stoichiometry
Parameters
----------
metabolites : dict - Dictionary of metabolites with stoichiometric coefficnets
Returns
-------
charge_str : str or bool
element_str : str or bool
balanced : str or bool
"""
element_result = check_element_balance(metabolites)
charge_result = check_charge_balance(metabolites)
if charge_result is None:
charge_str = "Unknown"
elif charge_result == 0:
charge_str = "OK"
else:
charge_str = str(charge_result)
if not all(x.formula for x in metabolites.keys()):
element_str = "Unknown"
elif element_result == {}:
element_str = "OK"
else:
element_str = unbalanced_metabolites_to_string(element_result)
if len(metabolites) < 2:
balanced = None
elif element_str == "OK" and charge_str == "OK":
balanced = True
elif element_str not in ("OK", "Unknown") or charge_str not in ("OK", "Unknown"):
balanced = False
else:
balanced = "Unknown"
return charge_str, element_str, balanced | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def new_location(new_index, n):
""" Find new location
Iteratively follow pointers to new location.
Parameters
----------
new_index: list, Should be initialized from range
n: int
Returns
-------
int
"""
while new_index[n] != n:
n = new_index[n]
return n | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def text_is_different(input, state):
""" Check if the input is different from output
Test if the input is different to the output
while ignoring the difference between None
and empty string.
Parameters
----------
input: str or None
state: str
Returns
-------
bool
"""
if not input and not state:
return False
else:
return input != state | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def restore_geometry(object, state):
""" Restore the geometry of an object
Parameters
----------
object: Object which should be restored
state: State from settings
Returns
-------
"""
if state:
object.restoreGeometry(state) | JuBra/GEMEditor | [
1,
3,
1,
5,
1493549658
] |
def __init__(self,age):
self.age = age | tuxfux-hlp-notes/python-batches | [
5,
15,
5,
2,
1481601578
] |
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
self.dp = [[0] * (n + 1) for _ in range(n + 1)]
return self.helper(1, n) | dichen001/Go4Jobs | [
1,
1,
1,
2,
1474399542
] |
def _get_full_queue(q):
output = []
while not q.empty():
output.append(q.get_nowait())
return u"\n".join(output) | markgw/pimlico | [
6,
1,
6,
13,
1475829569
] |
def process_document(worker, archive, filename, doc):
interface = worker.interface
# Resolve coreference, passing in PTB parse trees as strings
coref_result = interface.gateway.entry_point.resolveCoreference(doc)
if coref_result is None:
# Coref failed, return an invalid doc
# Fetch all output from stdout and stderr
stdout = _get_full_queue(interface.stdout_queue)
stderr = _get_full_queue(interface.stderr_queue)
return InvalidDocument(
u"opennlp_coref", u"Error running coref\nJava stdout:\n%s\nJava stderr:\n%s" % (stdout, stderr)
)
outputs = []
for output_name in worker.info.output_names:
if output_name == "coref":
# Pull all of the information out of the java objects to get ready to store as JSON
outputs.append([Entity.from_java_object(e) for e in get_field(coref_result, "entities")])
elif output_name == "tokenized":
outputs.append([sentence.split() for sentence in get_field(coref_result, "tokenizedSentences")])
elif output_name == "pos":
tokens = [sentence.split() for sentence in get_field(coref_result, "tokenizedSentences")]
pos_tags = [sentence.split() for sentence in get_field(coref_result, "posTags")]
outputs.append(list(zip(tokens, pos_tags)))
elif output_name == "parse":
outputs.append(list(coref_result.getParseTrees()))
return tuple(outputs) | markgw/pimlico | [
6,
1,
6,
13,
1475829569
] |
def worker_set_up(worker):
worker.interface = start_interface(worker.info) | markgw/pimlico | [
6,
1,
6,
13,
1475829569
] |
def preprocess(executor):
if executor.info.options["timeout"] is not None:
executor.log.info("Allow up to %d seconds for each coref job" % executor.info.options["timeout"])
# Don't parse the parse trees, since OpenNLP does that for us, straight from the text
executor.input_corpora[0].raw_data = True | markgw/pimlico | [
6,
1,
6,
13,
1475829569
] |
def _(text):
"""
A noop underscore function that marks strings for extraction.
"""
return text | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def course_key(self):
"""
:return: int course id
NB: The goal is to move this XBlock out of edx-platform, and so we use
scope_ids.usage_id instead of runtime.course_id so that the code will
continue to work with workbench-based testing.
"""
return getattr(self.scope_ids.usage_id, 'course_key', None) | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def django_user(self):
"""
Returns django user associated with user currently interacting
with the XBlock.
"""
user_service = self.runtime.service(self, 'user')
if not user_service:
return None
return user_service._django_user # pylint: disable=protected-access | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def get_translation_content():
try:
return 'js/i18n/{lang}/djangojs.js'.format(
lang=get_language(),
)
except IOError:
return 'js/i18n/en/djangojs.js' | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def vendor_js_dependencies():
"""
Returns list of vendor JS files that this XBlock depends on.
The helper function that it uses to obtain the list of vendor JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
vendor_dependencies = get_js_dependencies('discussion_vendor')
base_vendor_dependencies = [
'edx-ui-toolkit/js/utils/global-loader.js',
'edx-ui-toolkit/js/utils/string-utils.js',
'edx-ui-toolkit/js/utils/html-utils.js',
'js/vendor/URI.min.js',
'js/vendor/jquery.leanModal.js'
]
return base_vendor_dependencies + vendor_dependencies | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def js_dependencies():
"""
Returns list of JS files that this XBlock depends on.
The helper function that it uses to obtain the list of JS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
return get_js_dependencies('discussion') | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def css_dependencies():
"""
Returns list of CSS files that this XBlock depends on.
The helper function that it uses to obtain the list of CSS files
works in conjunction with the Django pipeline to ensure that in development mode
the files are loaded individually, but in production just the single bundle is loaded.
"""
if get_language_bidi():
return get_css_dependencies('style-inline-discussion-rtl')
else:
return get_css_dependencies('style-inline-discussion') | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def has_permission(self, permission):
"""
Encapsulates lms specific functionality, as `has_permission` is not
importable outside of lms context, namely in tests.
:param user:
:param str permission: Permission
:rtype: bool
"""
# normal import causes the xmodule_assets command to fail due to circular import - hence importing locally
from lms.djangoapps.discussion.django_comment_client.permissions import has_permission
return has_permission(self.django_user, permission, self.course_key) | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def author_view(self, context=None): # pylint: disable=unused-argument
"""
Renders author view for Studio.
"""
return self.studio_view_fragment() | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def studio_view_fragment(self):
"""
Returns a fragment for rendering this block in Studio.
"""
fragment = Fragment()
fragment.add_content(self.runtime.render_template(
'discussion/_discussion_inline_studio.html',
{'discussion_id': self.discussion_id}
))
return fragment | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def parse_xml(cls, node, runtime, keys, id_generator):
"""
Parses OLX into XBlock.
This method is overridden here to allow parsing legacy OLX, coming from discussion XModule.
XBlock stores all the associated data, fields and children in a XML element inlined into vertical XML file
XModule stored only minimal data on the element included into vertical XML and used a dedicated "discussion"
folder in OLX to store fields and children. Also, some info was put into "policy.json" file.
If no external data sources are found (file in "discussion" folder), it is exactly equivalent to base method
XBlock.parse_xml. Otherwise this method parses file in "discussion" folder (known as definition_xml), applies
policy.json and updates fields accordingly.
"""
block = super(DiscussionXBlock, cls).parse_xml(node, runtime, keys, id_generator)
cls._apply_translations_to_node_attributes(block, node)
cls._apply_metadata_and_policy(block, node, runtime)
return block | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def _apply_translations_to_node_attributes(cls, block, node):
"""
Applies metadata translations for attributes stored on an inlined XML element.
"""
for old_attr, target_attr in six.iteritems(cls.metadata_translations):
if old_attr in node.attrib and hasattr(block, target_attr):
setattr(block, target_attr, node.attrib[old_attr]) | edx-solutions/edx-platform | [
12,
19,
12,
9,
1391522577
] |
def lookupCharge(self, people_tree, ds_charge_id, institution=None, moment=None):
"""
look for the correct open municipio charge, or return None
starting from an internal, domain-specific, charge id
using the mapping in people_tree (lxml.etree)
if the moment parameter is not passed, then current charges are looked up
"""
try:
people_charges = people_tree.xpath(
'//om:Person[@id="%s"]' % ds_charge_id,
namespaces=NS
)
if len(people_charges):
om_id = people_charges[0].get('om_id')
if om_id is None:
self.logger.warning("charge with id %s has no om_id (past charge?). Skipping." % ds_charge_id)
return None
if institution is None:
charge_type = people_charges[0].get('charge')
if charge_type is None:
self.logger.warning("charge with id %s has no charge attribute. Skipping." % ds_charge_id)
return None
# institution is grabbed from charge attribute, in acts import
# since mayor and deputies may sign acts, not only counselor
if charge_type == 'counselor':
institution = municipality.council.as_institution
elif charge_type == 'deputy' or charge_type == 'firstdeputy':
institution = municipality.gov.as_institution
elif charge_type == 'mayor':
institution = municipality.mayor.as_institution
else:
self.logger.error("Warning: charge with id %s has wrong charge attribute %s. Skipping." %
(ds_charge_id, charge_type))
return None
try:
person = Person.objects.get(pk=int(om_id))
charge = person.get_current_charge_in_institution(institution, moment=moment)
self.logger.debug("id %s (%s) mapped to %s (%s)" %
(ds_charge_id, institution, person, charge))
return charge
except ObjectDoesNotExist:
self.logger.warning("could not find person or charge for id = %s (om_id=%s) (%s) in OM DB. Skipping." % (ds_charge_id, om_id, institution))
return None
except MultipleObjectsReturned:
self.logger.error("found more than one person or charge for id %s (om_id=%s) (%s) in OM DB. Skipping." % (ds_charge_id, om_id, institution))
return None
else:
self.logger.warning("could not find person for id %s in people XML file. Skipping." % ds_charge_id)
return None
except ObjectDoesNotExist:
self.logger.warning("could not find charge for %s in Open Municipio DB. Skipping." % ds_charge_id)
return None | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def get_row_dicts(cursor, query, params=()):
"""
Convert a sequence of row (record) tuples -- as returned by a call to Python DBAPI's ``cursor.connect()``
method -- to a list of row dicts keyed by column names. | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def create_table_schema(table_name, table_schema):
"""
Generate the SQL statement to execute in order to create a DB table. | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def lookup_person(self, external, provider):
return LookupPerson.lookup(external, provider) | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def lookup(external, provider, as_of):
if provider in ChargeMapCache.cache:
if external in ChargeMapCache.cache[provider]:
if as_of in ChargeMapCache.cache[provider][external]:
return ChargeMapCache.cache[provider][external][as_of] | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def update(external, provider, as_of, value):
if not provider in ChargeMapCache.cache:
ChargeMapCache.cache[provider] = {} | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def lookup_charge(self, external, provider, as_of=None):
# if already mapped, return the result from the cache | openpolis/open_municipio | [
50,
16,
50,
182,
1305817082
] |
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, general.warning(msg))
self.generic = "Generic OSRFramework exception." | i3visio/osrframework | [
751,
229,
751,
66,
1419991203
] |
def __init__(self, platform, *args, **kwargs):
msg = """
[*] Warning:\t{}. Details:
No valid credentials provided for '{}'.
Update the configuration file at: '{}'.
""".format(
self.__class__.__name__,
platform,
os.path.join(configuration.getConfigPath()["appPath"], "accounts.cfg"),
general.emphasis("-x " + platform)
)
OSRFrameworkException.__init__(self, general.warning(msg))
self.generic = "The credentials for some platforms where NOT provided." | i3visio/osrframework | [
751,
229,
751,
66,
1419991203
] |
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, "{}".format(general.error(msg)))
self.generic = "Generic OSRFramework error." | i3visio/osrframework | [
751,
229,
751,
66,
1419991203
] |
def __init__(self, platform, mode, *args, **kwargs):
msg = """
[*] Error:\t{}. Details:
The '{}' wrapper has tried to call 'self.do_{}(...)'.
The method seems be implemented wrongly or not implemented.""".format(
self.__class__.__name__,
platform,
mode
)
OSRFrameworkError.__init__(self, msg)
self.generic = "A wrapper has tried to launch a mode which is not yet implemented. This error should not be happening unless you have added a new method out of the standard ones for mailfy, phonefy, searchfy or usufy." | i3visio/osrframework | [
751,
229,
751,
66,
1419991203
] |
def _get_paypal_urls(self, cr, uid, environment, context=None):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
} | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _migrate_paypal_account(self, cr, uid, context=None):
""" COMPLETE ME """
cr.execute('SELECT id, paypal_account FROM res_company')
res = cr.fetchall()
for (company_id, company_paypal_account) in res:
if company_paypal_account:
company_paypal_ids = self.search(cr, uid, [('company_id', '=', company_id), ('provider', '=', 'paypal')], limit=1, context=context)
if company_paypal_ids:
self.write(cr, uid, company_paypal_ids, {'paypal_email_account': company_paypal_account}, context=context)
else:
paypal_view = self.pool['ir.model.data'].get_object(cr, uid, 'payment_paypal', 'paypal_acquirer_button')
self.create(cr, uid, {
'name': 'Paypal',
'provider': 'paypal',
'paypal_email_account': company_paypal_account,
'view_template_id': paypal_view.id,
}, context=context)
return True | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def paypal_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
paypal_tx_values = dict(tx_values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': acquirer.paypal_email_account,
'item_name': '%s: %s' % (acquirer.company_id.name, tx_values['reference']),
'item_number': tx_values['reference'],
'amount': tx_values['amount'],
'currency_code': tx_values['currency'] and tx_values['currency'].name or '',
'address1': partner_values['address'],
'city': partner_values['city'],
'country': partner_values['country'] and partner_values['country'].code or '',
'state': partner_values['state'] and (partner_values['state'].code or partner_values['state'].name) or '',
'email': partner_values['email'],
'zip': partner_values['zip'],
'first_name': partner_values['first_name'],
'last_name': partner_values['last_name'],
'return': '%s' % urlparse.urljoin(base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(base_url, PaypalController._cancel_url),
})
if acquirer.fees_active:
paypal_tx_values['handling'] = '%.2f' % paypal_tx_values.pop('fees', 0.0)
if paypal_tx_values.get('return_url'):
paypal_tx_values['custom'] = json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')})
return partner_values, paypal_tx_values | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _paypal_s2s_get_access_token(self, cr, uid, ids, context=None):
"""
Note: see # see http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
for explanation why we use Authorization header instead of urllib2
password manager
"""
res = dict.fromkeys(ids, False)
parameters = werkzeug.url_encode({'grant_type': 'client_credentials'})
for acquirer in self.browse(cr, uid, ids, context=context):
tx_url = self._get_paypal_urls(cr, uid, acquirer.environment)['paypal_rest_url']
request = urllib2.Request(tx_url, parameters)
# add other headers (https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/)
request.add_header('Accept', 'application/json')
request.add_header('Accept-Language', tools.config.defaultLang)
# add authorization header
base64string = base64.encodestring('%s:%s' % (
acquirer.paypal_api_username,
acquirer.paypal_api_password)
).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
request = urllib2.urlopen(request)
result = request.read()
res[acquirer.id] = json.loads(result).get('access_token')
request.close()
return res | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _paypal_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = 'Paypal: received data with missing reference (%s) or txn_id (%s)' % (reference, txn_id)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
error_msg += '; multiple order found'
_logger.error(error_msg)
raise ValidationError(error_msg)
return self.browse(cr, uid, tx_ids[0], context=context) | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _paypal_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('payment_status')
data = {
'acquirer_reference': data.get('txn_id'),
'paypal_txn_type': data.get('payment_type'),
'partner_reference': data.get('payer_id')
}
if status in ['Completed', 'Processed']:
_logger.info('Validated Paypal payment for tx %s: set as done' % (tx.reference))
data.update(state='done', date_validate=data.get('payment_date', fields.datetime.now()))
return tx.write(data)
elif status in ['Pending', 'Expired']:
_logger.info('Received notification for Paypal payment %s: set as pending' % (tx.reference))
data.update(state='pending', state_message=data.get('pending_reason', ''))
return tx.write(data)
else:
error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
data.update(state='error', state_message=error)
return tx.write(data) | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _paypal_try_url(self, request, tries=3, context=None):
""" Try to contact Paypal. Due to some issues, internal service errors
seem to be quite frequent. Several tries are done before considering
the communication as failed.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
done, res = False, None
while (not done and tries):
try:
res = urllib2.urlopen(request)
done = True
except urllib2.HTTPError as e:
res = e.read()
e.close()
if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':
_logger.warning('Failed contacting Paypal, retrying (%s remaining)' % tries)
tries = tries - 1
if not res:
pass
# raise openerp.exceptions.
result = res.read()
res.close()
return result | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def _paypal_s2s_get_invalid_parameters(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
invalid_parameters = []
return invalid_parameters | funkring/fdoo | [
1,
5,
1,
8,
1400249085
] |
def __repr__(self):
return 'NO_DEFAULT' | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, default=_NO_DEFAULT):
self._key = None
self._obj = None
self.default = default
self._creation_counter = _Filter._creation_counter
_Filter._creation_counter += 1 | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __and__(self, o):
if isinstance(o, type) and issubclass(o, _Filter):
o = o()
o.selector = self
return o | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __str__(self):
return self.__class__.__name__ | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def wraper(function):
def print_debug(self, value):
logger = getLogger('b2filters')
result = ''
outputvalue = value
if isinstance(value, list):
from lxml import etree
outputvalue = ''
first = True
for element in value:
if first:
first = False
else:
outputvalue += ', '
if isinstance(element, etree.ElementBase):
outputvalue += "%s" % etree.tostring(element, encoding=unicode)
else:
outputvalue += "%r" % element
if self._obj is not None:
result += "%s" % self._obj._random_id
if self._key is not None:
result += ".%s" % self._key
name = str(self)
result += " %s(%r" % (name, outputvalue)
for arg in self.__dict__:
if arg.startswith('_') or arg == u"selector":
continue
if arg == u'default' and getattr(self, arg) == _NO_DEFAULT:
continue
result += ", %s=%r" % (arg, getattr(self, arg))
result += u')'
logger.log(DEBUG_FILTERS, result)
res = function(self, value)
return res
return print_debug | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, default=_NO_DEFAULT):
super(Filter, self).__init__(default=default)
self.selector = selector | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def select(cls, selector, item, obj=None, key=None):
if isinstance(selector, basestring):
return item.xpath(selector)
elif isinstance(selector, _Filter):
selector._key = key
selector._obj = obj
return selector(item)
elif callable(selector):
return selector(item)
else:
return selector | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, value):
"""
This method have to be overrided by children classes.
"""
raise NotImplementedError() | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, elements):
if elements is not None:
return elements
else:
return self.default_or_raise(ParseError('Element %r not found' % self.selector)) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __call__(self, item):
link = self.select(self.selector, item, key=self._key, obj=self._obj)
return item.page.browser.async_open(link) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, name, selector=None):
super(Async, self).__init__()
self.selector = selector
self.name = name | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __call__(self, item):
result = item.loaders[self.name].result()
assert result.page is not None, 'The loaded url %s hasn\'t been matched by an URL object' % result.url
return self.selector(result.page.doc) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __call__(self, item):
base = self.select(self.base, item, obj=self._obj, key=self._key)
return self.selector(base) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, name, default=_NO_DEFAULT):
super(Env, self).__init__(default)
self.name = name | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, *names, **kwargs):
super(TableCell, self).__init__(**kwargs)
self.names = names | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, el):
if isinstance(el, (tuple, list)):
return u' '.join([self.filter(e) for e in el])
if el.text is None:
return self.default
else:
return unicode(el.text) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, symbols='', replace=[], children=True, newlines=True, normalize='NFC', **kwargs):
super(CleanText, self).__init__(selector, **kwargs)
self.symbols = symbols
self.toreplace = replace
self.children = children
self.newlines = newlines
self.normalize = normalize | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
if isinstance(txt, (tuple, list)):
txt = u' '.join([self.clean(item, children=self.children) for item in txt])
txt = self.clean(txt, self.children, self.newlines, self.normalize)
txt = self.remove(txt, self.symbols)
txt = self.replace(txt, self.toreplace)
# ensure it didn't become str by mistake
return unicode(txt) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def clean(cls, txt, children=True, newlines=True, normalize='NFC'):
if not isinstance(txt, basestring):
if children:
txt = [t.strip() for t in txt.itertext()]
else:
txt = [txt.text.strip()]
txt = u' '.join(txt) # 'foo bar'
if newlines:
txt = re.compile(u'\s+', flags=re.UNICODE).sub(u' ', txt) # 'foo bar'
else:
# normalize newlines and clean what is inside
txt = '\n'.join([cls.clean(l) for l in txt.splitlines()])
txt = txt.strip()
# lxml under Python 2 returns str instead of unicode if it is pure ASCII
txt = unicode(txt)
# normalize to a standard Unicode form
if normalize:
txt = unicodedata.normalize(normalize, txt)
return txt | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def remove(cls, txt, symbols):
for symbol in symbols:
txt = txt.replace(symbol, '')
return txt.strip() | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def replace(cls, txt, replace):
for before, after in replace:
txt = txt.replace(before, after)
return txt | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
txt = super(Lower, self).filter(txt)
return txt.lower() | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, replace_dots=False, sign=None, default=_NO_DEFAULT):
super(CleanDecimal, self).__init__(selector, default=default)
self.replace_dots = replace_dots
self.sign = sign | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, text):
if empty(text):
return self.default_or_raise(ParseError('Unable to parse %r' % text))
original_text = text = super(CleanDecimal, self).filter(text)
if self.replace_dots:
if type(self.replace_dots) is tuple:
thousands_sep, decimal_sep = self.replace_dots
else:
thousands_sep, decimal_sep = '.', ','
text = text.replace(thousands_sep, '').replace(decimal_sep, '.')
try:
v = Decimal(re.sub(r'[^\d\-\.]', '', text))
if self.sign:
v *= self.sign(original_text)
return v
except InvalidOperation as e:
return self.default_or_raise(e) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, label):
label = re.sub(r'[^A-Za-z0-9]', ' ', label.lower()).strip()
label = re.sub(r'\s+', '-', label)
return label | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, type=None, minlen=0, default=_NO_DEFAULT):
super(Type, self).__init__(selector, default=default)
self.type_func = type
self.minlen = minlen | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
if empty(txt):
return self.default_or_raise(ParseError('Unable to parse %r' % txt))
if self.minlen is not False and len(txt) <= self.minlen:
return self.default_or_raise(ParseError('Unable to parse %r' % txt))
try:
return self.type_func(txt)
except ValueError as e:
return self.default_or_raise(ParseError('Unable to parse %r: %s' % (txt, e))) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, name):
super(Field, self).__init__()
self.name = name | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def nth(iterable, n, default=None):
"Returns the nth item or a default value, n can be negative, or '*' for all"
if n == '*':
return iterable
if n < 0:
iterable = reversed(list(iterable))
n = abs(n) - 1
return next(islice(iterable, n, None), default) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, pattern=None, template=None, nth=0, flags=0, default=_NO_DEFAULT):
super(Regexp, self).__init__(selector, default=default)
assert pattern is not None
self.pattern = pattern
self._regex = re.compile(pattern, flags)
self.template = template
self.nth = nth | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
if isinstance(txt, (tuple, list)):
txt = u' '.join([t.strip() for t in txt.itertext()])
m = self._regex.search(txt) if self.nth == 0 else \
nth(self._regex.finditer(txt), self.nth)
if not m:
msg = 'Unable to find %s %s in %r' % (ordinal(self.nth), self.pattern, txt)
return self.default_or_raise(RegexpError(msg))
if isinstance(m, Iterator):
return map(self.expand, m)
return self.expand(m) | frankrousseau/weboob | [
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector, map_dict, default=_NO_DEFAULT):
    """Remember the lookup table used to translate filtered values."""
    super(Map, self).__init__(selector, default=default)
    self.map_dict = map_dict
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
    """Translate *txt* through the mapping; default or raise when unknown."""
    try:
        return self.map_dict[txt]
    except KeyError:
        error = ItemNotFound('Unable to handle %r on %r' % (txt, self.map_dict))
        return self.default_or_raise(error)
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, default=_NO_DEFAULT, dayfirst=False, translations=None):
    """Remember parsing options: day-first ordering and regexp translations."""
    super(DateTime, self).__init__(selector, default=default)
    self.translations = translations
    self.dayfirst = dayfirst
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
    """Parse *txt* into a datetime, applying regexp translations first.

    Missing or unparseable values yield the default (or raise ParseError).
    """
    if empty(txt) or txt == '':
        return self.default_or_raise(ParseError('Unable to parse %r' % txt))
    try:
        if self.translations:
            for pattern, replacement in self.translations:
                txt = pattern.sub(replacement, txt)
        return parse_date(txt, dayfirst=self.dayfirst)
    except (ValueError, TypeError) as exc:
        return self.default_or_raise(ParseError('Unable to parse %r: %s' % (txt, exc)))
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, default=_NO_DEFAULT, dayfirst=False, translations=None):
    # Pure pass-through: same options as the parent datetime filter; this
    # subclass only truncates the parsed value to a date in filter().
    super(Date, self).__init__(selector, default=default, dayfirst=dayfirst, translations=translations)
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
    """Parse *txt* like the parent filter, then keep only the date part."""
    value = super(Date, self).filter(txt)
    # The parent may return a non-datetime (e.g. the default value);
    # only call .date() when it is available.
    return value.date() if hasattr(value, 'date') else value
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector, date_guesser, **kwargs):
    """Keep the date guesser and the extra keyword arguments for later use."""
    super(DateGuesser, self).__init__(selector)
    self.kwargs = kwargs
    self.date_guesser = date_guesser
4,
3,
4,
1,
1381825736
] |
def __init__(self, selector=None, default=_NO_DEFAULT):
    # Pure pass-through; parsing behavior lives in filter() and in class
    # attributes (presumably _regexp/klass/kwargs — defined on the class).
    super(Time, self).__init__(selector, default=default)
4,
3,
4,
1,
1381825736
] |
def filter(self, txt):
    """Extract time components from *txt* with ``self._regexp``.

    ``self.kwargs`` maps constructor argument names to regexp group names;
    missing groups default to 0.  Returns an instance of ``self.klass``,
    or the default (or raises ParseError) when nothing matches.
    """
    m = self._regexp.search(txt)
    if m:
        groups = m.groupdict()
        kwargs = {}
        # ``dict.items()`` instead of the Python-2-only ``iteritems()``:
        # identical behavior on Python 2, and also works on Python 3.
        for key, index in self.kwargs.items():
            kwargs[key] = int(groups[index] or 0)
        return self.klass(**kwargs)
    return self.default_or_raise(ParseError('Unable to find time in %r' % txt))
4,
3,
4,
1,
1381825736
] |
def __init__(self, *args, **kwargs):
    """Wrap several sub-filters; ``default`` may be passed as a keyword."""
    default_value = kwargs.pop('default', _NO_DEFAULT)
    super(MultiFilter, self).__init__(args, default_value)
4,
3,
4,
1,
1381825736
] |
def filter(self, values):
    """Combine the sub-filter results; subclasses must override this."""
    raise NotImplementedError()
4,
3,
4,
1,
1381825736
] |
def __init__(self, date, time):
    # Store the date and time sub-filters via the parent constructor;
    # filter() combines their results into a single datetime.
    super(CombineDate, self).__init__(date, time)
4,
3,
4,
1,
1381825736
] |
def filter(self, values):
    """Merge the two sub-filter results (a date and a time) into a datetime."""
    date_part, time_part = values[0], values[1]
    return datetime.datetime.combine(date_part, time_part)
4,
3,
4,
1,
1381825736
] |
def __init__(self, fmt, *args):
    """Store the %-style format string applied to the sub-filter results."""
    super(Format, self).__init__(*args)
    self.fmt = fmt
4,
3,
4,
1,
1381825736
] |
def filter(self, values):
    """Interpolate the collected sub-filter values into the format string."""
    return self.fmt % values
4,
3,
4,
1,
1381825736
] |
def __init__(self, url_name, **kwargs):
    """Record the URL name and the filters that will fill its parameters.

    The keyword names and values are kept in matching order: the values
    become sub-filters, the keys are remembered for the final URL build.
    """
    super(BrowserURL, self).__init__(*kwargs.values())
    self.keys = kwargs.keys()
    self.url_name = url_name
4,
3,
4,
1,
1381825736
] |
def filter(self, values):
    """Identity: pass the collected sub-filter results through unchanged."""
    return values
4,
3,
4,
1,
1381825736
] |
def __init__(self, pattern, selector=None, textCleaner=CleanText):
    """Remember the per-item pattern and the cleaner applied to each item."""
    super(Join, self).__init__(selector)
    self.textCleaner = textCleaner
    self.pattern = pattern
4,
3,
4,
1,
1381825736
] |
def filter(self, el):
    """Concatenate every element of *el*, cleaned and wrapped in the pattern."""
    return u''.join(self.pattern % self.textCleaner.clean(item) for item in el)
4,
3,
4,
1,
1381825736
] |
def __init__(self, file, **settings):
    """Store the IFC file and merge user settings over the defaults."""
    self.file = file
    # Defaults first, then user-supplied overrides on top.
    self.settings = {
        "relating_process": None,
        "related_object": None,
    }
    self.settings.update(settings)
1191,
546,
1191,
377,
1439197394
] |
def apply(self):
    """Delete every registered absl flag except ``showprefixforinfo``."""
    from absl import flags  # pylint: disable=g-import-not-at-top
    keep = ('showprefixforinfo',)
    # Snapshot the flag names first, since we mutate FLAGS while iterating.
    for flag_name in list(flags.FLAGS):
        if flag_name not in keep:
            delattr(flags.FLAGS, flag_name)
2689,
429,
2689,
115,
1532471428
] |
def test_xavier_wrong_dtype(self):
    # Xavier initialization is only defined for floating point weights;
    # requesting an integer dtype must raise TypeError.
    with self.assertRaisesRegexp(
        TypeError, 'Cannot create initializer for non-floating point type.'):
      initializers.xavier_initializer(dtype=dtypes.int32)
    # A zero-scale L1 regularizer applied to None returns None.
    self.assertIsNone(regularizers.l1_regularizer(0.)(None))
334,
98,
334,
11,
1561681288
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.