repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
SchroterQuentin/django-search-listview | search_listview/list.py | alias_field | def alias_field(model, field):
"""
Return the prefix name of a field
"""
for part in field.split(LOOKUP_SEP)[:-1]:
model = associate_model(model,part)
return model.__name__ + "-" + field.split(LOOKUP_SEP)[-1] | python | def alias_field(model, field):
"""
Return the prefix name of a field
"""
for part in field.split(LOOKUP_SEP)[:-1]:
model = associate_model(model,part)
return model.__name__ + "-" + field.split(LOOKUP_SEP)[-1] | [
"def",
"alias_field",
"(",
"model",
",",
"field",
")",
":",
"for",
"part",
"in",
"field",
".",
"split",
"(",
"LOOKUP_SEP",
")",
"[",
":",
"-",
"1",
"]",
":",
"model",
"=",
"associate_model",
"(",
"model",
",",
"part",
")",
"return",
"model",
".",
"... | Return the prefix name of a field | [
"Return",
"the",
"prefix",
"name",
"of",
"a",
"field"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L184-L190 | train | 46,600 |
SchroterQuentin/django-search-listview | search_listview/list.py | associate_model | def associate_model(model, field):
"""
Return the model associate to the ForeignKey or ManyToMany
relation
"""
class_field = model._meta.get_field(field)
if hasattr(class_field, "field"):
return class_field.field.related.related_model
else:
return class_field.related_model | python | def associate_model(model, field):
"""
Return the model associate to the ForeignKey or ManyToMany
relation
"""
class_field = model._meta.get_field(field)
if hasattr(class_field, "field"):
return class_field.field.related.related_model
else:
return class_field.related_model | [
"def",
"associate_model",
"(",
"model",
",",
"field",
")",
":",
"class_field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
"if",
"hasattr",
"(",
"class_field",
",",
"\"field\"",
")",
":",
"return",
"class_field",
".",
"field",
".",
"... | Return the model associate to the ForeignKey or ManyToMany
relation | [
"Return",
"the",
"model",
"associate",
"to",
"the",
"ForeignKey",
"or",
"ManyToMany",
"relation"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L192-L201 | train | 46,601 |
SchroterQuentin/django-search-listview | search_listview/list.py | get_formfield | def get_formfield(model, field):
"""
Return the formfied associate to the field of the model
"""
class_field = model._meta.get_field(field)
if hasattr(class_field, "field"):
formfield = class_field.field.formfield()
else:
formfield = class_field.formfield()
# Otherwise the formfield contain the reverse relation
if isinstance(formfield, ChoiceField):
formfield.choices = class_field.get_choices()
return formfield | python | def get_formfield(model, field):
"""
Return the formfied associate to the field of the model
"""
class_field = model._meta.get_field(field)
if hasattr(class_field, "field"):
formfield = class_field.field.formfield()
else:
formfield = class_field.formfield()
# Otherwise the formfield contain the reverse relation
if isinstance(formfield, ChoiceField):
formfield.choices = class_field.get_choices()
return formfield | [
"def",
"get_formfield",
"(",
"model",
",",
"field",
")",
":",
"class_field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
"if",
"hasattr",
"(",
"class_field",
",",
"\"field\"",
")",
":",
"formfield",
"=",
"class_field",
".",
"field",
... | Return the formfied associate to the field of the model | [
"Return",
"the",
"formfied",
"associate",
"to",
"the",
"field",
"of",
"the",
"model"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L203-L218 | train | 46,602 |
SchroterQuentin/django-search-listview | search_listview/list.py | SearchableListView.get_q_object | def get_q_object(self):
"""
Build Q object to filter the queryset
"""
q_object = Q()
for field in self.searchable_fields:
value = self.request.GET.getlist(alias_field(self.model, field), None)
mini_q = Q()
for val in value:
attr = "{0}{1}".format(field, self.specifications.get(field, ''))
if val:
dic_tmp = {
attr: val
}
mini_q |= Q(**dic_tmp)
q_object &= mini_q
return q_object | python | def get_q_object(self):
"""
Build Q object to filter the queryset
"""
q_object = Q()
for field in self.searchable_fields:
value = self.request.GET.getlist(alias_field(self.model, field), None)
mini_q = Q()
for val in value:
attr = "{0}{1}".format(field, self.specifications.get(field, ''))
if val:
dic_tmp = {
attr: val
}
mini_q |= Q(**dic_tmp)
q_object &= mini_q
return q_object | [
"def",
"get_q_object",
"(",
"self",
")",
":",
"q_object",
"=",
"Q",
"(",
")",
"for",
"field",
"in",
"self",
".",
"searchable_fields",
":",
"value",
"=",
"self",
".",
"request",
".",
"GET",
".",
"getlist",
"(",
"alias_field",
"(",
"self",
".",
"model",
... | Build Q object to filter the queryset | [
"Build",
"Q",
"object",
"to",
"filter",
"the",
"queryset"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L26-L42 | train | 46,603 |
SchroterQuentin/django-search-listview | search_listview/list.py | SearchableListView.get_search_form | def get_search_form(self):
"""
Return list of form based on model
"""
magic_dico_form = self.get_dict_for_forms()
forms = []
initial = list(self.request.GET.lists())
for key, value in magic_dico_form.items():
form = Form()
model = value["model"]
if not value["fields"]:
continue
for field in value["fields"]:
formfield = get_formfield(model, field)
formfield.widget.attrs.update({'class': self.css_class})
form.fields.update({
field : formfield
})
initial_tmp = {}
for k, vals in initial:
tmp_list = k.split(model.__name__ + "-")
if len(tmp_list) == 2:
list_val_tmp = vals[0] if len(vals) == 1 else [val for val in vals if val != '']
initial_tmp[tmp_list[-1]] = list_val_tmp
form.initial = initial_tmp
form.prefix = model.__name__
forms.append(form)
return sorted(forms, key=lambda form: form.prefix) | python | def get_search_form(self):
"""
Return list of form based on model
"""
magic_dico_form = self.get_dict_for_forms()
forms = []
initial = list(self.request.GET.lists())
for key, value in magic_dico_form.items():
form = Form()
model = value["model"]
if not value["fields"]:
continue
for field in value["fields"]:
formfield = get_formfield(model, field)
formfield.widget.attrs.update({'class': self.css_class})
form.fields.update({
field : formfield
})
initial_tmp = {}
for k, vals in initial:
tmp_list = k.split(model.__name__ + "-")
if len(tmp_list) == 2:
list_val_tmp = vals[0] if len(vals) == 1 else [val for val in vals if val != '']
initial_tmp[tmp_list[-1]] = list_val_tmp
form.initial = initial_tmp
form.prefix = model.__name__
forms.append(form)
return sorted(forms, key=lambda form: form.prefix) | [
"def",
"get_search_form",
"(",
"self",
")",
":",
"magic_dico_form",
"=",
"self",
".",
"get_dict_for_forms",
"(",
")",
"forms",
"=",
"[",
"]",
"initial",
"=",
"list",
"(",
"self",
".",
"request",
".",
"GET",
".",
"lists",
"(",
")",
")",
"for",
"key",
... | Return list of form based on model | [
"Return",
"list",
"of",
"form",
"based",
"on",
"model"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L44-L74 | train | 46,604 |
SchroterQuentin/django-search-listview | search_listview/list.py | SearchableListView.get_dict_for_forms | def get_dict_for_forms(self):
"""
Build a dictionnary where searchable_fields are
next to their model to be use in modelform_factory
dico = {
"str(model)" : {
"model" : Model,
"fields" = [] #searchable_fields which are attribute of Model
}
}
"""
magic_dico = field_to_dict(self.searchable_fields)
dico = {}
def dict_from_fields_r(mini_dict, dico, model):
"""
Create the dico recursively from the magic_dico
"""
dico[str(model)] = {}
dico[str(model)]["model"] = model
dico[str(model)]["fields"] = []
for key, value in mini_dict.items():
if isinstance(value, bool):
continue
if value == EMPTY_DICT:
dico[str(model)]["fields"].append(key)
elif EMPTY_DICT.items() <= value.items():
dico[str(model)]["fields"].append(key)
model_tmp = associate_model(model, key)
dict_from_fields_r(value, dico, model_tmp)
else:
model_tmp = associate_model(model, key)
dict_from_fields_r(value, dico, model_tmp)
if magic_dico:
dict_from_fields_r(magic_dico, dico, self.model)
return dico | python | def get_dict_for_forms(self):
"""
Build a dictionnary where searchable_fields are
next to their model to be use in modelform_factory
dico = {
"str(model)" : {
"model" : Model,
"fields" = [] #searchable_fields which are attribute of Model
}
}
"""
magic_dico = field_to_dict(self.searchable_fields)
dico = {}
def dict_from_fields_r(mini_dict, dico, model):
"""
Create the dico recursively from the magic_dico
"""
dico[str(model)] = {}
dico[str(model)]["model"] = model
dico[str(model)]["fields"] = []
for key, value in mini_dict.items():
if isinstance(value, bool):
continue
if value == EMPTY_DICT:
dico[str(model)]["fields"].append(key)
elif EMPTY_DICT.items() <= value.items():
dico[str(model)]["fields"].append(key)
model_tmp = associate_model(model, key)
dict_from_fields_r(value, dico, model_tmp)
else:
model_tmp = associate_model(model, key)
dict_from_fields_r(value, dico, model_tmp)
if magic_dico:
dict_from_fields_r(magic_dico, dico, self.model)
return dico | [
"def",
"get_dict_for_forms",
"(",
"self",
")",
":",
"magic_dico",
"=",
"field_to_dict",
"(",
"self",
".",
"searchable_fields",
")",
"dico",
"=",
"{",
"}",
"def",
"dict_from_fields_r",
"(",
"mini_dict",
",",
"dico",
",",
"model",
")",
":",
"\"\"\"\n ... | Build a dictionnary where searchable_fields are
next to their model to be use in modelform_factory
dico = {
"str(model)" : {
"model" : Model,
"fields" = [] #searchable_fields which are attribute of Model
}
} | [
"Build",
"a",
"dictionnary",
"where",
"searchable_fields",
"are",
"next",
"to",
"their",
"model",
"to",
"be",
"use",
"in",
"modelform_factory"
] | 8b027a6908dc30c6ebc613bb4fde6b1ba40124a3 | https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L111-L150 | train | 46,605 |
retr0h/git-url-parse | giturlparse/parser.py | Parser.parse | def parse(self):
"""
Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError`
"""
d = {
'pathname': None,
'protocols': self._get_protocols(),
'protocol': 'ssh',
'href': self._url,
'resource': None,
'user': None,
'port': None,
'name': None,
'owner': None,
}
for regex in POSSIBLE_REGEXES:
match = regex.search(self._url)
if match:
d.update(match.groupdict())
break
else:
msg = "Invalid URL '{}'".format(self._url)
raise ParserError(msg)
return Parsed(**d) | python | def parse(self):
"""
Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError`
"""
d = {
'pathname': None,
'protocols': self._get_protocols(),
'protocol': 'ssh',
'href': self._url,
'resource': None,
'user': None,
'port': None,
'name': None,
'owner': None,
}
for regex in POSSIBLE_REGEXES:
match = regex.search(self._url)
if match:
d.update(match.groupdict())
break
else:
msg = "Invalid URL '{}'".format(self._url)
raise ParserError(msg)
return Parsed(**d) | [
"def",
"parse",
"(",
"self",
")",
":",
"d",
"=",
"{",
"'pathname'",
":",
"None",
",",
"'protocols'",
":",
"self",
".",
"_get_protocols",
"(",
")",
",",
"'protocol'",
":",
"'ssh'",
",",
"'href'",
":",
"self",
".",
"_url",
",",
"'resource'",
":",
"None... | Parses a GIT URL and returns an object. Raises an exception on invalid
URL.
:returns: Parsed object
:raise: :class:`.ParserError` | [
"Parses",
"a",
"GIT",
"URL",
"and",
"returns",
"an",
"object",
".",
"Raises",
"an",
"exception",
"on",
"invalid",
"URL",
"."
] | 98a5377aa8c8f3b8896f277c5c81558749feef58 | https://github.com/retr0h/git-url-parse/blob/98a5377aa8c8f3b8896f277c5c81558749feef58/giturlparse/parser.py#L78-L106 | train | 46,606 |
LandRegistry/lr-utils | lrutils/errorhandler/errorhandler_utils.py | setup_errors | def setup_errors(app, error_template="error.html"):
"""Add a handler for each of the available HTTP error responses."""
def error_handler(error):
if isinstance(error, HTTPException):
description = error.get_description(request.environ)
code = error.code
name = error.name
else:
description = error
code = 500
name = "Internal Server Error"
return render_template(error_template,
error=error,
code=code,
name=Markup(name),
description=Markup(description)), code
for exception in default_exceptions:
app.register_error_handler(exception, error_handler) | python | def setup_errors(app, error_template="error.html"):
"""Add a handler for each of the available HTTP error responses."""
def error_handler(error):
if isinstance(error, HTTPException):
description = error.get_description(request.environ)
code = error.code
name = error.name
else:
description = error
code = 500
name = "Internal Server Error"
return render_template(error_template,
error=error,
code=code,
name=Markup(name),
description=Markup(description)), code
for exception in default_exceptions:
app.register_error_handler(exception, error_handler) | [
"def",
"setup_errors",
"(",
"app",
",",
"error_template",
"=",
"\"error.html\"",
")",
":",
"def",
"error_handler",
"(",
"error",
")",
":",
"if",
"isinstance",
"(",
"error",
",",
"HTTPException",
")",
":",
"description",
"=",
"error",
".",
"get_description",
... | Add a handler for each of the available HTTP error responses. | [
"Add",
"a",
"handler",
"for",
"each",
"of",
"the",
"available",
"HTTP",
"error",
"responses",
"."
] | 811c9e5c11678a04ee203fa55a7c75080f4f9d89 | https://github.com/LandRegistry/lr-utils/blob/811c9e5c11678a04ee203fa55a7c75080f4f9d89/lrutils/errorhandler/errorhandler_utils.py#L40-L58 | train | 46,607 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.domain_search | def domain_search(self, domain=None, company=None, limit=None, offset=None,
emails_type=None, raw=False):
"""
Return all the email addresses found for a given domain.
:param domain: The domain on which to search for emails. Must be
defined if company is not.
:param company: The name of the company on which to search for emails.
Must be defined if domain is not.
:param limit: The maximum number of emails to give back. Default is 10.
:param offset: The number of emails to skip. Default is 0.
:param emails_type: The type of emails to give back. Can be one of
'personal' or 'generic'.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict, with email addresses
found.
"""
if not domain and not company:
raise MissingCompanyError(
'You must supply at least a domain name or a company name'
)
if domain:
params = {'domain': domain, 'api_key': self.api_key}
elif company:
params = {'company': company, 'api_key': self.api_key}
if limit:
params['limit'] = limit
if offset:
params['offset'] = offset
if emails_type:
params['type'] = emails_type
endpoint = self.base_endpoint.format('domain-search')
return self._query_hunter(endpoint, params, raw=raw) | python | def domain_search(self, domain=None, company=None, limit=None, offset=None,
emails_type=None, raw=False):
"""
Return all the email addresses found for a given domain.
:param domain: The domain on which to search for emails. Must be
defined if company is not.
:param company: The name of the company on which to search for emails.
Must be defined if domain is not.
:param limit: The maximum number of emails to give back. Default is 10.
:param offset: The number of emails to skip. Default is 0.
:param emails_type: The type of emails to give back. Can be one of
'personal' or 'generic'.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict, with email addresses
found.
"""
if not domain and not company:
raise MissingCompanyError(
'You must supply at least a domain name or a company name'
)
if domain:
params = {'domain': domain, 'api_key': self.api_key}
elif company:
params = {'company': company, 'api_key': self.api_key}
if limit:
params['limit'] = limit
if offset:
params['offset'] = offset
if emails_type:
params['type'] = emails_type
endpoint = self.base_endpoint.format('domain-search')
return self._query_hunter(endpoint, params, raw=raw) | [
"def",
"domain_search",
"(",
"self",
",",
"domain",
"=",
"None",
",",
"company",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
",",
"emails_type",
"=",
"None",
",",
"raw",
"=",
"False",
")",
":",
"if",
"not",
"domain",
"and",
... | Return all the email addresses found for a given domain.
:param domain: The domain on which to search for emails. Must be
defined if company is not.
:param company: The name of the company on which to search for emails.
Must be defined if domain is not.
:param limit: The maximum number of emails to give back. Default is 10.
:param offset: The number of emails to skip. Default is 0.
:param emails_type: The type of emails to give back. Can be one of
'personal' or 'generic'.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict, with email addresses
found. | [
"Return",
"all",
"the",
"email",
"addresses",
"found",
"for",
"a",
"given",
"domain",
"."
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L32-L76 | train | 46,608 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.email_finder | def email_finder(self, domain=None, company=None, first_name=None,
last_name=None, full_name=None, raw=False):
"""
Find the email address of a person given its name and company's domain.
:param domain: The domain of the company where the person works. Must
be defined if company is not.
:param company: The name of the company where the person works. Must
be defined if domain is not.
:param first_name: The first name of the person. Must be defined if
full_name is not.
:param last_name: The last name of the person. Must be defined if
full_name is not.
:param full_name: The full name of the person. Must be defined if
first_name AND last_name are not.
:param raw: Gives back the entire response instead of just email and score.
:return: email and score as a tuple.
"""
params = self.base_params
if not domain and not company:
raise MissingCompanyError(
'You must supply at least a domain name or a company name'
)
if domain:
params['domain'] = domain
elif company:
params['company'] = company
if not(first_name and last_name) and not full_name:
raise MissingNameError(
'You must supply a first name AND a last name OR a full name'
)
if first_name and last_name:
params['first_name'] = first_name
params['last_name'] = last_name
elif full_name:
params['full_name'] = full_name
endpoint = self.base_endpoint.format('email-finder')
res = self._query_hunter(endpoint, params, raw=raw)
if raw:
return res
email = res['email']
score = res['score']
return email, score | python | def email_finder(self, domain=None, company=None, first_name=None,
last_name=None, full_name=None, raw=False):
"""
Find the email address of a person given its name and company's domain.
:param domain: The domain of the company where the person works. Must
be defined if company is not.
:param company: The name of the company where the person works. Must
be defined if domain is not.
:param first_name: The first name of the person. Must be defined if
full_name is not.
:param last_name: The last name of the person. Must be defined if
full_name is not.
:param full_name: The full name of the person. Must be defined if
first_name AND last_name are not.
:param raw: Gives back the entire response instead of just email and score.
:return: email and score as a tuple.
"""
params = self.base_params
if not domain and not company:
raise MissingCompanyError(
'You must supply at least a domain name or a company name'
)
if domain:
params['domain'] = domain
elif company:
params['company'] = company
if not(first_name and last_name) and not full_name:
raise MissingNameError(
'You must supply a first name AND a last name OR a full name'
)
if first_name and last_name:
params['first_name'] = first_name
params['last_name'] = last_name
elif full_name:
params['full_name'] = full_name
endpoint = self.base_endpoint.format('email-finder')
res = self._query_hunter(endpoint, params, raw=raw)
if raw:
return res
email = res['email']
score = res['score']
return email, score | [
"def",
"email_finder",
"(",
"self",
",",
"domain",
"=",
"None",
",",
"company",
"=",
"None",
",",
"first_name",
"=",
"None",
",",
"last_name",
"=",
"None",
",",
"full_name",
"=",
"None",
",",
"raw",
"=",
"False",
")",
":",
"params",
"=",
"self",
".",... | Find the email address of a person given its name and company's domain.
:param domain: The domain of the company where the person works. Must
be defined if company is not.
:param company: The name of the company where the person works. Must
be defined if domain is not.
:param first_name: The first name of the person. Must be defined if
full_name is not.
:param last_name: The last name of the person. Must be defined if
full_name is not.
:param full_name: The full name of the person. Must be defined if
first_name AND last_name are not.
:param raw: Gives back the entire response instead of just email and score.
:return: email and score as a tuple. | [
"Find",
"the",
"email",
"address",
"of",
"a",
"person",
"given",
"its",
"name",
"and",
"company",
"s",
"domain",
"."
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L78-L134 | train | 46,609 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.email_verifier | def email_verifier(self, email, raw=False):
"""
Verify the deliverability of a given email adress.abs
:param email: The email adress to check.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict.
"""
params = {'email': email, 'api_key': self.api_key}
endpoint = self.base_endpoint.format('email-verifier')
return self._query_hunter(endpoint, params, raw=raw) | python | def email_verifier(self, email, raw=False):
"""
Verify the deliverability of a given email adress.abs
:param email: The email adress to check.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict.
"""
params = {'email': email, 'api_key': self.api_key}
endpoint = self.base_endpoint.format('email-verifier')
return self._query_hunter(endpoint, params, raw=raw) | [
"def",
"email_verifier",
"(",
"self",
",",
"email",
",",
"raw",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'email'",
":",
"email",
",",
"'api_key'",
":",
"self",
".",
"api_key",
"}",
"endpoint",
"=",
"self",
".",
"base_endpoint",
".",
"format",
"(",
... | Verify the deliverability of a given email adress.abs
:param email: The email adress to check.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict. | [
"Verify",
"the",
"deliverability",
"of",
"a",
"given",
"email",
"adress",
".",
"abs"
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L136-L150 | train | 46,610 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.account_information | def account_information(self, raw=False):
"""
Gives the information about the account associated with the api_key.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict.
"""
params = self.base_params
endpoint = self.base_endpoint.format('account')
res = self._query_hunter(endpoint, params, raw=raw)
if raw:
return res
res['calls']['left'] = res['calls']['available'] - res['calls']['used']
return res | python | def account_information(self, raw=False):
"""
Gives the information about the account associated with the api_key.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict.
"""
params = self.base_params
endpoint = self.base_endpoint.format('account')
res = self._query_hunter(endpoint, params, raw=raw)
if raw:
return res
res['calls']['left'] = res['calls']['available'] - res['calls']['used']
return res | [
"def",
"account_information",
"(",
"self",
",",
"raw",
"=",
"False",
")",
":",
"params",
"=",
"self",
".",
"base_params",
"endpoint",
"=",
"self",
".",
"base_endpoint",
".",
"format",
"(",
"'account'",
")",
"res",
"=",
"self",
".",
"_query_hunter",
"(",
... | Gives the information about the account associated with the api_key.
:param raw: Gives back the entire response instead of just the 'data'.
:return: Full payload of the query as a dict. | [
"Gives",
"the",
"information",
"about",
"the",
"account",
"associated",
"with",
"the",
"api_key",
"."
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L183-L201 | train | 46,611 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.get_leads | def get_leads(self, offset=None, limit=None, lead_list_id=None,
first_name=None, last_name=None, email=None, company=None,
phone_number=None, twitter=None):
"""
Gives back all the leads saved in your account.
:param offset: Number of leads to skip.
:param limit: Maximum number of leads to return.
:param lead_list_id: Id of a lead list to query leads on.
:param first_name: First name to filter on.
:param last_name: Last name to filter on.
:param email: Email to filter on.
:param company: Company to filter on.
:param phone_number: Phone number to filter on.
:param twitter: Twitter account to filter on.
:return: All leads found as a dict.
"""
args = locals()
args_params = dict((key, value) for key, value in args.items() if value
is not None)
args_params.pop('self')
params = self.base_params
params.update(args_params)
endpoint = self.base_endpoint.format('leads')
return self._query_hunter(endpoint, params) | python | def get_leads(self, offset=None, limit=None, lead_list_id=None,
first_name=None, last_name=None, email=None, company=None,
phone_number=None, twitter=None):
"""
Gives back all the leads saved in your account.
:param offset: Number of leads to skip.
:param limit: Maximum number of leads to return.
:param lead_list_id: Id of a lead list to query leads on.
:param first_name: First name to filter on.
:param last_name: Last name to filter on.
:param email: Email to filter on.
:param company: Company to filter on.
:param phone_number: Phone number to filter on.
:param twitter: Twitter account to filter on.
:return: All leads found as a dict.
"""
args = locals()
args_params = dict((key, value) for key, value in args.items() if value
is not None)
args_params.pop('self')
params = self.base_params
params.update(args_params)
endpoint = self.base_endpoint.format('leads')
return self._query_hunter(endpoint, params) | [
"def",
"get_leads",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"lead_list_id",
"=",
"None",
",",
"first_name",
"=",
"None",
",",
"last_name",
"=",
"None",
",",
"email",
"=",
"None",
",",
"company",
"=",
"None",
",",
"pho... | Gives back all the leads saved in your account.
:param offset: Number of leads to skip.
:param limit: Maximum number of leads to return.
:param lead_list_id: Id of a lead list to query leads on.
:param first_name: First name to filter on.
:param last_name: Last name to filter on.
:param email: Email to filter on.
:param company: Company to filter on.
:param phone_number: Phone number to filter on.
:param twitter: Twitter account to filter on.
:return: All leads found as a dict. | [
"Gives",
"back",
"all",
"the",
"leads",
"saved",
"in",
"your",
"account",
"."
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L203-L239 | train | 46,612 |
VonStruddle/PyHunter | pyhunter/pyhunter.py | PyHunter.get_lead | def get_lead(self, lead_id):
"""
Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict.
"""
params = self.base_params
endpoint = self.base_endpoint.format('leads/' + str(lead_id))
return self._query_hunter(endpoint, params) | python | def get_lead(self, lead_id):
"""
Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict.
"""
params = self.base_params
endpoint = self.base_endpoint.format('leads/' + str(lead_id))
return self._query_hunter(endpoint, params) | [
"def",
"get_lead",
"(",
"self",
",",
"lead_id",
")",
":",
"params",
"=",
"self",
".",
"base_params",
"endpoint",
"=",
"self",
".",
"base_endpoint",
".",
"format",
"(",
"'leads/'",
"+",
"str",
"(",
"lead_id",
")",
")",
"return",
"self",
".",
"_query_hunte... | Get a specific lead saved on your account.
:param lead_id: Id of the lead to search. Must be defined.
:return: Lead found as a dict. | [
"Get",
"a",
"specific",
"lead",
"saved",
"on",
"your",
"account",
"."
] | e14882d22527102515458cddeb8e0aa1c02da549 | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L241-L253 | train | 46,613 |
def create_lead(self, first_name, last_name, email=None, position=None,
                company=None, company_industry=None, company_size=None,
                confidence_score=None, website=None, country_code=None,
                postal_code=None, source=None, linkedin_url=None,
                phone_number=None, twitter=None, leads_list_id=None):
    """
    Create a lead on your account.

    Only the arguments that were actually supplied (i.e. are not None)
    are sent to the API.

    :param first_name: The first name of the lead to create. Must be
    defined.
    :param last_name: The last name of the lead to create. Must be defined.
    :param email: The email of the lead to create.
    :param position: The professional position of the lead to create.
    :param company: The company of the lead to create.
    :param company_industry: The type of industry of the company where the
    lead works.
    :param company_size: The size of the company where the lead works.
    :param confidence_score: The confidence score of the lead's email.
    :param website: The website of the lead's company.
    :param country_code: The country code of the lead's company.
    :param postal_code: The postal code of the lead's company.
    :param source: The source of the lead's email.
    :param linkedin_url: The URL of the lead's LinkedIn profile.
    :param phone_number: The phone number of the lead to create.
    :param twitter: The lead's Twitter account.
    :param leads_list_id: The id of the leads list where to save the new
    lead.

    :return: The newly created lead as a dict.
    """
    # Snapshot the call arguments first (locals() here holds exactly the
    # parameters), then keep only the ones the caller provided.
    provided = dict(locals())
    provided.pop('self')
    payload = {
        name: value
        for name, value in provided.items()
        if value is not None
    }
    endpoint = self.base_endpoint.format('leads')
    return self._query_hunter(endpoint, self.base_params, 'post', payload)
def get_leads_lists(self, offset=None, limit=None):
    """
    Give back all the leads lists saved on your account.

    :param offset: Number of lists to skip.
    :param limit: Maximum number of lists to return.

    :return: Leads lists found as a dict.
    """
    # NOTE(review): this assigns a reference, not a copy, so a truthy
    # offset/limit is written into the shared base_params mapping —
    # confirm base_params yields a fresh dict per access.
    params = self.base_params
    for key, value in (('offset', offset), ('limit', limit)):
        if value:
            params[key] = value
    endpoint = self.base_endpoint.format('leads_lists')
    return self._query_hunter(endpoint, params)
def create_leads_list(self, name, team_id=None):
    """
    Create a leads list.

    :param name: Name of the list to create. Must be defined.
    :param team_id: The id of the team to share this list with.

    :return: The created leads list as a dict.
    """
    payload = {'name': name}
    if team_id:
        payload['team_id'] = team_id
    endpoint = self.base_endpoint.format('leads_lists')
    return self._query_hunter(endpoint, self.base_params, 'post', payload)
def update_leads_list(self, leads_list_id, name, team_id=None):
    """
    Update a leads list.

    :param leads_list_id: The id of the list to update.
    :param name: New name of the list. Must be defined.
    :param team_id: The id of the team to share this list with.

    :return: 204 Response.
    """
    payload = {'name': name}
    if team_id:
        payload['team_id'] = team_id
    endpoint = self.base_endpoint.format(
        'leads_lists/' + str(leads_list_id)
    )
    return self._query_hunter(endpoint, self.base_params, 'put', payload)
def delete_leads_list(self, leads_list_id):
    """
    Delete a leads list.

    :param leads_list_id: The id of the list to delete.

    :return: 204 Response.
    """
    endpoint = self.base_endpoint.format(
        'leads_lists/' + str(leads_list_id)
    )
    return self._query_hunter(endpoint, self.base_params, 'delete')
def to_int(s):
    """
    Convert a numeric string to an ``int``.

    Underscore group separators and float-style notation are accepted:

    >>> to_int('1_000_000')
    1000000
    >>> to_int('1e6')
    1000000
    >>> to_int('1000')
    1000
    """
    without_separators = s.replace('_', '')
    try:
        return int(without_separators)
    except ValueError:
        # Not a plain integer literal; evaluate it (handles '1e6' etc.)
        # and truncate to int.
        return int(ast.literal_eval(s))
mfussenegger/cr8 | cr8/cli.py | dicts_from_lines | def dicts_from_lines(lines):
""" returns a generator producing dicts from json lines
1 JSON object per line is supported:
{"name": "n1"}
{"name": "n2"}
Or 1 JSON object:
{
"name": "n1"
}
Or a list of JSON objects:
[
{"name": "n1"},
{"name": "n2"},
]
"""
lines = iter(lines)
for line in lines:
line = line.strip()
if not line:
continue # skip empty lines
try:
yield json.loads(line, object_pairs_hook=OrderedDict)
except json.decoder.JSONDecodeError:
content = line + ''.join(lines)
dicts = json.loads(content, object_pairs_hook=OrderedDict)
if isinstance(dicts, list):
yield from dicts
else:
yield dicts | python | def dicts_from_lines(lines):
""" returns a generator producing dicts from json lines
1 JSON object per line is supported:
{"name": "n1"}
{"name": "n2"}
Or 1 JSON object:
{
"name": "n1"
}
Or a list of JSON objects:
[
{"name": "n1"},
{"name": "n2"},
]
"""
lines = iter(lines)
for line in lines:
line = line.strip()
if not line:
continue # skip empty lines
try:
yield json.loads(line, object_pairs_hook=OrderedDict)
except json.decoder.JSONDecodeError:
content = line + ''.join(lines)
dicts = json.loads(content, object_pairs_hook=OrderedDict)
if isinstance(dicts, list):
yield from dicts
else:
yield dicts | [
"def",
"dicts_from_lines",
"(",
"lines",
")",
":",
"lines",
"=",
"iter",
"(",
"lines",
")",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
":",
"continue",
"# skip empty lines",
"try",
":",
"yield",... | returns a generator producing dicts from json lines
1 JSON object per line is supported:
{"name": "n1"}
{"name": "n2"}
Or 1 JSON object:
{
"name": "n1"
}
Or a list of JSON objects:
[
{"name": "n1"},
{"name": "n2"},
] | [
"returns",
"a",
"generator",
"producing",
"dicts",
"from",
"json",
"lines"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/cli.py#L35-L69 | train | 46,620 |
def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    Functions are applied right-to-left, so ``compose(f, g)(x)`` is
    ``f(g(x))``.  The innermost (right-most) function may take arbitrary
    positional and keyword arguments.

    >>> compose(len, str.strip)('  ab ')
    2
    >>> round_three = lambda x: round(x, ndigits=3)
    >>> f = compose(round_three, int.__truediv__)
    >>> [f(3*x, x+1) for x in range(1,10)]
    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
    """
    def compose_two(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return functools.reduce(compose_two, funcs)
def method_caller(method_name, *args, **kwargs):
    """
    Return a function that invokes ``method_name`` on its argument.

    The positional and keyword arguments captured here are forwarded to
    the method on each call.

    >>> lower = method_caller('lower')
    >>> lower('MyString')
    'mystring'
    """
    def call_method(target):
        return getattr(target, method_name)(*args, **kwargs)
    return call_method
def once(func):
    """
    Decorate *func* so its body runs at most one time.

    The first call's return value is stored on the wrapper as
    ``saved_result`` and is returned on every later call, regardless of
    the arguments.  To allow the function to run again, delete the
    ``saved_result`` attribute or invoke ``reset()``.

    >>> add_three = once(lambda a: a + 3)
    >>> add_three(3)
    6
    >>> add_three(9)
    6
    >>> add_three.reset()
    >>> add_three(-3)
    0
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return wrapper.saved_result
        except AttributeError:
            wrapper.saved_result = func(*args, **kwargs)
        return wrapper.saved_result

    def _reset():
        # Raises KeyError when no result is cached, mirroring
        # vars(wrapper).__delitem__('saved_result').
        del wrapper.__dict__['saved_result']

    wrapper.reset = _reset
    return wrapper
def method_cache(method, cache_wrapper=None):
    """
    Wrap *method* with lru_cache-style caching stored per instance.

    On the first call for a given instance, the method is bound to that
    instance, wrapped with *cache_wrapper* (``functools.lru_cache()`` by
    default), and installed on the instance under the method's own name,
    shadowing this wrapper.  Subsequent calls on that instance therefore
    hit the per-instance cache directly, and the cached values die with
    the instance — values in one instance never flush values in another.

    Special (dunder) method names, which Python looks up on the type
    rather than the instance, are routed through
    ``_special_method_cache`` instead.

    Clear an instance's cache via the wrapped method's
    ``.cache_clear()``.

    Caution: do not subsequently wrap the result with another decorator,
    such as ``@property``, which changes the semantics of the function.

    >>> class MyClass:
    ...     calls = 0
    ...
    ...     @method_cache
    ...     def method(self, value):
    ...         self.calls += 1
    ...         return value
    >>> a = MyClass()
    >>> a.method(3)
    3
    >>> a.method(3)
    3
    >>> a.calls
    1
    """
    cache_wrapper = cache_wrapper or lru_cache()

    def wrapper(self, *args, **kwargs):
        # First call for this instance: install a cached bound method
        # under the same name, then delegate to it.
        cached_method = cache_wrapper(types.MethodType(method, self))
        setattr(self, method.__name__, cached_method)
        return cached_method(*args, **kwargs)

    return _special_method_cache(method, cache_wrapper) or wrapper
jaraco/jaraco.functools | jaraco/functools.py | _special_method_cache | def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy | python | def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy | [
"def",
"_special_method_cache",
"(",
"method",
",",
"cache_wrapper",
")",
":",
"name",
"=",
"method",
".",
"__name__",
"special_names",
"=",
"'__getattr__'",
",",
"'__getitem__'",
"if",
"name",
"not",
"in",
"special_names",
":",
"return",
"wrapper_name",
"=",
"'... | Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5 | [
"Because",
"Python",
"treats",
"special",
"methods",
"differently",
"it",
"s",
"not",
"possible",
"to",
"use",
"instance",
"attributes",
"to",
"implement",
"the",
"cached",
"methods",
"."
] | cc972095e5aa2ae80d1d69d7ca84ee94178e869a | https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L184-L211 | train | 46,625 |
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
    """
    Call *func*, retrying on the trapped exceptions.

    Exceptions listed in *trap* are caught for up to *retries* attempts,
    with *cleanup* invoked after each failure.  The final attempt lets
    any exception propagate.  ``retries=float('inf')`` retries forever.
    """
    if retries == float('inf'):
        attempts = count()
    else:
        attempts = range(retries)
    for _ in attempts:
        try:
            return func()
        except trap:
            cleanup()
    return func()
def retry(*r_args, **r_kwargs):
    """
    Decorator form of ``retry_call``.

    Accepts the same arguments as ``retry_call`` except *func* and
    returns a decorator applying that retry policy to the decorated
    function.

    >>> @retry(retries=3)
    ... def my_func(a, b):
    ...     "this is my funk"
    ...     print(a, b)
    >>> my_func.__doc__
    'this is my funk'
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*f_args, **f_kwargs):
            attempt = functools.partial(func, *f_args, **f_kwargs)
            return retry_call(attempt, *r_args, **r_kwargs)
        return wrapper
    return decorate
def print_yielded(func):
    """
    Convert a generator function into one that prints each yielded value.

    >>> @print_yielded
    ... def x():
    ...     yield 3; yield None
    >>> x()
    3
    None
    """
    # Pipeline: run func, print every yielded element, then drain the
    # resulting (lazy) map so the prints actually happen.
    printer = compose(
        more_itertools.recipes.consume,
        functools.partial(map, print),
        func,
    )
    return functools.wraps(func)(printer)
def pass_none(func):
    """
    Wrap *func* so it is skipped when its first argument is None.

    The wrapper returns None without calling *func* in that case.

    >>> print_text = pass_none(print)
    >>> print_text('text')
    text
    >>> print_text(None)
    """
    @functools.wraps(func)
    def wrapper(param, *args, **kwargs):
        if param is None:
            return None
        return func(param, *args, **kwargs)
    return wrapper
jaraco/jaraco.functools | jaraco/functools.py | assign_params | def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
try:
sig = inspect.signature(func)
params = sig.parameters.keys()
except AttributeError:
spec = inspect.getargspec(func)
params = spec.args
call_ns = {
k: namespace[k]
for k in params
if k in namespace
}
return functools.partial(func, **call_ns) | python | def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
try:
sig = inspect.signature(func)
params = sig.parameters.keys()
except AttributeError:
spec = inspect.getargspec(func)
params = spec.args
call_ns = {
k: namespace[k]
for k in params
if k in namespace
}
return functools.partial(func, **call_ns) | [
"def",
"assign_params",
"(",
"func",
",",
"namespace",
")",
":",
"try",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"func",
")",
"params",
"=",
"sig",
".",
"parameters",
".",
"keys",
"(",
")",
"except",
"AttributeError",
":",
"spec",
"=",
"inspe... | Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal | [
"Assign",
"parameters",
"from",
"namespace",
"where",
"func",
"solicits",
"."
] | cc972095e5aa2ae80d1d69d7ca84ee94178e869a | https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L386-L423 | train | 46,630 |
jaraco/jaraco.functools | jaraco/functools.py | save_method_args | def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instance to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper | python | def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instance to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper | [
"def",
"save_method_args",
"(",
"method",
")",
":",
"args_and_kwargs",
"=",
"collections",
".",
"namedtuple",
"(",
"'args_and_kwargs'",
",",
"'args kwargs'",
")",
"@",
"functools",
".",
"wraps",
"(",
"method",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"a... | Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
The arguments are stored on the instance, allowing for
different instance to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
() | [
"Wrap",
"a",
"method",
"such",
"that",
"when",
"it",
"is",
"called",
"the",
"args",
"and",
"kwargs",
"are",
"saved",
"on",
"the",
"method",
"."
] | cc972095e5aa2ae80d1d69d7ca84ee94178e869a | https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L426-L468 | train | 46,631 |
useblocks/sphinxcontrib-needs | sphinxcontrib/needs/utils.py | row_col_maker | def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
"""
Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry)
"""
row_col = nodes.entry()
para_col = nodes.paragraph()
if need_key in need_info and need_info[need_key] is not None:
if not isinstance(need_info[need_key], (list, set)):
data = [need_info[need_key]]
else:
data = need_info[need_key]
for index, datum in enumerate(data):
link_id = datum
link_part = None
if need_key in ['links', 'back_links']:
if '.' in datum:
link_id = datum.split('.')[0]
link_part = datum.split('.')[1]
datum_text = prefix + datum
text_col = nodes.Text(datum_text, datum_text)
if make_ref or ref_lookup:
try:
ref_col = nodes.reference("", "")
if not ref_lookup:
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
ref_col['refuri'] += "#" + datum
else:
temp_need = all_needs[link_id]
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
ref_col['refuri'] += "#" + temp_need["id"]
if link_part is not None:
ref_col['refuri'] += '.' + link_part
except KeyError:
para_col += text_col
else:
ref_col.append(text_col)
para_col += ref_col
else:
para_col += text_col
if index + 1 < len(data):
para_col += nodes.emphasis("; ", "; ")
row_col += para_col
return row_col | python | def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
"""
Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry)
"""
row_col = nodes.entry()
para_col = nodes.paragraph()
if need_key in need_info and need_info[need_key] is not None:
if not isinstance(need_info[need_key], (list, set)):
data = [need_info[need_key]]
else:
data = need_info[need_key]
for index, datum in enumerate(data):
link_id = datum
link_part = None
if need_key in ['links', 'back_links']:
if '.' in datum:
link_id = datum.split('.')[0]
link_part = datum.split('.')[1]
datum_text = prefix + datum
text_col = nodes.Text(datum_text, datum_text)
if make_ref or ref_lookup:
try:
ref_col = nodes.reference("", "")
if not ref_lookup:
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
ref_col['refuri'] += "#" + datum
else:
temp_need = all_needs[link_id]
ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
ref_col['refuri'] += "#" + temp_need["id"]
if link_part is not None:
ref_col['refuri'] += '.' + link_part
except KeyError:
para_col += text_col
else:
ref_col.append(text_col)
para_col += ref_col
else:
para_col += text_col
if index + 1 < len(data):
para_col += nodes.emphasis("; ", "; ")
row_col += para_col
return row_col | [
"def",
"row_col_maker",
"(",
"app",
",",
"fromdocname",
",",
"all_needs",
",",
"need_info",
",",
"need_key",
",",
"make_ref",
"=",
"False",
",",
"ref_lookup",
"=",
"False",
",",
"prefix",
"=",
"''",
")",
":",
"row_col",
"=",
"nodes",
".",
"entry",
"(",
... | Creates and returns a column.
:param app: current sphinx app
:param fromdocname: current document
:param all_needs: Dictionary of all need objects
:param need_info: need_info object, which stores all related need data
:param need_key: The key to access the needed data from need_info
:param make_ref: If true, creates a reference for the given data in need_key
:param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
:param prefix: string, which is used as prefix for the text output
:return: column object (nodes.entry) | [
"Creates",
"and",
"returns",
"a",
"column",
"."
] | f49af4859a74e9fe76de5b9133c01335ac6ae191 | https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/utils.py#L18-L78 | train | 46,632 |
mfussenegger/cr8 | cr8/insert_blob.py | insert_blob | def insert_blob(filename, hosts=None, table=None):
"""Upload a file into a blob table """
conn = connect(hosts)
container = conn.get_blob_container(table)
with open(filename, 'rb') as f:
digest = container.put(f)
return '{server}/_blobs/{table}/{digest}'.format(
server=conn.client.active_servers[0],
table=table,
digest=digest
) | python | def insert_blob(filename, hosts=None, table=None):
"""Upload a file into a blob table """
conn = connect(hosts)
container = conn.get_blob_container(table)
with open(filename, 'rb') as f:
digest = container.put(f)
return '{server}/_blobs/{table}/{digest}'.format(
server=conn.client.active_servers[0],
table=table,
digest=digest
) | [
"def",
"insert_blob",
"(",
"filename",
",",
"hosts",
"=",
"None",
",",
"table",
"=",
"None",
")",
":",
"conn",
"=",
"connect",
"(",
"hosts",
")",
"container",
"=",
"conn",
".",
"get_blob_container",
"(",
"table",
")",
"with",
"open",
"(",
"filename",
"... | Upload a file into a blob table | [
"Upload",
"a",
"file",
"into",
"a",
"blob",
"table"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/insert_blob.py#L15-L25 | train | 46,633 |
nickmckay/LiPD-utilities | Python/lipd/fetch_doi.py | update_dois | def update_dois(csv_source, write_file=True):
"""
Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs
listed in a single column. The output is LiPD-formatted publication data for each entry.
:param str csv_source: Local path to CSV file
:param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False)
:return none:
"""
_dois_arr = []
_dois_raw = []
# open the CSV file
with open(csv_source, "r") as f:
reader = csv.reader(f)
for row in reader:
# sort the DOIs as an array of DOI strings
_dois_arr.append(row[0])
# run the DOI resolver once for each DOI string.
for _doi in _dois_arr:
_dois_raw.append(_update_doi(_doi))
if write_file:
# Write the file
new_filename = os.path.splitext(csv_source)[0]
write_json_to_file(_dois_raw, new_filename)
else:
print(json.dumps(_dois_raw, indent=2))
return | python | def update_dois(csv_source, write_file=True):
"""
Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs
listed in a single column. The output is LiPD-formatted publication data for each entry.
:param str csv_source: Local path to CSV file
:param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False)
:return none:
"""
_dois_arr = []
_dois_raw = []
# open the CSV file
with open(csv_source, "r") as f:
reader = csv.reader(f)
for row in reader:
# sort the DOIs as an array of DOI strings
_dois_arr.append(row[0])
# run the DOI resolver once for each DOI string.
for _doi in _dois_arr:
_dois_raw.append(_update_doi(_doi))
if write_file:
# Write the file
new_filename = os.path.splitext(csv_source)[0]
write_json_to_file(_dois_raw, new_filename)
else:
print(json.dumps(_dois_raw, indent=2))
return | [
"def",
"update_dois",
"(",
"csv_source",
",",
"write_file",
"=",
"True",
")",
":",
"_dois_arr",
"=",
"[",
"]",
"_dois_raw",
"=",
"[",
"]",
"# open the CSV file",
"with",
"open",
"(",
"csv_source",
",",
"\"r\"",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
... | Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs
listed in a single column. The output is LiPD-formatted publication data for each entry.
:param str csv_source: Local path to CSV file
:param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False)
:return none: | [
"Get",
"DOI",
"publication",
"info",
"for",
"a",
"batch",
"of",
"DOIs",
".",
"This",
"is",
"LiPD",
"-",
"independent",
"and",
"only",
"requires",
"a",
"CSV",
"file",
"with",
"all",
"DOIs",
"listed",
"in",
"a",
"single",
"column",
".",
"The",
"output",
... | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/fetch_doi.py#L10-L42 | train | 46,634 |
mfussenegger/cr8 | cr8/timeit.py | timeit | def timeit(hosts=None,
stmt=None,
warmup=30,
repeat=None,
duration=None,
concurrency=1,
output_fmt=None,
fail_if=None,
sample_mode='reservoir'):
"""Run the given statement a number of times and return the runtime stats
Args:
fail-if: An expression that causes cr8 to exit with a failure if it
evaluates to true.
The expression can contain formatting expressions for:
- runtime_stats
- statement
- meta
- concurrency
- bulk_size
For example:
--fail-if "{runtime_stats.mean} > 1.34"
"""
num_lines = 0
log = Logger(output_fmt)
with Runner(hosts, concurrency, sample_mode) as runner:
version_info = aio.run(runner.client.get_server_version)
for line in as_statements(lines_from_stdin(stmt)):
runner.warmup(line, warmup)
timed_stats = runner.run(line, iterations=repeat, duration=duration)
r = Result(
version_info=version_info,
statement=line,
timed_stats=timed_stats,
concurrency=concurrency
)
log.result(r)
if fail_if:
eval_fail_if(fail_if, r)
num_lines += 1
if num_lines == 0:
raise SystemExit(
'No SQL statements provided. Use --stmt or provide statements via stdin') | python | def timeit(hosts=None,
stmt=None,
warmup=30,
repeat=None,
duration=None,
concurrency=1,
output_fmt=None,
fail_if=None,
sample_mode='reservoir'):
"""Run the given statement a number of times and return the runtime stats
Args:
fail-if: An expression that causes cr8 to exit with a failure if it
evaluates to true.
The expression can contain formatting expressions for:
- runtime_stats
- statement
- meta
- concurrency
- bulk_size
For example:
--fail-if "{runtime_stats.mean} > 1.34"
"""
num_lines = 0
log = Logger(output_fmt)
with Runner(hosts, concurrency, sample_mode) as runner:
version_info = aio.run(runner.client.get_server_version)
for line in as_statements(lines_from_stdin(stmt)):
runner.warmup(line, warmup)
timed_stats = runner.run(line, iterations=repeat, duration=duration)
r = Result(
version_info=version_info,
statement=line,
timed_stats=timed_stats,
concurrency=concurrency
)
log.result(r)
if fail_if:
eval_fail_if(fail_if, r)
num_lines += 1
if num_lines == 0:
raise SystemExit(
'No SQL statements provided. Use --stmt or provide statements via stdin') | [
"def",
"timeit",
"(",
"hosts",
"=",
"None",
",",
"stmt",
"=",
"None",
",",
"warmup",
"=",
"30",
",",
"repeat",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"concurrency",
"=",
"1",
",",
"output_fmt",
"=",
"None",
",",
"fail_if",
"=",
"None",
","... | Run the given statement a number of times and return the runtime stats
Args:
fail-if: An expression that causes cr8 to exit with a failure if it
evaluates to true.
The expression can contain formatting expressions for:
- runtime_stats
- statement
- meta
- concurrency
- bulk_size
For example:
--fail-if "{runtime_stats.mean} > 1.34" | [
"Run",
"the",
"given",
"statement",
"a",
"number",
"of",
"times",
"and",
"return",
"the",
"runtime",
"stats"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/timeit.py#L28-L70 | train | 46,635 |
mfussenegger/cr8 | cr8/run_crate.py | wait_until | def wait_until(predicate, timeout=30):
"""Wait until predicate returns a truthy value or the timeout is reached.
>>> wait_until(lambda: True, timeout=10)
"""
not_expired = Timeout(timeout)
while not_expired():
r = predicate()
if r:
break | python | def wait_until(predicate, timeout=30):
"""Wait until predicate returns a truthy value or the timeout is reached.
>>> wait_until(lambda: True, timeout=10)
"""
not_expired = Timeout(timeout)
while not_expired():
r = predicate()
if r:
break | [
"def",
"wait_until",
"(",
"predicate",
",",
"timeout",
"=",
"30",
")",
":",
"not_expired",
"=",
"Timeout",
"(",
"timeout",
")",
"while",
"not_expired",
"(",
")",
":",
"r",
"=",
"predicate",
"(",
")",
"if",
"r",
":",
"break"
] | Wait until predicate returns a truthy value or the timeout is reached.
>>> wait_until(lambda: True, timeout=10) | [
"Wait",
"until",
"predicate",
"returns",
"a",
"truthy",
"value",
"or",
"the",
"timeout",
"is",
"reached",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L116-L125 | train | 46,636 |
mfussenegger/cr8 | cr8/run_crate.py | _find_matching_version | def _find_matching_version(versions, version_pattern):
"""
Return the first matching version
>>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x')
'1.0.12'
>>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x')
"""
pattern = fnmatch.translate(version_pattern.replace('x', '*'))
return next((v for v in versions if re.match(pattern, v)), None) | python | def _find_matching_version(versions, version_pattern):
"""
Return the first matching version
>>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x')
'1.0.12'
>>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x')
"""
pattern = fnmatch.translate(version_pattern.replace('x', '*'))
return next((v for v in versions if re.match(pattern, v)), None) | [
"def",
"_find_matching_version",
"(",
"versions",
",",
"version_pattern",
")",
":",
"pattern",
"=",
"fnmatch",
".",
"translate",
"(",
"version_pattern",
".",
"replace",
"(",
"'x'",
",",
"'*'",
")",
")",
"return",
"next",
"(",
"(",
"v",
"for",
"v",
"in",
... | Return the first matching version
>>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x')
'1.0.12'
>>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x') | [
"Return",
"the",
"first",
"matching",
"version"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L474-L484 | train | 46,637 |
mfussenegger/cr8 | cr8/run_crate.py | _build_tarball | def _build_tarball(src_repo) -> str:
""" Build a tarball from src and return the path to it """
run = partial(subprocess.run, cwd=src_repo, check=True)
run(['git', 'clean', '-xdff'])
src_repo = Path(src_repo)
if os.path.exists(src_repo / 'es' / 'upstream'):
run(['git', 'submodule', 'update', '--init', '--', 'es/upstream'])
run(['./gradlew', '--no-daemon', 'clean', 'distTar'])
distributions = Path(src_repo) / 'app' / 'build' / 'distributions'
return next(distributions.glob('crate-*.tar.gz')) | python | def _build_tarball(src_repo) -> str:
""" Build a tarball from src and return the path to it """
run = partial(subprocess.run, cwd=src_repo, check=True)
run(['git', 'clean', '-xdff'])
src_repo = Path(src_repo)
if os.path.exists(src_repo / 'es' / 'upstream'):
run(['git', 'submodule', 'update', '--init', '--', 'es/upstream'])
run(['./gradlew', '--no-daemon', 'clean', 'distTar'])
distributions = Path(src_repo) / 'app' / 'build' / 'distributions'
return next(distributions.glob('crate-*.tar.gz')) | [
"def",
"_build_tarball",
"(",
"src_repo",
")",
"->",
"str",
":",
"run",
"=",
"partial",
"(",
"subprocess",
".",
"run",
",",
"cwd",
"=",
"src_repo",
",",
"check",
"=",
"True",
")",
"run",
"(",
"[",
"'git'",
",",
"'clean'",
",",
"'-xdff'",
"]",
")",
... | Build a tarball from src and return the path to it | [
"Build",
"a",
"tarball",
"from",
"src",
"and",
"return",
"the",
"path",
"to",
"it"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L517-L526 | train | 46,638 |
mfussenegger/cr8 | cr8/run_crate.py | _crates_cache | def _crates_cache() -> str:
""" Return the path to the crates cache folder """
return os.environ.get(
'XDG_CACHE_HOME',
os.path.join(os.path.expanduser('~'), '.cache', 'cr8', 'crates')) | python | def _crates_cache() -> str:
""" Return the path to the crates cache folder """
return os.environ.get(
'XDG_CACHE_HOME',
os.path.join(os.path.expanduser('~'), '.cache', 'cr8', 'crates')) | [
"def",
"_crates_cache",
"(",
")",
"->",
"str",
":",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_CACHE_HOME'",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"'.cache'",
",",
"'cr8'",
... | Return the path to the crates cache folder | [
"Return",
"the",
"path",
"to",
"the",
"crates",
"cache",
"folder"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L573-L577 | train | 46,639 |
mfussenegger/cr8 | cr8/run_crate.py | get_crate | def get_crate(version, crate_root=None):
"""Retrieve a Crate tarball, extract it and return the path.
Args:
version: The Crate version to get.
Can be specified in different ways:
- A concrete version like '0.55.0'
- A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'.
This will use the latest version that matches.
- Release branch, like `3.1`
- An alias: 'latest-stable' or 'latest-testing'
- A URI pointing to a crate tarball
crate_root: Where to extract the tarball to.
If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates``
will be used.
"""
if not crate_root:
crate_root = _crates_cache()
_remove_old_crates(crate_root)
if _is_project_repo(version):
return _extract_tarball(_build_tarball(version))
m = BRANCH_VERSION_RE.match(version)
if m:
return _build_from_release_branch(m.group(0), crate_root)
uri = _lookup_uri(version)
crate_dir = _download_and_extract(uri, crate_root)
return crate_dir | python | def get_crate(version, crate_root=None):
"""Retrieve a Crate tarball, extract it and return the path.
Args:
version: The Crate version to get.
Can be specified in different ways:
- A concrete version like '0.55.0'
- A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'.
This will use the latest version that matches.
- Release branch, like `3.1`
- An alias: 'latest-stable' or 'latest-testing'
- A URI pointing to a crate tarball
crate_root: Where to extract the tarball to.
If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates``
will be used.
"""
if not crate_root:
crate_root = _crates_cache()
_remove_old_crates(crate_root)
if _is_project_repo(version):
return _extract_tarball(_build_tarball(version))
m = BRANCH_VERSION_RE.match(version)
if m:
return _build_from_release_branch(m.group(0), crate_root)
uri = _lookup_uri(version)
crate_dir = _download_and_extract(uri, crate_root)
return crate_dir | [
"def",
"get_crate",
"(",
"version",
",",
"crate_root",
"=",
"None",
")",
":",
"if",
"not",
"crate_root",
":",
"crate_root",
"=",
"_crates_cache",
"(",
")",
"_remove_old_crates",
"(",
"crate_root",
")",
"if",
"_is_project_repo",
"(",
"version",
")",
":",
"ret... | Retrieve a Crate tarball, extract it and return the path.
Args:
version: The Crate version to get.
Can be specified in different ways:
- A concrete version like '0.55.0'
- A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'.
This will use the latest version that matches.
- Release branch, like `3.1`
- An alias: 'latest-stable' or 'latest-testing'
- A URI pointing to a crate tarball
crate_root: Where to extract the tarball to.
If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates``
will be used. | [
"Retrieve",
"a",
"Crate",
"tarball",
"extract",
"it",
"and",
"return",
"the",
"path",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L580-L607 | train | 46,640 |
mfussenegger/cr8 | cr8/run_crate.py | _parse_options | def _parse_options(options: List[str]) -> Dict[str, str]:
""" Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"}
"""
try:
return dict(i.split('=', maxsplit=1) for i in options)
except ValueError:
raise ArgumentError(
f'Option must be in format <key>=<value>, got: {options}') | python | def _parse_options(options: List[str]) -> Dict[str, str]:
""" Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"}
"""
try:
return dict(i.split('=', maxsplit=1) for i in options)
except ValueError:
raise ArgumentError(
f'Option must be in format <key>=<value>, got: {options}') | [
"def",
"_parse_options",
"(",
"options",
":",
"List",
"[",
"str",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"try",
":",
"return",
"dict",
"(",
"i",
".",
"split",
"(",
"'='",
",",
"maxsplit",
"=",
"1",
")",
"for",
"i",
"in",
"opt... | Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"} | [
"Parse",
"repeatable",
"CLI",
"options"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L610-L621 | train | 46,641 |
mfussenegger/cr8 | cr8/run_crate.py | run_crate | def run_crate(
version,
env=None,
setting=None,
crate_root=None,
keep_data=False,
disable_java_magic=False,
):
"""Launch a crate instance.
Supported version specifications:
- Concrete version like "0.55.0" or with wildcard: "1.1.x"
- An alias (one of [latest-nightly, latest-stable, latest-testing])
- A URI pointing to a CrateDB tarball (in .tar.gz format)
- A URI pointing to a checked out CrateDB repo directory
run-crate supports command chaining. To launch a CrateDB node and another
sub-command use:
cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}'
To launch any (blocking) subprocess, prefix the name with '@':
cr8 run-crate <version> -- @http '{node.http_url}'
If run-crate is invoked using command chaining it will exit once all
chained commands finished.
The postgres host and port are available as {node.addresses.psql.host} and
{node.addresses.psql.port}
"""
with create_node(
version,
env,
setting,
crate_root,
keep_data,
java_magic=not disable_java_magic,
) as n:
try:
n.start()
n.process.wait()
except KeyboardInterrupt:
print('Stopping Crate...') | python | def run_crate(
version,
env=None,
setting=None,
crate_root=None,
keep_data=False,
disable_java_magic=False,
):
"""Launch a crate instance.
Supported version specifications:
- Concrete version like "0.55.0" or with wildcard: "1.1.x"
- An alias (one of [latest-nightly, latest-stable, latest-testing])
- A URI pointing to a CrateDB tarball (in .tar.gz format)
- A URI pointing to a checked out CrateDB repo directory
run-crate supports command chaining. To launch a CrateDB node and another
sub-command use:
cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}'
To launch any (blocking) subprocess, prefix the name with '@':
cr8 run-crate <version> -- @http '{node.http_url}'
If run-crate is invoked using command chaining it will exit once all
chained commands finished.
The postgres host and port are available as {node.addresses.psql.host} and
{node.addresses.psql.port}
"""
with create_node(
version,
env,
setting,
crate_root,
keep_data,
java_magic=not disable_java_magic,
) as n:
try:
n.start()
n.process.wait()
except KeyboardInterrupt:
print('Stopping Crate...') | [
"def",
"run_crate",
"(",
"version",
",",
"env",
"=",
"None",
",",
"setting",
"=",
"None",
",",
"crate_root",
"=",
"None",
",",
"keep_data",
"=",
"False",
",",
"disable_java_magic",
"=",
"False",
",",
")",
":",
"with",
"create_node",
"(",
"version",
",",
... | Launch a crate instance.
Supported version specifications:
- Concrete version like "0.55.0" or with wildcard: "1.1.x"
- An alias (one of [latest-nightly, latest-stable, latest-testing])
- A URI pointing to a CrateDB tarball (in .tar.gz format)
- A URI pointing to a checked out CrateDB repo directory
run-crate supports command chaining. To launch a CrateDB node and another
sub-command use:
cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}'
To launch any (blocking) subprocess, prefix the name with '@':
cr8 run-crate <version> -- @http '{node.http_url}'
If run-crate is invoked using command chaining it will exit once all
chained commands finished.
The postgres host and port are available as {node.addresses.psql.host} and
{node.addresses.psql.port} | [
"Launch",
"a",
"crate",
"instance",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L660-L703 | train | 46,642 |
mfussenegger/cr8 | cr8/run_crate.py | AddrConsumer._parse | def _parse(line):
""" Parse protocol and bound address from log message
>>> AddrConsumer._parse('NONE')
(None, None)
>>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}')
('http', '127.0.0.1:4200')
>>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}')
('transport', '127.0.0.1:4300')
>>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}')
('psql', '127.0.0.1:5432')
"""
m = AddrConsumer.ADDRESS_RE.match(line)
if not m:
return None, None
protocol = m.group('protocol')
protocol = AddrConsumer.PROTOCOL_MAP.get(protocol, protocol)
return protocol, m.group('addr') | python | def _parse(line):
""" Parse protocol and bound address from log message
>>> AddrConsumer._parse('NONE')
(None, None)
>>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}')
('http', '127.0.0.1:4200')
>>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}')
('transport', '127.0.0.1:4300')
>>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}')
('psql', '127.0.0.1:5432')
"""
m = AddrConsumer.ADDRESS_RE.match(line)
if not m:
return None, None
protocol = m.group('protocol')
protocol = AddrConsumer.PROTOCOL_MAP.get(protocol, protocol)
return protocol, m.group('addr') | [
"def",
"_parse",
"(",
"line",
")",
":",
"m",
"=",
"AddrConsumer",
".",
"ADDRESS_RE",
".",
"match",
"(",
"line",
")",
"if",
"not",
"m",
":",
"return",
"None",
",",
"None",
"protocol",
"=",
"m",
".",
"group",
"(",
"'protocol'",
")",
"protocol",
"=",
... | Parse protocol and bound address from log message
>>> AddrConsumer._parse('NONE')
(None, None)
>>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}')
('http', '127.0.0.1:4200')
>>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}')
('transport', '127.0.0.1:4300')
>>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}')
('psql', '127.0.0.1:5432') | [
"Parse",
"protocol",
"and",
"bound",
"address",
"from",
"log",
"message"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/run_crate.py#L382-L402 | train | 46,643 |
dchaplinsky/unshred | unshred/threshold.py | _calc_block_mean_variance | def _calc_block_mean_variance(image, mask, blocksize):
"""Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
"""
I = image.copy()
I_f = I.astype(np.float32) / 255. # Used for mean and std.
result = np.zeros(
(image.shape[0] / blocksize, image.shape[1] / blocksize),
dtype=np.float32)
for i in xrange(0, image.shape[0] - blocksize, blocksize):
for j in xrange(0, image.shape[1] - blocksize, blocksize):
patch = I_f[i:i+blocksize+1, j:j+blocksize+1]
mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1]
tmp1 = np.zeros((blocksize, blocksize))
tmp2 = np.zeros((blocksize, blocksize))
mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)
value = 0
if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
value = mean[0][0]
result[i/blocksize, j/blocksize] = value
small_image = cv2.resize(I, (image.shape[1] / blocksize,
image.shape[0] / blocksize))
res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)
inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5,
cv2.INPAINT_TELEA)
res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))
return res | python | def _calc_block_mean_variance(image, mask, blocksize):
"""Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
"""
I = image.copy()
I_f = I.astype(np.float32) / 255. # Used for mean and std.
result = np.zeros(
(image.shape[0] / blocksize, image.shape[1] / blocksize),
dtype=np.float32)
for i in xrange(0, image.shape[0] - blocksize, blocksize):
for j in xrange(0, image.shape[1] - blocksize, blocksize):
patch = I_f[i:i+blocksize+1, j:j+blocksize+1]
mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1]
tmp1 = np.zeros((blocksize, blocksize))
tmp2 = np.zeros((blocksize, blocksize))
mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)
value = 0
if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
value = mean[0][0]
result[i/blocksize, j/blocksize] = value
small_image = cv2.resize(I, (image.shape[1] / blocksize,
image.shape[0] / blocksize))
res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)
inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5,
cv2.INPAINT_TELEA)
res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))
return res | [
"def",
"_calc_block_mean_variance",
"(",
"image",
",",
"mask",
",",
"blocksize",
")",
":",
"I",
"=",
"image",
".",
"copy",
"(",
")",
"I_f",
"=",
"I",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"255.",
"# Used for mean and std.",
"result",
"=",
... | Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background. | [
"Adaptively",
"determines",
"image",
"background",
"."
] | ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8 | https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/threshold.py#L30-L74 | train | 46,644 |
dchaplinsky/unshred | unshred/threshold.py | threshold | def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None):
"""Applies adaptive thresholding to the given image.
Args:
image: BGRA image.
block_size: optional int block_size to use for adaptive thresholding.
mask: optional mask.
Returns:
Thresholded image.
"""
if mask is None:
mask = np.zeros(image.shape[:2], dtype=np.uint8)
mask[:] = 255
if len(image.shape) > 2 and image.shape[2] == 4:
image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
res = _calc_block_mean_variance(image, mask, block_size)
res = image.astype(np.float32) - res.astype(np.float32) + 255
_, res = cv2.threshold(res, 215, 255, cv2.THRESH_BINARY)
return res | python | def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None):
"""Applies adaptive thresholding to the given image.
Args:
image: BGRA image.
block_size: optional int block_size to use for adaptive thresholding.
mask: optional mask.
Returns:
Thresholded image.
"""
if mask is None:
mask = np.zeros(image.shape[:2], dtype=np.uint8)
mask[:] = 255
if len(image.shape) > 2 and image.shape[2] == 4:
image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
res = _calc_block_mean_variance(image, mask, block_size)
res = image.astype(np.float32) - res.astype(np.float32) + 255
_, res = cv2.threshold(res, 215, 255, cv2.THRESH_BINARY)
return res | [
"def",
"threshold",
"(",
"image",
",",
"block_size",
"=",
"DEFAULT_BLOCKSIZE",
",",
"mask",
"=",
"None",
")",
":",
"if",
"mask",
"is",
"None",
":",
"mask",
"=",
"np",
".",
"zeros",
"(",
"image",
".",
"shape",
"[",
":",
"2",
"]",
",",
"dtype",
"=",
... | Applies adaptive thresholding to the given image.
Args:
image: BGRA image.
block_size: optional int block_size to use for adaptive thresholding.
mask: optional mask.
Returns:
Thresholded image. | [
"Applies",
"adaptive",
"thresholding",
"to",
"the",
"given",
"image",
"."
] | ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8 | https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/threshold.py#L77-L96 | train | 46,645 |
habnabit/vcversioner | vcversioner.py | setup | def setup(dist, attr, value):
"""A hook for simplifying ``vcversioner`` use from distutils.
This hook, when installed properly, allows vcversioner to automatically run
when specifying a ``vcversioner`` argument to ``setup``. For example::
from setuptools import setup
setup(
setup_requires=['vcversioner'],
vcversioner={},
)
The parameter to the ``vcversioner`` argument is a dict of keyword
arguments which :func:`find_version` will be called with.
"""
dist.metadata.version = find_version(**value).version | python | def setup(dist, attr, value):
"""A hook for simplifying ``vcversioner`` use from distutils.
This hook, when installed properly, allows vcversioner to automatically run
when specifying a ``vcversioner`` argument to ``setup``. For example::
from setuptools import setup
setup(
setup_requires=['vcversioner'],
vcversioner={},
)
The parameter to the ``vcversioner`` argument is a dict of keyword
arguments which :func:`find_version` will be called with.
"""
dist.metadata.version = find_version(**value).version | [
"def",
"setup",
"(",
"dist",
",",
"attr",
",",
"value",
")",
":",
"dist",
".",
"metadata",
".",
"version",
"=",
"find_version",
"(",
"*",
"*",
"value",
")",
".",
"version"
] | A hook for simplifying ``vcversioner`` use from distutils.
This hook, when installed properly, allows vcversioner to automatically run
when specifying a ``vcversioner`` argument to ``setup``. For example::
from setuptools import setup
setup(
setup_requires=['vcversioner'],
vcversioner={},
)
The parameter to the ``vcversioner`` argument is a dict of keyword
arguments which :func:`find_version` will be called with. | [
"A",
"hook",
"for",
"simplifying",
"vcversioner",
"use",
"from",
"distutils",
"."
] | 72f8f0a7e0121cf5989a2cb00d5e1395e02a0445 | https://github.com/habnabit/vcversioner/blob/72f8f0a7e0121cf5989a2cb00d5e1395e02a0445/vcversioner.py#L247-L265 | train | 46,646 |
mfussenegger/cr8 | cr8/insert_json.py | to_insert | def to_insert(table, d):
"""Generate an insert statement using the given table and dictionary.
Args:
table (str): table name
d (dict): dictionary with column names as keys and values as values.
Returns:
tuple of statement and arguments
>>> to_insert('doc.foobar', {'name': 'Marvin'})
('insert into doc.foobar ("name") values (?)', ['Marvin'])
"""
columns = []
args = []
for key, val in d.items():
columns.append('"{}"'.format(key))
args.append(val)
stmt = 'insert into {table} ({columns}) values ({params})'.format(
table=table,
columns=', '.join(columns),
params=', '.join(['?'] * len(columns)))
return (stmt, args) | python | def to_insert(table, d):
"""Generate an insert statement using the given table and dictionary.
Args:
table (str): table name
d (dict): dictionary with column names as keys and values as values.
Returns:
tuple of statement and arguments
>>> to_insert('doc.foobar', {'name': 'Marvin'})
('insert into doc.foobar ("name") values (?)', ['Marvin'])
"""
columns = []
args = []
for key, val in d.items():
columns.append('"{}"'.format(key))
args.append(val)
stmt = 'insert into {table} ({columns}) values ({params})'.format(
table=table,
columns=', '.join(columns),
params=', '.join(['?'] * len(columns)))
return (stmt, args) | [
"def",
"to_insert",
"(",
"table",
",",
"d",
")",
":",
"columns",
"=",
"[",
"]",
"args",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"d",
".",
"items",
"(",
")",
":",
"columns",
".",
"append",
"(",
"'\"{}\"'",
".",
"format",
"(",
"key",
")",
... | Generate an insert statement using the given table and dictionary.
Args:
table (str): table name
d (dict): dictionary with column names as keys and values as values.
Returns:
tuple of statement and arguments
>>> to_insert('doc.foobar', {'name': 'Marvin'})
('insert into doc.foobar ("name") values (?)', ['Marvin']) | [
"Generate",
"an",
"insert",
"statement",
"using",
"the",
"given",
"table",
"and",
"dictionary",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/insert_json.py#L15-L37 | train | 46,647 |
mfussenegger/cr8 | cr8/insert_json.py | insert_json | def insert_json(table=None,
bulk_size=1000,
concurrency=25,
hosts=None,
output_fmt=None):
"""Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes
"""
if not hosts:
return print_only(table)
queries = (to_insert(table, d) for d in dicts_from_stdin())
bulk_queries = as_bulk_queries(queries, bulk_size)
print('Executing inserts: bulk_size={} concurrency={}'.format(
bulk_size, concurrency), file=sys.stderr)
stats = Stats()
with clients.client(hosts, concurrency=concurrency) as client:
f = partial(aio.measure, stats, client.execute_many)
try:
aio.run_many(f, bulk_queries, concurrency)
except clients.SqlException as e:
raise SystemExit(str(e))
try:
print(format_stats(stats.get(), output_fmt))
except KeyError:
if not stats.sampler.values:
raise SystemExit('No data received via stdin')
raise | python | def insert_json(table=None,
bulk_size=1000,
concurrency=25,
hosts=None,
output_fmt=None):
"""Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes
"""
if not hosts:
return print_only(table)
queries = (to_insert(table, d) for d in dicts_from_stdin())
bulk_queries = as_bulk_queries(queries, bulk_size)
print('Executing inserts: bulk_size={} concurrency={}'.format(
bulk_size, concurrency), file=sys.stderr)
stats = Stats()
with clients.client(hosts, concurrency=concurrency) as client:
f = partial(aio.measure, stats, client.execute_many)
try:
aio.run_many(f, bulk_queries, concurrency)
except clients.SqlException as e:
raise SystemExit(str(e))
try:
print(format_stats(stats.get(), output_fmt))
except KeyError:
if not stats.sampler.values:
raise SystemExit('No data received via stdin')
raise | [
"def",
"insert_json",
"(",
"table",
"=",
"None",
",",
"bulk_size",
"=",
"1000",
",",
"concurrency",
"=",
"25",
",",
"hosts",
"=",
"None",
",",
"output_fmt",
"=",
"None",
")",
":",
"if",
"not",
"hosts",
":",
"return",
"print_only",
"(",
"table",
")",
... | Insert JSON lines fed into stdin into a Crate cluster.
If no hosts are specified the statements will be printed.
Args:
table: Target table name.
bulk_size: Bulk size of the insert statements.
concurrency: Number of operations to run concurrently.
hosts: hostname:port pairs of the Crate nodes | [
"Insert",
"JSON",
"lines",
"fed",
"into",
"stdin",
"into",
"a",
"Crate",
"cluster",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/insert_json.py#L54-L89 | train | 46,648 |
dchaplinsky/unshred | unshred/features/lines.py | _get_dominant_angle | def _get_dominant_angle(lines, domination_type=MEDIAN):
"""Picks dominant angle of a set of lines.
Args:
lines: iterable of (x1, y1, x2, y2) tuples that define lines.
domination_type: either MEDIAN or MEAN.
Returns:
Dominant angle value in radians.
Raises:
ValueError: on unknown domination_type.
"""
if domination_type == MEDIAN:
return _get_median_angle(lines)
elif domination_type == MEAN:
return _get_mean_angle(lines)
else:
raise ValueError('Unknown domination type provided: %s' % (
domination_type)) | python | def _get_dominant_angle(lines, domination_type=MEDIAN):
"""Picks dominant angle of a set of lines.
Args:
lines: iterable of (x1, y1, x2, y2) tuples that define lines.
domination_type: either MEDIAN or MEAN.
Returns:
Dominant angle value in radians.
Raises:
ValueError: on unknown domination_type.
"""
if domination_type == MEDIAN:
return _get_median_angle(lines)
elif domination_type == MEAN:
return _get_mean_angle(lines)
else:
raise ValueError('Unknown domination type provided: %s' % (
domination_type)) | [
"def",
"_get_dominant_angle",
"(",
"lines",
",",
"domination_type",
"=",
"MEDIAN",
")",
":",
"if",
"domination_type",
"==",
"MEDIAN",
":",
"return",
"_get_median_angle",
"(",
"lines",
")",
"elif",
"domination_type",
"==",
"MEAN",
":",
"return",
"_get_mean_angle",
... | Picks dominant angle of a set of lines.
Args:
lines: iterable of (x1, y1, x2, y2) tuples that define lines.
domination_type: either MEDIAN or MEAN.
Returns:
Dominant angle value in radians.
Raises:
ValueError: on unknown domination_type. | [
"Picks",
"dominant",
"angle",
"of",
"a",
"set",
"of",
"lines",
"."
] | ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8 | https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/features/lines.py#L16-L35 | train | 46,649 |
dchaplinsky/unshred | unshred/features/lines.py | _normalize_angle | def _normalize_angle(angle, range, step):
"""Finds an angle that matches the given one modulo step.
Increments and decrements the given value with a given step.
Args:
range: a 2-tuple of min and max target values.
step: tuning step.
Returns:
Normalized value within a given range.
"""
while angle <= range[0]:
angle += step
while angle >= range[1]:
angle -= step
return angle | python | def _normalize_angle(angle, range, step):
"""Finds an angle that matches the given one modulo step.
Increments and decrements the given value with a given step.
Args:
range: a 2-tuple of min and max target values.
step: tuning step.
Returns:
Normalized value within a given range.
"""
while angle <= range[0]:
angle += step
while angle >= range[1]:
angle -= step
return angle | [
"def",
"_normalize_angle",
"(",
"angle",
",",
"range",
",",
"step",
")",
":",
"while",
"angle",
"<=",
"range",
"[",
"0",
"]",
":",
"angle",
"+=",
"step",
"while",
"angle",
">=",
"range",
"[",
"1",
"]",
":",
"angle",
"-=",
"step",
"return",
"angle"
] | Finds an angle that matches the given one modulo step.
Increments and decrements the given value with a given step.
Args:
range: a 2-tuple of min and max target values.
step: tuning step.
Returns:
Normalized value within a given range. | [
"Finds",
"an",
"angle",
"that",
"matches",
"the",
"given",
"one",
"modulo",
"step",
"."
] | ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8 | https://github.com/dchaplinsky/unshred/blob/ca9cd6a1c6fb8c77d5424dd660ff5d2f3720c0f8/unshred/features/lines.py#L38-L54 | train | 46,650 |
sijis/sumologic-python | src/sumologic/collectors.py | Collectors.get_collectors | def get_collectors(self, limit=1000, offset=0):
"""Returns a dict of collectors.
Args:
limit (int): number of collectors to return
offset (int): the offset of where the list of collectors should begin from
"""
options = {
'limit': limit,
'offset': offset,
}
request = requests.get(self.url, params=options, auth=self.auth)
try:
results = request.json()['collectors']
except KeyError:
results = request.json()
except json.decoder.JSONDecodeError:
results = []
return results | python | def get_collectors(self, limit=1000, offset=0):
"""Returns a dict of collectors.
Args:
limit (int): number of collectors to return
offset (int): the offset of where the list of collectors should begin from
"""
options = {
'limit': limit,
'offset': offset,
}
request = requests.get(self.url, params=options, auth=self.auth)
try:
results = request.json()['collectors']
except KeyError:
results = request.json()
except json.decoder.JSONDecodeError:
results = []
return results | [
"def",
"get_collectors",
"(",
"self",
",",
"limit",
"=",
"1000",
",",
"offset",
"=",
"0",
")",
":",
"options",
"=",
"{",
"'limit'",
":",
"limit",
",",
"'offset'",
":",
"offset",
",",
"}",
"request",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"u... | Returns a dict of collectors.
Args:
limit (int): number of collectors to return
offset (int): the offset of where the list of collectors should begin from | [
"Returns",
"a",
"dict",
"of",
"collectors",
"."
] | b50200907837f0d452d14ead5e647b8e24e2e9e5 | https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L29-L49 | train | 46,651 |
sijis/sumologic-python | src/sumologic/collectors.py | Collectors.find | def find(self, name):
"""Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for
"""
collectors = self.get_collectors()
for collector in collectors:
if name.lower() == collector['name'].lower():
self.collector_id = collector['id']
return collector
return {'status': 'No results found.'} | python | def find(self, name):
"""Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for
"""
collectors = self.get_collectors()
for collector in collectors:
if name.lower() == collector['name'].lower():
self.collector_id = collector['id']
return collector
return {'status': 'No results found.'} | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"collectors",
"=",
"self",
".",
"get_collectors",
"(",
")",
"for",
"collector",
"in",
"collectors",
":",
"if",
"name",
".",
"lower",
"(",
")",
"==",
"collector",
"[",
"'name'",
"]",
".",
"lower",
"("... | Returns a dict of collector's details if found.
Args:
name (str): name of collector searching for | [
"Returns",
"a",
"dict",
"of",
"collector",
"s",
"details",
"if",
"found",
"."
] | b50200907837f0d452d14ead5e647b8e24e2e9e5 | https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L51-L64 | train | 46,652 |
sijis/sumologic-python | src/sumologic/collectors.py | Collectors.delete | def delete(self, collector_id=None):
"""Delete a collector from inventory.
Args:
collector_id (int): id of collector (optional)
"""
cid = self.collector_id
if collector_id:
cid = collector_id
# param to delete id
url = '{0}/{1}'.format(self.url, cid)
request = requests.delete(url, auth=self.auth)
try:
# unable to delete collector
response = request.json()
except ValueError:
# returns when collector is deleted
# apparently, the request does not return
# a json response
response = {
u'message': u'The request completed successfully.',
u'status': 200,
}
return response | python | def delete(self, collector_id=None):
"""Delete a collector from inventory.
Args:
collector_id (int): id of collector (optional)
"""
cid = self.collector_id
if collector_id:
cid = collector_id
# param to delete id
url = '{0}/{1}'.format(self.url, cid)
request = requests.delete(url, auth=self.auth)
try:
# unable to delete collector
response = request.json()
except ValueError:
# returns when collector is deleted
# apparently, the request does not return
# a json response
response = {
u'message': u'The request completed successfully.',
u'status': 200,
}
return response | [
"def",
"delete",
"(",
"self",
",",
"collector_id",
"=",
"None",
")",
":",
"cid",
"=",
"self",
".",
"collector_id",
"if",
"collector_id",
":",
"cid",
"=",
"collector_id",
"# param to delete id",
"url",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"url... | Delete a collector from inventory.
Args:
collector_id (int): id of collector (optional) | [
"Delete",
"a",
"collector",
"from",
"inventory",
"."
] | b50200907837f0d452d14ead5e647b8e24e2e9e5 | https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L66-L91 | train | 46,653 |
sijis/sumologic-python | src/sumologic/collectors.py | Collectors.info | def info(self, collector_id):
"""Return a dict of collector.
Args:
collector_id (int): id of collector (optional)
"""
cid = self.collector_id
if collector_id:
cid = collector_id
url = '{0}/{1}'.format(self.url, cid)
request = requests.get(url, auth=self.auth)
return request.json() | python | def info(self, collector_id):
"""Return a dict of collector.
Args:
collector_id (int): id of collector (optional)
"""
cid = self.collector_id
if collector_id:
cid = collector_id
url = '{0}/{1}'.format(self.url, cid)
request = requests.get(url, auth=self.auth)
return request.json() | [
"def",
"info",
"(",
"self",
",",
"collector_id",
")",
":",
"cid",
"=",
"self",
".",
"collector_id",
"if",
"collector_id",
":",
"cid",
"=",
"collector_id",
"url",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"url",
",",
"cid",
")",
"request",
"=",... | Return a dict of collector.
Args:
collector_id (int): id of collector (optional) | [
"Return",
"a",
"dict",
"of",
"collector",
"."
] | b50200907837f0d452d14ead5e647b8e24e2e9e5 | https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/collectors.py#L93-L105 | train | 46,654 |
mfussenegger/cr8 | cr8/insert_fake_data.py | _bulk_size_generator | def _bulk_size_generator(num_records, bulk_size, active):
""" Generate bulk_size until num_records is reached or active becomes false
>>> gen = _bulk_size_generator(155, 50, [True])
>>> list(gen)
[50, 50, 50, 5]
"""
while active and num_records > 0:
req_size = min(num_records, bulk_size)
num_records -= req_size
yield req_size | python | def _bulk_size_generator(num_records, bulk_size, active):
""" Generate bulk_size until num_records is reached or active becomes false
>>> gen = _bulk_size_generator(155, 50, [True])
>>> list(gen)
[50, 50, 50, 5]
"""
while active and num_records > 0:
req_size = min(num_records, bulk_size)
num_records -= req_size
yield req_size | [
"def",
"_bulk_size_generator",
"(",
"num_records",
",",
"bulk_size",
",",
"active",
")",
":",
"while",
"active",
"and",
"num_records",
">",
"0",
":",
"req_size",
"=",
"min",
"(",
"num_records",
",",
"bulk_size",
")",
"num_records",
"-=",
"req_size",
"yield",
... | Generate bulk_size until num_records is reached or active becomes false
>>> gen = _bulk_size_generator(155, 50, [True])
>>> list(gen)
[50, 50, 50, 5] | [
"Generate",
"bulk_size",
"until",
"num_records",
"is",
"reached",
"or",
"active",
"becomes",
"false"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/insert_fake_data.py#L171-L181 | train | 46,655 |
mfussenegger/cr8 | cr8/insert_fake_data.py | insert_fake_data | def insert_fake_data(hosts=None,
table=None,
num_records=1e5,
bulk_size=1000,
concurrency=25,
mapping_file=None):
"""Generate random data and insert it into a table.
This will read the table schema and then find suitable random data providers.
Which provider is choosen depends on the column name and data type.
Example:
A column named `name` will map to the `name` provider.
A column named `x` of type int will map to `random_int` because there
is no `x` provider.
Available providers are listed here:
https://faker.readthedocs.io/en/latest/providers.html
Additional providers:
- auto_inc:
Returns unique incrementing numbers.
Automatically used for columns named "id" of type int or long
- geo_point
Returns [<lon>, <lat>]
Automatically used for columns of type geo_point
Args:
hosts: <host>:[<port>] of the Crate node
table: The table name into which the data should be inserted.
Either fully qualified: `<schema>.<table>` or just `<table>`
num_records: Number of records to insert.
Usually a number but expressions like `1e4` work as well.
bulk_size: The bulk size of the insert statements.
concurrency: How many operations to run concurrently.
mapping_file: A JSON file that defines a mapping from column name to
fake-factory provider.
The format is as follows:
{
"column_name": ["provider_with_args", ["arg1", "arg"]],
"x": ["provider_with_args", ["arg1"]],
"y": "provider_without_args"
}
"""
with clients.client(hosts, concurrency=1) as client:
schema, table_name = parse_table(table)
columns = retrieve_columns(client, schema, table_name)
if not columns:
sys.exit('Could not find columns for table "{}"'.format(table))
print('Found schema: ')
print(json.dumps(columns, sort_keys=True, indent=4))
mapping = None
if mapping_file:
mapping = json.load(mapping_file)
bulk_size = min(num_records, bulk_size)
num_inserts = int(math.ceil(num_records / bulk_size))
gen_row = create_row_generator(columns, mapping)
stmt = to_insert('"{schema}"."{table_name}"'.format(**locals()), columns)[0]
print('Using insert statement: ')
print(stmt)
print('Will make {} requests with a bulk size of {}'.format(
num_inserts, bulk_size))
print('Generating fake data and executing inserts')
q = asyncio.Queue(maxsize=concurrency)
with clients.client(hosts, concurrency=concurrency) as client:
active = [True]
def stop():
asyncio.ensure_future(q.put(None))
active.clear()
loop.remove_signal_handler(signal.SIGINT)
if sys.platform != 'win32':
loop.add_signal_handler(signal.SIGINT, stop)
bulk_seq = _bulk_size_generator(num_records, bulk_size, active)
with ThreadPoolExecutor() as e:
tasks = asyncio.gather(
_gen_data_and_insert(q, e, client, stmt, gen_row, bulk_seq),
consume(q, total=num_inserts)
)
loop.run_until_complete(tasks) | python | def insert_fake_data(hosts=None,
table=None,
num_records=1e5,
bulk_size=1000,
concurrency=25,
mapping_file=None):
"""Generate random data and insert it into a table.
This will read the table schema and then find suitable random data providers.
Which provider is choosen depends on the column name and data type.
Example:
A column named `name` will map to the `name` provider.
A column named `x` of type int will map to `random_int` because there
is no `x` provider.
Available providers are listed here:
https://faker.readthedocs.io/en/latest/providers.html
Additional providers:
- auto_inc:
Returns unique incrementing numbers.
Automatically used for columns named "id" of type int or long
- geo_point
Returns [<lon>, <lat>]
Automatically used for columns of type geo_point
Args:
hosts: <host>:[<port>] of the Crate node
table: The table name into which the data should be inserted.
Either fully qualified: `<schema>.<table>` or just `<table>`
num_records: Number of records to insert.
Usually a number but expressions like `1e4` work as well.
bulk_size: The bulk size of the insert statements.
concurrency: How many operations to run concurrently.
mapping_file: A JSON file that defines a mapping from column name to
fake-factory provider.
The format is as follows:
{
"column_name": ["provider_with_args", ["arg1", "arg"]],
"x": ["provider_with_args", ["arg1"]],
"y": "provider_without_args"
}
"""
with clients.client(hosts, concurrency=1) as client:
schema, table_name = parse_table(table)
columns = retrieve_columns(client, schema, table_name)
if not columns:
sys.exit('Could not find columns for table "{}"'.format(table))
print('Found schema: ')
print(json.dumps(columns, sort_keys=True, indent=4))
mapping = None
if mapping_file:
mapping = json.load(mapping_file)
bulk_size = min(num_records, bulk_size)
num_inserts = int(math.ceil(num_records / bulk_size))
gen_row = create_row_generator(columns, mapping)
stmt = to_insert('"{schema}"."{table_name}"'.format(**locals()), columns)[0]
print('Using insert statement: ')
print(stmt)
print('Will make {} requests with a bulk size of {}'.format(
num_inserts, bulk_size))
print('Generating fake data and executing inserts')
q = asyncio.Queue(maxsize=concurrency)
with clients.client(hosts, concurrency=concurrency) as client:
active = [True]
def stop():
asyncio.ensure_future(q.put(None))
active.clear()
loop.remove_signal_handler(signal.SIGINT)
if sys.platform != 'win32':
loop.add_signal_handler(signal.SIGINT, stop)
bulk_seq = _bulk_size_generator(num_records, bulk_size, active)
with ThreadPoolExecutor() as e:
tasks = asyncio.gather(
_gen_data_and_insert(q, e, client, stmt, gen_row, bulk_seq),
consume(q, total=num_inserts)
)
loop.run_until_complete(tasks) | [
"def",
"insert_fake_data",
"(",
"hosts",
"=",
"None",
",",
"table",
"=",
"None",
",",
"num_records",
"=",
"1e5",
",",
"bulk_size",
"=",
"1000",
",",
"concurrency",
"=",
"25",
",",
"mapping_file",
"=",
"None",
")",
":",
"with",
"clients",
".",
"client",
... | Generate random data and insert it into a table.
This will read the table schema and then find suitable random data providers.
Which provider is choosen depends on the column name and data type.
Example:
A column named `name` will map to the `name` provider.
A column named `x` of type int will map to `random_int` because there
is no `x` provider.
Available providers are listed here:
https://faker.readthedocs.io/en/latest/providers.html
Additional providers:
- auto_inc:
Returns unique incrementing numbers.
Automatically used for columns named "id" of type int or long
- geo_point
Returns [<lon>, <lat>]
Automatically used for columns of type geo_point
Args:
hosts: <host>:[<port>] of the Crate node
table: The table name into which the data should be inserted.
Either fully qualified: `<schema>.<table>` or just `<table>`
num_records: Number of records to insert.
Usually a number but expressions like `1e4` work as well.
bulk_size: The bulk size of the insert statements.
concurrency: How many operations to run concurrently.
mapping_file: A JSON file that defines a mapping from column name to
fake-factory provider.
The format is as follows:
{
"column_name": ["provider_with_args", ["arg1", "arg"]],
"x": ["provider_with_args", ["arg1"]],
"y": "provider_without_args"
} | [
"Generate",
"random",
"data",
"and",
"insert",
"it",
"into",
"a",
"table",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/insert_fake_data.py#L204-L289 | train | 46,656 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | cast_values_csvs | def cast_values_csvs(d, idx, x):
"""
Attempt to cast string to float. If error, keep as a string.
:param dict d: Data
:param int idx: Index number
:param str x: Data
:return any:
"""
try:
d[idx].append(float(x))
except ValueError:
d[idx].append(x)
# logger_misc.warn("cast_values_csv: ValueError")
# logger_misc.warn("ValueError: col: {}, {}".format(x, e))
except KeyError as e:
logger_misc.warn("cast_values_csv: KeyError: col: {}, {}".format(x, e))
return d | python | def cast_values_csvs(d, idx, x):
"""
Attempt to cast string to float. If error, keep as a string.
:param dict d: Data
:param int idx: Index number
:param str x: Data
:return any:
"""
try:
d[idx].append(float(x))
except ValueError:
d[idx].append(x)
# logger_misc.warn("cast_values_csv: ValueError")
# logger_misc.warn("ValueError: col: {}, {}".format(x, e))
except KeyError as e:
logger_misc.warn("cast_values_csv: KeyError: col: {}, {}".format(x, e))
return d | [
"def",
"cast_values_csvs",
"(",
"d",
",",
"idx",
",",
"x",
")",
":",
"try",
":",
"d",
"[",
"idx",
"]",
".",
"append",
"(",
"float",
"(",
"x",
")",
")",
"except",
"ValueError",
":",
"d",
"[",
"idx",
"]",
".",
"append",
"(",
"x",
")",
"# logger_m... | Attempt to cast string to float. If error, keep as a string.
:param dict d: Data
:param int idx: Index number
:param str x: Data
:return any: | [
"Attempt",
"to",
"cast",
"string",
"to",
"float",
".",
"If",
"error",
"keep",
"as",
"a",
"string",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L22-L40 | train | 46,657 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | cast_float | def cast_float(x):
"""
Attempt to cleanup string or convert to number value.
:param any x:
:return float:
"""
try:
x = float(x)
except ValueError:
try:
x = x.strip()
except AttributeError as e:
logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e))
return x | python | def cast_float(x):
"""
Attempt to cleanup string or convert to number value.
:param any x:
:return float:
"""
try:
x = float(x)
except ValueError:
try:
x = x.strip()
except AttributeError as e:
logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e))
return x | [
"def",
"cast_float",
"(",
"x",
")",
":",
"try",
":",
"x",
"=",
"float",
"(",
"x",
")",
"except",
"ValueError",
":",
"try",
":",
"x",
"=",
"x",
".",
"strip",
"(",
")",
"except",
"AttributeError",
"as",
"e",
":",
"logger_misc",
".",
"warn",
"(",
"\... | Attempt to cleanup string or convert to number value.
:param any x:
:return float: | [
"Attempt",
"to",
"cleanup",
"string",
"or",
"convert",
"to",
"number",
"value",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L43-L57 | train | 46,658 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | cast_int | def cast_int(x):
"""
Cast unknown type into integer
:param any x:
:return int:
"""
try:
x = int(x)
except ValueError:
try:
x = x.strip()
except AttributeError as e:
logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e))
return x | python | def cast_int(x):
"""
Cast unknown type into integer
:param any x:
:return int:
"""
try:
x = int(x)
except ValueError:
try:
x = x.strip()
except AttributeError as e:
logger_misc.warn("parse_str: AttributeError: String not number or word, {}, {}".format(x, e))
return x | [
"def",
"cast_int",
"(",
"x",
")",
":",
"try",
":",
"x",
"=",
"int",
"(",
"x",
")",
"except",
"ValueError",
":",
"try",
":",
"x",
"=",
"x",
".",
"strip",
"(",
")",
"except",
"AttributeError",
"as",
"e",
":",
"logger_misc",
".",
"warn",
"(",
"\"par... | Cast unknown type into integer
:param any x:
:return int: | [
"Cast",
"unknown",
"type",
"into",
"integer"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L60-L74 | train | 46,659 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | decimal_precision | def decimal_precision(row):
"""
Change the "precision" of values before writing to CSV. Each value is rounded to 3 numbers.
ex: 300 -> 300
ex: 300.123456 -> 300.123
ex: 3.123456e-25 - > 3.123e-25
:param tuple row: Row of numbers to process
:return list row: Processed row
"""
# _row = []
try:
# Convert tuple to list for processing
row = list(row)
for idx, x in enumerate(row):
x = str(x)
# Is this a scientific notated float? Tear it apart with regex, round, and piece together again
m = re.match(re_sci_notation, x)
if m:
_x2 = round(float(m.group(2)), 3)
x = m.group(1) + str(_x2)[1:] + m.group(3)
# A normal float? round to 3 decimals as usual
else:
try:
x = round(float(x), 3)
except (ValueError, TypeError):
x = x
row[idx] = x
# Convert list back to tuple for csv writer
row = tuple(row)
except Exception as e:
print("Error: Unable to fix the precision of values. File size may be larger than normal, {}".format(e))
return row | python | def decimal_precision(row):
"""
Change the "precision" of values before writing to CSV. Each value is rounded to 3 numbers.
ex: 300 -> 300
ex: 300.123456 -> 300.123
ex: 3.123456e-25 - > 3.123e-25
:param tuple row: Row of numbers to process
:return list row: Processed row
"""
# _row = []
try:
# Convert tuple to list for processing
row = list(row)
for idx, x in enumerate(row):
x = str(x)
# Is this a scientific notated float? Tear it apart with regex, round, and piece together again
m = re.match(re_sci_notation, x)
if m:
_x2 = round(float(m.group(2)), 3)
x = m.group(1) + str(_x2)[1:] + m.group(3)
# A normal float? round to 3 decimals as usual
else:
try:
x = round(float(x), 3)
except (ValueError, TypeError):
x = x
row[idx] = x
# Convert list back to tuple for csv writer
row = tuple(row)
except Exception as e:
print("Error: Unable to fix the precision of values. File size may be larger than normal, {}".format(e))
return row | [
"def",
"decimal_precision",
"(",
"row",
")",
":",
"# _row = []",
"try",
":",
"# Convert tuple to list for processing",
"row",
"=",
"list",
"(",
"row",
")",
"for",
"idx",
",",
"x",
"in",
"enumerate",
"(",
"row",
")",
":",
"x",
"=",
"str",
"(",
"x",
")",
... | Change the "precision" of values before writing to CSV. Each value is rounded to 3 numbers.
ex: 300 -> 300
ex: 300.123456 -> 300.123
ex: 3.123456e-25 - > 3.123e-25
:param tuple row: Row of numbers to process
:return list row: Processed row | [
"Change",
"the",
"precision",
"of",
"values",
"before",
"writing",
"to",
"CSV",
".",
"Each",
"value",
"is",
"rounded",
"to",
"3",
"numbers",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L106-L140 | train | 46,660 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | fix_coordinate_decimal | def fix_coordinate_decimal(d):
"""
Coordinate decimal degrees calculated by an excel formula are often too long as a repeating decimal.
Round them down to 5 decimals
:param dict d: Metadata
:return dict d: Metadata
"""
try:
for idx, n in enumerate(d["geo"]["geometry"]["coordinates"]):
d["geo"]["geometry"]["coordinates"][idx] = round(n, 5)
except Exception as e:
logger_misc.error("fix_coordinate_decimal: {}".format(e))
return d | python | def fix_coordinate_decimal(d):
"""
Coordinate decimal degrees calculated by an excel formula are often too long as a repeating decimal.
Round them down to 5 decimals
:param dict d: Metadata
:return dict d: Metadata
"""
try:
for idx, n in enumerate(d["geo"]["geometry"]["coordinates"]):
d["geo"]["geometry"]["coordinates"][idx] = round(n, 5)
except Exception as e:
logger_misc.error("fix_coordinate_decimal: {}".format(e))
return d | [
"def",
"fix_coordinate_decimal",
"(",
"d",
")",
":",
"try",
":",
"for",
"idx",
",",
"n",
"in",
"enumerate",
"(",
"d",
"[",
"\"geo\"",
"]",
"[",
"\"geometry\"",
"]",
"[",
"\"coordinates\"",
"]",
")",
":",
"d",
"[",
"\"geo\"",
"]",
"[",
"\"geometry\"",
... | Coordinate decimal degrees calculated by an excel formula are often too long as a repeating decimal.
Round them down to 5 decimals
:param dict d: Metadata
:return dict d: Metadata | [
"Coordinate",
"decimal",
"degrees",
"calculated",
"by",
"an",
"excel",
"formula",
"are",
"often",
"too",
"long",
"as",
"a",
"repeating",
"decimal",
".",
"Round",
"them",
"down",
"to",
"5",
"decimals"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L143-L156 | train | 46,661 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | generate_timestamp | def generate_timestamp(fmt=None):
"""
Generate a timestamp to mark when this file was last modified.
:param str fmt: Special format instructions
:return str: YYYY-MM-DD format, or specified format
"""
if fmt:
time = dt.datetime.now().strftime(fmt)
else:
time = dt.date.today()
return str(time) | python | def generate_timestamp(fmt=None):
"""
Generate a timestamp to mark when this file was last modified.
:param str fmt: Special format instructions
:return str: YYYY-MM-DD format, or specified format
"""
if fmt:
time = dt.datetime.now().strftime(fmt)
else:
time = dt.date.today()
return str(time) | [
"def",
"generate_timestamp",
"(",
"fmt",
"=",
"None",
")",
":",
"if",
"fmt",
":",
"time",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"fmt",
")",
"else",
":",
"time",
"=",
"dt",
".",
"date",
".",
"today",
"(",
")",
"... | Generate a timestamp to mark when this file was last modified.
:param str fmt: Special format instructions
:return str: YYYY-MM-DD format, or specified format | [
"Generate",
"a",
"timestamp",
"to",
"mark",
"when",
"this",
"file",
"was",
"last",
"modified",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L159-L170 | train | 46,662 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_appended_name | def get_appended_name(name, columns):
"""
Append numbers to a name until it no longer conflicts with the other names in a column.
Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop.
There shouldn't ever be more than two or three identical variable names in a table.
:param str name: Variable name in question
:param dict columns: Columns listed by variable name
:return str: Appended variable name
"""
loop = 0
while name in columns:
loop += 1
if loop > 10:
logger_misc.warn("get_appended_name: Too many loops: Tried to get appended name but something looks wrong")
break
tmp = name + "-" + str(loop)
if tmp not in columns:
return tmp
return name + "-99" | python | def get_appended_name(name, columns):
"""
Append numbers to a name until it no longer conflicts with the other names in a column.
Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop.
There shouldn't ever be more than two or three identical variable names in a table.
:param str name: Variable name in question
:param dict columns: Columns listed by variable name
:return str: Appended variable name
"""
loop = 0
while name in columns:
loop += 1
if loop > 10:
logger_misc.warn("get_appended_name: Too many loops: Tried to get appended name but something looks wrong")
break
tmp = name + "-" + str(loop)
if tmp not in columns:
return tmp
return name + "-99" | [
"def",
"get_appended_name",
"(",
"name",
",",
"columns",
")",
":",
"loop",
"=",
"0",
"while",
"name",
"in",
"columns",
":",
"loop",
"+=",
"1",
"if",
"loop",
">",
"10",
":",
"logger_misc",
".",
"warn",
"(",
"\"get_appended_name: Too many loops: Tried to get app... | Append numbers to a name until it no longer conflicts with the other names in a column.
Necessary to avoid overwriting columns and losing data. Loop a preset amount of times to avoid an infinite loop.
There shouldn't ever be more than two or three identical variable names in a table.
:param str name: Variable name in question
:param dict columns: Columns listed by variable name
:return str: Appended variable name | [
"Append",
"numbers",
"to",
"a",
"name",
"until",
"it",
"no",
"longer",
"conflicts",
"with",
"the",
"other",
"names",
"in",
"a",
"column",
".",
"Necessary",
"to",
"avoid",
"overwriting",
"columns",
"and",
"losing",
"data",
".",
"Loop",
"a",
"preset",
"amoun... | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L185-L204 | train | 46,663 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_authors_as_str | def get_authors_as_str(x):
"""
Take author or investigator data, and convert it to a concatenated string of names.
Author data structure has a few variations, so account for all.
:param any x: Author data
:return str: Author string
"""
_authors = ""
# if it's a string already, we're done
if isinstance(x, str):
return x
# elif it's a list, keep going
elif isinstance(x, list):
# item in list is a str
if isinstance(x[0], str):
# loop and concat until the last item
for name in x[:-1]:
# all inner items get a semi-colon at the end
_authors += str(name) + "; "
# last item does not get a semi-colon at the end
_authors += str(x[-1])
# item in list is a dictionary
elif isinstance(x[0], dict):
# dictionary structure SHOULD have authors listed until the "name" key.
try:
# loop and concat until the last item
for entry in x[:-1]:
# all inner items get a semi-colon at the end
_authors += str(entry["name"]) + "; "
# last item does not get a semi-colon at the end
_authors += str(x[-1]["name"])
except KeyError:
logger_misc.warn("get_authors_as_str: KeyError: Authors incorrect data structure")
else:
logger_misc.debug("get_authors_as_str: TypeError: author/investigators isn't str or list: {}".format(type(x)))
return _authors | python | def get_authors_as_str(x):
"""
Take author or investigator data, and convert it to a concatenated string of names.
Author data structure has a few variations, so account for all.
:param any x: Author data
:return str: Author string
"""
_authors = ""
# if it's a string already, we're done
if isinstance(x, str):
return x
# elif it's a list, keep going
elif isinstance(x, list):
# item in list is a str
if isinstance(x[0], str):
# loop and concat until the last item
for name in x[:-1]:
# all inner items get a semi-colon at the end
_authors += str(name) + "; "
# last item does not get a semi-colon at the end
_authors += str(x[-1])
# item in list is a dictionary
elif isinstance(x[0], dict):
# dictionary structure SHOULD have authors listed until the "name" key.
try:
# loop and concat until the last item
for entry in x[:-1]:
# all inner items get a semi-colon at the end
_authors += str(entry["name"]) + "; "
# last item does not get a semi-colon at the end
_authors += str(x[-1]["name"])
except KeyError:
logger_misc.warn("get_authors_as_str: KeyError: Authors incorrect data structure")
else:
logger_misc.debug("get_authors_as_str: TypeError: author/investigators isn't str or list: {}".format(type(x)))
return _authors | [
"def",
"get_authors_as_str",
"(",
"x",
")",
":",
"_authors",
"=",
"\"\"",
"# if it's a string already, we're done",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"return",
"x",
"# elif it's a list, keep going",
"elif",
"isinstance",
"(",
"x",
",",
"list",
... | Take author or investigator data, and convert it to a concatenated string of names.
Author data structure has a few variations, so account for all.
:param any x: Author data
:return str: Author string | [
"Take",
"author",
"or",
"investigator",
"data",
"and",
"convert",
"it",
"to",
"a",
"concatenated",
"string",
"of",
"names",
".",
"Author",
"data",
"structure",
"has",
"a",
"few",
"variations",
"so",
"account",
"for",
"all",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L207-L247 | train | 46,664 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_dsn | def get_dsn(d):
"""
Get the dataset name from a record
:param dict d: Metadata
:return str: Dataset name
"""
try:
return d["dataSetName"]
except Exception as e:
logger_misc.warn("get_dsn: Exception: No datasetname found, unable to continue: {}".format(e))
exit(1) | python | def get_dsn(d):
"""
Get the dataset name from a record
:param dict d: Metadata
:return str: Dataset name
"""
try:
return d["dataSetName"]
except Exception as e:
logger_misc.warn("get_dsn: Exception: No datasetname found, unable to continue: {}".format(e))
exit(1) | [
"def",
"get_dsn",
"(",
"d",
")",
":",
"try",
":",
"return",
"d",
"[",
"\"dataSetName\"",
"]",
"except",
"Exception",
"as",
"e",
":",
"logger_misc",
".",
"warn",
"(",
"\"get_dsn: Exception: No datasetname found, unable to continue: {}\"",
".",
"format",
"(",
"e",
... | Get the dataset name from a record
:param dict d: Metadata
:return str: Dataset name | [
"Get",
"the",
"dataset",
"name",
"from",
"a",
"record"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L250-L262 | train | 46,665 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_ensemble_counts | def get_ensemble_counts(d):
"""
Determine if this is a 1 or 2 column ensemble. Then determine how many columns and rows it has.
:param dict d: Metadata (table)
:return dict _rows_cols: Row and column counts
"""
_rows_cols = {"rows": 0, "cols": 0}
try:
if len(d) == 1:
for var, data in d.items():
# increment columns by one
_rows_cols["cols"] += len(data["values"])
# get row count by getting len of column (since it's only one list
_rows_cols["rows"] = len(data["values"][0])
break
elif len(d) == 2:
for var, data in d.items():
# multiple columns in one. list of lists
if isinstance(data["number"], list):
# add total amount of columns to the running total
_rows_cols["cols"] += len(data["values"])
# single column. one list
else:
# increment columns by one
_rows_cols["cols"] += 1
# get row count by getting len of column (since it's only one list
_rows_cols["rows"] = len(data["values"])
except Exception as e:
logger_misc.warn("get_ensemble_counts: {}".format(e))
return _rows_cols | python | def get_ensemble_counts(d):
"""
Determine if this is a 1 or 2 column ensemble. Then determine how many columns and rows it has.
:param dict d: Metadata (table)
:return dict _rows_cols: Row and column counts
"""
_rows_cols = {"rows": 0, "cols": 0}
try:
if len(d) == 1:
for var, data in d.items():
# increment columns by one
_rows_cols["cols"] += len(data["values"])
# get row count by getting len of column (since it's only one list
_rows_cols["rows"] = len(data["values"][0])
break
elif len(d) == 2:
for var, data in d.items():
# multiple columns in one. list of lists
if isinstance(data["number"], list):
# add total amount of columns to the running total
_rows_cols["cols"] += len(data["values"])
# single column. one list
else:
# increment columns by one
_rows_cols["cols"] += 1
# get row count by getting len of column (since it's only one list
_rows_cols["rows"] = len(data["values"])
except Exception as e:
logger_misc.warn("get_ensemble_counts: {}".format(e))
return _rows_cols | [
"def",
"get_ensemble_counts",
"(",
"d",
")",
":",
"_rows_cols",
"=",
"{",
"\"rows\"",
":",
"0",
",",
"\"cols\"",
":",
"0",
"}",
"try",
":",
"if",
"len",
"(",
"d",
")",
"==",
"1",
":",
"for",
"var",
",",
"data",
"in",
"d",
".",
"items",
"(",
")"... | Determine if this is a 1 or 2 column ensemble. Then determine how many columns and rows it has.
:param dict d: Metadata (table)
:return dict _rows_cols: Row and column counts | [
"Determine",
"if",
"this",
"is",
"a",
"1",
"or",
"2",
"column",
"ensemble",
".",
"Then",
"determine",
"how",
"many",
"columns",
"and",
"rows",
"it",
"has",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L265-L299 | train | 46,666 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_missing_value_key | def get_missing_value_key(d):
"""
Get the Missing Value entry from a table of data. If none is found, try the columns.
If still none found, prompt user.
:param dict d: Table of data
:return str _mv: Missing Value
"""
_mv = "nan"
# Attempt to find a table-level missing value key
try:
# check for missing value key at the table root
_mv = d["missingValue"]
except KeyError as e:
logger_misc.info("get_missing_value: No missing value key found: {}".format(e))
except AttributeError as e:
logger_misc.warn("get_missing_value: Column is wrong data type: {}".format(e))
# No table-level missing value found. Attempt to find a column-level missing value key
if not _mv:
try:
# loop for each column of data, searching for a missing value key
for k, v in d["columns"].items():
# found a column with a missing value key. Store it and exit the loop.
_mv = v["missingValue"]
break
except KeyError:
# There are no columns in this table. We've got bigger problems!
pass
# No table-level or column-level missing value. Out of places to look. Ask the user to enter the missing value
# used in this data
# if not _mv:
# print("No 'missingValue' key provided. Please type the missingValue used in this file: {}\n".format(filename))
# _mv = input("missingValue: ")
return _mv | python | def get_missing_value_key(d):
"""
Get the Missing Value entry from a table of data. If none is found, try the columns.
If still none found, prompt user.
:param dict d: Table of data
:return str _mv: Missing Value
"""
_mv = "nan"
# Attempt to find a table-level missing value key
try:
# check for missing value key at the table root
_mv = d["missingValue"]
except KeyError as e:
logger_misc.info("get_missing_value: No missing value key found: {}".format(e))
except AttributeError as e:
logger_misc.warn("get_missing_value: Column is wrong data type: {}".format(e))
# No table-level missing value found. Attempt to find a column-level missing value key
if not _mv:
try:
# loop for each column of data, searching for a missing value key
for k, v in d["columns"].items():
# found a column with a missing value key. Store it and exit the loop.
_mv = v["missingValue"]
break
except KeyError:
# There are no columns in this table. We've got bigger problems!
pass
# No table-level or column-level missing value. Out of places to look. Ask the user to enter the missing value
# used in this data
# if not _mv:
# print("No 'missingValue' key provided. Please type the missingValue used in this file: {}\n".format(filename))
# _mv = input("missingValue: ")
return _mv | [
"def",
"get_missing_value_key",
"(",
"d",
")",
":",
"_mv",
"=",
"\"nan\"",
"# Attempt to find a table-level missing value key",
"try",
":",
"# check for missing value key at the table root",
"_mv",
"=",
"d",
"[",
"\"missingValue\"",
"]",
"except",
"KeyError",
"as",
"e",
... | Get the Missing Value entry from a table of data. If none is found, try the columns.
If still none found, prompt user.
:param dict d: Table of data
:return str _mv: Missing Value | [
"Get",
"the",
"Missing",
"Value",
"entry",
"from",
"a",
"table",
"of",
"data",
".",
"If",
"none",
"is",
"found",
"try",
"the",
"columns",
".",
"If",
"still",
"none",
"found",
"prompt",
"user",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L302-L338 | train | 46,667 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_variable_name_col | def get_variable_name_col(d):
"""
Get the variable name from a table or column
:param dict d: Metadata (column)
:return str var: Variable name
"""
var = ""
try:
var = d["variableName"]
except KeyError:
try:
var = d["name"]
except KeyError:
num = "unknown"
if "number" in d:
num = d["number"]
print("Error: column number <{}> is missing a variableName. Please fix.".format(num))
logger_misc.info("get_variable_name_col: KeyError: missing key")
return var | python | def get_variable_name_col(d):
"""
Get the variable name from a table or column
:param dict d: Metadata (column)
:return str var: Variable name
"""
var = ""
try:
var = d["variableName"]
except KeyError:
try:
var = d["name"]
except KeyError:
num = "unknown"
if "number" in d:
num = d["number"]
print("Error: column number <{}> is missing a variableName. Please fix.".format(num))
logger_misc.info("get_variable_name_col: KeyError: missing key")
return var | [
"def",
"get_variable_name_col",
"(",
"d",
")",
":",
"var",
"=",
"\"\"",
"try",
":",
"var",
"=",
"d",
"[",
"\"variableName\"",
"]",
"except",
"KeyError",
":",
"try",
":",
"var",
"=",
"d",
"[",
"\"name\"",
"]",
"except",
"KeyError",
":",
"num",
"=",
"\... | Get the variable name from a table or column
:param dict d: Metadata (column)
:return str var: Variable name | [
"Get",
"the",
"variable",
"name",
"from",
"a",
"table",
"or",
"column"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L341-L360 | train | 46,668 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | get_table_key | def get_table_key(key, d, fallback=""):
"""
Try to get a table name from a data table
:param str key: Key to try first
:param dict d: Data table
:param str fallback: (optional) If we don't find a table name, use this as a generic name fallback.
:return str var: Data table name
"""
try:
var = d[key]
return var
except KeyError:
logger_misc.info("get_variable_name_table: KeyError: missing {}, use name: {}".format(key, fallback))
return fallback | python | def get_table_key(key, d, fallback=""):
"""
Try to get a table name from a data table
:param str key: Key to try first
:param dict d: Data table
:param str fallback: (optional) If we don't find a table name, use this as a generic name fallback.
:return str var: Data table name
"""
try:
var = d[key]
return var
except KeyError:
logger_misc.info("get_variable_name_table: KeyError: missing {}, use name: {}".format(key, fallback))
return fallback | [
"def",
"get_table_key",
"(",
"key",
",",
"d",
",",
"fallback",
"=",
"\"\"",
")",
":",
"try",
":",
"var",
"=",
"d",
"[",
"key",
"]",
"return",
"var",
"except",
"KeyError",
":",
"logger_misc",
".",
"info",
"(",
"\"get_variable_name_table: KeyError: missing {},... | Try to get a table name from a data table
:param str key: Key to try first
:param dict d: Data table
:param str fallback: (optional) If we don't find a table name, use this as a generic name fallback.
:return str var: Data table name | [
"Try",
"to",
"get",
"a",
"table",
"name",
"from",
"a",
"data",
"table"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L363-L377 | train | 46,669 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | load_fn_matches_ext | def load_fn_matches_ext(file_path, file_type):
"""
Check that the file extension matches the target extension given.
:param str file_path: Path to be checked
:param str file_type: Target extension
:return bool correct_ext: Extension match or does not match
"""
correct_ext = False
curr_ext = os.path.splitext(file_path)[1]
exts = [curr_ext, file_type]
try:
# special case: if file type is excel, both extensions are valid.
if ".xlsx" in exts and ".xls" in exts:
correct_ext = True
elif curr_ext == file_type:
correct_ext = True
else:
print("Use '{}' to load this file: {}".format(FILE_TYPE_MAP[curr_ext]["load_fn"],
os.path.basename(file_path)))
except Exception as e:
logger_misc.debug("load_fn_matches_ext: {}".format(e))
return correct_ext | python | def load_fn_matches_ext(file_path, file_type):
"""
Check that the file extension matches the target extension given.
:param str file_path: Path to be checked
:param str file_type: Target extension
:return bool correct_ext: Extension match or does not match
"""
correct_ext = False
curr_ext = os.path.splitext(file_path)[1]
exts = [curr_ext, file_type]
try:
# special case: if file type is excel, both extensions are valid.
if ".xlsx" in exts and ".xls" in exts:
correct_ext = True
elif curr_ext == file_type:
correct_ext = True
else:
print("Use '{}' to load this file: {}".format(FILE_TYPE_MAP[curr_ext]["load_fn"],
os.path.basename(file_path)))
except Exception as e:
logger_misc.debug("load_fn_matches_ext: {}".format(e))
return correct_ext | [
"def",
"load_fn_matches_ext",
"(",
"file_path",
",",
"file_type",
")",
":",
"correct_ext",
"=",
"False",
"curr_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"[",
"1",
"]",
"exts",
"=",
"[",
"curr_ext",
",",
"file_type",
"]",
"try"... | Check that the file extension matches the target extension given.
:param str file_path: Path to be checked
:param str file_type: Target extension
:return bool correct_ext: Extension match or does not match | [
"Check",
"that",
"the",
"file",
"extension",
"matches",
"the",
"target",
"extension",
"given",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L396-L419 | train | 46,670 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | match_operators | def match_operators(inp, relate, cut):
"""
Compare two items. Match a string operator to an operator function
:param str inp: Comparison item
:param str relate: Comparison operator
:param any cut: Comparison item
:return bool truth: Comparison truth
"""
logger_misc.info("enter match_operators")
ops = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq
}
try:
truth = ops[relate](inp, cut)
except KeyError as e:
truth = False
logger_misc.warn("get_truth: KeyError: Invalid operator input: {}, {}".format(relate, e))
logger_misc.info("exit match_operators")
return truth | python | def match_operators(inp, relate, cut):
"""
Compare two items. Match a string operator to an operator function
:param str inp: Comparison item
:param str relate: Comparison operator
:param any cut: Comparison item
:return bool truth: Comparison truth
"""
logger_misc.info("enter match_operators")
ops = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq
}
try:
truth = ops[relate](inp, cut)
except KeyError as e:
truth = False
logger_misc.warn("get_truth: KeyError: Invalid operator input: {}, {}".format(relate, e))
logger_misc.info("exit match_operators")
return truth | [
"def",
"match_operators",
"(",
"inp",
",",
"relate",
",",
"cut",
")",
":",
"logger_misc",
".",
"info",
"(",
"\"enter match_operators\"",
")",
"ops",
"=",
"{",
"'>'",
":",
"operator",
".",
"gt",
",",
"'<'",
":",
"operator",
".",
"lt",
",",
"'>='",
":",
... | Compare two items. Match a string operator to an operator function
:param str inp: Comparison item
:param str relate: Comparison operator
:param any cut: Comparison item
:return bool truth: Comparison truth | [
"Compare",
"two",
"items",
".",
"Match",
"a",
"string",
"operator",
"to",
"an",
"operator",
"function"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L422-L444 | train | 46,671 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | match_arr_lengths | def match_arr_lengths(l):
"""
Check that all the array lengths match so that a DataFrame can be created successfully.
:param list l: Nested arrays
:return bool: Valid or invalid
"""
try:
# length of first list. use as basis to check other list lengths against.
inner_len = len(l[0])
# check each nested list
for i in l:
# if the length doesn't match the first list, then don't proceed.
if len(i) != inner_len:
return False
except IndexError:
# couldn't get index 0. Wrong data type given or not nested lists
print("Error: Array data is not formatted correctly.")
return False
except TypeError:
# Non-iterable data type given.
print("Error: Array data missing")
return False
# all array lengths are equal. made it through the whole list successfully
return True | python | def match_arr_lengths(l):
"""
Check that all the array lengths match so that a DataFrame can be created successfully.
:param list l: Nested arrays
:return bool: Valid or invalid
"""
try:
# length of first list. use as basis to check other list lengths against.
inner_len = len(l[0])
# check each nested list
for i in l:
# if the length doesn't match the first list, then don't proceed.
if len(i) != inner_len:
return False
except IndexError:
# couldn't get index 0. Wrong data type given or not nested lists
print("Error: Array data is not formatted correctly.")
return False
except TypeError:
# Non-iterable data type given.
print("Error: Array data missing")
return False
# all array lengths are equal. made it through the whole list successfully
return True | [
"def",
"match_arr_lengths",
"(",
"l",
")",
":",
"try",
":",
"# length of first list. use as basis to check other list lengths against.",
"inner_len",
"=",
"len",
"(",
"l",
"[",
"0",
"]",
")",
"# check each nested list",
"for",
"i",
"in",
"l",
":",
"# if the length doe... | Check that all the array lengths match so that a DataFrame can be created successfully.
:param list l: Nested arrays
:return bool: Valid or invalid | [
"Check",
"that",
"all",
"the",
"array",
"lengths",
"match",
"so",
"that",
"a",
"DataFrame",
"can",
"be",
"created",
"successfully",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L447-L471 | train | 46,672 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | mv_files | def mv_files(src, dst):
"""
Move all files from one directory to another
:param str src: Source directory
:param str dst: Destination directory
:return none:
"""
# list the files in the src directory
files = os.listdir(src)
# loop for each file found
for file in files:
# move the file from the src to the dst
shutil.move(os.path.join(src, file), os.path.join(dst, file))
return | python | def mv_files(src, dst):
"""
Move all files from one directory to another
:param str src: Source directory
:param str dst: Destination directory
:return none:
"""
# list the files in the src directory
files = os.listdir(src)
# loop for each file found
for file in files:
# move the file from the src to the dst
shutil.move(os.path.join(src, file), os.path.join(dst, file))
return | [
"def",
"mv_files",
"(",
"src",
",",
"dst",
")",
":",
"# list the files in the src directory",
"files",
"=",
"os",
".",
"listdir",
"(",
"src",
")",
"# loop for each file found",
"for",
"file",
"in",
"files",
":",
"# move the file from the src to the dst",
"shutil",
"... | Move all files from one directory to another
:param str src: Source directory
:param str dst: Destination directory
:return none: | [
"Move",
"all",
"files",
"from",
"one",
"directory",
"to",
"another"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L474-L488 | train | 46,673 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | normalize_name | def normalize_name(s):
"""
Remove foreign accents and characters to normalize the string. Prevents encoding errors.
:param str s: String
:return str s: String
"""
# Normalize the string into a byte string form
s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
# Remove the byte string and quotes from the string
s = str(s)[2:-1]
return s | python | def normalize_name(s):
"""
Remove foreign accents and characters to normalize the string. Prevents encoding errors.
:param str s: String
:return str s: String
"""
# Normalize the string into a byte string form
s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
# Remove the byte string and quotes from the string
s = str(s)[2:-1]
return s | [
"def",
"normalize_name",
"(",
"s",
")",
":",
"# Normalize the string into a byte string form",
"s",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"s",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"# Remove the byte string and quotes from the s... | Remove foreign accents and characters to normalize the string. Prevents encoding errors.
:param str s: String
:return str s: String | [
"Remove",
"foreign",
"accents",
"and",
"characters",
"to",
"normalize",
"the",
"string",
".",
"Prevents",
"encoding",
"errors",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L491-L502 | train | 46,674 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | path_type | def path_type(path, target):
"""
Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted.
:param str path: Path
:param str target: Target type wanted
:return bool: Path is what it claims to be (True) or mismatch (False)
"""
if os.path.isfile(path) and target == "file":
return True
elif os.path.isdir(path) and target == "directory":
return True
else:
print("Error: Path given is not a {}: {}".format(target, path))
return False | python | def path_type(path, target):
"""
Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted.
:param str path: Path
:param str target: Target type wanted
:return bool: Path is what it claims to be (True) or mismatch (False)
"""
if os.path.isfile(path) and target == "file":
return True
elif os.path.isdir(path) and target == "directory":
return True
else:
print("Error: Path given is not a {}: {}".format(target, path))
return False | [
"def",
"path_type",
"(",
"path",
",",
"target",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"and",
"target",
"==",
"\"file\"",
":",
"return",
"True",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"target"... | Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted.
:param str path: Path
:param str target: Target type wanted
:return bool: Path is what it claims to be (True) or mismatch (False) | [
"Determine",
"if",
"given",
"path",
"is",
"file",
"directory",
"or",
"other",
".",
"Compare",
"with",
"target",
"to",
"see",
"if",
"it",
"s",
"the",
"type",
"we",
"wanted",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L505-L519 | train | 46,675 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | print_filename | def print_filename(path):
"""
Print out lipd filename that is being read or written
:param str path: all file metadata
:return str: filename
"""
if os.path.basename(path):
return os.path.basename(path)
return path | python | def print_filename(path):
"""
Print out lipd filename that is being read or written
:param str path: all file metadata
:return str: filename
"""
if os.path.basename(path):
return os.path.basename(path)
return path | [
"def",
"print_filename",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"return",
"path"
] | Print out lipd filename that is being read or written
:param str path: all file metadata
:return str: filename | [
"Print",
"out",
"lipd",
"filename",
"that",
"is",
"being",
"read",
"or",
"written"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L522-L532 | train | 46,676 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | prompt_protocol | def prompt_protocol():
"""
Prompt user if they would like to save pickle file as a dictionary or an object.
:return str: Answer
"""
stop = 3
ans = ""
while True and stop > 0:
ans = input("Save as (d)ictionary or (o)bject?\n"
"* Note:\n"
"Dictionaries are more basic, and are compatible with Python v2.7+.\n"
"Objects are more complex, and are only compatible with v3.4+ ")
if ans not in ("d", "o"):
print("Invalid response: Please choose 'd' or 'o'")
else:
break
# if a valid answer isn't captured, default to dictionary (safer, broader)
if ans == "":
ans = "d"
return ans | python | def prompt_protocol():
"""
Prompt user if they would like to save pickle file as a dictionary or an object.
:return str: Answer
"""
stop = 3
ans = ""
while True and stop > 0:
ans = input("Save as (d)ictionary or (o)bject?\n"
"* Note:\n"
"Dictionaries are more basic, and are compatible with Python v2.7+.\n"
"Objects are more complex, and are only compatible with v3.4+ ")
if ans not in ("d", "o"):
print("Invalid response: Please choose 'd' or 'o'")
else:
break
# if a valid answer isn't captured, default to dictionary (safer, broader)
if ans == "":
ans = "d"
return ans | [
"def",
"prompt_protocol",
"(",
")",
":",
"stop",
"=",
"3",
"ans",
"=",
"\"\"",
"while",
"True",
"and",
"stop",
">",
"0",
":",
"ans",
"=",
"input",
"(",
"\"Save as (d)ictionary or (o)bject?\\n\"",
"\"* Note:\\n\"",
"\"Dictionaries are more basic, and are compatible wit... | Prompt user if they would like to save pickle file as a dictionary or an object.
:return str: Answer | [
"Prompt",
"user",
"if",
"they",
"would",
"like",
"to",
"save",
"pickle",
"file",
"as",
"a",
"dictionary",
"or",
"an",
"object",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L534-L554 | train | 46,677 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | rm_empty_doi | def rm_empty_doi(d):
"""
If an "identifier" dictionary has no doi ID, then it has no use. Delete it.
:param dict d: Metadata
:return dict d: Metadata
"""
logger_misc.info("enter remove_empty_doi")
try:
# Check each publication dictionary
for pub in d['pub']:
# If no identifier, then we can quit here. If identifier, then keep going.
if 'identifier' in pub:
if 'id' in pub['identifier'][0]:
# If there's a DOI id, but it's EMPTY
if pub['identifier'][0]['id'] in EMPTY:
del pub['identifier']
else:
# If there's an identifier section, with no DOI id
del pub['identifier']
except KeyError as e:
# What else could go wrong?
logger_misc.warn("remove_empty_doi: KeyError: publication key not found, {}".format(e))
logger_misc.info("exit remove_empty_doi")
return d | python | def rm_empty_doi(d):
"""
If an "identifier" dictionary has no doi ID, then it has no use. Delete it.
:param dict d: Metadata
:return dict d: Metadata
"""
logger_misc.info("enter remove_empty_doi")
try:
# Check each publication dictionary
for pub in d['pub']:
# If no identifier, then we can quit here. If identifier, then keep going.
if 'identifier' in pub:
if 'id' in pub['identifier'][0]:
# If there's a DOI id, but it's EMPTY
if pub['identifier'][0]['id'] in EMPTY:
del pub['identifier']
else:
# If there's an identifier section, with no DOI id
del pub['identifier']
except KeyError as e:
# What else could go wrong?
logger_misc.warn("remove_empty_doi: KeyError: publication key not found, {}".format(e))
logger_misc.info("exit remove_empty_doi")
return d | [
"def",
"rm_empty_doi",
"(",
"d",
")",
":",
"logger_misc",
".",
"info",
"(",
"\"enter remove_empty_doi\"",
")",
"try",
":",
"# Check each publication dictionary",
"for",
"pub",
"in",
"d",
"[",
"'pub'",
"]",
":",
"# If no identifier, then we can quit here. If identifier, ... | If an "identifier" dictionary has no doi ID, then it has no use. Delete it.
:param dict d: Metadata
:return dict d: Metadata | [
"If",
"an",
"identifier",
"dictionary",
"has",
"no",
"doi",
"ID",
"then",
"it",
"has",
"no",
"use",
".",
"Delete",
"it",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L643-L667 | train | 46,678 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | rm_files | def rm_files(path, extension):
"""
Remove all files in the given directory with the given extension
:param str path: Directory
:param str extension: File type to remove
:return none:
"""
files = list_files(extension, path)
for file in files:
if file.endswith(extension):
os.remove(os.path.join(path, file))
return | python | def rm_files(path, extension):
"""
Remove all files in the given directory with the given extension
:param str path: Directory
:param str extension: File type to remove
:return none:
"""
files = list_files(extension, path)
for file in files:
if file.endswith(extension):
os.remove(os.path.join(path, file))
return | [
"def",
"rm_files",
"(",
"path",
",",
"extension",
")",
":",
"files",
"=",
"list_files",
"(",
"extension",
",",
"path",
")",
"for",
"file",
"in",
"files",
":",
"if",
"file",
".",
"endswith",
"(",
"extension",
")",
":",
"os",
".",
"remove",
"(",
"os",
... | Remove all files in the given directory with the given extension
:param str path: Directory
:param str extension: File type to remove
:return none: | [
"Remove",
"all",
"files",
"in",
"the",
"given",
"directory",
"with",
"the",
"given",
"extension"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L670-L682 | train | 46,679 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | rm_missing_values_table | def rm_missing_values_table(d):
"""
Loop for each table column and remove the missingValue key & data
:param dict d: Metadata (table)
:return dict d: Metadata (table)
"""
try:
for k, v in d["columns"].items():
d["columns"][k] = rm_keys_from_dict(v, ["missingValue"])
except Exception:
# If we get a KeyError or some other error, it's not a big deal. Keep going.
pass
return d | python | def rm_missing_values_table(d):
"""
Loop for each table column and remove the missingValue key & data
:param dict d: Metadata (table)
:return dict d: Metadata (table)
"""
try:
for k, v in d["columns"].items():
d["columns"][k] = rm_keys_from_dict(v, ["missingValue"])
except Exception:
# If we get a KeyError or some other error, it's not a big deal. Keep going.
pass
return d | [
"def",
"rm_missing_values_table",
"(",
"d",
")",
":",
"try",
":",
"for",
"k",
",",
"v",
"in",
"d",
"[",
"\"columns\"",
"]",
".",
"items",
"(",
")",
":",
"d",
"[",
"\"columns\"",
"]",
"[",
"k",
"]",
"=",
"rm_keys_from_dict",
"(",
"v",
",",
"[",
"\... | Loop for each table column and remove the missingValue key & data
:param dict d: Metadata (table)
:return dict d: Metadata (table) | [
"Loop",
"for",
"each",
"table",
"column",
"and",
"remove",
"the",
"missingValue",
"key",
"&",
"data"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L708-L721 | train | 46,680 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | rm_keys_from_dict | def rm_keys_from_dict(d, keys):
"""
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
"""
# Loop for each key given
for key in keys:
# Is the key in the dictionary?
if key in d:
try:
d.pop(key, None)
except KeyError:
# Not concerned with an error. Keep going.
pass
return d | python | def rm_keys_from_dict(d, keys):
"""
Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata
"""
# Loop for each key given
for key in keys:
# Is the key in the dictionary?
if key in d:
try:
d.pop(key, None)
except KeyError:
# Not concerned with an error. Keep going.
pass
return d | [
"def",
"rm_keys_from_dict",
"(",
"d",
",",
"keys",
")",
":",
"# Loop for each key given",
"for",
"key",
"in",
"keys",
":",
"# Is the key in the dictionary?",
"if",
"key",
"in",
"d",
":",
"try",
":",
"d",
".",
"pop",
"(",
"key",
",",
"None",
")",
"except",
... | Given a dictionary and a key list, remove any data in the dictionary with the given keys.
:param dict d: Metadata
:param list keys: Keys to be removed
:return dict d: Metadata | [
"Given",
"a",
"dictionary",
"and",
"a",
"key",
"list",
"remove",
"any",
"data",
"in",
"the",
"dictionary",
"with",
"the",
"given",
"keys",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L724-L741 | train | 46,681 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | _replace_missing_values_table | def _replace_missing_values_table(values, mv):
"""
Receive all table column values as a list of lists. Loop for each column of values
:param list values: Metadata (columns)
:param any mv: Missing value currently in use
:return list: Metadata (columns)
"""
for idx, column in enumerate(values):
values[idx] = _replace_missing_values_column(column, mv)
return values | python | def _replace_missing_values_table(values, mv):
"""
Receive all table column values as a list of lists. Loop for each column of values
:param list values: Metadata (columns)
:param any mv: Missing value currently in use
:return list: Metadata (columns)
"""
for idx, column in enumerate(values):
values[idx] = _replace_missing_values_column(column, mv)
return values | [
"def",
"_replace_missing_values_table",
"(",
"values",
",",
"mv",
")",
":",
"for",
"idx",
",",
"column",
"in",
"enumerate",
"(",
"values",
")",
":",
"values",
"[",
"idx",
"]",
"=",
"_replace_missing_values_column",
"(",
"column",
",",
"mv",
")",
"return",
... | Receive all table column values as a list of lists. Loop for each column of values
:param list values: Metadata (columns)
:param any mv: Missing value currently in use
:return list: Metadata (columns) | [
"Receive",
"all",
"table",
"column",
"values",
"as",
"a",
"list",
"of",
"lists",
".",
"Loop",
"for",
"each",
"column",
"of",
"values"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L781-L793 | train | 46,682 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | _replace_missing_values_column | def _replace_missing_values_column(values, mv):
"""
Replace missing values in the values list where applicable
:param list values: Metadata (column values)
:return list values: Metadata (column values)
"""
for idx, v in enumerate(values):
try:
if v in EMPTY or v == mv:
values[idx] = "nan"
elif math.isnan(float(v)):
values[idx] = "nan"
else:
values[idx] = v
except (TypeError, ValueError):
values[idx] = v
return values | python | def _replace_missing_values_column(values, mv):
"""
Replace missing values in the values list where applicable
:param list values: Metadata (column values)
:return list values: Metadata (column values)
"""
for idx, v in enumerate(values):
try:
if v in EMPTY or v == mv:
values[idx] = "nan"
elif math.isnan(float(v)):
values[idx] = "nan"
else:
values[idx] = v
except (TypeError, ValueError):
values[idx] = v
return values | [
"def",
"_replace_missing_values_column",
"(",
"values",
",",
"mv",
")",
":",
"for",
"idx",
",",
"v",
"in",
"enumerate",
"(",
"values",
")",
":",
"try",
":",
"if",
"v",
"in",
"EMPTY",
"or",
"v",
"==",
"mv",
":",
"values",
"[",
"idx",
"]",
"=",
"\"na... | Replace missing values in the values list where applicable
:param list values: Metadata (column values)
:return list values: Metadata (column values) | [
"Replace",
"missing",
"values",
"in",
"the",
"values",
"list",
"where",
"applicable"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L796-L814 | train | 46,683 |
nickmckay/LiPD-utilities | Python/lipd/misc.py | split_path_and_file | def split_path_and_file(s):
"""
Given a full path to a file, split and return a path and filename
:param str s: Path
:return str _path: Directory Path
:return str _filename: Filename
"""
_path = s
_filename = ""
try:
x = os.path.split(s)
_path = x[0]
_filename = x[1]
except Exception:
print("Error: unable to split path")
return _path, _filename | python | def split_path_and_file(s):
"""
Given a full path to a file, split and return a path and filename
:param str s: Path
:return str _path: Directory Path
:return str _filename: Filename
"""
_path = s
_filename = ""
try:
x = os.path.split(s)
_path = x[0]
_filename = x[1]
except Exception:
print("Error: unable to split path")
return _path, _filename | [
"def",
"split_path_and_file",
"(",
"s",
")",
":",
"_path",
"=",
"s",
"_filename",
"=",
"\"\"",
"try",
":",
"x",
"=",
"os",
".",
"path",
".",
"split",
"(",
"s",
")",
"_path",
"=",
"x",
"[",
"0",
"]",
"_filename",
"=",
"x",
"[",
"1",
"]",
"except... | Given a full path to a file, split and return a path and filename
:param str s: Path
:return str _path: Directory Path
:return str _filename: Filename | [
"Given",
"a",
"full",
"path",
"to",
"a",
"file",
"split",
"and",
"return",
"a",
"path",
"and",
"filename"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L817-L834 | train | 46,684 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | extract | def extract(d, whichtables, mode, time):
"""
LiPD Version 1.3
Main function to initiate LiPD to TSOs conversion.
Each object has a
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param dict d: Metadata for one LiPD file
:param str whichtables: all, meas, summ, or ens
:param str mode: paleo or chron mode
:return list _ts: Time series
"""
logger_ts.info("enter extract_main")
_root = {}
_ts = {}
# _switch = {"paleoData": "chronData", "chronData": "paleoData"}
_pc = "paleoData"
if mode == "chron":
_pc = "chronData"
_root["mode"] = _pc
_root["time_id"] = time
try:
# Build the root level data.
# This will serve as the template for which column data will be added onto later.
for k, v in d.items():
if k == "funding":
_root = _extract_fund(v, _root)
elif k == "geo":
_root = _extract_geo(v, _root)
elif k == 'pub':
_root = _extract_pub(v, _root)
# elif k in ["chronData", "paleoData"]:
# # Store chronData and paleoData as-is. Need it to collapse without data loss.
# _root[k] = copy.deepcopy(v)
else:
if k not in ["chronData", "paleoData"]:
_root[k] = v
# Create tso dictionaries for each individual column (build on root data)
_ts = _extract_pc(d, _root, _pc, whichtables)
except Exception as e:
logger_ts.error("extract: Exception: {}".format(e))
print("extract: Exception: {}".format(e))
logger_ts.info("exit extract_main")
return _ts | python | def extract(d, whichtables, mode, time):
"""
LiPD Version 1.3
Main function to initiate LiPD to TSOs conversion.
Each object has a
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param dict d: Metadata for one LiPD file
:param str whichtables: all, meas, summ, or ens
:param str mode: paleo or chron mode
:return list _ts: Time series
"""
logger_ts.info("enter extract_main")
_root = {}
_ts = {}
# _switch = {"paleoData": "chronData", "chronData": "paleoData"}
_pc = "paleoData"
if mode == "chron":
_pc = "chronData"
_root["mode"] = _pc
_root["time_id"] = time
try:
# Build the root level data.
# This will serve as the template for which column data will be added onto later.
for k, v in d.items():
if k == "funding":
_root = _extract_fund(v, _root)
elif k == "geo":
_root = _extract_geo(v, _root)
elif k == 'pub':
_root = _extract_pub(v, _root)
# elif k in ["chronData", "paleoData"]:
# # Store chronData and paleoData as-is. Need it to collapse without data loss.
# _root[k] = copy.deepcopy(v)
else:
if k not in ["chronData", "paleoData"]:
_root[k] = v
# Create tso dictionaries for each individual column (build on root data)
_ts = _extract_pc(d, _root, _pc, whichtables)
except Exception as e:
logger_ts.error("extract: Exception: {}".format(e))
print("extract: Exception: {}".format(e))
logger_ts.info("exit extract_main")
return _ts | [
"def",
"extract",
"(",
"d",
",",
"whichtables",
",",
"mode",
",",
"time",
")",
":",
"logger_ts",
".",
"info",
"(",
"\"enter extract_main\"",
")",
"_root",
"=",
"{",
"}",
"_ts",
"=",
"{",
"}",
"# _switch = {\"paleoData\": \"chronData\", \"chronData\": \"paleoData\"... | LiPD Version 1.3
Main function to initiate LiPD to TSOs conversion.
Each object has a
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param dict d: Metadata for one LiPD file
:param str whichtables: all, meas, summ, or ens
:param str mode: paleo or chron mode
:return list _ts: Time series | [
"LiPD",
"Version",
"1",
".",
"3",
"Main",
"function",
"to",
"initiate",
"LiPD",
"to",
"TSOs",
"conversion",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L15-L66 | train | 46,685 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | _extract_method | def _extract_method(method):
"""
Make a timeseries-formatted version of model method data
:param dict method: Method data
:return dict _method: Method data, formatted
"""
_method = {}
for k,v in method.items():
_method["method_" + k] = v
return _method | python | def _extract_method(method):
"""
Make a timeseries-formatted version of model method data
:param dict method: Method data
:return dict _method: Method data, formatted
"""
_method = {}
for k,v in method.items():
_method["method_" + k] = v
return _method | [
"def",
"_extract_method",
"(",
"method",
")",
":",
"_method",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"method",
".",
"items",
"(",
")",
":",
"_method",
"[",
"\"method_\"",
"+",
"k",
"]",
"=",
"v",
"return",
"_method"
] | Make a timeseries-formatted version of model method data
:param dict method: Method data
:return dict _method: Method data, formatted | [
"Make",
"a",
"timeseries",
"-",
"formatted",
"version",
"of",
"model",
"method",
"data"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L239-L249 | train | 46,686 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | _extract_table_model | def _extract_table_model(table_data, current, tt):
"""
Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data
"""
try:
if tt in ["summ", "ens"]:
m = re.match(re_sheet, table_data["tableName"])
if m:
_pc_num= m.group(1) + "Number"
current[_pc_num] = m.group(2)
current["modelNumber"] = m.group(4)
current["tableNumber"] = m.group(6)
else:
logger_ts.error("extract_table_summary: Unable to parse paleo/model/table numbers")
except Exception as e:
logger_ts.error("extract_table_summary: {}".format(e))
return current | python | def _extract_table_model(table_data, current, tt):
"""
Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data
"""
try:
if tt in ["summ", "ens"]:
m = re.match(re_sheet, table_data["tableName"])
if m:
_pc_num= m.group(1) + "Number"
current[_pc_num] = m.group(2)
current["modelNumber"] = m.group(4)
current["tableNumber"] = m.group(6)
else:
logger_ts.error("extract_table_summary: Unable to parse paleo/model/table numbers")
except Exception as e:
logger_ts.error("extract_table_summary: {}".format(e))
return current | [
"def",
"_extract_table_model",
"(",
"table_data",
",",
"current",
",",
"tt",
")",
":",
"try",
":",
"if",
"tt",
"in",
"[",
"\"summ\"",
",",
"\"ens\"",
"]",
":",
"m",
"=",
"re",
".",
"match",
"(",
"re_sheet",
",",
"table_data",
"[",
"\"tableName\"",
"]",... | Add in modelNumber and summaryNumber fields if this is a summary table
:param dict table_data: Table data
:param dict current: LiPD root data
:param str tt: Table type "summ", "ens", "meas"
:return dict current: Current root data | [
"Add",
"in",
"modelNumber",
"and",
"summaryNumber",
"fields",
"if",
"this",
"is",
"a",
"summary",
"table"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L316-L337 | train | 46,687 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | _extract_table | def _extract_table(table_data, current, pc, ts, tt):
"""
Use the given table data to create a time series entry for each column in the table.
:param dict table_data: Table data
:param dict current: LiPD root data
:param str pc: paleoData or chronData
:param list ts: Time series (so far)
:param bool summary: Summary Table or not
:return list ts: Time series (so far)
"""
current["tableType"] = tt
# Get root items for this table
current = _extract_table_root(table_data, current, pc)
# Add in modelNumber and tableNumber if this is "ens" or "summ" table
current = _extract_table_model(table_data, current, tt)
# Add age, depth, and year columns to root if available
_table_tmp = _extract_special(current, table_data)
try:
# Start creating entries using dictionary copies.
for _col_name, _col_data in table_data["columns"].items():
# Add column data onto root items. Copy so we don't ruin original data
_col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc)
try:
ts.append(_col_tmp)
except Exception as e:
logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e))
except Exception as e:
logger_ts.error("extract_table: {}".format(e))
return ts | python | def _extract_table(table_data, current, pc, ts, tt):
"""
Use the given table data to create a time series entry for each column in the table.
:param dict table_data: Table data
:param dict current: LiPD root data
:param str pc: paleoData or chronData
:param list ts: Time series (so far)
:param bool summary: Summary Table or not
:return list ts: Time series (so far)
"""
current["tableType"] = tt
# Get root items for this table
current = _extract_table_root(table_data, current, pc)
# Add in modelNumber and tableNumber if this is "ens" or "summ" table
current = _extract_table_model(table_data, current, tt)
# Add age, depth, and year columns to root if available
_table_tmp = _extract_special(current, table_data)
try:
# Start creating entries using dictionary copies.
for _col_name, _col_data in table_data["columns"].items():
# Add column data onto root items. Copy so we don't ruin original data
_col_tmp = _extract_columns(_col_data, copy.deepcopy(_table_tmp), pc)
try:
ts.append(_col_tmp)
except Exception as e:
logger_ts.warn("extract_table: Unable to create ts entry, {}".format(e))
except Exception as e:
logger_ts.error("extract_table: {}".format(e))
return ts | [
"def",
"_extract_table",
"(",
"table_data",
",",
"current",
",",
"pc",
",",
"ts",
",",
"tt",
")",
":",
"current",
"[",
"\"tableType\"",
"]",
"=",
"tt",
"# Get root items for this table",
"current",
"=",
"_extract_table_root",
"(",
"table_data",
",",
"current",
... | Use the given table data to create a time series entry for each column in the table.
:param dict table_data: Table data
:param dict current: LiPD root data
:param str pc: paleoData or chronData
:param list ts: Time series (so far)
:param bool summary: Summary Table or not
:return list ts: Time series (so far) | [
"Use",
"the",
"given",
"table",
"data",
"to",
"create",
"a",
"time",
"series",
"entry",
"for",
"each",
"column",
"in",
"the",
"table",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L340-L369 | train | 46,688 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | collapse | def collapse(l, raw):
"""
LiPD Version 1.3
Main function to initiate time series to LiPD conversion
Each object has a:
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param list l: Time series
:return dict _master: LiPD data, sorted by dataset name
"""
logger_ts.info("enter collapse")
# LiPD data (in progress), sorted dataset name
_master = {}
_dsn = ""
try:
# Determine if we're collapsing a paleo or chron time series
_pc = l[0]["mode"]
# Loop the time series
for entry in l:
# Get notable keys
dsn = entry['dataSetName']
_dsn = dsn
_current = entry
# Since root items are the same in each column of the same dataset, we only need these steps the first time.
if dsn not in _master:
logger_ts.info("collapsing: {}".format(dsn))
print("collapsing: {}".format(dsn))
_master, _current = _collapse_root(_master, _current, dsn, _pc)
try:
_master[dsn]["paleoData"] = raw[dsn]["paleoData"]
if "chronData" in raw[dsn]:
_master[dsn]["chronData"] = raw[dsn]["chronData"]
except KeyError as e:
print("collapse: Could not collapse an object the dataset: {}, {}".format(dsn, e))
# Collapse pc, calibration, and interpretation
_master = _collapse_pc(_master, _current, dsn, _pc)
# The result combined into a single dataset. Remove the extra layer on the data.
if len(_master) == 1:
_master = _master[_dsn]
print("Created LiPD data: 1 dataset")
else:
print("Created LiPD data: {} datasets".format(len(_master)))
except Exception as e:
print("Error: Unable to collapse time series, {}".format(e))
logger_ts.error("collapse: Exception: {}".format(e))
logger_ts.info("exit collapse")
return _master | python | def collapse(l, raw):
"""
LiPD Version 1.3
Main function to initiate time series to LiPD conversion
Each object has a:
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param list l: Time series
:return dict _master: LiPD data, sorted by dataset name
"""
logger_ts.info("enter collapse")
# LiPD data (in progress), sorted dataset name
_master = {}
_dsn = ""
try:
# Determine if we're collapsing a paleo or chron time series
_pc = l[0]["mode"]
# Loop the time series
for entry in l:
# Get notable keys
dsn = entry['dataSetName']
_dsn = dsn
_current = entry
# Since root items are the same in each column of the same dataset, we only need these steps the first time.
if dsn not in _master:
logger_ts.info("collapsing: {}".format(dsn))
print("collapsing: {}".format(dsn))
_master, _current = _collapse_root(_master, _current, dsn, _pc)
try:
_master[dsn]["paleoData"] = raw[dsn]["paleoData"]
if "chronData" in raw[dsn]:
_master[dsn]["chronData"] = raw[dsn]["chronData"]
except KeyError as e:
print("collapse: Could not collapse an object the dataset: {}, {}".format(dsn, e))
# Collapse pc, calibration, and interpretation
_master = _collapse_pc(_master, _current, dsn, _pc)
# The result combined into a single dataset. Remove the extra layer on the data.
if len(_master) == 1:
_master = _master[_dsn]
print("Created LiPD data: 1 dataset")
else:
print("Created LiPD data: {} datasets".format(len(_master)))
except Exception as e:
print("Error: Unable to collapse time series, {}".format(e))
logger_ts.error("collapse: Exception: {}".format(e))
logger_ts.info("exit collapse")
return _master | [
"def",
"collapse",
"(",
"l",
",",
"raw",
")",
":",
"logger_ts",
".",
"info",
"(",
"\"enter collapse\"",
")",
"# LiPD data (in progress), sorted dataset name",
"_master",
"=",
"{",
"}",
"_dsn",
"=",
"\"\"",
"try",
":",
"# Determine if we're collapsing a paleo or chron ... | LiPD Version 1.3
Main function to initiate time series to LiPD conversion
Each object has a:
"paleoNumber" or "chronNumber"
"tableNumber"
"modelNumber"
"time_id"
"mode" - chronData or paleoData
"tableType" - "meas" "ens" "summ"
:param list l: Time series
:return dict _master: LiPD data, sorted by dataset name | [
"LiPD",
"Version",
"1",
".",
"3",
"Main",
"function",
"to",
"initiate",
"time",
"series",
"to",
"LiPD",
"conversion"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L406-L465 | train | 46,689 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | _get_current_names | def _get_current_names(current, dsn, pc):
"""
Get the table name and variable name from the given time series entry
:param dict current: Time series entry
:param str pc: paleoData or chronData
:return str _table_name:
:return str _variable_name:
"""
_table_name = ""
_variable_name = ""
# Get key info
try:
_table_name = current['{}_tableName'.format(pc)]
_variable_name = current['{}_variableName'.format(pc)]
except Exception as e:
print("Error: Unable to collapse time series: {}, {}".format(dsn, e))
logger_ts.error("get_current: {}, {}".format(dsn, e))
return _table_name, _variable_name | python | def _get_current_names(current, dsn, pc):
"""
Get the table name and variable name from the given time series entry
:param dict current: Time series entry
:param str pc: paleoData or chronData
:return str _table_name:
:return str _variable_name:
"""
_table_name = ""
_variable_name = ""
# Get key info
try:
_table_name = current['{}_tableName'.format(pc)]
_variable_name = current['{}_variableName'.format(pc)]
except Exception as e:
print("Error: Unable to collapse time series: {}, {}".format(dsn, e))
logger_ts.error("get_current: {}, {}".format(dsn, e))
return _table_name, _variable_name | [
"def",
"_get_current_names",
"(",
"current",
",",
"dsn",
",",
"pc",
")",
":",
"_table_name",
"=",
"\"\"",
"_variable_name",
"=",
"\"\"",
"# Get key info",
"try",
":",
"_table_name",
"=",
"current",
"[",
"'{}_tableName'",
".",
"format",
"(",
"pc",
")",
"]",
... | Get the table name and variable name from the given time series entry
:param dict current: Time series entry
:param str pc: paleoData or chronData
:return str _table_name:
:return str _variable_name: | [
"Get",
"the",
"table",
"name",
"and",
"variable",
"name",
"from",
"the",
"given",
"time",
"series",
"entry"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L468-L486 | train | 46,690 |
nickmckay/LiPD-utilities | Python/lipd/timeseries.py | _collapse_pc | def _collapse_pc(master, current, dsn, pc):
"""
Collapse the paleo or chron for the current time series entry
:param dict master: LiPD data (so far)
:param dict current: Current time series entry
:param str dsn: Dataset name
:param str pc: paleoData or chronData
:return dict master:
"""
logger_ts.info("enter collapse_paleo")
_table_name, _variable_name = _get_current_names(current, dsn, pc)
try:
# Get the names we need to build the hierarchy
_m = re.match(re_sheet_w_number, _table_name)
# Is this a summary table or a measurement table?
_switch = {"meas": "measurementTable", "summ": "summaryTable", "ens": "ensembleTable"}
_ms = _switch[current["tableType"]]
# This is a measurement table. Put it in the correct part of the structure
# master[datasetname][chronData][chron0][measurementTable][chron0measurement0]
if _ms == "measurementTable":
# master[dsn] = _collapse_build_skeleton(master(dsn), _ms, _m)
# Collapse the keys in the table root if a table does not yet exist
if _table_name not in master[dsn][pc][_m.group(1)][_ms]:
_tmp_table = _collapse_table_root(current, dsn, pc)
master[dsn][pc][_m.group(1)][_ms][_table_name] = _tmp_table
# Collapse the keys at the column level, and return the column data
_tmp_column = _collapse_column(current, pc)
# Create the column entry in the table
master[dsn][pc][_m.group(1)][_ms][_table_name]['columns'][_variable_name] = _tmp_column
# This is a summary table. Put it in the correct part of the structure
# master[datasetname][chronData][chron0][model][chron0model0][summaryTable][chron0model0summary0]
elif _ms in ["ensembleTable", "summaryTable"]:
# Collapse the keys in the table root if a table does not yet exist
if _table_name not in master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms]:
_tmp_table = _collapse_table_root(current, dsn, pc)
master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name] = _tmp_table
# Collapse the keys at the column level, and return the column data
_tmp_column = _collapse_column(current, pc)
# Create the column entry in the table
master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name]["columns"][_variable_name] = _tmp_column
except Exception as e:
print("Error: Unable to collapse column data: {}, {}".format(dsn, e))
logger_ts.error("collapse_paleo: {}, {}, {}".format(dsn, _variable_name, e))
# If these sections had any items added to them, then add them to the column master.
return master | python | def _collapse_pc(master, current, dsn, pc):
"""
Collapse the paleo or chron for the current time series entry
:param dict master: LiPD data (so far)
:param dict current: Current time series entry
:param str dsn: Dataset name
:param str pc: paleoData or chronData
:return dict master:
"""
logger_ts.info("enter collapse_paleo")
_table_name, _variable_name = _get_current_names(current, dsn, pc)
try:
# Get the names we need to build the hierarchy
_m = re.match(re_sheet_w_number, _table_name)
# Is this a summary table or a measurement table?
_switch = {"meas": "measurementTable", "summ": "summaryTable", "ens": "ensembleTable"}
_ms = _switch[current["tableType"]]
# This is a measurement table. Put it in the correct part of the structure
# master[datasetname][chronData][chron0][measurementTable][chron0measurement0]
if _ms == "measurementTable":
# master[dsn] = _collapse_build_skeleton(master(dsn), _ms, _m)
# Collapse the keys in the table root if a table does not yet exist
if _table_name not in master[dsn][pc][_m.group(1)][_ms]:
_tmp_table = _collapse_table_root(current, dsn, pc)
master[dsn][pc][_m.group(1)][_ms][_table_name] = _tmp_table
# Collapse the keys at the column level, and return the column data
_tmp_column = _collapse_column(current, pc)
# Create the column entry in the table
master[dsn][pc][_m.group(1)][_ms][_table_name]['columns'][_variable_name] = _tmp_column
# This is a summary table. Put it in the correct part of the structure
# master[datasetname][chronData][chron0][model][chron0model0][summaryTable][chron0model0summary0]
elif _ms in ["ensembleTable", "summaryTable"]:
# Collapse the keys in the table root if a table does not yet exist
if _table_name not in master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms]:
_tmp_table = _collapse_table_root(current, dsn, pc)
master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name] = _tmp_table
# Collapse the keys at the column level, and return the column data
_tmp_column = _collapse_column(current, pc)
# Create the column entry in the table
master[dsn][pc][_m.group(1)]["model"][_m.group(1) + _m.group(2)][_ms][_table_name]["columns"][_variable_name] = _tmp_column
except Exception as e:
print("Error: Unable to collapse column data: {}, {}".format(dsn, e))
logger_ts.error("collapse_paleo: {}, {}, {}".format(dsn, _variable_name, e))
# If these sections had any items added to them, then add them to the column master.
return master | [
"def",
"_collapse_pc",
"(",
"master",
",",
"current",
",",
"dsn",
",",
"pc",
")",
":",
"logger_ts",
".",
"info",
"(",
"\"enter collapse_paleo\"",
")",
"_table_name",
",",
"_variable_name",
"=",
"_get_current_names",
"(",
"current",
",",
"dsn",
",",
"pc",
")"... | Collapse the paleo or chron for the current time series entry
:param dict master: LiPD data (so far)
:param dict current: Current time series entry
:param str dsn: Dataset name
:param str pc: paleoData or chronData
:return dict master: | [
"Collapse",
"the",
"paleo",
"or",
"chron",
"for",
"the",
"current",
"time",
"series",
"entry"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L631-L687 | train | 46,691 |
mfussenegger/cr8 | cr8/clients.py | _to_http_hosts | def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]:
"""Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200']
"""
if isinstance(hosts, str):
hosts = hosts.replace(',', ' ').split()
return [_to_http_uri(i) for i in hosts] | python | def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]:
"""Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200']
"""
if isinstance(hosts, str):
hosts = hosts.replace(',', ' ').split()
return [_to_http_uri(i) for i in hosts] | [
"def",
"_to_http_hosts",
"(",
"hosts",
":",
"Union",
"[",
"Iterable",
"[",
"str",
"]",
",",
"str",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"isinstance",
"(",
"hosts",
",",
"str",
")",
":",
"hosts",
"=",
"hosts",
".",
"replace",
"(",
"... | Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200'] | [
"Convert",
"a",
"string",
"of",
"whitespace",
"or",
"comma",
"separated",
"hosts",
"into",
"a",
"list",
"of",
"hosts",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L59-L79 | train | 46,692 |
mfussenegger/cr8 | cr8/clients.py | _plain_or_callable | def _plain_or_callable(obj):
"""Returns the value of the called object of obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']]
"""
if callable(obj):
return obj()
elif isinstance(obj, types.GeneratorType):
return next(obj)
else:
return obj | python | def _plain_or_callable(obj):
"""Returns the value of the called object of obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']]
"""
if callable(obj):
return obj()
elif isinstance(obj, types.GeneratorType):
return next(obj)
else:
return obj | [
"def",
"_plain_or_callable",
"(",
"obj",
")",
":",
"if",
"callable",
"(",
"obj",
")",
":",
"return",
"obj",
"(",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"types",
".",
"GeneratorType",
")",
":",
"return",
"next",
"(",
"obj",
")",
"else",
":",
"ret... | Returns the value of the called object of obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']] | [
"Returns",
"the",
"value",
"of",
"the",
"called",
"object",
"of",
"obj",
"is",
"a",
"callable",
"otherwise",
"the",
"plain",
"object",
".",
"Returns",
"None",
"if",
"obj",
"is",
"None",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L97-L128 | train | 46,693 |
mfussenegger/cr8 | cr8/clients.py | _to_dsn | def _to_dsn(hosts):
"""Convert a host URI into a dsn for aiopg.
>>> _to_dsn('aiopg://myhostname:4242/mydb')
'postgres://crate@myhostname:4242/mydb'
>>> _to_dsn('aiopg://myhostname:4242')
'postgres://crate@myhostname:4242/doc'
>>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require')
'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require'
>>> _to_dsn('aiopg://myhostname')
'postgres://crate@myhostname:5432/doc'
"""
p = urlparse(hosts)
try:
user_and_pw, netloc = p.netloc.split('@', maxsplit=1)
except ValueError:
netloc = p.netloc
user_and_pw = 'crate'
try:
host, port = netloc.split(':', maxsplit=1)
except ValueError:
host = netloc
port = 5432
dbname = p.path[1:] if p.path else 'doc'
dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}'
if p.query:
dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items())
return dsn | python | def _to_dsn(hosts):
"""Convert a host URI into a dsn for aiopg.
>>> _to_dsn('aiopg://myhostname:4242/mydb')
'postgres://crate@myhostname:4242/mydb'
>>> _to_dsn('aiopg://myhostname:4242')
'postgres://crate@myhostname:4242/doc'
>>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require')
'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require'
>>> _to_dsn('aiopg://myhostname')
'postgres://crate@myhostname:5432/doc'
"""
p = urlparse(hosts)
try:
user_and_pw, netloc = p.netloc.split('@', maxsplit=1)
except ValueError:
netloc = p.netloc
user_and_pw = 'crate'
try:
host, port = netloc.split(':', maxsplit=1)
except ValueError:
host = netloc
port = 5432
dbname = p.path[1:] if p.path else 'doc'
dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}'
if p.query:
dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items())
return dsn | [
"def",
"_to_dsn",
"(",
"hosts",
")",
":",
"p",
"=",
"urlparse",
"(",
"hosts",
")",
"try",
":",
"user_and_pw",
",",
"netloc",
"=",
"p",
".",
"netloc",
".",
"split",
"(",
"'@'",
",",
"maxsplit",
"=",
"1",
")",
"except",
"ValueError",
":",
"netloc",
"... | Convert a host URI into a dsn for aiopg.
>>> _to_dsn('aiopg://myhostname:4242/mydb')
'postgres://crate@myhostname:4242/mydb'
>>> _to_dsn('aiopg://myhostname:4242')
'postgres://crate@myhostname:4242/doc'
>>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require')
'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require'
>>> _to_dsn('aiopg://myhostname')
'postgres://crate@myhostname:5432/doc' | [
"Convert",
"a",
"host",
"URI",
"into",
"a",
"dsn",
"for",
"aiopg",
"."
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L146-L176 | train | 46,694 |
mfussenegger/cr8 | cr8/clients.py | _verify_ssl_from_first | def _verify_ssl_from_first(hosts):
"""Check if SSL validation parameter is passed in URI
>>> _verify_ssl_from_first(['https://myhost:4200/?verify_ssl=false'])
False
>>> _verify_ssl_from_first(['https://myhost:4200/'])
True
>>> _verify_ssl_from_first([
... 'https://h1:4200/?verify_ssl=False',
... 'https://h2:4200/?verify_ssl=True'
... ])
False
"""
for host in hosts:
query = parse_qs(urlparse(host).query)
if 'verify_ssl' in query:
return _to_boolean(query['verify_ssl'][0])
return True | python | def _verify_ssl_from_first(hosts):
"""Check if SSL validation parameter is passed in URI
>>> _verify_ssl_from_first(['https://myhost:4200/?verify_ssl=false'])
False
>>> _verify_ssl_from_first(['https://myhost:4200/'])
True
>>> _verify_ssl_from_first([
... 'https://h1:4200/?verify_ssl=False',
... 'https://h2:4200/?verify_ssl=True'
... ])
False
"""
for host in hosts:
query = parse_qs(urlparse(host).query)
if 'verify_ssl' in query:
return _to_boolean(query['verify_ssl'][0])
return True | [
"def",
"_verify_ssl_from_first",
"(",
"hosts",
")",
":",
"for",
"host",
"in",
"hosts",
":",
"query",
"=",
"parse_qs",
"(",
"urlparse",
"(",
"host",
")",
".",
"query",
")",
"if",
"'verify_ssl'",
"in",
"query",
":",
"return",
"_to_boolean",
"(",
"query",
"... | Check if SSL validation parameter is passed in URI
>>> _verify_ssl_from_first(['https://myhost:4200/?verify_ssl=false'])
False
>>> _verify_ssl_from_first(['https://myhost:4200/'])
True
>>> _verify_ssl_from_first([
... 'https://h1:4200/?verify_ssl=False',
... 'https://h2:4200/?verify_ssl=True'
... ])
False | [
"Check",
"if",
"SSL",
"validation",
"parameter",
"is",
"passed",
"in",
"URI"
] | a37d6049f1f9fee2d0556efae2b7b7f8761bffe8 | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L188-L207 | train | 46,695 |
nickmckay/LiPD-utilities | Python/lipd/tables.py | addTable | def addTable(D):
"""
Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset)
"""
_swap = {
"1": "measurement",
"2": "summary",
"3": "ensemble",
"4": "distribution"
}
print("What type of table would you like to add?\n"
"1: measurement\n"
"2: summary\n"
"3: ensemble (under development)\n"
"4: distribution (under development)\n"
"\n Note: if you want to add a whole model, use the addModel() function")
_ans = input(">")
if _ans in ["3", "4"]:
print("I don't know how to do that yet.")
# if this is a summary or measurement, split the csv into each column
elif _ans in ["1", "2"]:
# read in a csv file. have the user point to it
print("Locate the CSV file with the values for this table: ")
_path, _files = browse_dialog_file()
_path = _confirm_file_path(_files)
_values = read_csv_from_file(_path)
_table = _build_table(_values)
_placement = _prompt_placement(D, _swap[_ans])
D = _put_table(D, _placement, _table)
else:
print("That's not a valid option")
return D | python | def addTable(D):
"""
Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset)
"""
_swap = {
"1": "measurement",
"2": "summary",
"3": "ensemble",
"4": "distribution"
}
print("What type of table would you like to add?\n"
"1: measurement\n"
"2: summary\n"
"3: ensemble (under development)\n"
"4: distribution (under development)\n"
"\n Note: if you want to add a whole model, use the addModel() function")
_ans = input(">")
if _ans in ["3", "4"]:
print("I don't know how to do that yet.")
# if this is a summary or measurement, split the csv into each column
elif _ans in ["1", "2"]:
# read in a csv file. have the user point to it
print("Locate the CSV file with the values for this table: ")
_path, _files = browse_dialog_file()
_path = _confirm_file_path(_files)
_values = read_csv_from_file(_path)
_table = _build_table(_values)
_placement = _prompt_placement(D, _swap[_ans])
D = _put_table(D, _placement, _table)
else:
print("That's not a valid option")
return D | [
"def",
"addTable",
"(",
"D",
")",
":",
"_swap",
"=",
"{",
"\"1\"",
":",
"\"measurement\"",
",",
"\"2\"",
":",
"\"summary\"",
",",
"\"3\"",
":",
"\"ensemble\"",
",",
"\"4\"",
":",
"\"distribution\"",
"}",
"print",
"(",
"\"What type of table would you like to add?... | Add any table type to the given dataset. Use prompts to determine index locations and table type.
:param dict D: Metadata (dataset)
:param dict dat: Metadata (table)
:return dict D: Metadata (dataset) | [
"Add",
"any",
"table",
"type",
"to",
"the",
"given",
"dataset",
".",
"Use",
"prompts",
"to",
"determine",
"index",
"locations",
"and",
"table",
"type",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/tables.py#L13-L55 | train | 46,696 |
nickmckay/LiPD-utilities | Python/lipd/tables.py | _prompt_placement | def _prompt_placement(D, tt):
"""
Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user.
:param dict D: Metadata
:param str tt: Table type
:return str _model_name: Chosen model name for placement
"""
_model_name = ""
# There wasn't a table name match, so we need prompts to fix it
_placement_options = _get_available_placements(D, tt)
print("Please choose where you'd like to place this model:")
for _idx, _opt in enumerate(_placement_options):
print("({}) {}".format(_idx, _opt))
_choice = input("> ")
try:
if int(_choice) <= len(_placement_options) and _choice:
# Get the option the user chose
_model_name = _placement_options[int(_choice)]
else:
# They user chose an option out of the placement list range
print("Invalid choice input")
return
except Exception as e:
# Choice was not a number or empty
print("Invalid choice")
return _model_name | python | def _prompt_placement(D, tt):
"""
Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user.
:param dict D: Metadata
:param str tt: Table type
:return str _model_name: Chosen model name for placement
"""
_model_name = ""
# There wasn't a table name match, so we need prompts to fix it
_placement_options = _get_available_placements(D, tt)
print("Please choose where you'd like to place this model:")
for _idx, _opt in enumerate(_placement_options):
print("({}) {}".format(_idx, _opt))
_choice = input("> ")
try:
if int(_choice) <= len(_placement_options) and _choice:
# Get the option the user chose
_model_name = _placement_options[int(_choice)]
else:
# They user chose an option out of the placement list range
print("Invalid choice input")
return
except Exception as e:
# Choice was not a number or empty
print("Invalid choice")
return _model_name | [
"def",
"_prompt_placement",
"(",
"D",
",",
"tt",
")",
":",
"_model_name",
"=",
"\"\"",
"# There wasn't a table name match, so we need prompts to fix it",
"_placement_options",
"=",
"_get_available_placements",
"(",
"D",
",",
"tt",
")",
"print",
"(",
"\"Please choose where... | Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user.
:param dict D: Metadata
:param str tt: Table type
:return str _model_name: Chosen model name for placement | [
"Since",
"automatic",
"placement",
"didn",
"t",
"work",
"find",
"somewhere",
"to",
"place",
"the",
"model",
"data",
"manually",
"with",
"the",
"help",
"of",
"the",
"user",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/tables.py#L178-L205 | train | 46,697 |
nickmckay/LiPD-utilities | Python/lipd/tables.py | _put_table | def _put_table(D, name, table):
"""
Use the dataset and name to place the new table data into the dataset.
:param dict D: Dataset
:param str name: Table name / path to store new table
:param dict table: Newly created table data
:return dict D: Dataset
"""
try:
# print("Placing table: {}".format(name))
table["tableName"] = name
m = re.match(re_table_name, name)
if m:
_pc = m.group(1) + "Data"
_section = m.group(1) + m.group(2)
# place a measurement table
if m.group(3) == "measurement":
# This shouldn't happen. User chose one of our options. That should be an empty location.
if name in D[_pc][_section]["measurementTable"]:
print("Oops. This shouldn't happen. That table path is occupied in the dataset")
# Place the data
else:
D[_pc][_section]["measurementTable"][name] = table
# place a model table type
else:
_model = _section + m.group(3) + m.group(4)
_tt = m.group(5) + "Table"
if name in D[_pc][_model][_tt]:
print("Oops. This shouldn't happen. That table path is occupied in the dataset")
else:
D[_pc][_model][_tt][name] = table
else:
print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error")
return
except Exception as e:
print("addTable: Unable to put the table data into the dataset, {}".format(e))
return D | python | def _put_table(D, name, table):
"""
Use the dataset and name to place the new table data into the dataset.
:param dict D: Dataset
:param str name: Table name / path to store new table
:param dict table: Newly created table data
:return dict D: Dataset
"""
try:
# print("Placing table: {}".format(name))
table["tableName"] = name
m = re.match(re_table_name, name)
if m:
_pc = m.group(1) + "Data"
_section = m.group(1) + m.group(2)
# place a measurement table
if m.group(3) == "measurement":
# This shouldn't happen. User chose one of our options. That should be an empty location.
if name in D[_pc][_section]["measurementTable"]:
print("Oops. This shouldn't happen. That table path is occupied in the dataset")
# Place the data
else:
D[_pc][_section]["measurementTable"][name] = table
# place a model table type
else:
_model = _section + m.group(3) + m.group(4)
_tt = m.group(5) + "Table"
if name in D[_pc][_model][_tt]:
print("Oops. This shouldn't happen. That table path is occupied in the dataset")
else:
D[_pc][_model][_tt][name] = table
else:
print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error")
return
except Exception as e:
print("addTable: Unable to put the table data into the dataset, {}".format(e))
return D | [
"def",
"_put_table",
"(",
"D",
",",
"name",
",",
"table",
")",
":",
"try",
":",
"# print(\"Placing table: {}\".format(name))",
"table",
"[",
"\"tableName\"",
"]",
"=",
"name",
"m",
"=",
"re",
".",
"match",
"(",
"re_table_name",
",",
"name",
")",
"if",
"m",... | Use the dataset and name to place the new table data into the dataset.
:param dict D: Dataset
:param str name: Table name / path to store new table
:param dict table: Newly created table data
:return dict D: Dataset | [
"Use",
"the",
"dataset",
"and",
"name",
"to",
"place",
"the",
"new",
"table",
"data",
"into",
"the",
"dataset",
"."
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/tables.py#L208-L249 | train | 46,698 |
nickmckay/LiPD-utilities | Python/lipd/tables.py | addModel | def addModel(D, models):
"""
Insert model data into a LiPD dataset
Examples of model naming:
chron0model0
chron0model1
chron1model0
Example of 'models' variable:
{
chron0model0: {
"method": {...},
"summaryTable": [...],
"ensembleTable": [...],
"distributionTable: [...]
},
chron0model1:...
}
:param dict D: Metadata (dataset)
:param dict models: Model data to add
:return dict D: Metadata (dataset)
"""
try:
# Loop for each model that needs to be added
for _model_name, _model_data in models.items():
# split the table name into a path that we can use
_m = re.match(re_model_name, _model_name)
if _m:
D = _put_model(D, _model_name, _model_data, _m)
else:
print("The table name found in the given model data isn't valid for automatic placement")
_placement_name = _prompt_placement(D, "model")
_m = re.match(re_model_name, _placement_name)
if _m:
D = _put_model(D, _placement_name, _model_data, _m)
else:
print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error")
return
except Exception as e:
print("addModel: Model data NOT added, {}".format(e))
return D | python | def addModel(D, models):
"""
Insert model data into a LiPD dataset
Examples of model naming:
chron0model0
chron0model1
chron1model0
Example of 'models' variable:
{
chron0model0: {
"method": {...},
"summaryTable": [...],
"ensembleTable": [...],
"distributionTable: [...]
},
chron0model1:...
}
:param dict D: Metadata (dataset)
:param dict models: Model data to add
:return dict D: Metadata (dataset)
"""
try:
# Loop for each model that needs to be added
for _model_name, _model_data in models.items():
# split the table name into a path that we can use
_m = re.match(re_model_name, _model_name)
if _m:
D = _put_model(D, _model_name, _model_data, _m)
else:
print("The table name found in the given model data isn't valid for automatic placement")
_placement_name = _prompt_placement(D, "model")
_m = re.match(re_model_name, _placement_name)
if _m:
D = _put_model(D, _placement_name, _model_data, _m)
else:
print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error")
return
except Exception as e:
print("addModel: Model data NOT added, {}".format(e))
return D | [
"def",
"addModel",
"(",
"D",
",",
"models",
")",
":",
"try",
":",
"# Loop for each model that needs to be added",
"for",
"_model_name",
",",
"_model_data",
"in",
"models",
".",
"items",
"(",
")",
":",
"# split the table name into a path that we can use",
"_m",
"=",
... | Insert model data into a LiPD dataset
Examples of model naming:
chron0model0
chron0model1
chron1model0
Example of 'models' variable:
{
chron0model0: {
"method": {...},
"summaryTable": [...],
"ensembleTable": [...],
"distributionTable: [...]
},
chron0model1:...
}
:param dict D: Metadata (dataset)
:param dict models: Model data to add
:return dict D: Metadata (dataset) | [
"Insert",
"model",
"data",
"into",
"a",
"LiPD",
"dataset"
] | 5dab6bbeffc5effd68e3a6beaca6b76aa928e860 | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/tables.py#L289-L333 | train | 46,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.