repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pymacaron/pymacaron | pymacaron/auth.py | get_user_token | def get_user_token():
"""Return the authenticated user's auth token"""
if not hasattr(stack.top, 'current_user'):
return ''
current_user = stack.top.current_user
return current_user.get('token', '') | python | def get_user_token():
"""Return the authenticated user's auth token"""
if not hasattr(stack.top, 'current_user'):
return ''
current_user = stack.top.current_user
return current_user.get('token', '') | [
"def",
"get_user_token",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"stack",
".",
"top",
",",
"'current_user'",
")",
":",
"return",
"''",
"current_user",
"=",
"stack",
".",
"top",
".",
"current_user",
"return",
"current_user",
".",
"get",
"(",
"'token'",
... | Return the authenticated user's auth token | [
"Return",
"the",
"authenticated",
"user",
"s",
"auth",
"token"
] | af244f203f8216108b39d374d46bf8e1813f13d5 | https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/auth.py#L249-L254 | train | 44,900 |
pymacaron/pymacaron | pymacaron/auth.py | get_token_issuer | def get_token_issuer():
"""Return the issuer in which this user's token was created"""
try:
current_user = stack.top.current_user
return current_user.get('iss', get_config().jwt_issuer)
except Exception:
pass
return get_config().jwt_issuer | python | def get_token_issuer():
"""Return the issuer in which this user's token was created"""
try:
current_user = stack.top.current_user
return current_user.get('iss', get_config().jwt_issuer)
except Exception:
pass
return get_config().jwt_issuer | [
"def",
"get_token_issuer",
"(",
")",
":",
"try",
":",
"current_user",
"=",
"stack",
".",
"top",
".",
"current_user",
"return",
"current_user",
".",
"get",
"(",
"'iss'",
",",
"get_config",
"(",
")",
".",
"jwt_issuer",
")",
"except",
"Exception",
":",
"pass"... | Return the issuer in which this user's token was created | [
"Return",
"the",
"issuer",
"in",
"which",
"this",
"user",
"s",
"token",
"was",
"created"
] | af244f203f8216108b39d374d46bf8e1813f13d5 | https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/auth.py#L257-L264 | train | 44,901 |
tristantao/py-ms-cognitive | py_ms_cognitive/py_ms_cognitive_search/py_ms_cognitive_news_search.py | PyMsCognitiveNewsSearch._search | def _search(self, limit, format):
'''
Returns a list of result objects, with the url for the next page MsCognitive search url.
'''
limit = min(limit, self.MAX_SEARCH_PER_QUERY)
payload = {
'q' : self.query,
'count' : limit, #currently 50 is max per search.
'offset': self.current_offset,
}
payload.update(self.CUSTOM_PARAMS)
headers = { 'Ocp-Apim-Subscription-Key' : self.api_key }
if not self.silent_fail:
QueryChecker.check_web_params(payload, headers)
response = requests.get(self.QUERY_URL, params=payload, headers=headers)
json_results = self.get_json_results(response)
packaged_results = [NewsResult(single_result_json) for single_result_json in json_results["value"]]
self.current_offset += min(50, limit, len(packaged_results))
return packaged_results | python | def _search(self, limit, format):
'''
Returns a list of result objects, with the url for the next page MsCognitive search url.
'''
limit = min(limit, self.MAX_SEARCH_PER_QUERY)
payload = {
'q' : self.query,
'count' : limit, #currently 50 is max per search.
'offset': self.current_offset,
}
payload.update(self.CUSTOM_PARAMS)
headers = { 'Ocp-Apim-Subscription-Key' : self.api_key }
if not self.silent_fail:
QueryChecker.check_web_params(payload, headers)
response = requests.get(self.QUERY_URL, params=payload, headers=headers)
json_results = self.get_json_results(response)
packaged_results = [NewsResult(single_result_json) for single_result_json in json_results["value"]]
self.current_offset += min(50, limit, len(packaged_results))
return packaged_results | [
"def",
"_search",
"(",
"self",
",",
"limit",
",",
"format",
")",
":",
"limit",
"=",
"min",
"(",
"limit",
",",
"self",
".",
"MAX_SEARCH_PER_QUERY",
")",
"payload",
"=",
"{",
"'q'",
":",
"self",
".",
"query",
",",
"'count'",
":",
"limit",
",",
"#curren... | Returns a list of result objects, with the url for the next page MsCognitive search url. | [
"Returns",
"a",
"list",
"of",
"result",
"objects",
"with",
"the",
"url",
"for",
"the",
"next",
"page",
"MsCognitive",
"search",
"url",
"."
] | 8f4b10df1b4bf1f2c9af64218cfcdc35176b75e7 | https://github.com/tristantao/py-ms-cognitive/blob/8f4b10df1b4bf1f2c9af64218cfcdc35176b75e7/py_ms_cognitive/py_ms_cognitive_search/py_ms_cognitive_news_search.py#L22-L41 | train | 44,902 |
openstack/python-monascaclient | monascaclient/v2_0/notifications.py | NotificationsManager.create | def create(self, **kwargs):
"""Create a notification."""
body = self.client.create(url=self.base_url,
json=kwargs)
return body | python | def create(self, **kwargs):
"""Create a notification."""
body = self.client.create(url=self.base_url,
json=kwargs)
return body | [
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"body",
"=",
"self",
".",
"client",
".",
"create",
"(",
"url",
"=",
"self",
".",
"base_url",
",",
"json",
"=",
"kwargs",
")",
"return",
"body"
] | Create a notification. | [
"Create",
"a",
"notification",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/notifications.py#L23-L27 | train | 44,903 |
openstack/python-monascaclient | monascaclient/v2_0/notifications.py | NotificationsManager.get | def get(self, **kwargs):
"""Get the details for a specific notification."""
# NOTE(trebskit) should actually be find_one, but
# monasca does not support expected response format
url = '%s/%s' % (self.base_url, kwargs['notification_id'])
resp = self.client.list(path=url)
return resp | python | def get(self, **kwargs):
"""Get the details for a specific notification."""
# NOTE(trebskit) should actually be find_one, but
# monasca does not support expected response format
url = '%s/%s' % (self.base_url, kwargs['notification_id'])
resp = self.client.list(path=url)
return resp | [
"def",
"get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# NOTE(trebskit) should actually be find_one, but",
"# monasca does not support expected response format",
"url",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"base_url",
",",
"kwargs",
"[",
"'notification_id'",
... | Get the details for a specific notification. | [
"Get",
"the",
"details",
"for",
"a",
"specific",
"notification",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/notifications.py#L29-L37 | train | 44,904 |
openstack/python-monascaclient | monascaclient/v2_0/notifications.py | NotificationsManager.delete | def delete(self, **kwargs):
"""Delete a notification."""
url = self.base_url + '/%s' % kwargs['notification_id']
resp = self.client.delete(url=url)
return resp | python | def delete(self, **kwargs):
"""Delete a notification."""
url = self.base_url + '/%s' % kwargs['notification_id']
resp = self.client.delete(url=url)
return resp | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"'/%s'",
"%",
"kwargs",
"[",
"'notification_id'",
"]",
"resp",
"=",
"self",
".",
"client",
".",
"delete",
"(",
"url",
"=",
"url",
")",
"retur... | Delete a notification. | [
"Delete",
"a",
"notification",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/notifications.py#L43-L47 | train | 44,905 |
openstack/python-monascaclient | monascaclient/v2_0/alarm_definitions.py | AlarmDefinitionsManager.update | def update(self, **kwargs):
"""Update a specific alarm definition."""
url_str = self.base_url + '/%s' % kwargs['alarm_id']
del kwargs['alarm_id']
resp = self.client.create(url=url_str,
method='PUT',
json=kwargs)
return resp | python | def update(self, **kwargs):
"""Update a specific alarm definition."""
url_str = self.base_url + '/%s' % kwargs['alarm_id']
del kwargs['alarm_id']
resp = self.client.create(url=url_str,
method='PUT',
json=kwargs)
return resp | [
"def",
"update",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"url_str",
"=",
"self",
".",
"base_url",
"+",
"'/%s'",
"%",
"kwargs",
"[",
"'alarm_id'",
"]",
"del",
"kwargs",
"[",
"'alarm_id'",
"]",
"resp",
"=",
"self",
".",
"client",
".",
"create",... | Update a specific alarm definition. | [
"Update",
"a",
"specific",
"alarm",
"definition",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/alarm_definitions.py#L49-L58 | train | 44,906 |
mdscruggs/ga | ga/examples/polynomials.py | PolyModelGA.compute_err | def compute_err(self, solution_y, coefficients):
"""
Return an error value by finding the absolute difference for each
element in a list of solution-generated y-values versus expected values.
Compounds error by 50% for each negative coefficient in the solution.
solution_y: list of y-values produced by a solution
coefficients: list of polynomial coefficients represented by the solution
return: error value
"""
error = 0
for modeled, expected in zip(solution_y, self.expected_values):
error += abs(modeled - expected)
if any([c < 0 for c in coefficients]):
error *= 1.5
return error | python | def compute_err(self, solution_y, coefficients):
"""
Return an error value by finding the absolute difference for each
element in a list of solution-generated y-values versus expected values.
Compounds error by 50% for each negative coefficient in the solution.
solution_y: list of y-values produced by a solution
coefficients: list of polynomial coefficients represented by the solution
return: error value
"""
error = 0
for modeled, expected in zip(solution_y, self.expected_values):
error += abs(modeled - expected)
if any([c < 0 for c in coefficients]):
error *= 1.5
return error | [
"def",
"compute_err",
"(",
"self",
",",
"solution_y",
",",
"coefficients",
")",
":",
"error",
"=",
"0",
"for",
"modeled",
",",
"expected",
"in",
"zip",
"(",
"solution_y",
",",
"self",
".",
"expected_values",
")",
":",
"error",
"+=",
"abs",
"(",
"modeled"... | Return an error value by finding the absolute difference for each
element in a list of solution-generated y-values versus expected values.
Compounds error by 50% for each negative coefficient in the solution.
solution_y: list of y-values produced by a solution
coefficients: list of polynomial coefficients represented by the solution
return: error value | [
"Return",
"an",
"error",
"value",
"by",
"finding",
"the",
"absolute",
"difference",
"for",
"each",
"element",
"in",
"a",
"list",
"of",
"solution",
"-",
"generated",
"y",
"-",
"values",
"versus",
"expected",
"values",
"."
] | adac7a004e5e22d888e44ab39f313064c3803b38 | https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/examples/polynomials.py#L44-L63 | train | 44,907 |
fabaff/python-mystrom | pymystrom/cli.py | read_config | def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json()) | python | def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json()) | [
"def",
"read_config",
"(",
"ip",
",",
"mac",
")",
":",
"click",
".",
"echo",
"(",
"\"Read configuration from %s\"",
"%",
"ip",
")",
"request",
"=",
"requests",
".",
"get",
"(",
"'http://{}/{}/{}/'",
".",
"format",
"(",
"ip",
",",
"URI",
",",
"mac",
")",
... | Read the current configuration of a myStrom device. | [
"Read",
"the",
"current",
"configuration",
"of",
"a",
"myStrom",
"device",
"."
] | 86410f8952104651ef76ad37c84c29740c50551e | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L35-L40 | train | 44,908 |
fabaff/python-mystrom | pymystrom/cli.py | write_config | def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac) | python | def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac) | [
"def",
"write_config",
"(",
"ip",
",",
"mac",
",",
"single",
",",
"double",
",",
"long",
",",
"touch",
")",
":",
"click",
".",
"echo",
"(",
"\"Write configuration to device %s\"",
"%",
"ip",
")",
"data",
"=",
"{",
"'single'",
":",
"single",
",",
"'double... | Write the current configuration of a myStrom button. | [
"Write",
"the",
"current",
"configuration",
"of",
"a",
"myStrom",
"button",
"."
] | 86410f8952104651ef76ad37c84c29740c50551e | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L61-L74 | train | 44,909 |
fabaff/python-mystrom | pymystrom/cli.py | write_ha_config | def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id) | python | def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id) | [
"def",
"write_ha_config",
"(",
"ip",
",",
"mac",
",",
"hass",
",",
"port",
",",
"id",
")",
":",
"click",
".",
"echo",
"(",
"\"Write configuration for Home Assistant to device %s...\"",
"%",
"ip",
")",
"action",
"=",
"\"get://{1}:{2}/api/mystrom?{0}={3}\"",
"data",
... | Write the configuration for Home Assistant to a myStrom button. | [
"Write",
"the",
"configuration",
"for",
"Home",
"Assistant",
"to",
"a",
"myStrom",
"button",
"."
] | 86410f8952104651ef76ad37c84c29740c50551e | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L89-L106 | train | 44,910 |
fabaff/python-mystrom | pymystrom/cli.py | reset_config | def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac) | python | def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac) | [
"def",
"reset_config",
"(",
"ip",
",",
"mac",
")",
":",
"click",
".",
"echo",
"(",
"\"Reset configuration of button %s...\"",
"%",
"ip",
")",
"data",
"=",
"{",
"'single'",
":",
"\"\"",
",",
"'double'",
":",
"\"\"",
",",
"'long'",
":",
"\"\"",
",",
"'touc... | Reset the current configuration of a myStrom WiFi Button. | [
"Reset",
"the",
"current",
"configuration",
"of",
"a",
"myStrom",
"WiFi",
"Button",
"."
] | 86410f8952104651ef76ad37c84c29740c50551e | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L114-L127 | train | 44,911 |
fabaff/python-mystrom | pymystrom/cli.py | color | def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value) | python | def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value) | [
"def",
"color",
"(",
"ip",
",",
"mac",
",",
"hue",
",",
"saturation",
",",
"value",
")",
":",
"bulb",
"=",
"MyStromBulb",
"(",
"ip",
",",
"mac",
")",
"bulb",
".",
"set_color_hsv",
"(",
"hue",
",",
"saturation",
",",
"value",
")"
] | Switch the bulb on with the given color. | [
"Switch",
"the",
"bulb",
"on",
"with",
"the",
"given",
"color",
"."
] | 86410f8952104651ef76ad37c84c29740c50551e | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L157-L160 | train | 44,912 |
pymacaron/pymacaron | pymacaron/utils.py | to_epoch | def to_epoch(t):
"""Take a datetime, either as a string or a datetime.datetime object,
and return the corresponding epoch"""
if isinstance(t, str):
if '+' not in t:
t = t + '+00:00'
t = parser.parse(t)
elif t.tzinfo is None or t.tzinfo.utcoffset(t) is None:
t = t.replace(tzinfo=pytz.timezone('utc'))
t0 = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, pytz.timezone('utc'))
delta = t - t0
return int(delta.total_seconds()) | python | def to_epoch(t):
"""Take a datetime, either as a string or a datetime.datetime object,
and return the corresponding epoch"""
if isinstance(t, str):
if '+' not in t:
t = t + '+00:00'
t = parser.parse(t)
elif t.tzinfo is None or t.tzinfo.utcoffset(t) is None:
t = t.replace(tzinfo=pytz.timezone('utc'))
t0 = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, pytz.timezone('utc'))
delta = t - t0
return int(delta.total_seconds()) | [
"def",
"to_epoch",
"(",
"t",
")",
":",
"if",
"isinstance",
"(",
"t",
",",
"str",
")",
":",
"if",
"'+'",
"not",
"in",
"t",
":",
"t",
"=",
"t",
"+",
"'+00:00'",
"t",
"=",
"parser",
".",
"parse",
"(",
"t",
")",
"elif",
"t",
".",
"tzinfo",
"is",
... | Take a datetime, either as a string or a datetime.datetime object,
and return the corresponding epoch | [
"Take",
"a",
"datetime",
"either",
"as",
"a",
"string",
"or",
"a",
"datetime",
".",
"datetime",
"object",
"and",
"return",
"the",
"corresponding",
"epoch"
] | af244f203f8216108b39d374d46bf8e1813f13d5 | https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/utils.py#L48-L60 | train | 44,913 |
pymacaron/pymacaron | pymacaron/utils.py | get_container_version | def get_container_version():
"""Return the version of the docker container running the present server,
or '' if not in a container"""
root_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version_file = os.path.join(root_dir, 'VERSION')
if os.path.exists(version_file):
with open(version_file) as f:
return f.read()
return '' | python | def get_container_version():
"""Return the version of the docker container running the present server,
or '' if not in a container"""
root_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version_file = os.path.join(root_dir, 'VERSION')
if os.path.exists(version_file):
with open(version_file) as f:
return f.read()
return '' | [
"def",
"get_container_version",
"(",
")",
":",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"version_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
... | Return the version of the docker container running the present server,
or '' if not in a container | [
"Return",
"the",
"version",
"of",
"the",
"docker",
"container",
"running",
"the",
"present",
"server",
"or",
"if",
"not",
"in",
"a",
"container"
] | af244f203f8216108b39d374d46bf8e1813f13d5 | https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/utils.py#L68-L76 | train | 44,914 |
mdscruggs/ga | ga/examples/__init__.py | run_all | def run_all(plot=True, seed=None):
""" Run all examples. """
if seed is not None:
import random
random.seed(seed)
print("Running biggest_multiple.py")
biggest_multiple.run(plot=plot)
print("Running polynomials.py")
polynomials.run(plot=plot)
print("Running travelling_salesman.py")
travelling_salesman.run(plot=plot)
print("Running irrigation.py")
irrigation.run() | python | def run_all(plot=True, seed=None):
""" Run all examples. """
if seed is not None:
import random
random.seed(seed)
print("Running biggest_multiple.py")
biggest_multiple.run(plot=plot)
print("Running polynomials.py")
polynomials.run(plot=plot)
print("Running travelling_salesman.py")
travelling_salesman.run(plot=plot)
print("Running irrigation.py")
irrigation.run() | [
"def",
"run_all",
"(",
"plot",
"=",
"True",
",",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
"is",
"not",
"None",
":",
"import",
"random",
"random",
".",
"seed",
"(",
"seed",
")",
"print",
"(",
"\"Running biggest_multiple.py\"",
")",
"biggest_multiple",
... | Run all examples. | [
"Run",
"all",
"examples",
"."
] | adac7a004e5e22d888e44ab39f313064c3803b38 | https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/examples/__init__.py#L9-L25 | train | 44,915 |
mdscruggs/ga | ga/genes.py | BaseGene.copy | def copy(self):
""" Return a new instance of this gene with the same DNA. """
return type(self)(self.dna, suppressed=self.suppressed, name=self.name) | python | def copy(self):
""" Return a new instance of this gene with the same DNA. """
return type(self)(self.dna, suppressed=self.suppressed, name=self.name) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"type",
"(",
"self",
")",
"(",
"self",
".",
"dna",
",",
"suppressed",
"=",
"self",
".",
"suppressed",
",",
"name",
"=",
"self",
".",
"name",
")"
] | Return a new instance of this gene with the same DNA. | [
"Return",
"a",
"new",
"instance",
"of",
"this",
"gene",
"with",
"the",
"same",
"DNA",
"."
] | adac7a004e5e22d888e44ab39f313064c3803b38 | https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/genes.py#L81-L83 | train | 44,916 |
mdscruggs/ga | ga/genes.py | BaseGene._check_dna | def _check_dna(self, dna):
""" Check that a DNA string only contains characters in ``GENETIC_MATERIAL_OPTIONS``. """
valid_chars = set(self.GENETIC_MATERIAL_OPTIONS)
assert all(char in valid_chars for char in dna) | python | def _check_dna(self, dna):
""" Check that a DNA string only contains characters in ``GENETIC_MATERIAL_OPTIONS``. """
valid_chars = set(self.GENETIC_MATERIAL_OPTIONS)
assert all(char in valid_chars for char in dna) | [
"def",
"_check_dna",
"(",
"self",
",",
"dna",
")",
":",
"valid_chars",
"=",
"set",
"(",
"self",
".",
"GENETIC_MATERIAL_OPTIONS",
")",
"assert",
"all",
"(",
"char",
"in",
"valid_chars",
"for",
"char",
"in",
"dna",
")"
] | Check that a DNA string only contains characters in ``GENETIC_MATERIAL_OPTIONS``. | [
"Check",
"that",
"a",
"DNA",
"string",
"only",
"contains",
"characters",
"in",
"GENETIC_MATERIAL_OPTIONS",
"."
] | adac7a004e5e22d888e44ab39f313064c3803b38 | https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/genes.py#L85-L88 | train | 44,917 |
mdscruggs/ga | ga/genes.py | BinaryGene.mutate | def mutate(self, p_mutate):
"""
Check each element for mutation, swapping "0" for "1" and vice-versa.
"""
new_dna = []
for bit in self.dna:
if random.random() < p_mutate:
bit = '1' if bit == '0' else '0'
new_dna.append(bit)
self.dna = ''.join(new_dna) | python | def mutate(self, p_mutate):
"""
Check each element for mutation, swapping "0" for "1" and vice-versa.
"""
new_dna = []
for bit in self.dna:
if random.random() < p_mutate:
bit = '1' if bit == '0' else '0'
new_dna.append(bit)
self.dna = ''.join(new_dna) | [
"def",
"mutate",
"(",
"self",
",",
"p_mutate",
")",
":",
"new_dna",
"=",
"[",
"]",
"for",
"bit",
"in",
"self",
".",
"dna",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"p_mutate",
":",
"bit",
"=",
"'1'",
"if",
"bit",
"==",
"'0'",
"else",
... | Check each element for mutation, swapping "0" for "1" and vice-versa. | [
"Check",
"each",
"element",
"for",
"mutation",
"swapping",
"0",
"for",
"1",
"and",
"vice",
"-",
"versa",
"."
] | adac7a004e5e22d888e44ab39f313064c3803b38 | https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/genes.py#L104-L116 | train | 44,918 |
castle/castle-python | castle/utils.py | deep_merge | def deep_merge(base, extra):
"""
Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
if extra is None:
return
for key, value in extra.items():
if value is None:
if key in base:
del base[key]
# If the key represents a dict on both given dicts, merge the sub-dicts
elif isinstance(base.get(key), dict) and isinstance(value, dict):
deep_merge(base[key], value)
else:
# Otherwise, set the key on the base to be the value of the extra.
base[key] = value | python | def deep_merge(base, extra):
"""
Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
if extra is None:
return
for key, value in extra.items():
if value is None:
if key in base:
del base[key]
# If the key represents a dict on both given dicts, merge the sub-dicts
elif isinstance(base.get(key), dict) and isinstance(value, dict):
deep_merge(base[key], value)
else:
# Otherwise, set the key on the base to be the value of the extra.
base[key] = value | [
"def",
"deep_merge",
"(",
"base",
",",
"extra",
")",
":",
"if",
"extra",
"is",
"None",
":",
"return",
"for",
"key",
",",
"value",
"in",
"extra",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"None",
":",
"if",
"key",
"in",
"base",
":",
"del",... | Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence. | [
"Deeply",
"merge",
"two",
"dictionaries",
"overriding",
"existing",
"keys",
"in",
"the",
"base",
"."
] | 5f8736932e4b6ecdb712885eef393b6c0c53cfeb | https://github.com/castle/castle-python/blob/5f8736932e4b6ecdb712885eef393b6c0c53cfeb/castle/utils.py#L9-L29 | train | 44,919 |
openstack/python-monascaclient | monascaclient/osc/migration.py | make_client | def make_client(api_version, session=None,
endpoint=None, service_type='monitoring'):
"""Returns an monitoring API client."""
client_cls = utils.get_client_class('monitoring', api_version, VERSION_MAP)
c = client_cls(
session=session,
service_type=service_type,
endpoint=endpoint,
app_name='monascaclient',
app_version=version.version_string,
)
return c | python | def make_client(api_version, session=None,
endpoint=None, service_type='monitoring'):
"""Returns an monitoring API client."""
client_cls = utils.get_client_class('monitoring', api_version, VERSION_MAP)
c = client_cls(
session=session,
service_type=service_type,
endpoint=endpoint,
app_name='monascaclient',
app_version=version.version_string,
)
return c | [
"def",
"make_client",
"(",
"api_version",
",",
"session",
"=",
"None",
",",
"endpoint",
"=",
"None",
",",
"service_type",
"=",
"'monitoring'",
")",
":",
"client_cls",
"=",
"utils",
".",
"get_client_class",
"(",
"'monitoring'",
",",
"api_version",
",",
"VERSION... | Returns an monitoring API client. | [
"Returns",
"an",
"monitoring",
"API",
"client",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/osc/migration.py#L34-L47 | train | 44,920 |
openstack/python-monascaclient | monascaclient/osc/migration.py | create_command_class | def create_command_class(name, func_module):
"""Dynamically creates subclass of MigratingCommand.
Method takes name of the function, module it is part of
and builds the subclass of :py:class:`MigratingCommand`.
Having a subclass of :py:class:`cliff.command.Command` is mandatory
for the osc-lib integration.
:param name: name of the function
:type name: basestring
:param func_module: the module function is part of
:type func_module: module
:return: command name, subclass of :py:class:`MigratingCommand`
:rtype: tuple(basestring, class)
"""
cmd_name = name[3:].replace('_', '-')
callback = getattr(func_module, name)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
body = {
'_args': arguments,
'_callback': staticmethod(callback),
'_description': desc,
'_epilog': desc,
'_help': help
}
claz = type('%sCommand' % cmd_name.title().replace('-', ''),
(MigratingCommand,), body)
return cmd_name, claz | python | def create_command_class(name, func_module):
"""Dynamically creates subclass of MigratingCommand.
Method takes name of the function, module it is part of
and builds the subclass of :py:class:`MigratingCommand`.
Having a subclass of :py:class:`cliff.command.Command` is mandatory
for the osc-lib integration.
:param name: name of the function
:type name: basestring
:param func_module: the module function is part of
:type func_module: module
:return: command name, subclass of :py:class:`MigratingCommand`
:rtype: tuple(basestring, class)
"""
cmd_name = name[3:].replace('_', '-')
callback = getattr(func_module, name)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
body = {
'_args': arguments,
'_callback': staticmethod(callback),
'_description': desc,
'_epilog': desc,
'_help': help
}
claz = type('%sCommand' % cmd_name.title().replace('-', ''),
(MigratingCommand,), body)
return cmd_name, claz | [
"def",
"create_command_class",
"(",
"name",
",",
"func_module",
")",
":",
"cmd_name",
"=",
"name",
"[",
"3",
":",
"]",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"callback",
"=",
"getattr",
"(",
"func_module",
",",
"name",
")",
"desc",
"=",
"callback... | Dynamically creates subclass of MigratingCommand.
Method takes name of the function, module it is part of
and builds the subclass of :py:class:`MigratingCommand`.
Having a subclass of :py:class:`cliff.command.Command` is mandatory
for the osc-lib integration.
:param name: name of the function
:type name: basestring
:param func_module: the module function is part of
:type func_module: module
:return: command name, subclass of :py:class:`MigratingCommand`
:rtype: tuple(basestring, class) | [
"Dynamically",
"creates",
"subclass",
"of",
"MigratingCommand",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/osc/migration.py#L50-L85 | train | 44,921 |
openstack/python-monascaclient | monascaclient/client.py | _session | def _session(kwargs):
"""Returns or reuses session.
Method takes care of providing instance of
session object for the client.
:param kwargs: all params (without api_version) client was initialized with
:type kwargs: dict
:returns: session object
:rtype keystoneauth1.session.Session
"""
if 'session' in kwargs:
LOG.debug('Reusing session')
sess = kwargs.get('session')
if not isinstance(sess, k_session.Session):
msg = ('session should be an instance of %s' % k_session.Session)
LOG.error(msg)
raise RuntimeError(msg)
else:
LOG.debug('Initializing new session')
auth = _get_auth_handler(kwargs)
sess = _get_session(auth, kwargs)
return sess | python | def _session(kwargs):
"""Returns or reuses session.
Method takes care of providing instance of
session object for the client.
:param kwargs: all params (without api_version) client was initialized with
:type kwargs: dict
:returns: session object
:rtype keystoneauth1.session.Session
"""
if 'session' in kwargs:
LOG.debug('Reusing session')
sess = kwargs.get('session')
if not isinstance(sess, k_session.Session):
msg = ('session should be an instance of %s' % k_session.Session)
LOG.error(msg)
raise RuntimeError(msg)
else:
LOG.debug('Initializing new session')
auth = _get_auth_handler(kwargs)
sess = _get_session(auth, kwargs)
return sess | [
"def",
"_session",
"(",
"kwargs",
")",
":",
"if",
"'session'",
"in",
"kwargs",
":",
"LOG",
".",
"debug",
"(",
"'Reusing session'",
")",
"sess",
"=",
"kwargs",
".",
"get",
"(",
"'session'",
")",
"if",
"not",
"isinstance",
"(",
"sess",
",",
"k_session",
... | Returns or reuses session.
Method takes care of providing instance of
session object for the client.
:param kwargs: all params (without api_version) client was initialized with
:type kwargs: dict
:returns: session object
:rtype keystoneauth1.session.Session | [
"Returns",
"or",
"reuses",
"session",
"."
] | 03b07534145928eb2debad938da033c232dda105 | https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/client.py#L45-L69 | train | 44,922 |
def load(cls, path):
    """
    Loads an instance of the class from an HDF5 file.

    Parameters
    ----------
    path : str
        Path to an HDF5 file. If `None`, the instance is built from an
        empty dictionary instead of reading a file.

    Examples
    --------
    This is an abstract data type, but let us say that ``Foo`` inherits
    from ``Saveable``. To construct an object of this class from a file, we
    do:

    >>> foo = Foo.load('foo.h5') #doctest: +SKIP
    """
    # Only touch the filesystem when an actual path was supplied.
    data = {} if path is None else io.load(path)
    return cls.load_from_dict(data)
def register(cls, name):
    """Decorator that registers a class under `name` in the registry."""
    def _decorator(subclass):
        # Expose the registered name through a read-only `name` property.
        subclass.name = property(lambda self: name)
        assert issubclass(subclass, cls), \
            "Must be subclass matching your NamedRegistry class"
        cls.REGISTRY[name] = subclass
        return subclass
    return _decorator
def construct(cls, name, *args, **kwargs):
    """
    Constructs an instance of a registered class given its name.
    Extra positional and keyword arguments are forwarded to the
    class constructor.
    """
    registered_cls = cls.REGISTRY[name]
    return registered_cls(*args, **kwargs)
def resize_by_factor(im, factor):
    """
    Resizes the image by a given factor: the image is pre-filtered with a
    Gaussian and resampled with bilinear interpolation, using
    scikit-image's `pyramid_reduce` / `pyramid_expand` under the hood.

    Returns the same object if factor is 1, not a copy.

    Parameters
    ----------
    im : ndarray, ndim=2 or 3
        Image. Either 2D or 3D with 3 or 4 channels.
    factor : float
        Resize factor, e.g. a factor of 0.5 will halve both sides.
    """
    _import_skimage()
    from skimage.transform.pyramids import pyramid_reduce, pyramid_expand
    if factor > 1:
        return pyramid_expand(im, upscale=factor)
    elif factor < 1:
        return pyramid_reduce(im, downscale=1 / factor)
    else:
        # factor == 1 (or non-comparable): nothing to do.
        return im
def asgray(im):
    """
    Returns a grayscale version of an image by averaging its color
    channels. An alpha channel, if present, is simply ignored. A
    grayscale input is returned unchanged (not copied).

    Parameters
    ----------
    image : ndarray, ndim 2 or 3
        RGB or grayscale image.

    Returns
    -------
    gray_image : ndarray, ndim 2
        Grayscale version of image.
    """
    ndim = im.ndim
    if ndim == 2:
        return im
    if ndim == 3 and im.shape[2] in (3, 4):
        # Average only the 3 color channels, dropping any alpha channel.
        return im[..., :3].mean(axis=-1)
    raise ValueError('Invalid image format')
def load(path, dtype=np.float64):
    """
    Loads an image from file.

    Parameters
    ----------
    path : str
        Path to image file.
    dtype : np.dtype
        Defaults to ``np.float64``, which means the image will be returned as a
        float with values between 0 and 1. If ``np.uint8`` is specified, the
        values will be between 0 and 255 and no conversion cost will be
        incurred.
    """
    _import_skimage()
    import skimage.io
    raw = skimage.io.imread(path)
    if dtype == np.uint8:
        return raw
    if dtype in {np.float16, np.float32, np.float64}:
        # Rescale 8-bit intensities into [0, 1].
        return raw.astype(dtype) / 255
    raise ValueError('Unsupported dtype')
def save(path, im):
    """
    Saves an image to file.

    A float image is assumed to have values in [0, 1] and is rescaled to
    8-bit; a uint8 image is written as-is.

    Parameters
    ----------
    path : str
        Path to which the image will be saved.
    im : ndarray (image)
        Image.
    """
    from PIL import Image
    if im.dtype == np.uint8:
        arr = im
    else:
        arr = (im * 255).astype(np.uint8)
    Image.fromarray(arr).save(path)
def integrate(ii, r0, c0, r1, c1):
    """
    Use an integral image to integrate over a given window.

    Parameters
    ----------
    ii : ndarray
        Integral image. The last axis is treated as channels, so the sum
        is returned per channel.
    r0, c0 : int
        Top-left corner of block to be summed.
    r1, c1 : int
        Bottom-right corner of block to be summed (inclusive).

    Returns
    -------
    S : ndarray
        Integral (sum) over the given window, one value per channel.
    """
    total = np.zeros(ii.shape[-1])
    total += ii[r1, c1]
    top, left = r0 - 1, c0 - 1
    # Standard inclusion/exclusion on the integral image; the corner
    # terms only exist when the window does not touch the array border.
    if top >= 0 and left >= 0:
        total += ii[top, left]
    if top >= 0:
        total -= ii[top, c1]
    if left >= 0:
        total -= ii[r1, left]
    return total
def offset(img, offset, fill_value=0):
    """
    Moves the contents of image without changing the image size. The missing
    values are given a specified fill value.

    Parameters
    ----------
    img : array
        Image.
    offset : (vertical_offset, horizontal_offset)
        Tuple of length 2, specifying the offset along the two axes.
    fill_value : dtype of img
        Fill value. Defaults to 0.
    """
    sh = img.shape
    if sh == (0, 0):
        return img
    out = np.empty(sh)
    out[:] = fill_value
    dv, dh = offset
    # Destination window (clipped to the image) ...
    r0, r1 = max(dv, 0), min(sh[0] + dv, sh[0])
    c0, c1 = max(dh, 0), min(sh[1] + dh, sh[1])
    # ... and the matching source window.
    sr0, sr1 = max(-dv, 0), min(sh[0] - dv, sh[0])
    sc0, sc1 = max(-dh, 0), min(sh[1] - dh, sh[1])
    out[r0:r1, c0:c1] = img[sr0:sr1, sc0:sc1]
    return out
def bounding_box(alpha, threshold=0.1):
    """
    Returns a bounding box of the support.

    Parameters
    ----------
    alpha : ndarray, ndim=2
        Any one-channel image where the background has zero or low intensity.
    threshold : float
        The threshold that divides background from foreground.

    Returns
    -------
    bounding_box : (top, left, bottom, right)
        The bounding box describing the smallest rectangle containing the
        foreground object, as defined by the threshold. Both corners are
        inclusive.
    """
    assert alpha.ndim == 2
    # Project the support onto each axis, then find the first/last index
    # exceeding the threshold.
    row_support = alpha.max(axis=1)
    col_support = alpha.max(axis=0)
    rows = np.where(row_support > threshold)[0]
    cols = np.where(col_support > threshold)[0]
    return (rows[0], cols[0], rows[-1], cols[-1])
def bounding_box_as_binary_map(alpha, threshold=0.1):
    """
    Similar to `bounding_box`, except returns the bounding box as a
    binary map the same size as the input.

    Same parameters as `bounding_box`.

    Returns
    -------
    binary_map : ndarray, ndim=2, dtype=np.bool_
        Binary map with True if object and False if background.
    """
    # Bug fix: forward `threshold` -- previously it was accepted but
    # silently ignored, so non-default thresholds had no effect.
    bb = bounding_box(alpha, threshold=threshold)
    binary_map = np.zeros(alpha.shape, dtype=np.bool_)
    # NOTE(review): `bounding_box` returns inclusive corners, yet the
    # slice below excludes the bottom row / right column -- looks like an
    # off-by-one, but kept as-is since callers may depend on it; confirm.
    binary_map[bb[0]:bb[2], bb[1]:bb[3]] = 1
    return binary_map
def extract_patches(images, patch_shape, samples_per_image=40, seed=0,
                    cycle=True):
    """
    Takes a set of images and yields randomly chosen patches of specified size.

    Parameters
    ----------
    images : iterable
        The images have to be iterable, and each element must be a Numpy array
        with at least two spatial 2 dimensions as the first and second axis.
    patch_shape : tuple, length 2
        The spatial shape of the patches that should be extracted. If the
        images have further dimensions beyond the spatial, the patches will
        copy these too.
    samples_per_image : int
        Samples to extract before moving on to the next image.
    seed : int
        Seed with which to select the patches.
    cycle : bool
        If True, then the function will produce patches indefinitely, by going
        back to the first image when all are done. If False, the iteration will
        stop when there are no more images.

    Returns
    -------
    patch_generator
        This function returns a generator that will produce patches.

    Examples
    --------
    >>> import deepdish as dd
    >>> import matplotlib.pylab as plt
    >>> import itertools
    >>> images = ag.io.load_example('mnist')

    Now, let us say we want to exact patches from the these, where each patch
    has at least some activity.

    >>> gen = dd.image.extract_patches(images, (5, 5))
    >>> gen = (x for x in gen if x.mean() > 0.1)
    >>> patches = np.array(list(itertools.islice(gen, 25)))
    >>> patches.shape
    (25, 5, 5)
    >>> dd.plot.images(patches)
    >>> plt.show()
    """
    rs = np.random.RandomState(seed)
    # Bug fix: honor the `cycle` flag. The original always wrapped the
    # images in itr.cycle(), so cycle=False never terminated as the
    # docstring promises.
    image_iter = itr.cycle(images) if cycle else iter(images)
    for Xi in image_iter:
        # Number of valid top-left corners along each spatial axis.
        w, h = [Xi.shape[i] - patch_shape[i] for i in range(2)]
        assert w > 0 and h > 0
        # Shuffle all possible positions, then take a random subset.
        indices = np.asarray(list(itr.product(range(w), range(h))))
        rs.shuffle(indices)
        for x, y in indices[:samples_per_image]:
            yield Xi[x:x + patch_shape[0], y:y + patch_shape[1]]
def bytesize(arr):
    """
    Returns the memory byte size of a Numpy array as an integer.
    """
    n_elements = np.prod(arr.shape)
    return n_elements * np.dtype(arr.dtype).itemsize
def apply_once(func, arr, axes, keepdims=True):
    """
    Similar to `numpy.apply_over_axes`, except this performs the operation over
    a flattened version of all the axes, meaning that the function will only be
    called once. This only makes a difference for non-linear functions.

    Parameters
    ----------
    func : callback
        Function that operates well on Numpy arrays and returns a single value
        of compatible dtype.
    arr : ndarray
        Array to do operation over.
    axes : int or iterable
        Specifies the axes to perform the operation. Only one call will be made
        to `func`, with all values flattened.
    keepdims : bool
        By default, this is True, so the collapsed dimensions remain with
        length 1. This is simlar to `numpy.apply_over_axes` in that regard. If
        this is set to False, the dimensions are removed, just like when using
        for instance `numpy.sum` over a single axis. Note that this is safer
        than subsequently calling squeeze, since this option will preserve
        length-1 dimensions that were not operated on.

    Examples
    --------
    >>> import deepdish as dd
    >>> import numpy as np
    >>> rs = np.random.RandomState(0)
    >>> x = rs.uniform(size=(10, 3, 3))

    Image that you have ten 3x3 images and you want to calculate each image's
    intensity standard deviation:

    >>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
    array([ 0.06056838,  0.08230712,  0.08135083,  0.09938963,  0.08533604,
            0.07830725,  0.066148  ,  0.07983019,  0.08134123,  0.01839635])

    This is the same as ``x.std(1).std(1)``, which is not the standard
    deviation of all 9 pixels together. To fix this we can flatten the pixels
    and try again:

    >>> x.reshape(10, 9).std(axis=1)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])

    This is exactly what this function does for you:

    >>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
    array([ 0.17648981,  0.32849108,  0.29409526,  0.25547501,  0.23649064,
            0.26928468,  0.20081239,  0.33052397,  0.29950855,  0.26535717])
    """
    # Normalize `axes` to a set of non-negative axis indices.
    all_axes = np.arange(arr.ndim)
    if isinstance(axes, int):
        axes = {axes}
    else:
        axes = set(axis % arr.ndim for axis in axes)

    # Swap the targeted axes into consecutive positions starting at the
    # smallest targeted axis, so they can be flattened with one reshape.
    # NOTE(review): iteration order over a set is not guaranteed by the
    # language; this appears to rely on small-int sets iterating in
    # ascending order -- confirm for arbitrary axis combinations.
    principal_axis = min(axes)
    for i, axis in enumerate(axes):
        axis0 = principal_axis + i
        if axis != axis0:
            all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]

    transposed_arr = arr.transpose(all_axes)

    # new_shape flattens the targeted axes into a single -1 entry;
    # new_shape_keepdims is the output shape with length-1 entries at the
    # collapsed axes (used only when keepdims=True).
    new_shape = []
    new_shape_keepdims = []
    for axis, dim in enumerate(arr.shape):
        if axis == principal_axis:
            new_shape.append(-1)
        elif axis not in axes:
            new_shape.append(dim)

        if axis in axes:
            new_shape_keepdims.append(1)
        else:
            new_shape_keepdims.append(dim)

    # Single call to `func` over the flattened target axes.
    collapsed = np.apply_along_axis(func,
                                    principal_axis,
                                    transposed_arr.reshape(new_shape))

    if keepdims:
        return collapsed.reshape(new_shape_keepdims)
    else:
        return collapsed
new_shape_keepdims = []
for axis, dim in enumerate(arr.shape):
if axis == principal_axis:
new_shape.append(-1)
elif axis not in axes:
new_shape.append(dim)
if axis in axes:
new_shape_keepdims.append(1)
else:
new_shape_keepdims.append(dim)
collapsed = np.apply_along_axis(func,
principal_axis,
transposed_arr.reshape(new_shape))
if keepdims:
return collapsed.reshape(new_shape_keepdims)
else:
return collapsed | [
"def",
"apply_once",
"(",
"func",
",",
"arr",
",",
"axes",
",",
"keepdims",
"=",
"True",
")",
":",
"all_axes",
"=",
"np",
".",
"arange",
"(",
"arr",
".",
"ndim",
")",
"if",
"isinstance",
"(",
"axes",
",",
"int",
")",
":",
"axes",
"=",
"{",
"axes"... | Similar to `numpy.apply_over_axes`, except this performs the operation over
a flattened version of all the axes, meaning that the function will only be
called once. This only makes a difference for non-linear functions.
Parameters
----------
func : callback
Function that operates well on Numpy arrays and returns a single value
of compatible dtype.
arr : ndarray
Array to do operation over.
axes : int or iterable
Specifies the axes to perform the operation. Only one call will be made
to `func`, with all values flattened.
keepdims : bool
By default, this is True, so the collapsed dimensions remain with
length 1. This is simlar to `numpy.apply_over_axes` in that regard. If
this is set to False, the dimensions are removed, just like when using
for instance `numpy.sum` over a single axis. Note that this is safer
than subsequently calling squeeze, since this option will preserve
length-1 dimensions that were not operated on.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.uniform(size=(10, 3, 3))
Image that you have ten 3x3 images and you want to calculate each image's
intensity standard deviation:
>>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604,
0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635])
This is the same as ``x.std(1).std(1)``, which is not the standard
deviation of all 9 pixels together. To fix this we can flatten the pixels
and try again:
>>> x.reshape(10, 9).std(axis=1)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
This is exactly what this function does for you:
>>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717]) | [
"Similar",
"to",
"numpy",
".",
"apply_over_axes",
"except",
"this",
"performs",
"the",
"operation",
"over",
"a",
"flattened",
"version",
"of",
"all",
"the",
"axes",
"meaning",
"that",
"the",
"function",
"will",
"only",
"be",
"called",
"once",
".",
"This",
"o... | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/core.py#L74-L162 | train | 44,936 |
uchicago-cs/deepdish | deepdish/core.py | tupled_argmax | def tupled_argmax(a):
"""
Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2)
"""
return np.unravel_index(np.argmax(a), np.shape(a)) | python | def tupled_argmax(a):
"""
Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2)
"""
return np.unravel_index(np.argmax(a), np.shape(a)) | [
"def",
"tupled_argmax",
"(",
"a",
")",
":",
"return",
"np",
".",
"unravel_index",
"(",
"np",
".",
"argmax",
"(",
"a",
")",
",",
"np",
".",
"shape",
"(",
"a",
")",
")"
] | Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2) | [
"Argmax",
"that",
"returns",
"an",
"index",
"tuple",
".",
"Note",
"that",
"numpy",
".",
"argmax",
"will",
"return",
"a",
"scalar",
"index",
"as",
"if",
"you",
"had",
"flattened",
"the",
"array",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/core.py#L165-L192 | train | 44,937 |
uchicago-cs/deepdish | deepdish/util/padding.py | pad | def pad(data, padwidth, value=0.0):
"""
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[..., :] = value
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
return new_data | python | def pad(data, padwidth, value=0.0):
"""
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[..., :] = value
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
return new_data | [
"def",
"pad",
"(",
"data",
",",
"padwidth",
",",
"value",
"=",
"0.0",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"shape",
"=",
"data",
".",
"shape",
"if",
"isinstance",
"(",
"padwidth",
",",
"int",
")",
":",
"padwidth",
"=",
"... | Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]]) | [
"Pad",
"an",
"array",
"with",
"a",
"specific",
"value",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/util/padding.py#L5-L50 | train | 44,938 |
uchicago-cs/deepdish | deepdish/util/padding.py | pad_to_size | def pad_to_size(data, shape, value=0.0):
"""
This is similar to `pad`, except you specify the final shape of the array.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning. If a dimension is specified as ``-1``, then it
will remain its current size along that dimension.
value : data.dtype
The value with which to pad. Default is ``0.0``. This can even be an
array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the
size of the padded array.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((4, 2))
>>> dd.util.pad_to_size(x, (5, 5))
array([[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
shape = [data.shape[i] if shape[i] == -1 else shape[i]
for i in range(len(shape))]
new_data = np.empty(shape)
new_data[:] = value
II = [slice((shape[i] - data.shape[i])//2,
(shape[i] - data.shape[i])//2 + data.shape[i])
for i in range(len(shape))]
new_data[II] = data
return new_data | python | def pad_to_size(data, shape, value=0.0):
"""
This is similar to `pad`, except you specify the final shape of the array.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning. If a dimension is specified as ``-1``, then it
will remain its current size along that dimension.
value : data.dtype
The value with which to pad. Default is ``0.0``. This can even be an
array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the
size of the padded array.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((4, 2))
>>> dd.util.pad_to_size(x, (5, 5))
array([[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
shape = [data.shape[i] if shape[i] == -1 else shape[i]
for i in range(len(shape))]
new_data = np.empty(shape)
new_data[:] = value
II = [slice((shape[i] - data.shape[i])//2,
(shape[i] - data.shape[i])//2 + data.shape[i])
for i in range(len(shape))]
new_data[II] = data
return new_data | [
"def",
"pad_to_size",
"(",
"data",
",",
"shape",
",",
"value",
"=",
"0.0",
")",
":",
"shape",
"=",
"[",
"data",
".",
"shape",
"[",
"i",
"]",
"if",
"shape",
"[",
"i",
"]",
"==",
"-",
"1",
"else",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"rang... | This is similar to `pad`, except you specify the final shape of the array.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning. If a dimension is specified as ``-1``, then it
will remain its current size along that dimension.
value : data.dtype
The value with which to pad. Default is ``0.0``. This can even be an
array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the
size of the padded array.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((4, 2))
>>> dd.util.pad_to_size(x, (5, 5))
array([[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]]) | [
"This",
"is",
"similar",
"to",
"pad",
"except",
"you",
"specify",
"the",
"final",
"shape",
"of",
"the",
"array",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/util/padding.py#L53-L96 | train | 44,939 |
uchicago-cs/deepdish | deepdish/util/padding.py | pad_repeat_border | def pad_repeat_border(data, padwidth):
"""
Similar to `pad`, except the border value from ``data`` is used to pad.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its borders:
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border(x, 2)
array([[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 4, 4, 4, 5, 6, 7, 7, 7],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
for i, pw in enumerate(padwidth):
if pw > 0:
selection = [slice(None)] * data.ndim
selection2 = [slice(None)] * data.ndim
# Lower boundary
selection[i] = slice(0, pw)
selection2[i] = slice(pw, pw+1)
new_data[tuple(selection)] = new_data[tuple(selection2)]
# Upper boundary
selection[i] = slice(-pw, None)
selection2[i] = slice(-pw-1, -pw)
new_data[tuple(selection)] = new_data[tuple(selection2)]
return new_data | python | def pad_repeat_border(data, padwidth):
"""
Similar to `pad`, except the border value from ``data`` is used to pad.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its borders:
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border(x, 2)
array([[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 4, 4, 4, 5, 6, 7, 7, 7],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
for i, pw in enumerate(padwidth):
if pw > 0:
selection = [slice(None)] * data.ndim
selection2 = [slice(None)] * data.ndim
# Lower boundary
selection[i] = slice(0, pw)
selection2[i] = slice(pw, pw+1)
new_data[tuple(selection)] = new_data[tuple(selection2)]
# Upper boundary
selection[i] = slice(-pw, None)
selection2[i] = slice(-pw-1, -pw)
new_data[tuple(selection)] = new_data[tuple(selection2)]
return new_data | [
"def",
"pad_repeat_border",
"(",
"data",
",",
"padwidth",
")",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"shape",
"=",
"data",
".",
"shape",
"if",
"isinstance",
"(",
"padwidth",
",",
"int",
")",
":",
"padwidth",
"=",
"(",
"padwidth",
... | Similar to `pad`, except the border value from ``data`` is used to pad.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its borders:
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border(x, 2)
array([[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 4, 4, 4, 5, 6, 7, 7, 7],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11]]) | [
"Similar",
"to",
"pad",
"except",
"the",
"border",
"value",
"from",
"data",
"is",
"used",
"to",
"pad",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/util/padding.py#L99-L157 | train | 44,940 |
uchicago-cs/deepdish | deepdish/util/padding.py | pad_repeat_border_corner | def pad_repeat_border_corner(data, shape):
"""
Similar to `pad_repeat_border`, except the padding is always done on the
upper end of each axis and the target size is specified.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its upper borders.
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border_corner(x, (5, 5))
array([[ 0., 1., 2., 3., 3.],
[ 4., 5., 6., 7., 7.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.]])
"""
new_data = np.empty(shape)
new_data[[slice(upper) for upper in data.shape]] = data
for i in range(len(shape)):
selection = [slice(None)]*i + [slice(data.shape[i], None)]
selection2 = [slice(None)]*i + [slice(data.shape[i]-1, data.shape[i])]
new_data[selection] = new_data[selection2]
return new_data | python | def pad_repeat_border_corner(data, shape):
"""
Similar to `pad_repeat_border`, except the padding is always done on the
upper end of each axis and the target size is specified.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its upper borders.
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border_corner(x, (5, 5))
array([[ 0., 1., 2., 3., 3.],
[ 4., 5., 6., 7., 7.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.]])
"""
new_data = np.empty(shape)
new_data[[slice(upper) for upper in data.shape]] = data
for i in range(len(shape)):
selection = [slice(None)]*i + [slice(data.shape[i], None)]
selection2 = [slice(None)]*i + [slice(data.shape[i]-1, data.shape[i])]
new_data[selection] = new_data[selection2]
return new_data | [
"def",
"pad_repeat_border_corner",
"(",
"data",
",",
"shape",
")",
":",
"new_data",
"=",
"np",
".",
"empty",
"(",
"shape",
")",
"new_data",
"[",
"[",
"slice",
"(",
"upper",
")",
"for",
"upper",
"in",
"data",
".",
"shape",
"]",
"]",
"=",
"data",
"for"... | Similar to `pad_repeat_border`, except the padding is always done on the
upper end of each axis and the target size is specified.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its upper borders.
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border_corner(x, (5, 5))
array([[ 0., 1., 2., 3., 3.],
[ 4., 5., 6., 7., 7.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.]]) | [
"Similar",
"to",
"pad_repeat_border",
"except",
"the",
"padding",
"is",
"always",
"done",
"on",
"the",
"upper",
"end",
"of",
"each",
"axis",
"and",
"the",
"target",
"size",
"is",
"specified",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/util/padding.py#L160-L197 | train | 44,941 |
uchicago-cs/deepdish | deepdish/io/hdf5io.py | _dict_native_ok | def _dict_native_ok(d):
"""
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
"""
if len(d) >= 256:
return False
# All keys must be strings
for k in d:
if not isinstance(k, six.string_types):
return False
return True | python | def _dict_native_ok(d):
"""
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
"""
if len(d) >= 256:
return False
# All keys must be strings
for k in d:
if not isinstance(k, six.string_types):
return False
return True | [
"def",
"_dict_native_ok",
"(",
"d",
")",
":",
"if",
"len",
"(",
"d",
")",
">=",
"256",
":",
"return",
"False",
"# All keys must be strings",
"for",
"k",
"in",
"d",
":",
"if",
"not",
"isinstance",
"(",
"k",
",",
"six",
".",
"string_types",
")",
":",
"... | This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled. | [
"This",
"checks",
"if",
"a",
"dictionary",
"can",
"be",
"saved",
"natively",
"as",
"HDF5",
"groups",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/hdf5io.py#L70-L84 | train | 44,942 |
uchicago-cs/deepdish | deepdish/io/hdf5io.py | _load_level | def _load_level(handler, level, pathtable):
"""
Loads level and builds appropriate type, handling softlinks if necessary
"""
if isinstance(level, tables.link.SoftLink):
# this is a link, so see if target is already loaded, return it
pathname = level.target
node = level()
else:
# not a link, but it might be a target that's already been
# loaded ... if so, return it
pathname = level._v_pathname
node = level
try:
return pathtable[pathname]
except KeyError:
pathtable[pathname] = _load_nonlink_level(handler, node, pathtable,
pathname)
return pathtable[pathname] | python | def _load_level(handler, level, pathtable):
"""
Loads level and builds appropriate type, handling softlinks if necessary
"""
if isinstance(level, tables.link.SoftLink):
# this is a link, so see if target is already loaded, return it
pathname = level.target
node = level()
else:
# not a link, but it might be a target that's already been
# loaded ... if so, return it
pathname = level._v_pathname
node = level
try:
return pathtable[pathname]
except KeyError:
pathtable[pathname] = _load_nonlink_level(handler, node, pathtable,
pathname)
return pathtable[pathname] | [
"def",
"_load_level",
"(",
"handler",
",",
"level",
",",
"pathtable",
")",
":",
"if",
"isinstance",
"(",
"level",
",",
"tables",
".",
"link",
".",
"SoftLink",
")",
":",
"# this is a link, so see if target is already loaded, return it",
"pathname",
"=",
"level",
".... | Loads level and builds appropriate type, handling softlinks if necessary | [
"Loads",
"level",
"and",
"builds",
"appropriate",
"type",
"handling",
"softlinks",
"if",
"necessary"
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/hdf5io.py#L465-L483 | train | 44,943 |
uchicago-cs/deepdish | deepdish/io/hdf5io.py | load | def load(path, group=None, sel=None, unpack=False):
"""
Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save
"""
with tables.open_file(path, mode='r') as h5file:
pathtable = {} # dict to keep track of objects already loaded
if group is not None:
if isinstance(group, str):
data = _load_specific_level(h5file, h5file, group, sel=sel,
pathtable=pathtable)
else: # Assume group is a list or tuple
data = []
for g in group:
data_i = _load_specific_level(h5file, h5file, g, sel=sel,
pathtable=pathtable)
data.append(data_i)
data = tuple(data)
else:
grp = h5file.root
auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
grp._v_attrs[DEEPDISH_IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file, grp, name, sel=sel,
pathtable=pathtable)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp, pathtable)
if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
else:
v = 0
if v > IO_VERSION:
warnings.warn('This file was saved with a newer version of '
'deepdish. Please upgrade to make sure it loads '
'correctly.')
# Attributes can't be unpacked with the method above, so fall back
# to this
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data | python | def load(path, group=None, sel=None, unpack=False):
"""
Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save
"""
with tables.open_file(path, mode='r') as h5file:
pathtable = {} # dict to keep track of objects already loaded
if group is not None:
if isinstance(group, str):
data = _load_specific_level(h5file, h5file, group, sel=sel,
pathtable=pathtable)
else: # Assume group is a list or tuple
data = []
for g in group:
data_i = _load_specific_level(h5file, h5file, g, sel=sel,
pathtable=pathtable)
data.append(data_i)
data = tuple(data)
else:
grp = h5file.root
auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
grp._v_attrs[DEEPDISH_IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file, grp, name, sel=sel,
pathtable=pathtable)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp, pathtable)
if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
else:
v = 0
if v > IO_VERSION:
warnings.warn('This file was saved with a newer version of '
'deepdish. Please upgrade to make sure it loads '
'correctly.')
# Attributes can't be unpacked with the method above, so fall back
# to this
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data | [
"def",
"load",
"(",
"path",
",",
"group",
"=",
"None",
",",
"sel",
"=",
"None",
",",
"unpack",
"=",
"False",
")",
":",
"with",
"tables",
".",
"open_file",
"(",
"path",
",",
"mode",
"=",
"'r'",
")",
"as",
"h5file",
":",
"pathtable",
"=",
"{",
"}",... | Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save | [
"Loads",
"an",
"HDF5",
"saved",
"with",
"save",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/hdf5io.py#L601-L679 | train | 44,944 |
uchicago-cs/deepdish | deepdish/io/ls.py | sorted_maybe_numeric | def sorted_maybe_numeric(x):
"""
Sorts x with numeric semantics if all keys are nonnegative integers.
Otherwise uses standard string sorting.
"""
all_numeric = all(map(str.isdigit, x))
if all_numeric:
return sorted(x, key=int)
else:
return sorted(x) | python | def sorted_maybe_numeric(x):
"""
Sorts x with numeric semantics if all keys are nonnegative integers.
Otherwise uses standard string sorting.
"""
all_numeric = all(map(str.isdigit, x))
if all_numeric:
return sorted(x, key=int)
else:
return sorted(x) | [
"def",
"sorted_maybe_numeric",
"(",
"x",
")",
":",
"all_numeric",
"=",
"all",
"(",
"map",
"(",
"str",
".",
"isdigit",
",",
"x",
")",
")",
"if",
"all_numeric",
":",
"return",
"sorted",
"(",
"x",
",",
"key",
"=",
"int",
")",
"else",
":",
"return",
"s... | Sorts x with numeric semantics if all keys are nonnegative integers.
Otherwise uses standard string sorting. | [
"Sorts",
"x",
"with",
"numeric",
"semantics",
"if",
"all",
"keys",
"are",
"nonnegative",
"integers",
".",
"Otherwise",
"uses",
"standard",
"string",
"sorting",
"."
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/ls.py#L64-L73 | train | 44,945 |
uchicago-cs/deepdish | deepdish/io/ls.py | abbreviate | def abbreviate(s, maxlength=25):
"""Color-aware abbreviator"""
assert maxlength >= 4
skip = False
abbrv = None
i = 0
for j, c in enumerate(s):
if c == '\033':
skip = True
elif skip:
if c == 'm':
skip = False
else:
i += 1
if i == maxlength - 1:
abbrv = s[:j] + '\033[0m...'
elif i > maxlength:
break
if i <= maxlength:
return s
else:
return abbrv | python | def abbreviate(s, maxlength=25):
"""Color-aware abbreviator"""
assert maxlength >= 4
skip = False
abbrv = None
i = 0
for j, c in enumerate(s):
if c == '\033':
skip = True
elif skip:
if c == 'm':
skip = False
else:
i += 1
if i == maxlength - 1:
abbrv = s[:j] + '\033[0m...'
elif i > maxlength:
break
if i <= maxlength:
return s
else:
return abbrv | [
"def",
"abbreviate",
"(",
"s",
",",
"maxlength",
"=",
"25",
")",
":",
"assert",
"maxlength",
">=",
"4",
"skip",
"=",
"False",
"abbrv",
"=",
"None",
"i",
"=",
"0",
"for",
"j",
",",
"c",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"c",
"==",
"'\... | Color-aware abbreviator | [
"Color",
"-",
"aware",
"abbreviator"
] | 01af93621fe082a3972fe53ba7375388c02b0085 | https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/io/ls.py#L117-L140 | train | 44,946 |
genialis/resolwe | resolwe/flow/executors/prepare.py | BaseFlowExecutorPreparer.extend_settings | def extend_settings(self, data_id, files, secrets):
"""Extend the settings the manager will serialize.
:param data_id: The :class:`~resolwe.flow.models.Data` object id
being prepared for.
:param files: The settings dictionary to be serialized. Keys are
filenames, values are the objects that will be serialized
into those files. Standard filenames are listed in
``resolwe.flow.managers.protocol.ExecutorFiles``.
:param secrets: Secret files dictionary describing additional secret
file content that should be created and made available to
processes with special permissions. Keys are filenames, values
are the raw strings that should be written into those files.
"""
data = Data.objects.select_related('process').get(pk=data_id)
files[ExecutorFiles.DJANGO_SETTINGS].update({
'USE_TZ': settings.USE_TZ,
'FLOW_EXECUTOR_TOOLS_PATHS': self.get_tools_paths(),
})
files[ExecutorFiles.DATA] = model_to_dict(data)
files[ExecutorFiles.DATA_LOCATION] = model_to_dict(data.location)
files[ExecutorFiles.PROCESS] = model_to_dict(data.process)
files[ExecutorFiles.PROCESS]['resource_limits'] = data.process.get_resource_limits()
# Add secrets if the process has permission to read them.
secrets.update(data.resolve_secrets()) | python | def extend_settings(self, data_id, files, secrets):
"""Extend the settings the manager will serialize.
:param data_id: The :class:`~resolwe.flow.models.Data` object id
being prepared for.
:param files: The settings dictionary to be serialized. Keys are
filenames, values are the objects that will be serialized
into those files. Standard filenames are listed in
``resolwe.flow.managers.protocol.ExecutorFiles``.
:param secrets: Secret files dictionary describing additional secret
file content that should be created and made available to
processes with special permissions. Keys are filenames, values
are the raw strings that should be written into those files.
"""
data = Data.objects.select_related('process').get(pk=data_id)
files[ExecutorFiles.DJANGO_SETTINGS].update({
'USE_TZ': settings.USE_TZ,
'FLOW_EXECUTOR_TOOLS_PATHS': self.get_tools_paths(),
})
files[ExecutorFiles.DATA] = model_to_dict(data)
files[ExecutorFiles.DATA_LOCATION] = model_to_dict(data.location)
files[ExecutorFiles.PROCESS] = model_to_dict(data.process)
files[ExecutorFiles.PROCESS]['resource_limits'] = data.process.get_resource_limits()
# Add secrets if the process has permission to read them.
secrets.update(data.resolve_secrets()) | [
"def",
"extend_settings",
"(",
"self",
",",
"data_id",
",",
"files",
",",
"secrets",
")",
":",
"data",
"=",
"Data",
".",
"objects",
".",
"select_related",
"(",
"'process'",
")",
".",
"get",
"(",
"pk",
"=",
"data_id",
")",
"files",
"[",
"ExecutorFiles",
... | Extend the settings the manager will serialize.
:param data_id: The :class:`~resolwe.flow.models.Data` object id
being prepared for.
:param files: The settings dictionary to be serialized. Keys are
filenames, values are the objects that will be serialized
into those files. Standard filenames are listed in
``resolwe.flow.managers.protocol.ExecutorFiles``.
:param secrets: Secret files dictionary describing additional secret
file content that should be created and made available to
processes with special permissions. Keys are filenames, values
are the raw strings that should be written into those files. | [
"Extend",
"the",
"settings",
"the",
"manager",
"will",
"serialize",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/prepare.py#L30-L56 | train | 44,947 |
genialis/resolwe | resolwe/flow/executors/prepare.py | BaseFlowExecutorPreparer.get_tools_paths | def get_tools_paths(self):
"""Get tools' paths."""
if settings.DEBUG or is_testing():
return list(get_apps_tools().values())
else:
tools_root = settings.FLOW_TOOLS_ROOT
subdirs = next(os.walk(tools_root))[1]
return [os.path.join(tools_root, sdir) for sdir in subdirs] | python | def get_tools_paths(self):
"""Get tools' paths."""
if settings.DEBUG or is_testing():
return list(get_apps_tools().values())
else:
tools_root = settings.FLOW_TOOLS_ROOT
subdirs = next(os.walk(tools_root))[1]
return [os.path.join(tools_root, sdir) for sdir in subdirs] | [
"def",
"get_tools_paths",
"(",
"self",
")",
":",
"if",
"settings",
".",
"DEBUG",
"or",
"is_testing",
"(",
")",
":",
"return",
"list",
"(",
"get_apps_tools",
"(",
")",
".",
"values",
"(",
")",
")",
"else",
":",
"tools_root",
"=",
"settings",
".",
"FLOW_... | Get tools' paths. | [
"Get",
"tools",
"paths",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/prepare.py#L58-L67 | train | 44,948 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.dump | def dump(self):
"""Dump the current state to debug level log."""
logging.debug('AccessPolicy:')
map(
logging.debug,
[
' {}'.format(s)
for s in pprint.pformat(self.get_normalized_perm_list()).splitlines()
],
) | python | def dump(self):
"""Dump the current state to debug level log."""
logging.debug('AccessPolicy:')
map(
logging.debug,
[
' {}'.format(s)
for s in pprint.pformat(self.get_normalized_perm_list()).splitlines()
],
) | [
"def",
"dump",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"'AccessPolicy:'",
")",
"map",
"(",
"logging",
".",
"debug",
",",
"[",
"' {}'",
".",
"format",
"(",
"s",
")",
"for",
"s",
"in",
"pprint",
".",
"pformat",
"(",
"self",
".",
"get_no... | Dump the current state to debug level log. | [
"Dump",
"the",
"current",
"state",
"to",
"debug",
"level",
"log",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L362-L371 | train | 44,949 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.add_authenticated_read | def add_authenticated_read(self):
"""Add ``read`` perm for all authenticated subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_AUTHENTICATED, 'read') | python | def add_authenticated_read(self):
"""Add ``read`` perm for all authenticated subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_AUTHENTICATED, 'read') | [
"def",
"add_authenticated_read",
"(",
"self",
")",
":",
"self",
".",
"remove_perm",
"(",
"d1_common",
".",
"const",
".",
"SUBJECT_PUBLIC",
",",
"'read'",
")",
"self",
".",
"add_perm",
"(",
"d1_common",
".",
"const",
".",
"SUBJECT_AUTHENTICATED",
",",
"'read'",... | Add ``read`` perm for all authenticated subj.
Public ``read`` is removed if present. | [
"Add",
"read",
"perm",
"for",
"all",
"authenticated",
"subj",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L456-L463 | train | 44,950 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.add_verified_read | def add_verified_read(self):
"""Add ``read`` perm for all verified subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_VERIFIED, 'read') | python | def add_verified_read(self):
"""Add ``read`` perm for all verified subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_VERIFIED, 'read') | [
"def",
"add_verified_read",
"(",
"self",
")",
":",
"self",
".",
"remove_perm",
"(",
"d1_common",
".",
"const",
".",
"SUBJECT_PUBLIC",
",",
"'read'",
")",
"self",
".",
"add_perm",
"(",
"d1_common",
".",
"const",
".",
"SUBJECT_VERIFIED",
",",
"'read'",
")"
] | Add ``read`` perm for all verified subj.
Public ``read`` is removed if present. | [
"Add",
"read",
"perm",
"for",
"all",
"verified",
"subj",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L465-L472 | train | 44,951 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.add_perm | def add_perm(self, subj_str, perm_str):
"""Add a permission for a subject.
Args:
subj_str : str
Subject for which to add permission(s)
perm_str : str
Permission to add. Implicitly adds all lower permissions. E.g., ``write``
will also add ``read``.
"""
self._assert_valid_permission(perm_str)
self._perm_dict.setdefault(perm_str, set()).add(subj_str) | python | def add_perm(self, subj_str, perm_str):
"""Add a permission for a subject.
Args:
subj_str : str
Subject for which to add permission(s)
perm_str : str
Permission to add. Implicitly adds all lower permissions. E.g., ``write``
will also add ``read``.
"""
self._assert_valid_permission(perm_str)
self._perm_dict.setdefault(perm_str, set()).add(subj_str) | [
"def",
"add_perm",
"(",
"self",
",",
"subj_str",
",",
"perm_str",
")",
":",
"self",
".",
"_assert_valid_permission",
"(",
"perm_str",
")",
"self",
".",
"_perm_dict",
".",
"setdefault",
"(",
"perm_str",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"subj_st... | Add a permission for a subject.
Args:
subj_str : str
Subject for which to add permission(s)
perm_str : str
Permission to add. Implicitly adds all lower permissions. E.g., ``write``
will also add ``read``. | [
"Add",
"a",
"permission",
"for",
"a",
"subject",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L474-L487 | train | 44,952 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.remove_perm | def remove_perm(self, subj_str, perm_str):
"""Remove permission from a subject.
Args:
subj_str : str
Subject for which to remove permission(s)
perm_str : str
Permission to remove. Implicitly removes all higher permissions. E.g., ``write``
will also remove ``changePermission`` if previously granted.
"""
self._assert_valid_permission(perm_str)
for perm_str in self._equal_or_higher_perm(perm_str):
self._perm_dict.setdefault(perm_str, set()).discard(subj_str) | python | def remove_perm(self, subj_str, perm_str):
"""Remove permission from a subject.
Args:
subj_str : str
Subject for which to remove permission(s)
perm_str : str
Permission to remove. Implicitly removes all higher permissions. E.g., ``write``
will also remove ``changePermission`` if previously granted.
"""
self._assert_valid_permission(perm_str)
for perm_str in self._equal_or_higher_perm(perm_str):
self._perm_dict.setdefault(perm_str, set()).discard(subj_str) | [
"def",
"remove_perm",
"(",
"self",
",",
"subj_str",
",",
"perm_str",
")",
":",
"self",
".",
"_assert_valid_permission",
"(",
"perm_str",
")",
"for",
"perm_str",
"in",
"self",
".",
"_equal_or_higher_perm",
"(",
"perm_str",
")",
":",
"self",
".",
"_perm_dict",
... | Remove permission from a subject.
Args:
subj_str : str
Subject for which to remove permission(s)
perm_str : str
Permission to remove. Implicitly removes all higher permissions. E.g., ``write``
will also remove ``changePermission`` if previously granted. | [
"Remove",
"permission",
"from",
"a",
"subject",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L489-L503 | train | 44,953 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper.remove_subj | def remove_subj(self, subj_str):
"""Remove all permissions for subject.
Args:
subj_str : str
Subject for which to remove all permissions. Since subjects can only be present
in the AccessPolicy when they have one or more permissions, this removes the
subject itself as well.
The subject may still have access to the obj. E.g.:
* The obj has public access.
* The subj has indirect access by being in a group which has access.
* The subj has an equivalent subj that has access.
* The subj is set as the rightsHolder for the object.
"""
for subj_set in list(self._perm_dict.values()):
subj_set -= {subj_str} | python | def remove_subj(self, subj_str):
"""Remove all permissions for subject.
Args:
subj_str : str
Subject for which to remove all permissions. Since subjects can only be present
in the AccessPolicy when they have one or more permissions, this removes the
subject itself as well.
The subject may still have access to the obj. E.g.:
* The obj has public access.
* The subj has indirect access by being in a group which has access.
* The subj has an equivalent subj that has access.
* The subj is set as the rightsHolder for the object.
"""
for subj_set in list(self._perm_dict.values()):
subj_set -= {subj_str} | [
"def",
"remove_subj",
"(",
"self",
",",
"subj_str",
")",
":",
"for",
"subj_set",
"in",
"list",
"(",
"self",
".",
"_perm_dict",
".",
"values",
"(",
")",
")",
":",
"subj_set",
"-=",
"{",
"subj_str",
"}"
] | Remove all permissions for subject.
Args:
subj_str : str
Subject for which to remove all permissions. Since subjects can only be present
in the AccessPolicy when they have one or more permissions, this removes the
subject itself as well.
The subject may still have access to the obj. E.g.:
* The obj has public access.
* The subj has indirect access by being in a group which has access.
* The subj has an equivalent subj that has access.
* The subj is set as the rightsHolder for the object. | [
"Remove",
"all",
"permissions",
"for",
"subject",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L505-L523 | train | 44,954 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._perm_dict_from_pyxb | def _perm_dict_from_pyxb(self, access_pyxb):
"""Return dict representation of AccessPolicy PyXB obj."""
subj_dict = self._subj_dict_from_pyxb(access_pyxb)
return self._perm_dict_from_subj_dict(subj_dict) | python | def _perm_dict_from_pyxb(self, access_pyxb):
"""Return dict representation of AccessPolicy PyXB obj."""
subj_dict = self._subj_dict_from_pyxb(access_pyxb)
return self._perm_dict_from_subj_dict(subj_dict) | [
"def",
"_perm_dict_from_pyxb",
"(",
"self",
",",
"access_pyxb",
")",
":",
"subj_dict",
"=",
"self",
".",
"_subj_dict_from_pyxb",
"(",
"access_pyxb",
")",
"return",
"self",
".",
"_perm_dict_from_subj_dict",
"(",
"subj_dict",
")"
] | Return dict representation of AccessPolicy PyXB obj. | [
"Return",
"dict",
"representation",
"of",
"AccessPolicy",
"PyXB",
"obj",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L529-L532 | train | 44,955 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._perm_dict_from_subj_dict | def _perm_dict_from_subj_dict(self, subj_dict):
"""Return dict where keys and values of ``subj_dict`` have been flipped
around."""
perm_dict = {}
for subj_str, perm_set in list(subj_dict.items()):
for perm_str in perm_set:
perm_dict.setdefault(perm_str, set()).add(subj_str)
return perm_dict | python | def _perm_dict_from_subj_dict(self, subj_dict):
"""Return dict where keys and values of ``subj_dict`` have been flipped
around."""
perm_dict = {}
for subj_str, perm_set in list(subj_dict.items()):
for perm_str in perm_set:
perm_dict.setdefault(perm_str, set()).add(subj_str)
return perm_dict | [
"def",
"_perm_dict_from_subj_dict",
"(",
"self",
",",
"subj_dict",
")",
":",
"perm_dict",
"=",
"{",
"}",
"for",
"subj_str",
",",
"perm_set",
"in",
"list",
"(",
"subj_dict",
".",
"items",
"(",
")",
")",
":",
"for",
"perm_str",
"in",
"perm_set",
":",
"perm... | Return dict where keys and values of ``subj_dict`` have been flipped
around. | [
"Return",
"dict",
"where",
"keys",
"and",
"values",
"of",
"subj_dict",
"have",
"been",
"flipped",
"around",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L534-L541 | train | 44,956 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._pyxb_from_perm_dict | def _pyxb_from_perm_dict(self, perm_dict):
"""Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object.
"""
norm_perm_list = self._norm_perm_list_from_perm_dict(perm_dict)
return self._pyxb_from_norm_perm_list(norm_perm_list) | python | def _pyxb_from_perm_dict(self, perm_dict):
"""Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object.
"""
norm_perm_list = self._norm_perm_list_from_perm_dict(perm_dict)
return self._pyxb_from_norm_perm_list(norm_perm_list) | [
"def",
"_pyxb_from_perm_dict",
"(",
"self",
",",
"perm_dict",
")",
":",
"norm_perm_list",
"=",
"self",
".",
"_norm_perm_list_from_perm_dict",
"(",
"perm_dict",
")",
"return",
"self",
".",
"_pyxb_from_norm_perm_list",
"(",
"norm_perm_list",
")"
] | Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object. | [
"Return",
"an",
"AccessPolicy",
"PyXB",
"representation",
"of",
"perm_dict"
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L543-L553 | train | 44,957 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._pyxb_from_norm_perm_list | def _pyxb_from_norm_perm_list(self, norm_perm_list):
"""Return an AccessPolicy PyXB representation of ``norm_perm_list``"""
# Using accessPolicy() instead of AccessPolicy() and accessRule() instead of
# AccessRule() gives PyXB the type information required for using this as a
# root element.
access_pyxb = d1_common.types.dataoneTypes.accessPolicy()
for perm_str, subj_list in norm_perm_list:
rule_pyxb = d1_common.types.dataoneTypes.accessRule()
rule_pyxb.permission.append(perm_str)
for subj_str in subj_list:
rule_pyxb.subject.append(subj_str)
access_pyxb.allow.append(rule_pyxb)
if len(access_pyxb.allow):
return access_pyxb | python | def _pyxb_from_norm_perm_list(self, norm_perm_list):
"""Return an AccessPolicy PyXB representation of ``norm_perm_list``"""
# Using accessPolicy() instead of AccessPolicy() and accessRule() instead of
# AccessRule() gives PyXB the type information required for using this as a
# root element.
access_pyxb = d1_common.types.dataoneTypes.accessPolicy()
for perm_str, subj_list in norm_perm_list:
rule_pyxb = d1_common.types.dataoneTypes.accessRule()
rule_pyxb.permission.append(perm_str)
for subj_str in subj_list:
rule_pyxb.subject.append(subj_str)
access_pyxb.allow.append(rule_pyxb)
if len(access_pyxb.allow):
return access_pyxb | [
"def",
"_pyxb_from_norm_perm_list",
"(",
"self",
",",
"norm_perm_list",
")",
":",
"# Using accessPolicy() instead of AccessPolicy() and accessRule() instead of",
"# AccessRule() gives PyXB the type information required for using this as a",
"# root element.",
"access_pyxb",
"=",
"d1_common... | Return an AccessPolicy PyXB representation of ``norm_perm_list`` | [
"Return",
"an",
"AccessPolicy",
"PyXB",
"representation",
"of",
"norm_perm_list"
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L555-L568 | train | 44,958 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._subj_dict_from_pyxb | def _subj_dict_from_pyxb(self, access_pyxb):
"""Return a dict representation of ``access_pyxb``, which is an AccessPolicy
PyXB object.
This also remove any duplicate subjects and permissions in the PyXB object.
"""
subj_dict = {}
for allow_pyxb in access_pyxb.allow:
perm_set = set()
for perm_pyxb in allow_pyxb.permission:
perm_set.add(perm_pyxb)
for subj_pyxb in allow_pyxb.subject:
subj_dict.setdefault(subj_pyxb.value(), set()).update(perm_set)
return subj_dict | python | def _subj_dict_from_pyxb(self, access_pyxb):
"""Return a dict representation of ``access_pyxb``, which is an AccessPolicy
PyXB object.
This also remove any duplicate subjects and permissions in the PyXB object.
"""
subj_dict = {}
for allow_pyxb in access_pyxb.allow:
perm_set = set()
for perm_pyxb in allow_pyxb.permission:
perm_set.add(perm_pyxb)
for subj_pyxb in allow_pyxb.subject:
subj_dict.setdefault(subj_pyxb.value(), set()).update(perm_set)
return subj_dict | [
"def",
"_subj_dict_from_pyxb",
"(",
"self",
",",
"access_pyxb",
")",
":",
"subj_dict",
"=",
"{",
"}",
"for",
"allow_pyxb",
"in",
"access_pyxb",
".",
"allow",
":",
"perm_set",
"=",
"set",
"(",
")",
"for",
"perm_pyxb",
"in",
"allow_pyxb",
".",
"permission",
... | Return a dict representation of ``access_pyxb``, which is an AccessPolicy
PyXB object.
This also remove any duplicate subjects and permissions in the PyXB object. | [
"Return",
"a",
"dict",
"representation",
"of",
"access_pyxb",
"which",
"is",
"an",
"AccessPolicy",
"PyXB",
"object",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L570-L584 | train | 44,959 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._highest_perm_dict_from_perm_dict | def _highest_perm_dict_from_perm_dict(self, perm_dict):
"""Return a perm_dict where only the highest permission for each subject is
included."""
highest_perm_dict = copy.copy(perm_dict)
for ordered_str in reversed(ORDERED_PERM_LIST):
for lower_perm in self._lower_perm_list(ordered_str):
highest_perm_dict.setdefault(lower_perm, set())
highest_perm_dict[lower_perm] -= perm_dict.get(ordered_str, set())
return highest_perm_dict | python | def _highest_perm_dict_from_perm_dict(self, perm_dict):
"""Return a perm_dict where only the highest permission for each subject is
included."""
highest_perm_dict = copy.copy(perm_dict)
for ordered_str in reversed(ORDERED_PERM_LIST):
for lower_perm in self._lower_perm_list(ordered_str):
highest_perm_dict.setdefault(lower_perm, set())
highest_perm_dict[lower_perm] -= perm_dict.get(ordered_str, set())
return highest_perm_dict | [
"def",
"_highest_perm_dict_from_perm_dict",
"(",
"self",
",",
"perm_dict",
")",
":",
"highest_perm_dict",
"=",
"copy",
".",
"copy",
"(",
"perm_dict",
")",
"for",
"ordered_str",
"in",
"reversed",
"(",
"ORDERED_PERM_LIST",
")",
":",
"for",
"lower_perm",
"in",
"sel... | Return a perm_dict where only the highest permission for each subject is
included. | [
"Return",
"a",
"perm_dict",
"where",
"only",
"the",
"highest",
"permission",
"for",
"each",
"subject",
"is",
"included",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L586-L594 | train | 44,960 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._norm_perm_list_from_perm_dict | def _norm_perm_list_from_perm_dict(self, perm_dict):
"""Return a minimal, ordered, hashable list of subjects and permissions."""
high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
return [
[k, list(sorted(high_perm_dict[k]))]
for k in ORDERED_PERM_LIST
if high_perm_dict.get(k, False)
] | python | def _norm_perm_list_from_perm_dict(self, perm_dict):
"""Return a minimal, ordered, hashable list of subjects and permissions."""
high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
return [
[k, list(sorted(high_perm_dict[k]))]
for k in ORDERED_PERM_LIST
if high_perm_dict.get(k, False)
] | [
"def",
"_norm_perm_list_from_perm_dict",
"(",
"self",
",",
"perm_dict",
")",
":",
"high_perm_dict",
"=",
"self",
".",
"_highest_perm_dict_from_perm_dict",
"(",
"perm_dict",
")",
"return",
"[",
"[",
"k",
",",
"list",
"(",
"sorted",
"(",
"high_perm_dict",
"[",
"k"... | Return a minimal, ordered, hashable list of subjects and permissions. | [
"Return",
"a",
"minimal",
"ordered",
"hashable",
"list",
"of",
"subjects",
"and",
"permissions",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L596-L603 | train | 44,961 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._effective_perm_list_from_iter | def _effective_perm_list_from_iter(self, perm_iter):
"""Return list of effective permissions for for highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty."""
highest_perm_str = self._highest_perm_from_iter(perm_iter)
return (
self._equal_or_lower_perm_list(highest_perm_str)
if highest_perm_str is not None
else None
) | python | def _effective_perm_list_from_iter(self, perm_iter):
"""Return list of effective permissions for for highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty."""
highest_perm_str = self._highest_perm_from_iter(perm_iter)
return (
self._equal_or_lower_perm_list(highest_perm_str)
if highest_perm_str is not None
else None
) | [
"def",
"_effective_perm_list_from_iter",
"(",
"self",
",",
"perm_iter",
")",
":",
"highest_perm_str",
"=",
"self",
".",
"_highest_perm_from_iter",
"(",
"perm_iter",
")",
"return",
"(",
"self",
".",
"_equal_or_lower_perm_list",
"(",
"highest_perm_str",
")",
"if",
"hi... | Return list of effective permissions for for highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty. | [
"Return",
"list",
"of",
"effective",
"permissions",
"for",
"for",
"highest",
"permission",
"in",
"perm_iter",
"ordered",
"lower",
"to",
"higher",
"or",
"None",
"if",
"perm_iter",
"is",
"empty",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L605-L613 | train | 44,962 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._present_perm_set_for_subj | def _present_perm_set_for_subj(self, perm_dict, subj_str):
"""Return a set containing only the permissions that are present in the
``perm_dict`` for ``subj_str``"""
return {p for p, s in list(perm_dict.items()) if subj_str in s} | python | def _present_perm_set_for_subj(self, perm_dict, subj_str):
"""Return a set containing only the permissions that are present in the
``perm_dict`` for ``subj_str``"""
return {p for p, s in list(perm_dict.items()) if subj_str in s} | [
"def",
"_present_perm_set_for_subj",
"(",
"self",
",",
"perm_dict",
",",
"subj_str",
")",
":",
"return",
"{",
"p",
"for",
"p",
",",
"s",
"in",
"list",
"(",
"perm_dict",
".",
"items",
"(",
")",
")",
"if",
"subj_str",
"in",
"s",
"}"
] | Return a set containing only the permissions that are present in the
``perm_dict`` for ``subj_str`` | [
"Return",
"a",
"set",
"containing",
"only",
"the",
"permissions",
"that",
"are",
"present",
"in",
"the",
"perm_dict",
"for",
"subj_str"
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L615-L618 | train | 44,963 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._highest_perm_from_iter | def _highest_perm_from_iter(self, perm_iter):
"""Return the highest perm present in ``perm_iter`` or None if ``perm_iter`` is
empty."""
perm_set = set(perm_iter)
for perm_str in reversed(ORDERED_PERM_LIST):
if perm_str in perm_set:
return perm_str | python | def _highest_perm_from_iter(self, perm_iter):
"""Return the highest perm present in ``perm_iter`` or None if ``perm_iter`` is
empty."""
perm_set = set(perm_iter)
for perm_str in reversed(ORDERED_PERM_LIST):
if perm_str in perm_set:
return perm_str | [
"def",
"_highest_perm_from_iter",
"(",
"self",
",",
"perm_iter",
")",
":",
"perm_set",
"=",
"set",
"(",
"perm_iter",
")",
"for",
"perm_str",
"in",
"reversed",
"(",
"ORDERED_PERM_LIST",
")",
":",
"if",
"perm_str",
"in",
"perm_set",
":",
"return",
"perm_str"
] | Return the highest perm present in ``perm_iter`` or None if ``perm_iter`` is
empty. | [
"Return",
"the",
"highest",
"perm",
"present",
"in",
"perm_iter",
"or",
"None",
"if",
"perm_iter",
"is",
"empty",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L620-L626 | train | 44,964 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._ordered_idx_from_perm | def _ordered_idx_from_perm(self, perm_str):
"""Return the ordered index of ``perm_str`` or None if ``perm_str`` is not a
valid permission."""
for i, ordered_str in enumerate(ORDERED_PERM_LIST):
if perm_str == ordered_str:
return i | python | def _ordered_idx_from_perm(self, perm_str):
"""Return the ordered index of ``perm_str`` or None if ``perm_str`` is not a
valid permission."""
for i, ordered_str in enumerate(ORDERED_PERM_LIST):
if perm_str == ordered_str:
return i | [
"def",
"_ordered_idx_from_perm",
"(",
"self",
",",
"perm_str",
")",
":",
"for",
"i",
",",
"ordered_str",
"in",
"enumerate",
"(",
"ORDERED_PERM_LIST",
")",
":",
"if",
"perm_str",
"==",
"ordered_str",
":",
"return",
"i"
] | Return the ordered index of ``perm_str`` or None if ``perm_str`` is not a
valid permission. | [
"Return",
"the",
"ordered",
"index",
"of",
"perm_str",
"or",
"None",
"if",
"perm_str",
"is",
"not",
"a",
"valid",
"permission",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L628-L633 | train | 44,965 |
DataONEorg/d1_python | lib_common/src/d1_common/wrap/access_policy.py | AccessPolicyWrapper._assert_valid_permission | def _assert_valid_permission(self, perm_str):
"""Raise D1 exception if ``perm_str`` is not a valid permission."""
if perm_str not in ORDERED_PERM_LIST:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Permission must be one of {}. perm_str="{}"'.format(
', '.join(ORDERED_PERM_LIST), perm_str
),
) | python | def _assert_valid_permission(self, perm_str):
"""Raise D1 exception if ``perm_str`` is not a valid permission."""
if perm_str not in ORDERED_PERM_LIST:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Permission must be one of {}. perm_str="{}"'.format(
', '.join(ORDERED_PERM_LIST), perm_str
),
) | [
"def",
"_assert_valid_permission",
"(",
"self",
",",
"perm_str",
")",
":",
"if",
"perm_str",
"not",
"in",
"ORDERED_PERM_LIST",
":",
"raise",
"d1_common",
".",
"types",
".",
"exceptions",
".",
"InvalidRequest",
"(",
"0",
",",
"'Permission must be one of {}. perm_str=... | Raise D1 exception if ``perm_str`` is not a valid permission. | [
"Raise",
"D1",
"exception",
"if",
"perm_str",
"is",
"not",
"a",
"valid",
"permission",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L654-L662 | train | 44,966 |
DataONEorg/d1_python | client_cli/src/d1_cli/dataone.py | handle_unexpected_exception | def handle_unexpected_exception(max_traceback_levels=100):
"""Suppress stack traces for common errors and provide hints for how to resolve
them."""
exc_type, exc_msgs = sys.exc_info()[:2]
if exc_type.__name__ == "SSLError":
d1_cli.impl.util.print_error(
"""HTTPS / TLS / SSL / X.509v3 Certificate Error:
An HTTPS connection could not be established. Verify that a DataONE node
responds at the URL provided in the cn-url or mn-url session variable. If the
URL is valid and if you intended to connect without authentication, make sure
that the session variable, "anonymous", is set to True. If you intended to
connect with authentication, make sure that the parameter, "cert-file", points
to a valid certificate from CILogon. If the certificate has the private
key in a separate file, also set "key-file" to the private key file.
Otherwise, set "key-file" to None. Note that CILogon certificates must be
renewed after 18 hours.
"""
)
elif exc_type.__name__ == "timeout":
d1_cli.impl.util.print_error(
"""Timeout error:
A connection to a DataONE node timed out. Verify that a DataONE node responds
at the URL provided in the cn-url or mn-url session variable.
"""
)
else:
_print_unexpected_exception(max_traceback_levels) | python | def handle_unexpected_exception(max_traceback_levels=100):
"""Suppress stack traces for common errors and provide hints for how to resolve
them."""
exc_type, exc_msgs = sys.exc_info()[:2]
if exc_type.__name__ == "SSLError":
d1_cli.impl.util.print_error(
"""HTTPS / TLS / SSL / X.509v3 Certificate Error:
An HTTPS connection could not be established. Verify that a DataONE node
responds at the URL provided in the cn-url or mn-url session variable. If the
URL is valid and if you intended to connect without authentication, make sure
that the session variable, "anonymous", is set to True. If you intended to
connect with authentication, make sure that the parameter, "cert-file", points
to a valid certificate from CILogon. If the certificate has the private
key in a separate file, also set "key-file" to the private key file.
Otherwise, set "key-file" to None. Note that CILogon certificates must be
renewed after 18 hours.
"""
)
elif exc_type.__name__ == "timeout":
d1_cli.impl.util.print_error(
"""Timeout error:
A connection to a DataONE node timed out. Verify that a DataONE node responds
at the URL provided in the cn-url or mn-url session variable.
"""
)
else:
_print_unexpected_exception(max_traceback_levels) | [
"def",
"handle_unexpected_exception",
"(",
"max_traceback_levels",
"=",
"100",
")",
":",
"exc_type",
",",
"exc_msgs",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
":",
"2",
"]",
"if",
"exc_type",
".",
"__name__",
"==",
"\"SSLError\"",
":",
"d1_cli",
".",
"i... | Suppress stack traces for common errors and provide hints for how to resolve
them. | [
"Suppress",
"stack",
"traces",
"for",
"common",
"errors",
"and",
"provide",
"hints",
"for",
"how",
"to",
"resolve",
"them",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/dataone.py#L430-L456 | train | 44,967 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/views/create.py | _save_sciobj_bytes_from_request | def _save_sciobj_bytes_from_request(request, pid):
"""Django stores small uploads in memory and streams large uploads directly to disk.
Uploads stored in memory are represented by UploadedFile and on disk,
TemporaryUploadedFile. To store an UploadedFile on disk, it's iterated and saved in
chunks. To store a TemporaryUploadedFile, it's moved from the temporary to the final
location. Django automatically handles this when using the file related fields in
the models, but GMN is not using those, so has to do it manually here.
"""
sciobj_path = d1_gmn.app.sciobj_store.get_abs_sciobj_file_path_by_pid(pid)
if hasattr(request.FILES['object'], 'temporary_file_path'):
d1_common.utils.filesystem.create_missing_directories_for_file(sciobj_path)
django.core.files.move.file_move_safe(
request.FILES['object'].temporary_file_path(), sciobj_path
)
else:
with d1_gmn.app.sciobj_store.open_sciobj_file_by_path_ctx(
sciobj_path, write=True
) as sciobj_stream:
for chunk in request.FILES['object'].chunks():
sciobj_stream.write(chunk) | python | def _save_sciobj_bytes_from_request(request, pid):
"""Django stores small uploads in memory and streams large uploads directly to disk.
Uploads stored in memory are represented by UploadedFile and on disk,
TemporaryUploadedFile. To store an UploadedFile on disk, it's iterated and saved in
chunks. To store a TemporaryUploadedFile, it's moved from the temporary to the final
location. Django automatically handles this when using the file related fields in
the models, but GMN is not using those, so has to do it manually here.
"""
sciobj_path = d1_gmn.app.sciobj_store.get_abs_sciobj_file_path_by_pid(pid)
if hasattr(request.FILES['object'], 'temporary_file_path'):
d1_common.utils.filesystem.create_missing_directories_for_file(sciobj_path)
django.core.files.move.file_move_safe(
request.FILES['object'].temporary_file_path(), sciobj_path
)
else:
with d1_gmn.app.sciobj_store.open_sciobj_file_by_path_ctx(
sciobj_path, write=True
) as sciobj_stream:
for chunk in request.FILES['object'].chunks():
sciobj_stream.write(chunk) | [
"def",
"_save_sciobj_bytes_from_request",
"(",
"request",
",",
"pid",
")",
":",
"sciobj_path",
"=",
"d1_gmn",
".",
"app",
".",
"sciobj_store",
".",
"get_abs_sciobj_file_path_by_pid",
"(",
"pid",
")",
"if",
"hasattr",
"(",
"request",
".",
"FILES",
"[",
"'object'"... | Django stores small uploads in memory and streams large uploads directly to disk.
Uploads stored in memory are represented by UploadedFile and on disk,
TemporaryUploadedFile. To store an UploadedFile on disk, it's iterated and saved in
chunks. To store a TemporaryUploadedFile, it's moved from the temporary to the final
location. Django automatically handles this when using the file related fields in
the models, but GMN is not using those, so has to do it manually here. | [
"Django",
"stores",
"small",
"uploads",
"in",
"memory",
"and",
"streams",
"large",
"uploads",
"directly",
"to",
"disk",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/create.py#L119-L140 | train | 44,968 |
genialis/resolwe | resolwe/flow/models/entity.py | EntityQuerySet.move_to_collection | def move_to_collection(self, source_collection, destination_collection):
"""Move entities from source to destination collection."""
for entity in self:
entity.move_to_collection(source_collection, destination_collection) | python | def move_to_collection(self, source_collection, destination_collection):
"""Move entities from source to destination collection."""
for entity in self:
entity.move_to_collection(source_collection, destination_collection) | [
"def",
"move_to_collection",
"(",
"self",
",",
"source_collection",
",",
"destination_collection",
")",
":",
"for",
"entity",
"in",
"self",
":",
"entity",
".",
"move_to_collection",
"(",
"source_collection",
",",
"destination_collection",
")"
] | Move entities from source to destination collection. | [
"Move",
"entities",
"from",
"source",
"to",
"destination",
"collection",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/entity.py#L34-L37 | train | 44,969 |
genialis/resolwe | resolwe/flow/models/entity.py | Entity.move_to_collection | def move_to_collection(self, source_collection, destination_collection):
"""Move entity from source to destination collection."""
# Remove from collection.
self.collections.remove(source_collection) # pylint: disable=no-member
source_collection.data.remove(*self.data.all()) # pylint: disable=no-member
# Add to collection.
self.collections.add(destination_collection) # pylint: disable=no-member
destination_collection.data.add(*self.data.all()) | python | def move_to_collection(self, source_collection, destination_collection):
"""Move entity from source to destination collection."""
# Remove from collection.
self.collections.remove(source_collection) # pylint: disable=no-member
source_collection.data.remove(*self.data.all()) # pylint: disable=no-member
# Add to collection.
self.collections.add(destination_collection) # pylint: disable=no-member
destination_collection.data.add(*self.data.all()) | [
"def",
"move_to_collection",
"(",
"self",
",",
"source_collection",
",",
"destination_collection",
")",
":",
"# Remove from collection.",
"self",
".",
"collections",
".",
"remove",
"(",
"source_collection",
")",
"# pylint: disable=no-member",
"source_collection",
".",
"da... | Move entity from source to destination collection. | [
"Move",
"entity",
"from",
"source",
"to",
"destination",
"collection",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/entity.py#L112-L120 | train | 44,970 |
DataONEorg/d1_python | client_onedrive/src/d1_onedrive/impl/os_escape.py | quote | def quote(s, unsafe='/'):
"""Pass in a dictionary that has unsafe characters as the keys, and the percent
encoded value as the value."""
res = s.replace('%', '%25')
for c in unsafe:
res = res.replace(c, '%' + (hex(ord(c)).upper())[2:])
return res | python | def quote(s, unsafe='/'):
"""Pass in a dictionary that has unsafe characters as the keys, and the percent
encoded value as the value."""
res = s.replace('%', '%25')
for c in unsafe:
res = res.replace(c, '%' + (hex(ord(c)).upper())[2:])
return res | [
"def",
"quote",
"(",
"s",
",",
"unsafe",
"=",
"'/'",
")",
":",
"res",
"=",
"s",
".",
"replace",
"(",
"'%'",
",",
"'%25'",
")",
"for",
"c",
"in",
"unsafe",
":",
"res",
"=",
"res",
".",
"replace",
"(",
"c",
",",
"'%'",
"+",
"(",
"hex",
"(",
"... | Pass in a dictionary that has unsafe characters as the keys, and the percent
encoded value as the value. | [
"Pass",
"in",
"a",
"dictionary",
"that",
"has",
"unsafe",
"characters",
"as",
"the",
"keys",
"and",
"the",
"percent",
"encoded",
"value",
"as",
"the",
"value",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/os_escape.py#L78-L84 | train | 44,971 |
genialis/resolwe | resolwe/flow/elastic_indexes/data.py | DataIndex.get_dependencies | def get_dependencies(self):
"""Return dependencies, which should trigger updates of this model."""
# pylint: disable=no-member
return super().get_dependencies() + [
Data.collection_set,
Data.entity_set,
Data.parents,
] | python | def get_dependencies(self):
"""Return dependencies, which should trigger updates of this model."""
# pylint: disable=no-member
return super().get_dependencies() + [
Data.collection_set,
Data.entity_set,
Data.parents,
] | [
"def",
"get_dependencies",
"(",
"self",
")",
":",
"# pylint: disable=no-member",
"return",
"super",
"(",
")",
".",
"get_dependencies",
"(",
")",
"+",
"[",
"Data",
".",
"collection_set",
",",
"Data",
".",
"entity_set",
",",
"Data",
".",
"parents",
",",
"]"
] | Return dependencies, which should trigger updates of this model. | [
"Return",
"dependencies",
"which",
"should",
"trigger",
"updates",
"of",
"this",
"model",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/data.py#L52-L59 | train | 44,972 |
genialis/resolwe | resolwe/flow/management/commands/collecttools.py | Command.get_confirmation | def get_confirmation(self):
"""Get user confirmation to proceed."""
if self.clear:
action = 'This will DELETE ALL FILES in this location!'
else:
action = 'This will overwrite existing files!'
message = (
"\n"
"You have requested to collect static files at the destination\n"
"location as specified in your settings\n"
"\n"
" {destination}\n"
"\n"
"{action}\n"
"Are you sure you want to do this?\n"
"\n"
"Type 'yes' to continue, or 'no' to cancel: ".format(
destination=self.destination_path,
action=action,
)
)
if input(''.join(message)) != 'yes':
raise CommandError("Collecting tools cancelled.") | python | def get_confirmation(self):
"""Get user confirmation to proceed."""
if self.clear:
action = 'This will DELETE ALL FILES in this location!'
else:
action = 'This will overwrite existing files!'
message = (
"\n"
"You have requested to collect static files at the destination\n"
"location as specified in your settings\n"
"\n"
" {destination}\n"
"\n"
"{action}\n"
"Are you sure you want to do this?\n"
"\n"
"Type 'yes' to continue, or 'no' to cancel: ".format(
destination=self.destination_path,
action=action,
)
)
if input(''.join(message)) != 'yes':
raise CommandError("Collecting tools cancelled.") | [
"def",
"get_confirmation",
"(",
"self",
")",
":",
"if",
"self",
".",
"clear",
":",
"action",
"=",
"'This will DELETE ALL FILES in this location!'",
"else",
":",
"action",
"=",
"'This will overwrite existing files!'",
"message",
"=",
"(",
"\"\\n\"",
"\"You have requested... | Get user confirmation to proceed. | [
"Get",
"user",
"confirmation",
"to",
"proceed",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L50-L74 | train | 44,973 |
genialis/resolwe | resolwe/flow/management/commands/collecttools.py | Command.clear_dir | def clear_dir(self):
"""Delete contents of the directory on the given path."""
self.stdout.write("Deleting contents of '{}'.".format(self.destination_path))
for filename in os.listdir(self.destination_path):
if os.path.isfile(filename) or os.path.islink(filename):
os.remove(filename)
elif os.path.isdir(filename):
shutil.rmtree(filename) | python | def clear_dir(self):
"""Delete contents of the directory on the given path."""
self.stdout.write("Deleting contents of '{}'.".format(self.destination_path))
for filename in os.listdir(self.destination_path):
if os.path.isfile(filename) or os.path.islink(filename):
os.remove(filename)
elif os.path.isdir(filename):
shutil.rmtree(filename) | [
"def",
"clear_dir",
"(",
"self",
")",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"\"Deleting contents of '{}'.\"",
".",
"format",
"(",
"self",
".",
"destination_path",
")",
")",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"self",
".",
"destinat... | Delete contents of the directory on the given path. | [
"Delete",
"contents",
"of",
"the",
"directory",
"on",
"the",
"given",
"path",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L76-L84 | train | 44,974 |
genialis/resolwe | resolwe/flow/management/commands/collecttools.py | Command.change_path_prefix | def change_path_prefix(self, path, old_prefix, new_prefix, app_name):
"""Change path prefix and include app name."""
relative_path = os.path.relpath(path, old_prefix)
return os.path.join(new_prefix, app_name, relative_path) | python | def change_path_prefix(self, path, old_prefix, new_prefix, app_name):
"""Change path prefix and include app name."""
relative_path = os.path.relpath(path, old_prefix)
return os.path.join(new_prefix, app_name, relative_path) | [
"def",
"change_path_prefix",
"(",
"self",
",",
"path",
",",
"old_prefix",
",",
"new_prefix",
",",
"app_name",
")",
":",
"relative_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"old_prefix",
")",
"return",
"os",
".",
"path",
".",
"join"... | Change path prefix and include app name. | [
"Change",
"path",
"prefix",
"and",
"include",
"app",
"name",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L86-L89 | train | 44,975 |
genialis/resolwe | resolwe/flow/management/commands/collecttools.py | Command.collect | def collect(self):
"""Get tools' locations and copy them to a single location."""
for app_name, tools_path in get_apps_tools().items():
self.stdout.write("Copying files from '{}'.".format(tools_path))
app_name = app_name.replace('.', '_')
app_destination_path = os.path.join(self.destination_path, app_name)
if not os.path.isdir(app_destination_path):
os.mkdir(app_destination_path)
for root, dirs, files in os.walk(tools_path):
for dir_name in dirs:
dir_source_path = os.path.join(root, dir_name)
dir_destination_path = self.change_path_prefix(
dir_source_path, tools_path, self.destination_path, app_name
)
if not os.path.isdir(dir_destination_path):
os.mkdir(dir_destination_path)
for file_name in files:
file_source_path = os.path.join(root, file_name)
file_destination_path = self.change_path_prefix(
file_source_path, tools_path, self.destination_path, app_name
)
shutil.copy2(file_source_path, file_destination_path) | python | def collect(self):
"""Get tools' locations and copy them to a single location."""
for app_name, tools_path in get_apps_tools().items():
self.stdout.write("Copying files from '{}'.".format(tools_path))
app_name = app_name.replace('.', '_')
app_destination_path = os.path.join(self.destination_path, app_name)
if not os.path.isdir(app_destination_path):
os.mkdir(app_destination_path)
for root, dirs, files in os.walk(tools_path):
for dir_name in dirs:
dir_source_path = os.path.join(root, dir_name)
dir_destination_path = self.change_path_prefix(
dir_source_path, tools_path, self.destination_path, app_name
)
if not os.path.isdir(dir_destination_path):
os.mkdir(dir_destination_path)
for file_name in files:
file_source_path = os.path.join(root, file_name)
file_destination_path = self.change_path_prefix(
file_source_path, tools_path, self.destination_path, app_name
)
shutil.copy2(file_source_path, file_destination_path) | [
"def",
"collect",
"(",
"self",
")",
":",
"for",
"app_name",
",",
"tools_path",
"in",
"get_apps_tools",
"(",
")",
".",
"items",
"(",
")",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"\"Copying files from '{}'.\"",
".",
"format",
"(",
"tools_path",
")",
... | Get tools' locations and copy them to a single location. | [
"Get",
"tools",
"locations",
"and",
"copy",
"them",
"to",
"a",
"single",
"location",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L91-L118 | train | 44,976 |
genialis/resolwe | resolwe/flow/management/commands/collecttools.py | Command.handle | def handle(self, **options):
"""Collect tools."""
self.set_options(**options)
os.makedirs(self.destination_path, exist_ok=True)
if self.interactive and any(os.listdir(self.destination_path)):
self.get_confirmation()
if self.clear:
self.clear_dir()
self.collect() | python | def handle(self, **options):
"""Collect tools."""
self.set_options(**options)
os.makedirs(self.destination_path, exist_ok=True)
if self.interactive and any(os.listdir(self.destination_path)):
self.get_confirmation()
if self.clear:
self.clear_dir()
self.collect() | [
"def",
"handle",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"set_options",
"(",
"*",
"*",
"options",
")",
"os",
".",
"makedirs",
"(",
"self",
".",
"destination_path",
",",
"exist_ok",
"=",
"True",
")",
"if",
"self",
".",
"interacti... | Collect tools. | [
"Collect",
"tools",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L120-L132 | train | 44,977 |
genialis/resolwe | resolwe/flow/utils/__init__.py | get_data_checksum | def get_data_checksum(proc_input, proc_slug, proc_version):
"""Compute checksum of processor inputs, name and version."""
checksum = hashlib.sha256()
checksum.update(json.dumps(proc_input, sort_keys=True).encode('utf-8'))
checksum.update(proc_slug.encode('utf-8'))
checksum.update(str(proc_version).encode('utf-8'))
return checksum.hexdigest() | python | def get_data_checksum(proc_input, proc_slug, proc_version):
"""Compute checksum of processor inputs, name and version."""
checksum = hashlib.sha256()
checksum.update(json.dumps(proc_input, sort_keys=True).encode('utf-8'))
checksum.update(proc_slug.encode('utf-8'))
checksum.update(str(proc_version).encode('utf-8'))
return checksum.hexdigest() | [
"def",
"get_data_checksum",
"(",
"proc_input",
",",
"proc_slug",
",",
"proc_version",
")",
":",
"checksum",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"checksum",
".",
"update",
"(",
"json",
".",
"dumps",
"(",
"proc_input",
",",
"sort_keys",
"=",
"True",
")"... | Compute checksum of processor inputs, name and version. | [
"Compute",
"checksum",
"of",
"processor",
"inputs",
"name",
"and",
"version",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/__init__.py#L29-L35 | train | 44,978 |
genialis/resolwe | resolwe/flow/utils/__init__.py | dict_dot | def dict_dot(d, k, val=None, default=None):
"""Get or set value using a dot-notation key in a multilevel dict."""
if val is None and k == '':
return d
def set_default(dict_or_model, key, default_value):
"""Set default field value."""
if isinstance(dict_or_model, models.Model):
if not hasattr(dict_or_model, key):
setattr(dict_or_model, key, default_value)
return getattr(dict_or_model, key)
else:
return dict_or_model.setdefault(key, default_value)
def get_item(dict_or_model, key):
"""Get field value."""
if isinstance(dict_or_model, models.Model):
return getattr(dict_or_model, key)
else:
return dict_or_model[key]
def set_item(dict_or_model, key, value):
"""Set field value."""
if isinstance(dict_or_model, models.Model):
setattr(dict_or_model, key, value)
else:
dict_or_model[key] = value
if val is None and callable(default):
# Get value, default for missing
return functools.reduce(lambda a, b: set_default(a, b, default()), k.split('.'), d)
elif val is None:
# Get value, error on missing
return functools.reduce(get_item, k.split('.'), d)
else:
# Set value
try:
k, k_last = k.rsplit('.', 1)
set_item(dict_dot(d, k, default=dict), k_last, val)
except ValueError:
set_item(d, k, val)
return val | python | def dict_dot(d, k, val=None, default=None):
"""Get or set value using a dot-notation key in a multilevel dict."""
if val is None and k == '':
return d
def set_default(dict_or_model, key, default_value):
"""Set default field value."""
if isinstance(dict_or_model, models.Model):
if not hasattr(dict_or_model, key):
setattr(dict_or_model, key, default_value)
return getattr(dict_or_model, key)
else:
return dict_or_model.setdefault(key, default_value)
def get_item(dict_or_model, key):
"""Get field value."""
if isinstance(dict_or_model, models.Model):
return getattr(dict_or_model, key)
else:
return dict_or_model[key]
def set_item(dict_or_model, key, value):
"""Set field value."""
if isinstance(dict_or_model, models.Model):
setattr(dict_or_model, key, value)
else:
dict_or_model[key] = value
if val is None and callable(default):
# Get value, default for missing
return functools.reduce(lambda a, b: set_default(a, b, default()), k.split('.'), d)
elif val is None:
# Get value, error on missing
return functools.reduce(get_item, k.split('.'), d)
else:
# Set value
try:
k, k_last = k.rsplit('.', 1)
set_item(dict_dot(d, k, default=dict), k_last, val)
except ValueError:
set_item(d, k, val)
return val | [
"def",
"dict_dot",
"(",
"d",
",",
"k",
",",
"val",
"=",
"None",
",",
"default",
"=",
"None",
")",
":",
"if",
"val",
"is",
"None",
"and",
"k",
"==",
"''",
":",
"return",
"d",
"def",
"set_default",
"(",
"dict_or_model",
",",
"key",
",",
"default_valu... | Get or set value using a dot-notation key in a multilevel dict. | [
"Get",
"or",
"set",
"value",
"using",
"a",
"dot",
"-",
"notation",
"key",
"in",
"a",
"multilevel",
"dict",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/__init__.py#L38-L82 | train | 44,979 |
genialis/resolwe | resolwe/flow/utils/__init__.py | get_apps_tools | def get_apps_tools():
"""Get applications' tools and their paths.
Return a dict with application names as keys and paths to tools'
directories as values. Applications without tools are omitted.
"""
tools_paths = {}
for app_config in apps.get_app_configs():
proc_path = os.path.join(app_config.path, 'tools')
if os.path.isdir(proc_path):
tools_paths[app_config.name] = proc_path
custom_tools_paths = getattr(settings, 'RESOLWE_CUSTOM_TOOLS_PATHS', [])
if not isinstance(custom_tools_paths, list):
raise KeyError("`RESOLWE_CUSTOM_TOOLS_PATHS` setting must be a list.")
for seq, custom_path in enumerate(custom_tools_paths):
custom_key = '_custom_{}'.format(seq)
tools_paths[custom_key] = custom_path
return tools_paths | python | def get_apps_tools():
"""Get applications' tools and their paths.
Return a dict with application names as keys and paths to tools'
directories as values. Applications without tools are omitted.
"""
tools_paths = {}
for app_config in apps.get_app_configs():
proc_path = os.path.join(app_config.path, 'tools')
if os.path.isdir(proc_path):
tools_paths[app_config.name] = proc_path
custom_tools_paths = getattr(settings, 'RESOLWE_CUSTOM_TOOLS_PATHS', [])
if not isinstance(custom_tools_paths, list):
raise KeyError("`RESOLWE_CUSTOM_TOOLS_PATHS` setting must be a list.")
for seq, custom_path in enumerate(custom_tools_paths):
custom_key = '_custom_{}'.format(seq)
tools_paths[custom_key] = custom_path
return tools_paths | [
"def",
"get_apps_tools",
"(",
")",
":",
"tools_paths",
"=",
"{",
"}",
"for",
"app_config",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
":",
"proc_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_config",
".",
"path",
",",
"'tools'",
")",
"if",... | Get applications' tools and their paths.
Return a dict with application names as keys and paths to tools'
directories as values. Applications without tools are omitted. | [
"Get",
"applications",
"tools",
"and",
"their",
"paths",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/__init__.py#L85-L106 | train | 44,980 |
genialis/resolwe | resolwe/flow/utils/__init__.py | rewire_inputs | def rewire_inputs(data_list):
"""Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs).
"""
if len(data_list) < 2:
return data_list
mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list}
for bundle in data_list:
updated = False
copy = bundle['copy']
for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema['type'].startswith('data:') and value in mapped_ids:
fields[name] = mapped_ids[value]
updated = True
elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]):
fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value]
updated = True
if updated:
copy.save()
return data_list | python | def rewire_inputs(data_list):
"""Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs).
"""
if len(data_list) < 2:
return data_list
mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list}
for bundle in data_list:
updated = False
copy = bundle['copy']
for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema['type'].startswith('data:') and value in mapped_ids:
fields[name] = mapped_ids[value]
updated = True
elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]):
fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value]
updated = True
if updated:
copy.save()
return data_list | [
"def",
"rewire_inputs",
"(",
"data_list",
")",
":",
"if",
"len",
"(",
"data_list",
")",
"<",
"2",
":",
"return",
"data_list",
"mapped_ids",
"=",
"{",
"bundle",
"[",
"'original'",
"]",
".",
"id",
":",
"bundle",
"[",
"'copy'",
"]",
".",
"id",
"for",
"b... | Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs). | [
"Rewire",
"inputs",
"of",
"provided",
"data",
"objects",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/__init__.py#L109-L143 | train | 44,981 |
DataONEorg/d1_python | lib_common/src/d1_common/types/generated/dataoneTypes_v1.py | CreateFromDocument | def CreateFromDocument(xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(
fallback_namespace=default_namespace, location_base=location_base
)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, pyxb.utils.six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance | python | def CreateFromDocument(xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(
fallback_namespace=default_namespace, location_base=location_base
)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, pyxb.utils.six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance | [
"def",
"CreateFromDocument",
"(",
"xml_text",
",",
"default_namespace",
"=",
"None",
",",
"location_base",
"=",
"None",
")",
":",
"if",
"pyxb",
".",
"XMLStyle_saxer",
"!=",
"pyxb",
".",
"_XMLStyle",
":",
"dom",
"=",
"pyxb",
".",
"utils",
".",
"domutils",
"... | Parse the given XML and use the document element to create a Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained. | [
"Parse",
"the",
"given",
"XML",
"and",
"use",
"the",
"document",
"element",
"to",
"create",
"a",
"Python",
"instance",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/generated/dataoneTypes_v1.py#L42-L75 | train | 44,982 |
DataONEorg/d1_python | lib_common/src/d1_common/types/generated/dataoneTypes_v1.py | CreateFromDOM | def CreateFromDOM(node, default_namespace=None):
"""Create a Python instance from the given DOM node. The node tag must correspond to
an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}.
"""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace) | python | def CreateFromDOM(node, default_namespace=None):
"""Create a Python instance from the given DOM node. The node tag must correspond to
an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}.
"""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace) | [
"def",
"CreateFromDOM",
"(",
"node",
",",
"default_namespace",
"=",
"None",
")",
":",
"if",
"default_namespace",
"is",
"None",
":",
"default_namespace",
"=",
"Namespace",
".",
"fallbackNamespace",
"(",
")",
"return",
"pyxb",
".",
"binding",
".",
"basis",
".",
... | Create a Python instance from the given DOM node. The node tag must correspond to
an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}. | [
"Create",
"a",
"Python",
"instance",
"from",
"the",
"given",
"DOM",
"node",
".",
"The",
"node",
"tag",
"must",
"correspond",
"to",
"an",
"element",
"declaration",
"in",
"this",
"module",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/generated/dataoneTypes_v1.py#L78-L87 | train | 44,983 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.postloop | def postloop(self):
"""Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) # Clean up command completion
d1_cli.impl.util.print_info("Exiting...") | python | def postloop(self):
"""Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) # Clean up command completion
d1_cli.impl.util.print_info("Exiting...") | [
"def",
"postloop",
"(",
"self",
")",
":",
"cmd",
".",
"Cmd",
".",
"postloop",
"(",
"self",
")",
"# Clean up command completion",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_info",
"(",
"\"Exiting...\"",
")"
] | Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub. | [
"Take",
"care",
"of",
"any",
"unfinished",
"business",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L60-L67 | train | 44,984 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.precmd | def precmd(self, line):
"""This method is called after the line has been input but before it has been
interpreted.
If you want to modify the input line before execution (for example, variable
substitution) do it here.
"""
line = self.prefix + line
self._history += [line.strip()]
return line | python | def precmd(self, line):
"""This method is called after the line has been input but before it has been
interpreted.
If you want to modify the input line before execution (for example, variable
substitution) do it here.
"""
line = self.prefix + line
self._history += [line.strip()]
return line | [
"def",
"precmd",
"(",
"self",
",",
"line",
")",
":",
"line",
"=",
"self",
".",
"prefix",
"+",
"line",
"self",
".",
"_history",
"+=",
"[",
"line",
".",
"strip",
"(",
")",
"]",
"return",
"line"
] | This method is called after the line has been input but before it has been
interpreted.
If you want to modify the input line before execution (for example, variable
substitution) do it here. | [
"This",
"method",
"is",
"called",
"after",
"the",
"line",
"has",
"been",
"input",
"but",
"before",
"it",
"has",
"been",
"interpreted",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L69-L79 | train | 44,985 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.default | def default(self, line):
"""Called on an input line when the command prefix is not recognized."""
args = self._split_args(line, 0, 99)
d1_cli.impl.util.print_error("Unknown command: {}".format(args[0])) | python | def default(self, line):
"""Called on an input line when the command prefix is not recognized."""
args = self._split_args(line, 0, 99)
d1_cli.impl.util.print_error("Unknown command: {}".format(args[0])) | [
"def",
"default",
"(",
"self",
",",
"line",
")",
":",
"args",
"=",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"99",
")",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_error",
"(",
"\"Unknown command: {}\"",
".",
"format",
"(",
"args",
... | Called on an input line when the command prefix is not recognized. | [
"Called",
"on",
"an",
"input",
"line",
"when",
"the",
"command",
"prefix",
"is",
"not",
"recognized",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L93-L96 | train | 44,986 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_history | def do_history(self, line):
"""history Display a list of commands that have been entered."""
self._split_args(line, 0, 0)
for idx, item in enumerate(self._history):
d1_cli.impl.util.print_info("{0: 3d} {1}".format(idx, item)) | python | def do_history(self, line):
"""history Display a list of commands that have been entered."""
self._split_args(line, 0, 0)
for idx, item in enumerate(self._history):
d1_cli.impl.util.print_info("{0: 3d} {1}".format(idx, item)) | [
"def",
"do_history",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"self",
".",
"_history",
")",
":",
"d1_cli",
".",
"impl",
".",
"util",
"... | history Display a list of commands that have been entered. | [
"history",
"Display",
"a",
"list",
"of",
"commands",
"that",
"have",
"been",
"entered",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L118-L122 | train | 44,987 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_eof | def do_eof(self, line):
"""Exit on system EOF character."""
d1_cli.impl.util.print_info("")
self.do_exit(line) | python | def do_eof(self, line):
"""Exit on system EOF character."""
d1_cli.impl.util.print_info("")
self.do_exit(line) | [
"def",
"do_eof",
"(",
"self",
",",
"line",
")",
":",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_info",
"(",
"\"\"",
")",
"self",
".",
"do_exit",
"(",
"line",
")"
] | Exit on system EOF character. | [
"Exit",
"on",
"system",
"EOF",
"character",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L142-L145 | train | 44,988 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_reset | def do_reset(self, line):
"""reset Set all session variables to their default values."""
self._split_args(line, 0, 0)
self._command_processor.get_session().reset()
self._print_info_if_verbose("Successfully reset session variables") | python | def do_reset(self, line):
"""reset Set all session variables to their default values."""
self._split_args(line, 0, 0)
self._command_processor.get_session().reset()
self._print_info_if_verbose("Successfully reset session variables") | [
"def",
"do_reset",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"reset",
"(",
")",
"self",
".",
"_print_info_if_verbose",
"("... | reset Set all session variables to their default values. | [
"reset",
"Set",
"all",
"session",
"variables",
"to",
"their",
"default",
"values",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L200-L204 | train | 44,989 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_clearaccess | def do_clearaccess(self, line):
"""clearaccess Remove all subjects from access policy Only the submitter will
have access to the object."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_access_control().clear()
self._print_info_if_verbose("Removed all subjects from access policy") | python | def do_clearaccess(self, line):
"""clearaccess Remove all subjects from access policy Only the submitter will
have access to the object."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_access_control().clear()
self._print_info_if_verbose("Removed all subjects from access policy") | [
"def",
"do_clearaccess",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"get_access_control",
"(",
")",
".",
"clear",
"(",
")",... | clearaccess Remove all subjects from access policy Only the submitter will
have access to the object. | [
"clearaccess",
"Remove",
"all",
"subjects",
"from",
"access",
"policy",
"Only",
"the",
"submitter",
"will",
"have",
"access",
"to",
"the",
"object",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L238-L243 | train | 44,990 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_allowrep | def do_allowrep(self, line):
"""allowrep Allow new objects to be replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
True
)
self._print_info_if_verbose("Set replication policy to allow replication") | python | def do_allowrep(self, line):
"""allowrep Allow new objects to be replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
True
)
self._print_info_if_verbose("Set replication policy to allow replication") | [
"def",
"do_allowrep",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"get_replication_policy",
"(",
")",
".",
"set_replication_allo... | allowrep Allow new objects to be replicated. | [
"allowrep",
"Allow",
"new",
"objects",
"to",
"be",
"replicated",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L249-L255 | train | 44,991 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_denyrep | def do_denyrep(self, line):
"""denyrep Prevent new objects from being replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
False
)
self._print_info_if_verbose("Set replication policy to deny replication") | python | def do_denyrep(self, line):
"""denyrep Prevent new objects from being replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
False
)
self._print_info_if_verbose("Set replication policy to deny replication") | [
"def",
"do_denyrep",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"get_replication_policy",
"(",
")",
".",
"set_replication_allow... | denyrep Prevent new objects from being replicated. | [
"denyrep",
"Prevent",
"new",
"objects",
"from",
"being",
"replicated",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L257-L263 | train | 44,992 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_clearrep | def do_clearrep(self, line):
"""clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3.
"""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().clear()
self._print_info_if_verbose("Cleared the replication policy") | python | def do_clearrep(self, line):
"""clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3.
"""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().clear()
self._print_info_if_verbose("Cleared the replication policy") | [
"def",
"do_clearrep",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_session",
"(",
")",
".",
"get_replication_policy",
"(",
")",
".",
"clear",
"(",
")"... | clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3. | [
"clearrep",
"Set",
"the",
"replication",
"policy",
"to",
"default",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L304-L313 | train | 44,993 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_queue | def do_queue(self, line):
"""queue Print the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().display() | python | def do_queue(self, line):
"""queue Print the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().display() | [
"def",
"do_queue",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_operation_queue",
"(",
")",
".",
"display",
"(",
")"
] | queue Print the queue of write operations. | [
"queue",
"Print",
"the",
"queue",
"of",
"write",
"operations",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L494-L497 | train | 44,994 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_run | def do_run(self, line):
"""run Perform each operation in the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().execute()
self._print_info_if_verbose(
"All operations in the write queue were successfully executed"
) | python | def do_run(self, line):
"""run Perform each operation in the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().execute()
self._print_info_if_verbose(
"All operations in the write queue were successfully executed"
) | [
"def",
"do_run",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_operation_queue",
"(",
")",
".",
"execute",
"(",
")",
"self",
".",
"_print_info_if_verbose... | run Perform each operation in the queue of write operations. | [
"run",
"Perform",
"each",
"operation",
"in",
"the",
"queue",
"of",
"write",
"operations",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L499-L505 | train | 44,995 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_edit | def do_edit(self, line):
"""edit Edit the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().edit()
self._print_info_if_verbose("The write operation queue was successfully edited") | python | def do_edit(self, line):
"""edit Edit the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().edit()
self._print_info_if_verbose("The write operation queue was successfully edited") | [
"def",
"do_edit",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_operation_queue",
"(",
")",
".",
"edit",
"(",
")",
"self",
".",
"_print_info_if_verbose",... | edit Edit the queue of write operations. | [
"edit",
"Edit",
"the",
"queue",
"of",
"write",
"operations",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L507-L511 | train | 44,996 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | CLI.do_clearqueue | def do_clearqueue(self, line):
"""clearqueue Remove the operations in the queue of write operations without
performing them."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().clear()
self._print_info_if_verbose("All operations in the write queue were cleared") | python | def do_clearqueue(self, line):
"""clearqueue Remove the operations in the queue of write operations without
performing them."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().clear()
self._print_info_if_verbose("All operations in the write queue were cleared") | [
"def",
"do_clearqueue",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"_split_args",
"(",
"line",
",",
"0",
",",
"0",
")",
"self",
".",
"_command_processor",
".",
"get_operation_queue",
"(",
")",
".",
"clear",
"(",
")",
"self",
".",
"_print_info_if_ve... | clearqueue Remove the operations in the queue of write operations without
performing them. | [
"clearqueue",
"Remove",
"the",
"operations",
"in",
"the",
"queue",
"of",
"write",
"operations",
"without",
"performing",
"them",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L513-L518 | train | 44,997 |
genialis/resolwe | resolwe/permissions/shortcuts.py | _group_groups | def _group_groups(perm_list):
"""Group permissions by group.
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list
"""
perm_list = sorted(perm_list, key=lambda tup: tup[0])
grouped_perms = []
for key, group in groupby(perm_list, lambda tup: (tup[0], tup[1])):
grouped_perms.append((key[0], key[1], [g[2] for g in group]))
return grouped_perms | python | def _group_groups(perm_list):
"""Group permissions by group.
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list
"""
perm_list = sorted(perm_list, key=lambda tup: tup[0])
grouped_perms = []
for key, group in groupby(perm_list, lambda tup: (tup[0], tup[1])):
grouped_perms.append((key[0], key[1], [g[2] for g in group]))
return grouped_perms | [
"def",
"_group_groups",
"(",
"perm_list",
")",
":",
"perm_list",
"=",
"sorted",
"(",
"perm_list",
",",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"0",
"]",
")",
"grouped_perms",
"=",
"[",
"]",
"for",
"key",
",",
"group",
"in",
"groupby",
"(",
"per... | Group permissions by group.
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list | [
"Group",
"permissions",
"by",
"group",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/shortcuts.py#L61-L85 | train | 44,998 |
genialis/resolwe | resolwe/permissions/shortcuts.py | get_user_group_perms | def get_user_group_perms(user_or_group, obj):
"""Get permissins for user groups.
Based on guardian.core.ObjectPermissionChecker.
"""
user, group = get_identity(user_or_group)
if user and not user.is_active:
return [], []
user_model = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if user:
user_rel_name = user_model.groups.field.related_query_name()
group_filters = {user_rel_name: user}
else:
group_filters = {'pk': group.pk}
if group_model.objects.is_generic():
group_filters.update({
'{}__content_type'.format(group_rel_name): ctype,
'{}__object_pk'.format(group_rel_name): obj.pk,
})
else:
group_filters['{}__content_object'.format(group_rel_name)] = obj
user_perms, group_perms = [], []
if user:
perms_qs = Permission.objects.filter(content_type=ctype)
if user.is_superuser:
user_perms = list(chain(perms_qs.values_list("codename", flat=True)))
else:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'{}__user'.format(related_name): user}
if model.objects.is_generic():
user_filters.update({
'{}__content_type'.format(related_name): ctype,
'{}__object_pk'.format(related_name): obj.pk,
})
else:
user_filters['{}__content_object'.format(related_name)] = obj
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = list(chain(user_perms_qs.values_list("codename", flat=True)))
group_perms_qs = Group.objects.filter(**group_filters)
group_perms = list(chain(group_perms_qs.order_by("pk").values_list(
"pk", "name", "{}__permission__codename".format(group_rel_name))))
group_perms = _group_groups(group_perms)
return user_perms, group_perms | python | def get_user_group_perms(user_or_group, obj):
"""Get permissins for user groups.
Based on guardian.core.ObjectPermissionChecker.
"""
user, group = get_identity(user_or_group)
if user and not user.is_active:
return [], []
user_model = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if user:
user_rel_name = user_model.groups.field.related_query_name()
group_filters = {user_rel_name: user}
else:
group_filters = {'pk': group.pk}
if group_model.objects.is_generic():
group_filters.update({
'{}__content_type'.format(group_rel_name): ctype,
'{}__object_pk'.format(group_rel_name): obj.pk,
})
else:
group_filters['{}__content_object'.format(group_rel_name)] = obj
user_perms, group_perms = [], []
if user:
perms_qs = Permission.objects.filter(content_type=ctype)
if user.is_superuser:
user_perms = list(chain(perms_qs.values_list("codename", flat=True)))
else:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'{}__user'.format(related_name): user}
if model.objects.is_generic():
user_filters.update({
'{}__content_type'.format(related_name): ctype,
'{}__object_pk'.format(related_name): obj.pk,
})
else:
user_filters['{}__content_object'.format(related_name)] = obj
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = list(chain(user_perms_qs.values_list("codename", flat=True)))
group_perms_qs = Group.objects.filter(**group_filters)
group_perms = list(chain(group_perms_qs.order_by("pk").values_list(
"pk", "name", "{}__permission__codename".format(group_rel_name))))
group_perms = _group_groups(group_perms)
return user_perms, group_perms | [
"def",
"get_user_group_perms",
"(",
"user_or_group",
",",
"obj",
")",
":",
"user",
",",
"group",
"=",
"get_identity",
"(",
"user_or_group",
")",
"if",
"user",
"and",
"not",
"user",
".",
"is_active",
":",
"return",
"[",
"]",
",",
"[",
"]",
"user_model",
"... | Get permissins for user groups.
Based on guardian.core.ObjectPermissionChecker. | [
"Get",
"permissins",
"for",
"user",
"groups",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/shortcuts.py#L88-L143 | train | 44,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.