repository_name stringlengths 5 67 | func_path_in_repository stringlengths 4 234 | func_name stringlengths 0 314 | whole_func_string stringlengths 52 3.87M | language stringclasses 6
values | func_code_string stringlengths 52 3.87M | func_code_tokens listlengths 15 672k | func_documentation_string stringlengths 1 47.2k | func_documentation_tokens listlengths 1 3.92k | split_name stringclasses 1
value | func_code_url stringlengths 85 339 |
|---|---|---|---|---|---|---|---|---|---|---|
DataKitchen/DKCloudCommand | DKCloudCommand/modules/DKRecipeDisk.py | is_same | def is_same(dir1, dir2):
"""
Compare two directory trees content.
Return False if they differ, True is they are the same.
:param dir2:
:param dir1:
"""
compared = dircmp(dir1, dir2, IGNORED_FILES) # ignore the OS X file
if compared.left_only or compared.right_only or compared.diff_files or compared.funny_files:
return False
for subdir in compared.common_dirs:
if not is_same(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True | python | def is_same(dir1, dir2):
"""
Compare two directory trees content.
Return False if they differ, True is they are the same.
:param dir2:
:param dir1:
"""
compared = dircmp(dir1, dir2, IGNORED_FILES) # ignore the OS X file
if compared.left_only or compared.right_only or compared.diff_files or compared.funny_files:
return False
for subdir in compared.common_dirs:
if not is_same(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True | [
"def",
"is_same",
"(",
"dir1",
",",
"dir2",
")",
":",
"compared",
"=",
"dircmp",
"(",
"dir1",
",",
"dir2",
",",
"IGNORED_FILES",
")",
"# ignore the OS X file",
"if",
"compared",
".",
"left_only",
"or",
"compared",
".",
"right_only",
"or",
"compared",
".",
... | Compare two directory trees content.
Return False if they differ, True is they are the same.
:param dir2:
:param dir1: | [
"Compare",
"two",
"directory",
"trees",
"content",
".",
"Return",
"False",
"if",
"they",
"differ",
"True",
"is",
"they",
"are",
"the",
"same",
".",
":",
"param",
"dir2",
":",
":",
"param",
"dir1",
":"
] | train | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKRecipeDisk.py#L361-L374 |
DataKitchen/DKCloudCommand | DKCloudCommand/modules/DKRecipeDisk.py | dircmp.phase3 | def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
shallow=False)
self.same_files, self.diff_files, self.funny_files = fcomp | python | def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
shallow=False)
self.same_files, self.diff_files, self.funny_files = fcomp | [
"def",
"phase3",
"(",
"self",
")",
":",
"fcomp",
"=",
"filecmp",
".",
"cmpfiles",
"(",
"self",
".",
"left",
",",
"self",
".",
"right",
",",
"self",
".",
"common_files",
",",
"shallow",
"=",
"False",
")",
"self",
".",
"same_files",
",",
"self",
".",
... | Find out differences between common files.
Ensure we are using content comparison with shallow=False. | [
"Find",
"out",
"differences",
"between",
"common",
"files",
".",
"Ensure",
"we",
"are",
"using",
"content",
"comparison",
"with",
"shallow",
"=",
"False",
"."
] | train | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKRecipeDisk.py#L351-L358 |
Locu/chronology | kronos/kronos/storage/elasticsearch/client.py | ElasticSearchStorage._insert | def _insert(self, namespace, stream, events, configuration):
"""
`namespace` acts as db for different streams
`stream` is the name of a stream and `events` is a list of events to
insert.
"""
index = self.index_manager.get_index(namespace)
start_dts_to_add = set()
def actions():
for _id, event in events:
dt = kronos_time_to_datetime(uuid_to_kronos_time(_id))
start_dts_to_add.add(_round_datetime_down(dt))
event['_index'] = index
event['_type'] = stream
event[LOGSTASH_TIMESTAMP_FIELD] = dt.isoformat()
yield event
list(es_helpers.streaming_bulk(self.es, actions(), chunk_size=1000,
refresh=self.force_refresh))
self.index_manager.add_aliases(namespace,
index,
start_dts_to_add) | python | def _insert(self, namespace, stream, events, configuration):
"""
`namespace` acts as db for different streams
`stream` is the name of a stream and `events` is a list of events to
insert.
"""
index = self.index_manager.get_index(namespace)
start_dts_to_add = set()
def actions():
for _id, event in events:
dt = kronos_time_to_datetime(uuid_to_kronos_time(_id))
start_dts_to_add.add(_round_datetime_down(dt))
event['_index'] = index
event['_type'] = stream
event[LOGSTASH_TIMESTAMP_FIELD] = dt.isoformat()
yield event
list(es_helpers.streaming_bulk(self.es, actions(), chunk_size=1000,
refresh=self.force_refresh))
self.index_manager.add_aliases(namespace,
index,
start_dts_to_add) | [
"def",
"_insert",
"(",
"self",
",",
"namespace",
",",
"stream",
",",
"events",
",",
"configuration",
")",
":",
"index",
"=",
"self",
".",
"index_manager",
".",
"get_index",
"(",
"namespace",
")",
"start_dts_to_add",
"=",
"set",
"(",
")",
"def",
"actions",
... | `namespace` acts as db for different streams
`stream` is the name of a stream and `events` is a list of events to
insert. | [
"namespace",
"acts",
"as",
"db",
"for",
"different",
"streams",
"stream",
"is",
"the",
"name",
"of",
"a",
"stream",
"and",
"events",
"is",
"a",
"list",
"of",
"events",
"to",
"insert",
"."
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/elasticsearch/client.py#L223-L246 |
Locu/chronology | kronos/kronos/storage/elasticsearch/client.py | ElasticSearchStorage._delete | def _delete(self, namespace, stream, start_id, end_time, configuration):
"""
Delete events with id > `start_id` and end_time <= `end_time`.
"""
start_time = uuid_to_kronos_time(start_id)
body_query = {
'query': {
'filtered': {
'query': {'match_all': {}},
'filter': {
'bool': {
'should': [
{
'range': {TIMESTAMP_FIELD: {'gt': start_time,
'lte': end_time}}
},
{
'bool': {
'must': [
{'range': {ID_FIELD: {'gt': str(start_id)}}},
{'term': {TIMESTAMP_FIELD: start_time}}
]
}
}
]
}
}
}
}
}
query = {'index': self.index_manager.get_index(namespace),
'doc_type': stream,
'body': body_query,
'ignore': 404,
'allow_no_indices': True,
'ignore_unavailable': True}
try:
# XXX: ElasticSearch does not return stats on deletions.
# https://github.com/elasticsearch/elasticsearch/issues/6519
count = self.es.count(**query).get('count', 0)
if count:
self.es.delete_by_query(**query)
return count, []
except Exception, e:
return 0, [repr(e)] | python | def _delete(self, namespace, stream, start_id, end_time, configuration):
"""
Delete events with id > `start_id` and end_time <= `end_time`.
"""
start_time = uuid_to_kronos_time(start_id)
body_query = {
'query': {
'filtered': {
'query': {'match_all': {}},
'filter': {
'bool': {
'should': [
{
'range': {TIMESTAMP_FIELD: {'gt': start_time,
'lte': end_time}}
},
{
'bool': {
'must': [
{'range': {ID_FIELD: {'gt': str(start_id)}}},
{'term': {TIMESTAMP_FIELD: start_time}}
]
}
}
]
}
}
}
}
}
query = {'index': self.index_manager.get_index(namespace),
'doc_type': stream,
'body': body_query,
'ignore': 404,
'allow_no_indices': True,
'ignore_unavailable': True}
try:
# XXX: ElasticSearch does not return stats on deletions.
# https://github.com/elasticsearch/elasticsearch/issues/6519
count = self.es.count(**query).get('count', 0)
if count:
self.es.delete_by_query(**query)
return count, []
except Exception, e:
return 0, [repr(e)] | [
"def",
"_delete",
"(",
"self",
",",
"namespace",
",",
"stream",
",",
"start_id",
",",
"end_time",
",",
"configuration",
")",
":",
"start_time",
"=",
"uuid_to_kronos_time",
"(",
"start_id",
")",
"body_query",
"=",
"{",
"'query'",
":",
"{",
"'filtered'",
":",
... | Delete events with id > `start_id` and end_time <= `end_time`. | [
"Delete",
"events",
"with",
"id",
">",
"start_id",
"and",
"end_time",
"<",
"=",
"end_time",
"."
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/elasticsearch/client.py#L248-L292 |
Locu/chronology | kronos/kronos/storage/elasticsearch/client.py | ElasticSearchStorage._retrieve | def _retrieve(self, namespace, stream, start_id, end_time, order, limit,
configuration):
"""
Yield events from stream starting after the event with id `start_id` until
and including events with timestamp `end_time`.
"""
indices = self.index_manager.get_aliases(namespace,
uuid_to_kronos_time(start_id),
end_time)
if not indices:
return
end_id = uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST)
end_id.descending = start_id.descending = descending = (
order == ResultOrder.DESCENDING)
start_time = uuid_to_kronos_time(start_id)
body_query = {
'query': {
'filtered': {
'query': {'match_all': {}},
'filter': {
'range': {TIMESTAMP_FIELD: {'gte': start_time, 'lte': end_time}}
}
}
}
}
order = 'desc' if descending else 'asc'
sort_query = [
'%s:%s' % (TIMESTAMP_FIELD, order),
'%s:%s' % (ID_FIELD, order)
]
last_id = end_id if descending else start_id
scroll_id = None
while True:
size = max(min(limit, configuration['read_size']) / self.shards, 10)
if scroll_id is None:
res = self.es.search(index=indices,
doc_type=stream,
size=size,
body=body_query,
sort=sort_query,
_source=True,
scroll='1m',
ignore=[400, 404],
allow_no_indices=True,
ignore_unavailable=True)
else:
res = self.es.scroll(scroll_id, scroll='1m')
if '_scroll_id' not in res:
break
scroll_id = res['_scroll_id']
hits = res.get('hits', {}).get('hits')
if not hits:
break
for hit in hits:
_id = TimeUUID(hit['_id'], descending=descending)
if _id <= last_id:
continue
last_id = _id
event = hit['_source']
del event[LOGSTASH_TIMESTAMP_FIELD]
yield json.dumps(event)
limit -= 1
if limit == 0:
break
if scroll_id is not None:
self.es.clear_scroll(scroll_id) | python | def _retrieve(self, namespace, stream, start_id, end_time, order, limit,
configuration):
"""
Yield events from stream starting after the event with id `start_id` until
and including events with timestamp `end_time`.
"""
indices = self.index_manager.get_aliases(namespace,
uuid_to_kronos_time(start_id),
end_time)
if not indices:
return
end_id = uuid_from_kronos_time(end_time, _type=UUIDType.HIGHEST)
end_id.descending = start_id.descending = descending = (
order == ResultOrder.DESCENDING)
start_time = uuid_to_kronos_time(start_id)
body_query = {
'query': {
'filtered': {
'query': {'match_all': {}},
'filter': {
'range': {TIMESTAMP_FIELD: {'gte': start_time, 'lte': end_time}}
}
}
}
}
order = 'desc' if descending else 'asc'
sort_query = [
'%s:%s' % (TIMESTAMP_FIELD, order),
'%s:%s' % (ID_FIELD, order)
]
last_id = end_id if descending else start_id
scroll_id = None
while True:
size = max(min(limit, configuration['read_size']) / self.shards, 10)
if scroll_id is None:
res = self.es.search(index=indices,
doc_type=stream,
size=size,
body=body_query,
sort=sort_query,
_source=True,
scroll='1m',
ignore=[400, 404],
allow_no_indices=True,
ignore_unavailable=True)
else:
res = self.es.scroll(scroll_id, scroll='1m')
if '_scroll_id' not in res:
break
scroll_id = res['_scroll_id']
hits = res.get('hits', {}).get('hits')
if not hits:
break
for hit in hits:
_id = TimeUUID(hit['_id'], descending=descending)
if _id <= last_id:
continue
last_id = _id
event = hit['_source']
del event[LOGSTASH_TIMESTAMP_FIELD]
yield json.dumps(event)
limit -= 1
if limit == 0:
break
if scroll_id is not None:
self.es.clear_scroll(scroll_id) | [
"def",
"_retrieve",
"(",
"self",
",",
"namespace",
",",
"stream",
",",
"start_id",
",",
"end_time",
",",
"order",
",",
"limit",
",",
"configuration",
")",
":",
"indices",
"=",
"self",
".",
"index_manager",
".",
"get_aliases",
"(",
"namespace",
",",
"uuid_t... | Yield events from stream starting after the event with id `start_id` until
and including events with timestamp `end_time`. | [
"Yield",
"events",
"from",
"stream",
"starting",
"after",
"the",
"event",
"with",
"id",
"start_id",
"until",
"and",
"including",
"events",
"with",
"timestamp",
"end_time",
"."
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/elasticsearch/client.py#L294-L364 |
sid5432/pypdx | pypdx/dbconn.py | DBconn.create_tables | def create_tables(self):
"""
create tables in database (if they don't already exist)
"""
cdir = os.path.dirname( os.path.realpath(__file__) )
# table schemas -------------------------------------
schema = os.path.join(cdir,"data","partsmaster.sql")
if self.debug:
print(self.hdr,"parts master schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","approvedmfg.sql")
if self.debug:
print(self.hdr,"approved mfg list schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","attachment.sql")
if self.debug:
print(self.hdr,"attachment schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","bom.sql")
if self.debug:
print(self.hdr,"bill of materials schema is ",schema)
self.populate(schema)
return | python | def create_tables(self):
"""
create tables in database (if they don't already exist)
"""
cdir = os.path.dirname( os.path.realpath(__file__) )
# table schemas -------------------------------------
schema = os.path.join(cdir,"data","partsmaster.sql")
if self.debug:
print(self.hdr,"parts master schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","approvedmfg.sql")
if self.debug:
print(self.hdr,"approved mfg list schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","attachment.sql")
if self.debug:
print(self.hdr,"attachment schema is ",schema)
self.populate(schema)
schema = os.path.join(cdir,"data","bom.sql")
if self.debug:
print(self.hdr,"bill of materials schema is ",schema)
self.populate(schema)
return | [
"def",
"create_tables",
"(",
"self",
")",
":",
"cdir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"# table schemas -------------------------------------",
"schema",
"=",
"os",
".",
"path",
".",... | create tables in database (if they don't already exist) | [
"create",
"tables",
"in",
"database",
"(",
"if",
"they",
"don",
"t",
"already",
"exist",
")"
] | train | https://github.com/sid5432/pypdx/blob/06b99086fe398d4246aed9df75258f3bed163e31/pypdx/dbconn.py#L55-L86 |
bovee/Aston | aston/tracefile/__init__.py | parse_c_serialized | def parse_c_serialized(f):
"""
Reads in a binary file created by a C++ serializer (prob. MFC?)
and returns tuples of (header name, data following the header).
These are used by Thermo for *.CF and *.DXF files and by Agilent
for new-style *.REG files.
"""
# TODO: rewrite to use re library
f.seek(0)
try:
p_rec_type = None
while True:
rec_off = f.tell()
while True:
if f.read(2) == b'\xff\xff':
h = struct.unpack('<HH', f.read(4))
if h[1] < 64 and h[1] != 0:
rec_type = f.read(h[1])
if rec_type[0] == 67: # starts with 'C'
break
if f.read(1) == b'':
raise EOFError
f.seek(f.tell() - 2)
if p_rec_type is not None:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len)
f.seek(f.tell() + 6 + len(rec_type))
# p_type = h[0]
p_rec_type = rec_type
except EOFError:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len) | python | def parse_c_serialized(f):
"""
Reads in a binary file created by a C++ serializer (prob. MFC?)
and returns tuples of (header name, data following the header).
These are used by Thermo for *.CF and *.DXF files and by Agilent
for new-style *.REG files.
"""
# TODO: rewrite to use re library
f.seek(0)
try:
p_rec_type = None
while True:
rec_off = f.tell()
while True:
if f.read(2) == b'\xff\xff':
h = struct.unpack('<HH', f.read(4))
if h[1] < 64 and h[1] != 0:
rec_type = f.read(h[1])
if rec_type[0] == 67: # starts with 'C'
break
if f.read(1) == b'':
raise EOFError
f.seek(f.tell() - 2)
if p_rec_type is not None:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len)
f.seek(f.tell() + 6 + len(rec_type))
# p_type = h[0]
p_rec_type = rec_type
except EOFError:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len) | [
"def",
"parse_c_serialized",
"(",
"f",
")",
":",
"# TODO: rewrite to use re library",
"f",
".",
"seek",
"(",
"0",
")",
"try",
":",
"p_rec_type",
"=",
"None",
"while",
"True",
":",
"rec_off",
"=",
"f",
".",
"tell",
"(",
")",
"while",
"True",
":",
"if",
... | Reads in a binary file created by a C++ serializer (prob. MFC?)
and returns tuples of (header name, data following the header).
These are used by Thermo for *.CF and *.DXF files and by Agilent
for new-style *.REG files. | [
"Reads",
"in",
"a",
"binary",
"file",
"created",
"by",
"a",
"C",
"++",
"serializer",
"(",
"prob",
".",
"MFC?",
")",
"and",
"returns",
"tuples",
"of",
"(",
"header",
"name",
"data",
"following",
"the",
"header",
")",
".",
"These",
"are",
"used",
"by",
... | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/__init__.py#L32-L65 |
bovee/Aston | aston/tracefile/__init__.py | TraceFile.scan | def scan(self, t, dt=None, aggfunc=None):
"""
Returns the spectrum from a specific time or range of times.
"""
return self.data.scan(t, dt, aggfunc) | python | def scan(self, t, dt=None, aggfunc=None):
"""
Returns the spectrum from a specific time or range of times.
"""
return self.data.scan(t, dt, aggfunc) | [
"def",
"scan",
"(",
"self",
",",
"t",
",",
"dt",
"=",
"None",
",",
"aggfunc",
"=",
"None",
")",
":",
"return",
"self",
".",
"data",
".",
"scan",
"(",
"t",
",",
"dt",
",",
"aggfunc",
")"
] | Returns the spectrum from a specific time or range of times. | [
"Returns",
"the",
"spectrum",
"from",
"a",
"specific",
"time",
"or",
"range",
"of",
"times",
"."
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/__init__.py#L162-L166 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _generate_storage_broker_lookup | def _generate_storage_broker_lookup():
"""Return dictionary of available storage brokers."""
storage_broker_lookup = dict()
for entrypoint in iter_entry_points("dtool.storage_brokers"):
StorageBroker = entrypoint.load()
storage_broker_lookup[StorageBroker.key] = StorageBroker
return storage_broker_lookup | python | def _generate_storage_broker_lookup():
"""Return dictionary of available storage brokers."""
storage_broker_lookup = dict()
for entrypoint in iter_entry_points("dtool.storage_brokers"):
StorageBroker = entrypoint.load()
storage_broker_lookup[StorageBroker.key] = StorageBroker
return storage_broker_lookup | [
"def",
"_generate_storage_broker_lookup",
"(",
")",
":",
"storage_broker_lookup",
"=",
"dict",
"(",
")",
"for",
"entrypoint",
"in",
"iter_entry_points",
"(",
"\"dtool.storage_brokers\"",
")",
":",
"StorageBroker",
"=",
"entrypoint",
".",
"load",
"(",
")",
"storage_b... | Return dictionary of available storage brokers. | [
"Return",
"dictionary",
"of",
"available",
"storage",
"brokers",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L17-L23 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _get_storage_broker | def _get_storage_broker(uri, config_path):
"""Helper function to enable use lookup of appropriate storage brokers."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker_lookup = _generate_storage_broker_lookup()
parsed_uri = dtoolcore.utils.generous_parse_uri(uri)
StorageBroker = storage_broker_lookup[parsed_uri.scheme]
return StorageBroker(uri, config_path) | python | def _get_storage_broker(uri, config_path):
"""Helper function to enable use lookup of appropriate storage brokers."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker_lookup = _generate_storage_broker_lookup()
parsed_uri = dtoolcore.utils.generous_parse_uri(uri)
StorageBroker = storage_broker_lookup[parsed_uri.scheme]
return StorageBroker(uri, config_path) | [
"def",
"_get_storage_broker",
"(",
"uri",
",",
"config_path",
")",
":",
"uri",
"=",
"dtoolcore",
".",
"utils",
".",
"sanitise_uri",
"(",
"uri",
")",
"storage_broker_lookup",
"=",
"_generate_storage_broker_lookup",
"(",
")",
"parsed_uri",
"=",
"dtoolcore",
".",
"... | Helper function to enable use lookup of appropriate storage brokers. | [
"Helper",
"function",
"to",
"enable",
"use",
"lookup",
"of",
"appropriate",
"storage",
"brokers",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L26-L32 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _admin_metadata_from_uri | def _admin_metadata_from_uri(uri, config_path):
"""Helper function for getting admin metadata."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker = _get_storage_broker(uri, config_path)
admin_metadata = storage_broker.get_admin_metadata()
return admin_metadata | python | def _admin_metadata_from_uri(uri, config_path):
"""Helper function for getting admin metadata."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker = _get_storage_broker(uri, config_path)
admin_metadata = storage_broker.get_admin_metadata()
return admin_metadata | [
"def",
"_admin_metadata_from_uri",
"(",
"uri",
",",
"config_path",
")",
":",
"uri",
"=",
"dtoolcore",
".",
"utils",
".",
"sanitise_uri",
"(",
"uri",
")",
"storage_broker",
"=",
"_get_storage_broker",
"(",
"uri",
",",
"config_path",
")",
"admin_metadata",
"=",
... | Helper function for getting admin metadata. | [
"Helper",
"function",
"for",
"getting",
"admin",
"metadata",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L35-L40 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _is_dataset | def _is_dataset(uri, config_path):
"""Helper function for determining if a URI is a dataset."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker = _get_storage_broker(uri, config_path)
return storage_broker.has_admin_metadata() | python | def _is_dataset(uri, config_path):
"""Helper function for determining if a URI is a dataset."""
uri = dtoolcore.utils.sanitise_uri(uri)
storage_broker = _get_storage_broker(uri, config_path)
return storage_broker.has_admin_metadata() | [
"def",
"_is_dataset",
"(",
"uri",
",",
"config_path",
")",
":",
"uri",
"=",
"dtoolcore",
".",
"utils",
".",
"sanitise_uri",
"(",
"uri",
")",
"storage_broker",
"=",
"_get_storage_broker",
"(",
"uri",
",",
"config_path",
")",
"return",
"storage_broker",
".",
"... | Helper function for determining if a URI is a dataset. | [
"Helper",
"function",
"for",
"determining",
"if",
"a",
"URI",
"is",
"a",
"dataset",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L43-L47 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | generate_admin_metadata | def generate_admin_metadata(name, creator_username=None):
"""Return admin metadata as a dictionary."""
if not dtoolcore.utils.name_is_valid(name):
raise(DtoolCoreInvalidNameError())
if creator_username is None:
creator_username = dtoolcore.utils.getuser()
datetime_obj = datetime.datetime.utcnow()
admin_metadata = {
"uuid": str(uuid.uuid4()),
"dtoolcore_version": __version__,
"name": name,
"type": "protodataset",
"creator_username": creator_username,
"created_at": dtoolcore.utils.timestamp(datetime_obj)
}
return admin_metadata | python | def generate_admin_metadata(name, creator_username=None):
"""Return admin metadata as a dictionary."""
if not dtoolcore.utils.name_is_valid(name):
raise(DtoolCoreInvalidNameError())
if creator_username is None:
creator_username = dtoolcore.utils.getuser()
datetime_obj = datetime.datetime.utcnow()
admin_metadata = {
"uuid": str(uuid.uuid4()),
"dtoolcore_version": __version__,
"name": name,
"type": "protodataset",
"creator_username": creator_username,
"created_at": dtoolcore.utils.timestamp(datetime_obj)
}
return admin_metadata | [
"def",
"generate_admin_metadata",
"(",
"name",
",",
"creator_username",
"=",
"None",
")",
":",
"if",
"not",
"dtoolcore",
".",
"utils",
".",
"name_is_valid",
"(",
"name",
")",
":",
"raise",
"(",
"DtoolCoreInvalidNameError",
"(",
")",
")",
"if",
"creator_usernam... | Return admin metadata as a dictionary. | [
"Return",
"admin",
"metadata",
"as",
"a",
"dictionary",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L50-L69 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _generate_uri | def _generate_uri(admin_metadata, base_uri):
"""Return dataset URI.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI from which to derive dataset URI
:returns: dataset URI
"""
name = admin_metadata["name"]
uuid = admin_metadata["uuid"]
# storage_broker_lookup = _generate_storage_broker_lookup()
# parse_result = urlparse(base_uri)
# storage = parse_result.scheme
StorageBroker = _get_storage_broker(base_uri, config_path=None)
return StorageBroker.generate_uri(name, uuid, base_uri) | python | def _generate_uri(admin_metadata, base_uri):
"""Return dataset URI.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI from which to derive dataset URI
:returns: dataset URI
"""
name = admin_metadata["name"]
uuid = admin_metadata["uuid"]
# storage_broker_lookup = _generate_storage_broker_lookup()
# parse_result = urlparse(base_uri)
# storage = parse_result.scheme
StorageBroker = _get_storage_broker(base_uri, config_path=None)
return StorageBroker.generate_uri(name, uuid, base_uri) | [
"def",
"_generate_uri",
"(",
"admin_metadata",
",",
"base_uri",
")",
":",
"name",
"=",
"admin_metadata",
"[",
"\"name\"",
"]",
"uuid",
"=",
"admin_metadata",
"[",
"\"uuid\"",
"]",
"# storage_broker_lookup = _generate_storage_broker_lookup()",
"# parse_result = urlparse(base... | Return dataset URI.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI from which to derive dataset URI
:returns: dataset URI | [
"Return",
"dataset",
"URI",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L72-L85 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | generate_proto_dataset | def generate_proto_dataset(admin_metadata, base_uri, config_path=None):
"""Return :class:`dtoolcore.ProtoDataSet` instance.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI for proto dataset
:param config_path: path to dtool configuration file
"""
uri = _generate_uri(admin_metadata, base_uri)
return ProtoDataSet(uri, admin_metadata, config_path) | python | def generate_proto_dataset(admin_metadata, base_uri, config_path=None):
"""Return :class:`dtoolcore.ProtoDataSet` instance.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI for proto dataset
:param config_path: path to dtool configuration file
"""
uri = _generate_uri(admin_metadata, base_uri)
return ProtoDataSet(uri, admin_metadata, config_path) | [
"def",
"generate_proto_dataset",
"(",
"admin_metadata",
",",
"base_uri",
",",
"config_path",
"=",
"None",
")",
":",
"uri",
"=",
"_generate_uri",
"(",
"admin_metadata",
",",
"base_uri",
")",
"return",
"ProtoDataSet",
"(",
"uri",
",",
"admin_metadata",
",",
"confi... | Return :class:`dtoolcore.ProtoDataSet` instance.
:param admin_metadata: dataset administrative metadata
:param base_uri: base URI for proto dataset
:param config_path: path to dtool configuration file | [
"Return",
":",
"class",
":",
"dtoolcore",
".",
"ProtoDataSet",
"instance",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L88-L96 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | copy | def copy(src_uri, dest_base_uri, config_path=None, progressbar=None):
"""Copy a dataset to another location.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset
"""
dataset = DataSet.from_uri(src_uri)
proto_dataset = _copy_create_proto_dataset(
dataset,
dest_base_uri,
config_path,
progressbar
)
_copy_content(dataset, proto_dataset, progressbar)
proto_dataset.freeze(progressbar=progressbar)
return proto_dataset.uri | python | def copy(src_uri, dest_base_uri, config_path=None, progressbar=None):
"""Copy a dataset to another location.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset
"""
dataset = DataSet.from_uri(src_uri)
proto_dataset = _copy_create_proto_dataset(
dataset,
dest_base_uri,
config_path,
progressbar
)
_copy_content(dataset, proto_dataset, progressbar)
proto_dataset.freeze(progressbar=progressbar)
return proto_dataset.uri | [
"def",
"copy",
"(",
"src_uri",
",",
"dest_base_uri",
",",
"config_path",
"=",
"None",
",",
"progressbar",
"=",
"None",
")",
":",
"dataset",
"=",
"DataSet",
".",
"from_uri",
"(",
"src_uri",
")",
"proto_dataset",
"=",
"_copy_create_proto_dataset",
"(",
"dataset"... | Copy a dataset to another location.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset | [
"Copy",
"a",
"dataset",
"to",
"another",
"location",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L174-L193 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | copy_resume | def copy_resume(src_uri, dest_base_uri, config_path=None, progressbar=None):
"""Resume coping a dataset to another location.
Items that have been copied to the destination and have the same size
as in the source dataset are skipped. All other items are copied across
and the dataset is frozen.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset
"""
dataset = DataSet.from_uri(src_uri)
# Generate the URI of the destination proto dataset.
dest_uri = _generate_uri(dataset._admin_metadata, dest_base_uri)
proto_dataset = ProtoDataSet.from_uri(dest_uri)
_copy_content(dataset, proto_dataset, progressbar)
proto_dataset.freeze(progressbar=progressbar)
return proto_dataset.uri | python | def copy_resume(src_uri, dest_base_uri, config_path=None, progressbar=None):
"""Resume coping a dataset to another location.
Items that have been copied to the destination and have the same size
as in the source dataset are skipped. All other items are copied across
and the dataset is frozen.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset
"""
dataset = DataSet.from_uri(src_uri)
# Generate the URI of the destination proto dataset.
dest_uri = _generate_uri(dataset._admin_metadata, dest_base_uri)
proto_dataset = ProtoDataSet.from_uri(dest_uri)
_copy_content(dataset, proto_dataset, progressbar)
proto_dataset.freeze(progressbar=progressbar)
return proto_dataset.uri | [
"def",
"copy_resume",
"(",
"src_uri",
",",
"dest_base_uri",
",",
"config_path",
"=",
"None",
",",
"progressbar",
"=",
"None",
")",
":",
"dataset",
"=",
"DataSet",
".",
"from_uri",
"(",
"src_uri",
")",
"# Generate the URI of the destination proto dataset.",
"dest_uri... | Resume coping a dataset to another location.
Items that have been copied to the destination and have the same size
as in the source dataset are skipped. All other items are copied across
and the dataset is frozen.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:returns: URI of new dataset | [
"Resume",
"coping",
"a",
"dataset",
"to",
"another",
"location",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L196-L218 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _BaseDataSet.update_name | def update_name(self, new_name):
"""Update the name of the proto dataset.
:param new_name: the new name of the proto dataset
"""
if not dtoolcore.utils.name_is_valid(new_name):
raise(DtoolCoreInvalidNameError())
self._admin_metadata['name'] = new_name
if self._storage_broker.has_admin_metadata():
self._storage_broker.put_admin_metadata(self._admin_metadata) | python | def update_name(self, new_name):
"""Update the name of the proto dataset.
:param new_name: the new name of the proto dataset
"""
if not dtoolcore.utils.name_is_valid(new_name):
raise(DtoolCoreInvalidNameError())
self._admin_metadata['name'] = new_name
if self._storage_broker.has_admin_metadata():
self._storage_broker.put_admin_metadata(self._admin_metadata) | [
"def",
"update_name",
"(",
"self",
",",
"new_name",
")",
":",
"if",
"not",
"dtoolcore",
".",
"utils",
".",
"name_is_valid",
"(",
"new_name",
")",
":",
"raise",
"(",
"DtoolCoreInvalidNameError",
"(",
")",
")",
"self",
".",
"_admin_metadata",
"[",
"'name'",
... | Update the name of the proto dataset.
:param new_name: the new name of the proto dataset | [
"Update",
"the",
"name",
"of",
"the",
"proto",
"dataset",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L275-L286 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _BaseDataSet._put_overlay | def _put_overlay(self, overlay_name, overlay):
"""Store overlay so that it is accessible by the given name.
:param overlay_name: name of the overlay
:param overlay: overlay must be a dictionary where the keys are
identifiers in the dataset
:raises: TypeError if the overlay is not a dictionary,
ValueError if identifiers in overlay and dataset do not match
"""
if not isinstance(overlay, dict):
raise TypeError("Overlay must be dict")
if set(self._identifiers()) != set(overlay.keys()):
raise ValueError("Overlay keys must be dataset identifiers")
self._storage_broker.put_overlay(overlay_name, overlay) | python | def _put_overlay(self, overlay_name, overlay):
"""Store overlay so that it is accessible by the given name.
:param overlay_name: name of the overlay
:param overlay: overlay must be a dictionary where the keys are
identifiers in the dataset
:raises: TypeError if the overlay is not a dictionary,
ValueError if identifiers in overlay and dataset do not match
"""
if not isinstance(overlay, dict):
raise TypeError("Overlay must be dict")
if set(self._identifiers()) != set(overlay.keys()):
raise ValueError("Overlay keys must be dataset identifiers")
self._storage_broker.put_overlay(overlay_name, overlay) | [
"def",
"_put_overlay",
"(",
"self",
",",
"overlay_name",
",",
"overlay",
")",
":",
"if",
"not",
"isinstance",
"(",
"overlay",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"Overlay must be dict\"",
")",
"if",
"set",
"(",
"self",
".",
"_identifiers",
... | Store overlay so that it is accessible by the given name.
:param overlay_name: name of the overlay
:param overlay: overlay must be a dictionary where the keys are
identifiers in the dataset
:raises: TypeError if the overlay is not a dictionary,
ValueError if identifiers in overlay and dataset do not match | [
"Store",
"overlay",
"so",
"that",
"it",
"is",
"accessible",
"by",
"the",
"given",
"name",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L296-L311 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | _BaseDataSet.generate_manifest | def generate_manifest(self, progressbar=None):
"""Return manifest generated from knowledge about contents."""
items = dict()
if progressbar:
progressbar.label = "Generating manifest"
for handle in self._storage_broker.iter_item_handles():
key = dtoolcore.utils.generate_identifier(handle)
value = self._storage_broker.item_properties(handle)
items[key] = value
if progressbar:
progressbar.item_show_func = lambda x: handle
progressbar.update(1)
manifest = {
"items": items,
"dtoolcore_version": __version__,
"hash_function": self._storage_broker.hasher.name
}
return manifest | python | def generate_manifest(self, progressbar=None):
"""Return manifest generated from knowledge about contents."""
items = dict()
if progressbar:
progressbar.label = "Generating manifest"
for handle in self._storage_broker.iter_item_handles():
key = dtoolcore.utils.generate_identifier(handle)
value = self._storage_broker.item_properties(handle)
items[key] = value
if progressbar:
progressbar.item_show_func = lambda x: handle
progressbar.update(1)
manifest = {
"items": items,
"dtoolcore_version": __version__,
"hash_function": self._storage_broker.hasher.name
}
return manifest | [
"def",
"generate_manifest",
"(",
"self",
",",
"progressbar",
"=",
"None",
")",
":",
"items",
"=",
"dict",
"(",
")",
"if",
"progressbar",
":",
"progressbar",
".",
"label",
"=",
"\"Generating manifest\"",
"for",
"handle",
"in",
"self",
".",
"_storage_broker",
... | Return manifest generated from knowledge about contents. | [
"Return",
"manifest",
"generated",
"from",
"knowledge",
"about",
"contents",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L313-L334 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | DataSet._manifest | def _manifest(self):
"""Return manifest content."""
if self._manifest_cache is None:
self._manifest_cache = self._storage_broker.get_manifest()
return self._manifest_cache | python | def _manifest(self):
"""Return manifest content."""
if self._manifest_cache is None:
self._manifest_cache = self._storage_broker.get_manifest()
return self._manifest_cache | [
"def",
"_manifest",
"(",
"self",
")",
":",
"if",
"self",
".",
"_manifest_cache",
"is",
"None",
":",
"self",
".",
"_manifest_cache",
"=",
"self",
".",
"_storage_broker",
".",
"get_manifest",
"(",
")",
"return",
"self",
".",
"_manifest_cache"
] | Return manifest content. | [
"Return",
"manifest",
"content",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L366-L371 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | ProtoDataSet._identifiers | def _identifiers(self):
"""Return iterable of dataset item identifiers."""
for handle in self._storage_broker.iter_item_handles():
yield dtoolcore.utils.generate_identifier(handle) | python | def _identifiers(self):
"""Return iterable of dataset item identifiers."""
for handle in self._storage_broker.iter_item_handles():
yield dtoolcore.utils.generate_identifier(handle) | [
"def",
"_identifiers",
"(",
"self",
")",
":",
"for",
"handle",
"in",
"self",
".",
"_storage_broker",
".",
"iter_item_handles",
"(",
")",
":",
"yield",
"dtoolcore",
".",
"utils",
".",
"generate_identifier",
"(",
"handle",
")"
] | Return iterable of dataset item identifiers. | [
"Return",
"iterable",
"of",
"dataset",
"item",
"identifiers",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L439-L442 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | ProtoDataSet.create | def create(self):
"""Create the required directory structure and admin metadata."""
self._storage_broker.create_structure()
self._storage_broker.put_admin_metadata(self._admin_metadata) | python | def create(self):
"""Create the required directory structure and admin metadata."""
self._storage_broker.create_structure()
self._storage_broker.put_admin_metadata(self._admin_metadata) | [
"def",
"create",
"(",
"self",
")",
":",
"self",
".",
"_storage_broker",
".",
"create_structure",
"(",
")",
"self",
".",
"_storage_broker",
".",
"put_admin_metadata",
"(",
"self",
".",
"_admin_metadata",
")"
] | Create the required directory structure and admin metadata. | [
"Create",
"the",
"required",
"directory",
"structure",
"and",
"admin",
"metadata",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L444-L447 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | ProtoDataSet.add_item_metadata | def add_item_metadata(self, handle, key, value):
"""
Add metadata to a specific item in the :class:`dtoolcore.ProtoDataSet`.
:param handle: handle representing the relative path of the item in the
:class:`dtoolcore.ProtoDataSet`
:param key: metadata key
:param value: metadata value
"""
self._storage_broker.add_item_metadata(handle, key, value) | python | def add_item_metadata(self, handle, key, value):
"""
Add metadata to a specific item in the :class:`dtoolcore.ProtoDataSet`.
:param handle: handle representing the relative path of the item in the
:class:`dtoolcore.ProtoDataSet`
:param key: metadata key
:param value: metadata value
"""
self._storage_broker.add_item_metadata(handle, key, value) | [
"def",
"add_item_metadata",
"(",
"self",
",",
"handle",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_storage_broker",
".",
"add_item_metadata",
"(",
"handle",
",",
"key",
",",
"value",
")"
] | Add metadata to a specific item in the :class:`dtoolcore.ProtoDataSet`.
:param handle: handle representing the relative path of the item in the
:class:`dtoolcore.ProtoDataSet`
:param key: metadata key
:param value: metadata value | [
"Add",
"metadata",
"to",
"a",
"specific",
"item",
"in",
"the",
":",
"class",
":",
"dtoolcore",
".",
"ProtoDataSet",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L470-L479 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | ProtoDataSet._generate_overlays | def _generate_overlays(self):
"""Return dictionary of overlays generated from added item metadata."""
overlays = defaultdict(dict)
for handle in self._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = self._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
return overlays | python | def _generate_overlays(self):
"""Return dictionary of overlays generated from added item metadata."""
overlays = defaultdict(dict)
for handle in self._storage_broker.iter_item_handles():
identifier = dtoolcore.utils.generate_identifier(handle)
item_metadata = self._storage_broker.get_item_metadata(handle)
for k, v in item_metadata.items():
overlays[k][identifier] = v
return overlays | [
"def",
"_generate_overlays",
"(",
"self",
")",
":",
"overlays",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"handle",
"in",
"self",
".",
"_storage_broker",
".",
"iter_item_handles",
"(",
")",
":",
"identifier",
"=",
"dtoolcore",
".",
"utils",
".",
"generate... | Return dictionary of overlays generated from added item metadata. | [
"Return",
"dictionary",
"of",
"overlays",
"generated",
"from",
"added",
"item",
"metadata",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L481-L490 |
jic-dtool/dtoolcore | dtoolcore/__init__.py | ProtoDataSet.freeze | def freeze(self, progressbar=None):
"""
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
"""
# Call the storage broker pre_freeze hook.
self._storage_broker.pre_freeze_hook()
if progressbar:
progressbar.label = "Freezing dataset"
# Generate and persist the manifest.
manifest = self.generate_manifest(progressbar=progressbar)
self._storage_broker.put_manifest(manifest)
# Generate and persist overlays from any item metadata that has been
# added.
overlays = self._generate_overlays()
for overlay_name, overlay in overlays.items():
self._put_overlay(overlay_name, overlay)
# Change the type of the dataset from "protodataset" to "dataset" and
# add a "frozen_at" time stamp to the administrative metadata.
datetime_obj = datetime.datetime.utcnow()
metadata_update = {
"type": "dataset",
"frozen_at": dtoolcore.utils.timestamp(datetime_obj)
}
self._admin_metadata.update(metadata_update)
self._storage_broker.put_admin_metadata(self._admin_metadata)
# Clean up using the storage broker's post freeze hook.
self._storage_broker.post_freeze_hook() | python | def freeze(self, progressbar=None):
"""
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
"""
# Call the storage broker pre_freeze hook.
self._storage_broker.pre_freeze_hook()
if progressbar:
progressbar.label = "Freezing dataset"
# Generate and persist the manifest.
manifest = self.generate_manifest(progressbar=progressbar)
self._storage_broker.put_manifest(manifest)
# Generate and persist overlays from any item metadata that has been
# added.
overlays = self._generate_overlays()
for overlay_name, overlay in overlays.items():
self._put_overlay(overlay_name, overlay)
# Change the type of the dataset from "protodataset" to "dataset" and
# add a "frozen_at" time stamp to the administrative metadata.
datetime_obj = datetime.datetime.utcnow()
metadata_update = {
"type": "dataset",
"frozen_at": dtoolcore.utils.timestamp(datetime_obj)
}
self._admin_metadata.update(metadata_update)
self._storage_broker.put_admin_metadata(self._admin_metadata)
# Clean up using the storage broker's post freeze hook.
self._storage_broker.post_freeze_hook() | [
"def",
"freeze",
"(",
"self",
",",
"progressbar",
"=",
"None",
")",
":",
"# Call the storage broker pre_freeze hook.",
"self",
".",
"_storage_broker",
".",
"pre_freeze_hook",
"(",
")",
"if",
"progressbar",
":",
"progressbar",
".",
"label",
"=",
"\"Freezing dataset\"... | Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`. | [
"Convert",
":",
"class",
":",
"dtoolcore",
".",
"ProtoDataSet",
"to",
":",
"class",
":",
"dtoolcore",
".",
"DataSet",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/__init__.py#L492-L524 |
elemoine/papyrus | papyrus/__init__.py | add_papyrus_handler | def add_papyrus_handler(self, route_name_prefix, base_url, handler):
""" Add a Papyrus handler, i.e. a handler defining the MapFish
HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_handler(
'spots', '/spots', 'mypackage.handlers.SpotHandler')
Arguments:
``route_name_prefix`` The prefix used for the route names
passed to ``config.add_handler``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
``handler`` a dotted name or a reference to a handler class,
e.g. ``'mypackage.handlers.MyHandler'``.
"""
route_name = route_name_prefix + '_read_many'
self.add_handler(route_name, base_url, handler,
action='read_many', request_method='GET')
route_name = route_name_prefix + '_read_one'
self.add_handler(route_name, base_url + '/{id}', handler,
action='read_one', request_method='GET')
route_name = route_name_prefix + '_count'
self.add_handler(route_name, base_url + '/count', handler,
action='count', request_method='GET')
route_name = route_name_prefix + '_create'
self.add_handler(route_name, base_url, handler,
action='create', request_method='POST')
route_name = route_name_prefix + '_update'
self.add_handler(route_name, base_url + '/{id}', handler,
action='update', request_method='PUT')
route_name = route_name_prefix + '_delete'
self.add_handler(route_name, base_url + '/{id}', handler,
action='delete', request_method='DELETE') | python | def add_papyrus_handler(self, route_name_prefix, base_url, handler):
""" Add a Papyrus handler, i.e. a handler defining the MapFish
HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_handler(
'spots', '/spots', 'mypackage.handlers.SpotHandler')
Arguments:
``route_name_prefix`` The prefix used for the route names
passed to ``config.add_handler``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
``handler`` a dotted name or a reference to a handler class,
e.g. ``'mypackage.handlers.MyHandler'``.
"""
route_name = route_name_prefix + '_read_many'
self.add_handler(route_name, base_url, handler,
action='read_many', request_method='GET')
route_name = route_name_prefix + '_read_one'
self.add_handler(route_name, base_url + '/{id}', handler,
action='read_one', request_method='GET')
route_name = route_name_prefix + '_count'
self.add_handler(route_name, base_url + '/count', handler,
action='count', request_method='GET')
route_name = route_name_prefix + '_create'
self.add_handler(route_name, base_url, handler,
action='create', request_method='POST')
route_name = route_name_prefix + '_update'
self.add_handler(route_name, base_url + '/{id}', handler,
action='update', request_method='PUT')
route_name = route_name_prefix + '_delete'
self.add_handler(route_name, base_url + '/{id}', handler,
action='delete', request_method='DELETE') | [
"def",
"add_papyrus_handler",
"(",
"self",
",",
"route_name_prefix",
",",
"base_url",
",",
"handler",
")",
":",
"route_name",
"=",
"route_name_prefix",
"+",
"'_read_many'",
"self",
".",
"add_handler",
"(",
"route_name",
",",
"base_url",
",",
"handler",
",",
"act... | Add a Papyrus handler, i.e. a handler defining the MapFish
HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_handler(
'spots', '/spots', 'mypackage.handlers.SpotHandler')
Arguments:
``route_name_prefix`` The prefix used for the route names
passed to ``config.add_handler``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
``handler`` a dotted name or a reference to a handler class,
e.g. ``'mypackage.handlers.MyHandler'``. | [
"Add",
"a",
"Papyrus",
"handler",
"i",
".",
"e",
".",
"a",
"handler",
"defining",
"the",
"MapFish",
"HTTP",
"interface",
"."
] | train | https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/__init__.py#L2-L41 |
elemoine/papyrus | papyrus/__init__.py | add_papyrus_routes | def add_papyrus_routes(self, route_name_prefix, base_url):
""" A helper method that adds routes to view callables that, together,
implement the MapFish HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_routes('spots', '/spots')
config.scan()
Arguments:
``route_name_prefix' The prefix used for the route names
passed to ``config.add_route``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
"""
route_name = route_name_prefix + '_read_many'
self.add_route(route_name, base_url, request_method='GET')
route_name = route_name_prefix + '_read_one'
self.add_route(route_name, base_url + '/{id}', request_method='GET')
route_name = route_name_prefix + '_count'
self.add_route(route_name, base_url + '/count', request_method='GET')
route_name = route_name_prefix + '_create'
self.add_route(route_name, base_url, request_method='POST')
route_name = route_name_prefix + '_update'
self.add_route(route_name, base_url + '/{id}', request_method='PUT')
route_name = route_name_prefix + '_delete'
self.add_route(route_name, base_url + '/{id}', request_method='DELETE') | python | def add_papyrus_routes(self, route_name_prefix, base_url):
""" A helper method that adds routes to view callables that, together,
implement the MapFish HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_routes('spots', '/spots')
config.scan()
Arguments:
``route_name_prefix' The prefix used for the route names
passed to ``config.add_route``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash!
"""
route_name = route_name_prefix + '_read_many'
self.add_route(route_name, base_url, request_method='GET')
route_name = route_name_prefix + '_read_one'
self.add_route(route_name, base_url + '/{id}', request_method='GET')
route_name = route_name_prefix + '_count'
self.add_route(route_name, base_url + '/count', request_method='GET')
route_name = route_name_prefix + '_create'
self.add_route(route_name, base_url, request_method='POST')
route_name = route_name_prefix + '_update'
self.add_route(route_name, base_url + '/{id}', request_method='PUT')
route_name = route_name_prefix + '_delete'
self.add_route(route_name, base_url + '/{id}', request_method='DELETE') | [
"def",
"add_papyrus_routes",
"(",
"self",
",",
"route_name_prefix",
",",
"base_url",
")",
":",
"route_name",
"=",
"route_name_prefix",
"+",
"'_read_many'",
"self",
".",
"add_route",
"(",
"route_name",
",",
"base_url",
",",
"request_method",
"=",
"'GET'",
")",
"r... | A helper method that adds routes to view callables that, together,
implement the MapFish HTTP interface.
Example::
import papyrus
config.include(papyrus)
config.add_papyrus_routes('spots', '/spots')
config.scan()
Arguments:
``route_name_prefix' The prefix used for the route names
passed to ``config.add_route``.
``base_url`` The web service's base URL, e.g. ``/spots``. No
trailing slash! | [
"A",
"helper",
"method",
"that",
"adds",
"routes",
"to",
"view",
"callables",
"that",
"together",
"implement",
"the",
"MapFish",
"HTTP",
"interface",
"."
] | train | https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/__init__.py#L44-L74 |
bovee/Aston | aston/peak/peak_models.py | peak_model | def peak_model(f):
"""
Given a function that models a peak, add scale and location arguments to
For all functions, v is vertical offset, h is height
x is horizontal offset (1st moment), w is width (2nd moment),
s is skewness (3rd moment), e is excess (4th moment)
"""
@wraps(f)
def wrapped_f(t, **kw):
# load kwargs with default values
# do this here instead of in the def because we want to parse
# all of kwargs later to copy values to pass into f
def_vals = {'v': 0.0, 'h': 1.0, 'x': 0.0, 'w': 1.0, 's': 1.1, 'e': 1.0}
for v in def_vals:
if v not in kw:
kw[v] = def_vals[v]
# this copies all of the defaults into what the peak function needs
anames, _, _, _ = inspect.getargspec(f)
fkw = dict([(arg, kw[arg]) for arg in anames if arg in kw])
# some functions use location or width parameters explicitly
# if not, adjust the timeseries accordingly
ta = t
if 'x' not in anames:
ta = ta - kw['x']
if 'w' not in anames:
ta = ta / kw['w']
# finally call the function
mod = f(ta, **fkw)
# recalcualte, making the peak maximize at x
mod = f(ta + ta[mod.argmax()], **fkw)
return kw['v'] + kw['h'] / max(mod) * mod
args = set(['v', 'h', 'x', 'w'])
anames, _, _, _ = inspect.getargspec(f)
wrapped_f._peakargs = list(args.union([a for a in anames
if a not in ('t', 'r')]))
return wrapped_f | python | def peak_model(f):
"""
Given a function that models a peak, add scale and location arguments to
For all functions, v is vertical offset, h is height
x is horizontal offset (1st moment), w is width (2nd moment),
s is skewness (3rd moment), e is excess (4th moment)
"""
@wraps(f)
def wrapped_f(t, **kw):
# load kwargs with default values
# do this here instead of in the def because we want to parse
# all of kwargs later to copy values to pass into f
def_vals = {'v': 0.0, 'h': 1.0, 'x': 0.0, 'w': 1.0, 's': 1.1, 'e': 1.0}
for v in def_vals:
if v not in kw:
kw[v] = def_vals[v]
# this copies all of the defaults into what the peak function needs
anames, _, _, _ = inspect.getargspec(f)
fkw = dict([(arg, kw[arg]) for arg in anames if arg in kw])
# some functions use location or width parameters explicitly
# if not, adjust the timeseries accordingly
ta = t
if 'x' not in anames:
ta = ta - kw['x']
if 'w' not in anames:
ta = ta / kw['w']
# finally call the function
mod = f(ta, **fkw)
# recalcualte, making the peak maximize at x
mod = f(ta + ta[mod.argmax()], **fkw)
return kw['v'] + kw['h'] / max(mod) * mod
args = set(['v', 'h', 'x', 'w'])
anames, _, _, _ = inspect.getargspec(f)
wrapped_f._peakargs = list(args.union([a for a in anames
if a not in ('t', 'r')]))
return wrapped_f | [
"def",
"peak_model",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapped_f",
"(",
"t",
",",
"*",
"*",
"kw",
")",
":",
"# load kwargs with default values",
"# do this here instead of in the def because we want to parse",
"# all of kwargs later to copy values... | Given a function that models a peak, add scale and location arguments to
For all functions, v is vertical offset, h is height
x is horizontal offset (1st moment), w is width (2nd moment),
s is skewness (3rd moment), e is excess (4th moment) | [
"Given",
"a",
"function",
"that",
"models",
"a",
"peak",
"add",
"scale",
"and",
"location",
"arguments",
"to"
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/peak_models.py#L48-L88 |
MakersF/LoLScraper | lol_scraper/match_downloader.py | download_matches | def download_matches(match_downloaded_callback, on_exit_callback, conf, synchronize_callback= True):
"""
:param match_downloaded_callback: function when a match is downloaded function is called with the match
and the tier (league) of the lowest player in the match
as parameters
:param on_exit_callback: function when this function is terminating on_exit_callback is called
with the remaining players to download, the downloaded
players, the id of the remaining matches to download and
the id of the downloaded matches
:param conf: dict a dictionary containing all the configuration parameters
:param synchronize_callback: bool Synchronize the calls to match_downloaded_callback
If set to True the calls are wrapped by a lock, so that only
one at a time is executing
:return: None
"""
logger = logging.getLogger(__name__)
if conf['logging_level'] != logging.NOTSET:
logger.setLevel(conf['logging_level'])
else:
# possibly set the level to warning
pass
def checkpoint(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches):
logger.info("Reached the checkpoint."
.format(datetime.datetime.now().strftime("%m-%d %H:%M:%S"), len(downloaded_matches)))
if on_exit_callback:
on_exit_callback(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches)
players_to_analyze = set(conf['seed_players_id'])
downloaded_matches = set(conf['downloaded_matches'])
logger.info("{} previously downloaded matches".format(len(downloaded_matches)))
matches_to_download = set(conf['matches_to_download'])
logger.info("{} matches to download".format(len(matches_to_download)))
analyzed_players = set()
pta_lock = threading.Lock()
players_available_condition = threading.Condition(pta_lock)
mtd_lock = threading.Lock()
matches_Available_condition = threading.Condition(mtd_lock)
user_function_lock = threading.Lock() if synchronize_callback else NoOpContextManager()
logger_lock = threading.Lock()
player_downloader_threads = []
match_downloader_threads = []
try:
def create_thread():
if len(player_downloader_threads) < max_players_download_threads:
player_downloader = PlayerDownloader(conf, players_to_analyze, analyzed_players, pta_lock, players_available_condition,
matches_to_download , mtd_lock, matches_Available_condition,
logger, logger_lock)
player_downloader.start()
player_downloader_threads.append(player_downloader)
with logger_lock:
logger.info("Adding a player download thread. Threads: " + str(len(player_downloader_threads)))
else:
with logger_lock:
logger.debug("Tried adding a player download thread, but there are already the maximum number:"
" " + str(max_players_download_threads))
def shutdown_thread():
if len(player_downloader_threads) > 1:
player_downloader_threads.pop().shutdown()
with logger_lock:
logger.info("Removing a player downloader thread. Threads: " + str(len(player_downloader_threads)))
else:
with logger_lock:
logger.debug("Tried removing a player download thread, but there is only one left")
logger.info("Starting fetching..")
# Start one player downloader thread
create_thread()
for _ in range(matches_download_threads):
match_downloader = MatchDownloader(conf, players_to_analyze, pta_lock, players_available_condition,
matches_to_download, downloaded_matches, mtd_lock, matches_Available_condition,
match_downloaded_callback, user_function_lock,
logger, logger_lock)
match_downloader.start()
match_downloader_threads.append(match_downloader)
auto_tuner = ThreadAutoTuner(create_thread, shutdown_thread)
for i, _ in enumerate(do_every(1)):
# Pool the exit flag every second
if conf.get('exit', False):
break
if i % 5 == 0:
with mtd_lock:
matches_in_queue = len(matches_to_download)
# The lock happens in the property. Since it is not re-entrant, do not lock now
total_players = sum(th.total_downloads for th in player_downloader_threads)
auto_tuner.update_thread_number(total_players, matches_in_queue)
# Execute every LOGGING_INTERVAL seconds
if i % logging_interval == 0:
with mtd_lock:
matches_in_queue = len(matches_to_download)
total_matches = sum(th.total_downloads for th in match_downloader_threads)
with pta_lock:
players_in_queue = len(players_to_analyze)
total_players = sum(th.total_downloads for th in player_downloader_threads)
with logger_lock:
logger.info("Players in queue: {}. Downloaded players: {}. Matches in queue: {}. Downloaded matches: {}"
.format(players_in_queue, total_players, matches_in_queue, total_matches))
# Notify all the waiting threads so they can exit
with pta_lock:
players_available_condition.notify_all()
with mtd_lock:
matches_Available_condition.notify_all()
logger.info("Terminating fetching")
finally:
conf['exit'] = True
# Joining threads before saving the state
for thread in player_downloader_threads + match_downloader_threads:
thread.join()
# Always call the checkpoint, so that we can resume the download in case of exceptions.
logger.info("Calling checkpoint callback")
checkpoint(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches) | python | def download_matches(match_downloaded_callback, on_exit_callback, conf, synchronize_callback= True):
"""
:param match_downloaded_callback: function when a match is downloaded function is called with the match
and the tier (league) of the lowest player in the match
as parameters
:param on_exit_callback: function when this function is terminating on_exit_callback is called
with the remaining players to download, the downloaded
players, the id of the remaining matches to download and
the id of the downloaded matches
:param conf: dict a dictionary containing all the configuration parameters
:param synchronize_callback: bool Synchronize the calls to match_downloaded_callback
If set to True the calls are wrapped by a lock, so that only
one at a time is executing
:return: None
"""
logger = logging.getLogger(__name__)
if conf['logging_level'] != logging.NOTSET:
logger.setLevel(conf['logging_level'])
else:
# possibly set the level to warning
pass
def checkpoint(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches):
logger.info("Reached the checkpoint."
.format(datetime.datetime.now().strftime("%m-%d %H:%M:%S"), len(downloaded_matches)))
if on_exit_callback:
on_exit_callback(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches)
players_to_analyze = set(conf['seed_players_id'])
downloaded_matches = set(conf['downloaded_matches'])
logger.info("{} previously downloaded matches".format(len(downloaded_matches)))
matches_to_download = set(conf['matches_to_download'])
logger.info("{} matches to download".format(len(matches_to_download)))
analyzed_players = set()
pta_lock = threading.Lock()
players_available_condition = threading.Condition(pta_lock)
mtd_lock = threading.Lock()
matches_Available_condition = threading.Condition(mtd_lock)
user_function_lock = threading.Lock() if synchronize_callback else NoOpContextManager()
logger_lock = threading.Lock()
player_downloader_threads = []
match_downloader_threads = []
try:
def create_thread():
if len(player_downloader_threads) < max_players_download_threads:
player_downloader = PlayerDownloader(conf, players_to_analyze, analyzed_players, pta_lock, players_available_condition,
matches_to_download , mtd_lock, matches_Available_condition,
logger, logger_lock)
player_downloader.start()
player_downloader_threads.append(player_downloader)
with logger_lock:
logger.info("Adding a player download thread. Threads: " + str(len(player_downloader_threads)))
else:
with logger_lock:
logger.debug("Tried adding a player download thread, but there are already the maximum number:"
" " + str(max_players_download_threads))
def shutdown_thread():
if len(player_downloader_threads) > 1:
player_downloader_threads.pop().shutdown()
with logger_lock:
logger.info("Removing a player downloader thread. Threads: " + str(len(player_downloader_threads)))
else:
with logger_lock:
logger.debug("Tried removing a player download thread, but there is only one left")
logger.info("Starting fetching..")
# Start one player downloader thread
create_thread()
for _ in range(matches_download_threads):
match_downloader = MatchDownloader(conf, players_to_analyze, pta_lock, players_available_condition,
matches_to_download, downloaded_matches, mtd_lock, matches_Available_condition,
match_downloaded_callback, user_function_lock,
logger, logger_lock)
match_downloader.start()
match_downloader_threads.append(match_downloader)
auto_tuner = ThreadAutoTuner(create_thread, shutdown_thread)
for i, _ in enumerate(do_every(1)):
# Pool the exit flag every second
if conf.get('exit', False):
break
if i % 5 == 0:
with mtd_lock:
matches_in_queue = len(matches_to_download)
# The lock happens in the property. Since it is not re-entrant, do not lock now
total_players = sum(th.total_downloads for th in player_downloader_threads)
auto_tuner.update_thread_number(total_players, matches_in_queue)
# Execute every LOGGING_INTERVAL seconds
if i % logging_interval == 0:
with mtd_lock:
matches_in_queue = len(matches_to_download)
total_matches = sum(th.total_downloads for th in match_downloader_threads)
with pta_lock:
players_in_queue = len(players_to_analyze)
total_players = sum(th.total_downloads for th in player_downloader_threads)
with logger_lock:
logger.info("Players in queue: {}. Downloaded players: {}. Matches in queue: {}. Downloaded matches: {}"
.format(players_in_queue, total_players, matches_in_queue, total_matches))
# Notify all the waiting threads so they can exit
with pta_lock:
players_available_condition.notify_all()
with mtd_lock:
matches_Available_condition.notify_all()
logger.info("Terminating fetching")
finally:
conf['exit'] = True
# Joining threads before saving the state
for thread in player_downloader_threads + match_downloader_threads:
thread.join()
# Always call the checkpoint, so that we can resume the download in case of exceptions.
logger.info("Calling checkpoint callback")
checkpoint(players_to_analyze, analyzed_players, matches_to_download, downloaded_matches) | [
"def",
"download_matches",
"(",
"match_downloaded_callback",
",",
"on_exit_callback",
",",
"conf",
",",
"synchronize_callback",
"=",
"True",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"conf",
"[",
"'logging_level'",
"]",
"!... | :param match_downloaded_callback: function when a match is downloaded function is called with the match
and the tier (league) of the lowest player in the match
as parameters
:param on_exit_callback: function when this function is terminating on_exit_callback is called
with the remaining players to download, the downloaded
players, the id of the remaining matches to download and
the id of the downloaded matches
:param conf: dict a dictionary containing all the configuration parameters
:param synchronize_callback: bool Synchronize the calls to match_downloaded_callback
If set to True the calls are wrapped by a lock, so that only
one at a time is executing
:return: None | [
":",
"param",
"match_downloaded_callback",
":",
"function",
"when",
"a",
"match",
"is",
"downloaded",
"function",
"is",
"called",
"with",
"the",
"match",
"and",
"the",
"tier",
"(",
"league",
")",
"of",
"the",
"lowest",
"player",
"in",
"the",
"match",
"as",
... | train | https://github.com/MakersF/LoLScraper/blob/71d9f2ef24159f2ba5d21467aac1ab785c2bb7e6/lol_scraper/match_downloader.py#L369-L498 |
ryanvarley/ExoData | exodata/astroclasses.py | _findNearest | def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = (abs(arr-value)).argmin()
return arr[idx] | python | def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = (abs(arr-value)).argmin()
return arr[idx] | [
"def",
"_findNearest",
"(",
"arr",
",",
"value",
")",
":",
"arr",
"=",
"np",
".",
"array",
"(",
"arr",
")",
"# find nearest value in array",
"idx",
"=",
"(",
"abs",
"(",
"arr",
"-",
"value",
")",
")",
".",
"argmin",
"(",
")",
"return",
"arr",
"[",
... | Finds the value in arr that value is closest to | [
"Finds",
"the",
"value",
"in",
"arr",
"that",
"value",
"is",
"closest",
"to"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L943-L949 |
ryanvarley/ExoData | exodata/astroclasses.py | _createMagConversionDict | def _createMagConversionDict():
""" loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
"""
magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat')
raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5')
magDict = {}
for row in raw_table:
if sys.hexversion >= 0x03000000:
starClass = row[1].decode("utf-8") # otherwise we get byte ints or b' caused by 2to3
tableData = [x.decode("utf-8") for x in row[3:]]
else:
starClass = row[1]
tableData = row[3:]
magDict[starClass] = tableData
return magDict | python | def _createMagConversionDict():
""" loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
"""
magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat')
raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5')
magDict = {}
for row in raw_table:
if sys.hexversion >= 0x03000000:
starClass = row[1].decode("utf-8") # otherwise we get byte ints or b' caused by 2to3
tableData = [x.decode("utf-8") for x in row[3:]]
else:
starClass = row[1]
tableData = row[3:]
magDict[starClass] = tableData
return magDict | [
"def",
"_createMagConversionDict",
"(",
")",
":",
"magnitude_conversion_filepath",
"=",
"resource_stream",
"(",
"__name__",
",",
"'data/magnitude_conversion.dat'",
")",
"raw_table",
"=",
"np",
".",
"loadtxt",
"(",
"magnitude_conversion_filepath",
",",
"'|S5'",
")",
"mag... | loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K | [
"loads",
"magnitude_conversion",
".",
"dat",
"which",
"is",
"table",
"A%",
"1995ApJS",
"..",
"101",
"..",
"117K"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L1253-L1269 |
ryanvarley/ExoData | exodata/astroclasses.py | _BaseObject._getParentClass | def _getParentClass(self, startClass, parentClass):
""" gets the parent class by calling successive parent classes with .parent until parentclass is matched.
"""
try:
if not startClass: # reached system with no hits
raise AttributeError
except AttributeError: # i.e calling binary on an object without one
raise HierarchyError('This object ({0}) has no {1} as a parent object'.format(self.name, parentClass))
if startClass.classType == parentClass:
return startClass
else:
return self._getParentClass(startClass.parent, parentClass) | python | def _getParentClass(self, startClass, parentClass):
""" gets the parent class by calling successive parent classes with .parent until parentclass is matched.
"""
try:
if not startClass: # reached system with no hits
raise AttributeError
except AttributeError: # i.e calling binary on an object without one
raise HierarchyError('This object ({0}) has no {1} as a parent object'.format(self.name, parentClass))
if startClass.classType == parentClass:
return startClass
else:
return self._getParentClass(startClass.parent, parentClass) | [
"def",
"_getParentClass",
"(",
"self",
",",
"startClass",
",",
"parentClass",
")",
":",
"try",
":",
"if",
"not",
"startClass",
":",
"# reached system with no hits",
"raise",
"AttributeError",
"except",
"AttributeError",
":",
"# i.e calling binary on an object without one"... | gets the parent class by calling successive parent classes with .parent until parentclass is matched. | [
"gets",
"the",
"parent",
"class",
"by",
"calling",
"successive",
"parent",
"classes",
"with",
".",
"parent",
"until",
"parentclass",
"is",
"matched",
"."
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L45-L57 |
ryanvarley/ExoData | exodata/astroclasses.py | StarAndPlanetCommon.T | def T(self):
""" Looks for the temperature in the catalogue, if absent it calculates it using calcTemperature()
:return: planet temperature
"""
paramTemp = self.getParam('temperature')
if not paramTemp is np.nan:
return paramTemp
elif ed_params.estimateMissingValues:
self.flags.addFlag('Calculated Temperature')
return self.calcTemperature()
else:
return np.nan | python | def T(self):
""" Looks for the temperature in the catalogue, if absent it calculates it using calcTemperature()
:return: planet temperature
"""
paramTemp = self.getParam('temperature')
if not paramTemp is np.nan:
return paramTemp
elif ed_params.estimateMissingValues:
self.flags.addFlag('Calculated Temperature')
return self.calcTemperature()
else:
return np.nan | [
"def",
"T",
"(",
"self",
")",
":",
"paramTemp",
"=",
"self",
".",
"getParam",
"(",
"'temperature'",
")",
"if",
"not",
"paramTemp",
"is",
"np",
".",
"nan",
":",
"return",
"paramTemp",
"elif",
"ed_params",
".",
"estimateMissingValues",
":",
"self",
".",
"f... | Looks for the temperature in the catalogue, if absent it calculates it using calcTemperature()
:return: planet temperature | [
"Looks",
"for",
"the",
"temperature",
"in",
"the",
"catalogue",
"if",
"absent",
"it",
"calculates",
"it",
"using",
"calcTemperature",
"()"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L383-L396 |
ryanvarley/ExoData | exodata/astroclasses.py | Star.d | def d(self):
""" Note this should work from child parents as .d propergates, calculates using the star estimation method
estimateDistance and estimateAbsoluteMagnitude
"""
# TODO this will only work from a star or below. good thing?
d = self.parent.d
if ed_params.estimateMissingValues:
if d is np.nan:
d = self.estimateDistance()
if d is not np.nan:
self.flags.addFlag('Estimated Distance')
return d
else:
return np.nan | python | def d(self):
""" Note this should work from child parents as .d propergates, calculates using the star estimation method
estimateDistance and estimateAbsoluteMagnitude
"""
# TODO this will only work from a star or below. good thing?
d = self.parent.d
if ed_params.estimateMissingValues:
if d is np.nan:
d = self.estimateDistance()
if d is not np.nan:
self.flags.addFlag('Estimated Distance')
return d
else:
return np.nan | [
"def",
"d",
"(",
"self",
")",
":",
"# TODO this will only work from a star or below. good thing?",
"d",
"=",
"self",
".",
"parent",
".",
"d",
"if",
"ed_params",
".",
"estimateMissingValues",
":",
"if",
"d",
"is",
"np",
".",
"nan",
":",
"d",
"=",
"self",
".",... | Note this should work from child parents as .d propergates, calculates using the star estimation method
estimateDistance and estimateAbsoluteMagnitude | [
"Note",
"this",
"should",
"work",
"from",
"child",
"parents",
"as",
".",
"d",
"propergates",
"calculates",
"using",
"the",
"star",
"estimation",
"method",
"estimateDistance",
"and",
"estimateAbsoluteMagnitude"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L464-L477 |
ryanvarley/ExoData | exodata/astroclasses.py | Star._get_or_convert_magnitude | def _get_or_convert_magnitude(self, mag_letter):
""" Takes input of the magnitude letter and ouputs the magnitude fetched from the catalogue or a converted value
:return:
"""
allowed_mags = "UBVJIHKLMN"
catalogue_mags = 'BVIJHK'
if mag_letter not in allowed_mags or not len(mag_letter) == 1:
raise ValueError("Magnitude letter must be a single letter in {0}".format(allowed_mags))
mag_str = 'mag'+mag_letter
mag_val = self.getParam(mag_str)
if isNanOrNone(mag_val) and ed_params.estimateMissingValues: # then we need to estimate it!
# old style dict comprehension for python 2.6
mag_dict = dict(('mag'+letter, self.getParam('mag'+letter)) for letter in catalogue_mags)
mag_class = Magnitude(self.spectralType, **mag_dict)
try:
mag_conversion = mag_class.convert(mag_letter)
# logger.debug('Star Class: Conversion to {0} successful, got {1}'.format(mag_str, mag_conversion))
self.flags.addFlag('Estimated mag{0}'.format(mag_letter))
return mag_conversion
except ValueError as e: # cant convert
logger.exception(e)
# logger.debug('Cant convert to {0}'.format(mag_letter))
return np.nan
else:
# logger.debug('returning {0}={1} from catalogue'.format(mag_str, mag_val))
return mag_val | python | def _get_or_convert_magnitude(self, mag_letter):
""" Takes input of the magnitude letter and ouputs the magnitude fetched from the catalogue or a converted value
:return:
"""
allowed_mags = "UBVJIHKLMN"
catalogue_mags = 'BVIJHK'
if mag_letter not in allowed_mags or not len(mag_letter) == 1:
raise ValueError("Magnitude letter must be a single letter in {0}".format(allowed_mags))
mag_str = 'mag'+mag_letter
mag_val = self.getParam(mag_str)
if isNanOrNone(mag_val) and ed_params.estimateMissingValues: # then we need to estimate it!
# old style dict comprehension for python 2.6
mag_dict = dict(('mag'+letter, self.getParam('mag'+letter)) for letter in catalogue_mags)
mag_class = Magnitude(self.spectralType, **mag_dict)
try:
mag_conversion = mag_class.convert(mag_letter)
# logger.debug('Star Class: Conversion to {0} successful, got {1}'.format(mag_str, mag_conversion))
self.flags.addFlag('Estimated mag{0}'.format(mag_letter))
return mag_conversion
except ValueError as e: # cant convert
logger.exception(e)
# logger.debug('Cant convert to {0}'.format(mag_letter))
return np.nan
else:
# logger.debug('returning {0}={1} from catalogue'.format(mag_str, mag_val))
return mag_val | [
"def",
"_get_or_convert_magnitude",
"(",
"self",
",",
"mag_letter",
")",
":",
"allowed_mags",
"=",
"\"UBVJIHKLMN\"",
"catalogue_mags",
"=",
"'BVIJHK'",
"if",
"mag_letter",
"not",
"in",
"allowed_mags",
"or",
"not",
"len",
"(",
"mag_letter",
")",
"==",
"1",
":",
... | Takes input of the magnitude letter and ouputs the magnitude fetched from the catalogue or a converted value
:return: | [
"Takes",
"input",
"of",
"the",
"magnitude",
"letter",
"and",
"ouputs",
"the",
"magnitude",
"fetched",
"from",
"the",
"catalogue",
"or",
"a",
"converted",
"value",
":",
"return",
":"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L488-L516 |
ryanvarley/ExoData | exodata/astroclasses.py | Star.getLimbdarkeningCoeff | def getLimbdarkeningCoeff(self, wavelength=1.22): # TODO replace with pylightcurve
""" Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity.
:param wavelength: microns
:type wavelength: float
:return: limb darkening coefficients 1 and 2
"""
# TODO check this returns correct value - im not certain
# The intervals of values in the tables
tempind = [ 3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500., 5750., 6000., 6250.,
6500., 6750., 7000., 7250., 7500., 7750., 8000., 8250., 8500., 8750., 9000., 9250.,
9500., 9750., 10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750., 12000., 12250.,
12500., 12750., 13000., 14000., 15000., 16000., 17000., 19000., 20000., 21000., 22000., 23000.,
24000., 25000., 26000., 27000., 28000., 29000., 30000., 31000., 32000., 33000., 34000., 35000.,
36000., 37000., 38000., 39000., 40000., 41000., 42000., 43000., 44000., 45000., 46000., 47000.,
48000., 49000., 50000.]
lggind = [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5.]
mhind = [-5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, -0.3, -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.]
# Choose the values in the table nearest our parameters
tempselect = _findNearest(tempind, float(self.T))
lgselect = _findNearest(lggind, float(self.calcLogg()))
mhselect = _findNearest(mhind, float(self.Z))
quadratic_filepath = resource_stream(__name__, 'data/quadratic.dat')
coeffTable = np.loadtxt(quadratic_filepath)
foundValues = False
for i in range(len(coeffTable)):
if coeffTable[i, 2] == lgselect and coeffTable[i, 3] == tempselect and coeffTable[i, 4] == mhselect:
if coeffTable[i, 0] == 1:
u1array = coeffTable[i, 8:] # Limb darkening parameter u1 for each wl in waveind
u2array = coeffTable[i+1, 8:]
foundValues = True
break
if not foundValues:
raise ValueError('No limb darkening values could be found') # TODO replace with better exception
waveind = [0.365, 0.445, 0.551, 0.658, 0.806, 1.22, 1.63, 2.19, 3.45] # Wavelengths available in table
# Interpolates the value at wavelength from values in the table (waveind)
u1AtWavelength = np.interp(wavelength, waveind, u1array, left=0, right=0)
u2AtWavelength = np.interp(wavelength, waveind, u2array, left=0, right=0)
return u1AtWavelength, u2AtWavelength | python | def getLimbdarkeningCoeff(self, wavelength=1.22): # TODO replace with pylightcurve
""" Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity.
:param wavelength: microns
:type wavelength: float
:return: limb darkening coefficients 1 and 2
"""
# TODO check this returns correct value - im not certain
# The intervals of values in the tables
tempind = [ 3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500., 5750., 6000., 6250.,
6500., 6750., 7000., 7250., 7500., 7750., 8000., 8250., 8500., 8750., 9000., 9250.,
9500., 9750., 10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750., 12000., 12250.,
12500., 12750., 13000., 14000., 15000., 16000., 17000., 19000., 20000., 21000., 22000., 23000.,
24000., 25000., 26000., 27000., 28000., 29000., 30000., 31000., 32000., 33000., 34000., 35000.,
36000., 37000., 38000., 39000., 40000., 41000., 42000., 43000., 44000., 45000., 46000., 47000.,
48000., 49000., 50000.]
lggind = [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5.]
mhind = [-5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, -0.3, -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.]
# Choose the values in the table nearest our parameters
tempselect = _findNearest(tempind, float(self.T))
lgselect = _findNearest(lggind, float(self.calcLogg()))
mhselect = _findNearest(mhind, float(self.Z))
quadratic_filepath = resource_stream(__name__, 'data/quadratic.dat')
coeffTable = np.loadtxt(quadratic_filepath)
foundValues = False
for i in range(len(coeffTable)):
if coeffTable[i, 2] == lgselect and coeffTable[i, 3] == tempselect and coeffTable[i, 4] == mhselect:
if coeffTable[i, 0] == 1:
u1array = coeffTable[i, 8:] # Limb darkening parameter u1 for each wl in waveind
u2array = coeffTable[i+1, 8:]
foundValues = True
break
if not foundValues:
raise ValueError('No limb darkening values could be found') # TODO replace with better exception
waveind = [0.365, 0.445, 0.551, 0.658, 0.806, 1.22, 1.63, 2.19, 3.45] # Wavelengths available in table
# Interpolates the value at wavelength from values in the table (waveind)
u1AtWavelength = np.interp(wavelength, waveind, u1array, left=0, right=0)
u2AtWavelength = np.interp(wavelength, waveind, u2array, left=0, right=0)
return u1AtWavelength, u2AtWavelength | [
"def",
"getLimbdarkeningCoeff",
"(",
"self",
",",
"wavelength",
"=",
"1.22",
")",
":",
"# TODO replace with pylightcurve",
"# TODO check this returns correct value - im not certain",
"# The intervals of values in the tables",
"tempind",
"=",
"[",
"3500.",
",",
"3750.",
",",
"... | Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity.
:param wavelength: microns
:type wavelength: float
:return: limb darkening coefficients 1 and 2 | [
"Looks",
"up",
"quadratic",
"limb",
"darkening",
"parameter",
"from",
"the",
"star",
"based",
"on",
"T",
"logg",
"and",
"metalicity",
"."
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L578-L624 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.isTransiting | def isTransiting(self):
""" Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue
version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented
"""
try:
isTransiting = self.params['istransiting']
except KeyError:
return False
if isTransiting == '1':
return True
else:
return False | python | def isTransiting(self):
""" Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue
version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented
"""
try:
isTransiting = self.params['istransiting']
except KeyError:
return False
if isTransiting == '1':
return True
else:
return False | [
"def",
"isTransiting",
"(",
"self",
")",
":",
"try",
":",
"isTransiting",
"=",
"self",
".",
"params",
"[",
"'istransiting'",
"]",
"except",
"KeyError",
":",
"return",
"False",
"if",
"isTransiting",
"==",
"'1'",
":",
"return",
"True",
"else",
":",
"return",... | Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue
version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented | [
"Checks",
"the",
"the",
"istransiting",
"tag",
"to",
"see",
"if",
"the",
"planet",
"transits",
".",
"Note",
"that",
"this",
"only",
"works",
"as",
"of",
"catalogue",
"version",
"ee12343381ae4106fd2db908e25ffc537a2ee98c",
"(",
"11th",
"March",
"2014",
")",
"where... | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L645-L657 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.calcTransitDuration | def calcTransitDuration(self, circular=False):
""" Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`)
"""
try:
if circular:
return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i)
else:
return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td
except (ValueError,
AttributeError, # caused by trying to rescale nan i.e. missing i value
HierarchyError): # i.e. planets that dont orbit stars
return np.nan | python | def calcTransitDuration(self, circular=False):
""" Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`)
"""
try:
if circular:
return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i)
else:
return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td
except (ValueError,
AttributeError, # caused by trying to rescale nan i.e. missing i value
HierarchyError): # i.e. planets that dont orbit stars
return np.nan | [
"def",
"calcTransitDuration",
"(",
"self",
",",
"circular",
"=",
"False",
")",
":",
"try",
":",
"if",
"circular",
":",
"return",
"eq",
".",
"transitDurationCircular",
"(",
"self",
".",
"P",
",",
"self",
".",
"star",
".",
"R",
",",
"self",
".",
"R",
"... | Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`) | [
"Estimation",
"of",
"the",
"primary",
"transit",
"time",
"assuming",
"a",
"circular",
"orbit",
"(",
"see",
":",
"py",
":",
"func",
":",
"equations",
".",
"transitDuration",
")"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L659-L671 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.calcTemperature | def calcTemperature(self):
""" Calculates the temperature using which uses equations.MeanPlanetTemp, albedo assumption and potentially
equations.starTemperature.
issues
- you cant get the albedo assumption without temp but you need it to calculate the temp.
"""
try:
return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
except (ValueError, HierarchyError): # ie missing value (.a) returning nan
return np.nan | python | def calcTemperature(self):
""" Calculates the temperature using which uses equations.MeanPlanetTemp, albedo assumption and potentially
equations.starTemperature.
issues
- you cant get the albedo assumption without temp but you need it to calculate the temp.
"""
try:
return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
except (ValueError, HierarchyError): # ie missing value (.a) returning nan
return np.nan | [
"def",
"calcTemperature",
"(",
"self",
")",
":",
"try",
":",
"return",
"eq",
".",
"MeanPlanetTemp",
"(",
"self",
".",
"albedo",
",",
"self",
".",
"star",
".",
"T",
",",
"self",
".",
"star",
".",
"R",
",",
"self",
".",
"a",
")",
".",
"T_p",
"excep... | Calculates the temperature using which uses equations.MeanPlanetTemp, albedo assumption and potentially
equations.starTemperature.
issues
- you cant get the albedo assumption without temp but you need it to calculate the temp. | [
"Calculates",
"the",
"temperature",
"using",
"which",
"uses",
"equations",
".",
"MeanPlanetTemp",
"albedo",
"assumption",
"and",
"potentially",
"equations",
".",
"starTemperature",
"."
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L733-L743 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.calcSMA | def calcSMA(self):
""" Calculates the semi-major axis from Keplers Third Law
"""
try:
return eq.KeplersThirdLaw(None, self.star.M, self.P).a
except HierarchyError:
return np.nan | python | def calcSMA(self):
""" Calculates the semi-major axis from Keplers Third Law
"""
try:
return eq.KeplersThirdLaw(None, self.star.M, self.P).a
except HierarchyError:
return np.nan | [
"def",
"calcSMA",
"(",
"self",
")",
":",
"try",
":",
"return",
"eq",
".",
"KeplersThirdLaw",
"(",
"None",
",",
"self",
".",
"star",
".",
"M",
",",
"self",
".",
"P",
")",
".",
"a",
"except",
"HierarchyError",
":",
"return",
"np",
".",
"nan"
] | Calculates the semi-major axis from Keplers Third Law | [
"Calculates",
"the",
"semi",
"-",
"major",
"axis",
"from",
"Keplers",
"Third",
"Law"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L751-L757 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.calcSMAfromT | def calcSMAfromT(self, epsilon=0.7):
""" Calculates the semi-major axis based on planet temperature
"""
return eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R, epsilon, self.T).a | python | def calcSMAfromT(self, epsilon=0.7):
""" Calculates the semi-major axis based on planet temperature
"""
return eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R, epsilon, self.T).a | [
"def",
"calcSMAfromT",
"(",
"self",
",",
"epsilon",
"=",
"0.7",
")",
":",
"return",
"eq",
".",
"MeanPlanetTemp",
"(",
"self",
".",
"albedo",
"(",
")",
",",
"self",
".",
"star",
".",
"T",
",",
"self",
".",
"star",
".",
"R",
",",
"epsilon",
",",
"s... | Calculates the semi-major axis based on planet temperature | [
"Calculates",
"the",
"semi",
"-",
"major",
"axis",
"based",
"on",
"planet",
"temperature"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L759-L763 |
ryanvarley/ExoData | exodata/astroclasses.py | Planet.calcPeriod | def calcPeriod(self):
""" calculates period using a and stellar mass
"""
return eq.KeplersThirdLaw(self.a, self.star.M).P | python | def calcPeriod(self):
""" calculates period using a and stellar mass
"""
return eq.KeplersThirdLaw(self.a, self.star.M).P | [
"def",
"calcPeriod",
"(",
"self",
")",
":",
"return",
"eq",
".",
"KeplersThirdLaw",
"(",
"self",
".",
"a",
",",
"self",
".",
"star",
".",
"M",
")",
".",
"P"
] | calculates period using a and stellar mass | [
"calculates",
"period",
"using",
"a",
"and",
"stellar",
"mass"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L765-L769 |
ryanvarley/ExoData | exodata/astroclasses.py | Parameters.addParam | def addParam(self, key, value, attrib=None):
""" Checks the key dosnt already exist, adds alternate names to a seperate list
Future
- format input and add units
- logging
"""
if key in self.rejectTags:
return False # TODO Replace with exception
# Temporary code to handle the seperation tag than can occur several times with different units.
# TODO code a full multi unit solution (github issue #1)
if key == 'separation':
if attrib is None:
return False # reject seperations without a unit
try:
if not attrib['unit'] == 'AU':
return False # reject for now
except KeyError: # a seperation attribute exists but not one for units
return False
if key in self.params: # if already exists
if key == 'name':
try: # if flagged as a primary or popular name use this one, an option should be made to use either
if attrib['type'] == 'pri': # first names or popular names.
oldname = self.params['name']
self.params['altnames'].append(oldname)
self.params['name'] = value
else:
self.params['altnames'].append(value)
except (KeyError, TypeError): # KeyError = no type key in attrib dict, TypeError = not a dict
self.params['altnames'].append(value)
elif key == 'list':
self.params['list'].append(value)
else:
try:
name = self.params['name']
except KeyError:
name = 'Unnamed'
print('rejected duplicate {0}: {1} in {2}'.format(key, value, name)) # TODO: log rejected value
return False # TODO Replace with exception
else: # If the key doesn't already exist and isn't rejected
# Some tags have no value but a upperlimit in the attributes
if value is None and attrib is not None:
try:
value = attrib['upperlimit']
except KeyError:
try:
value = attrib['lowerlimit']
except KeyError:
return False
if key == 'rightascension':
value = _ra_string_to_unit(value)
elif key == 'declination':
value = _dec_string_to_unit(value)
elif key in self._defaultUnits:
try:
value = float(value) * self._defaultUnits[key]
except:
print('caught an error with {0} - {1}'.format(key, value))
self.params[key] = value | python | def addParam(self, key, value, attrib=None):
""" Checks the key dosnt already exist, adds alternate names to a seperate list
Future
- format input and add units
- logging
"""
if key in self.rejectTags:
return False # TODO Replace with exception
# Temporary code to handle the seperation tag than can occur several times with different units.
# TODO code a full multi unit solution (github issue #1)
if key == 'separation':
if attrib is None:
return False # reject seperations without a unit
try:
if not attrib['unit'] == 'AU':
return False # reject for now
except KeyError: # a seperation attribute exists but not one for units
return False
if key in self.params: # if already exists
if key == 'name':
try: # if flagged as a primary or popular name use this one, an option should be made to use either
if attrib['type'] == 'pri': # first names or popular names.
oldname = self.params['name']
self.params['altnames'].append(oldname)
self.params['name'] = value
else:
self.params['altnames'].append(value)
except (KeyError, TypeError): # KeyError = no type key in attrib dict, TypeError = not a dict
self.params['altnames'].append(value)
elif key == 'list':
self.params['list'].append(value)
else:
try:
name = self.params['name']
except KeyError:
name = 'Unnamed'
print('rejected duplicate {0}: {1} in {2}'.format(key, value, name)) # TODO: log rejected value
return False # TODO Replace with exception
else: # If the key doesn't already exist and isn't rejected
# Some tags have no value but a upperlimit in the attributes
if value is None and attrib is not None:
try:
value = attrib['upperlimit']
except KeyError:
try:
value = attrib['lowerlimit']
except KeyError:
return False
if key == 'rightascension':
value = _ra_string_to_unit(value)
elif key == 'declination':
value = _dec_string_to_unit(value)
elif key in self._defaultUnits:
try:
value = float(value) * self._defaultUnits[key]
except:
print('caught an error with {0} - {1}'.format(key, value))
self.params[key] = value | [
"def",
"addParam",
"(",
"self",
",",
"key",
",",
"value",
",",
"attrib",
"=",
"None",
")",
":",
"if",
"key",
"in",
"self",
".",
"rejectTags",
":",
"return",
"False",
"# TODO Replace with exception",
"# Temporary code to handle the seperation tag than can occur several... | Checks the key dosnt already exist, adds alternate names to a seperate list
Future
- format input and add units
- logging | [
"Checks",
"the",
"key",
"dosnt",
"already",
"exist",
"adds",
"alternate",
"names",
"to",
"a",
"seperate",
"list"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L829-L894 |
ryanvarley/ExoData | exodata/astroclasses.py | SpectralType.roundedSpecClass | def roundedSpecClass(self):
""" Spectral class with rounded class number ie A8.5V is A9 """
try:
classnumber = str(int(np.around(self.classNumber)))
except TypeError:
classnumber = str(self.classNumber)
return self.classLetter + classnumber | python | def roundedSpecClass(self):
""" Spectral class with rounded class number ie A8.5V is A9 """
try:
classnumber = str(int(np.around(self.classNumber)))
except TypeError:
classnumber = str(self.classNumber)
return self.classLetter + classnumber | [
"def",
"roundedSpecClass",
"(",
"self",
")",
":",
"try",
":",
"classnumber",
"=",
"str",
"(",
"int",
"(",
"np",
".",
"around",
"(",
"self",
".",
"classNumber",
")",
")",
")",
"except",
"TypeError",
":",
"classnumber",
"=",
"str",
"(",
"self",
".",
"c... | Spectral class with rounded class number ie A8.5V is A9 | [
"Spectral",
"class",
"with",
"rounded",
"class",
"number",
"ie",
"A8",
".",
"5V",
"is",
"A9"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L981-L988 |
ryanvarley/ExoData | exodata/astroclasses.py | SpectralType._parseSpecType | def _parseSpecType(self, classString):
""" This class attempts to parse the spectral type. It should probably use more advanced matching use regex
"""
try:
classString = str(classString)
except UnicodeEncodeError:
# This is for the benefit of 1RXS1609 which currently has the spectral type K7\pm 1V
# TODO add unicode support and handling for this case / ammend the target
return False
# some initial cases
if classString == '' or classString == 'nan':
return False
possNumbers = range(10)
possLType = ('III', 'II', 'Iab', 'Ia0', 'Ia', 'Ib', 'IV', 'V') # in order of unique matches
# remove spaces, remove slashes
classString = classString.replace(' ', '')
classString = classString.replace('-', '/')
classString = classString.replace('\\', '/')
classString = classString.split('/')[0] # TODO we do not consider slashed classes yet (intemediates)
# check first 3 chars for spectral types
stellarClass = classString[:3]
if stellarClass in _possSpectralClasses:
self.classLetter = stellarClass
elif stellarClass[:2] in _possSpectralClasses: # needed because A5V wouldnt match before
self.classLetter = stellarClass[:2]
elif stellarClass[0] in _possSpectralClasses:
self.classLetter = stellarClass[0]
else:
return False # assume a non standard class and fail
# get number
try:
numIndex = len(self.classLetter)
classNum = int(classString[numIndex])
if classNum in possNumbers:
self.classNumber = int(classNum) # don't consider decimals here, done at the type check
typeString = classString[numIndex+1:]
else:
return False # invalid number received
except IndexError: # reached the end of the string
return True
except ValueError: # i.e its a letter - fail # TODO multi letter checking
typeString = classString[1:]
if typeString == '': # ie there is no more information as in 'A8'
return True
# Now check for a decimal and handle those cases
if typeString[0] == '.':
# handle decimal cases, we check each number in turn, add them as strings and then convert to float and add
# to original number
decimalNumbers = '.'
for number in typeString[1:]:
try:
if int(number) in possNumbers:
decimalNumbers += number
else:
print('Something went wrong in decimal checking') # TODO replace with logging
return False # somethings gone wrong
except ValueError:
break # recevied a non-number (probably L class)
# add decimal to classNum
try:
self.classNumber += float(decimalNumbers)
except ValueError: # probably trying to convert '.' to a float
pass
typeString = typeString[len(decimalNumbers):]
if len(typeString) is 0:
return True
# Handle luminosity class
for possL in possLType: # match each possible case in turn (in order of uniqueness)
Lcase = typeString[:len(possL)] # match from front with length to minimise matching say IV in '<3 CIV'
if possL == Lcase:
self.lumType = possL
return True
if not self.classNumber == '':
return True
else: # if there no number asumme we have a name ie 'Catac. var.'
self.classLetter = ''
self.classNumber = ''
self.lumType = ''
return False | python | def _parseSpecType(self, classString):
""" This class attempts to parse the spectral type. It should probably use more advanced matching use regex
"""
try:
classString = str(classString)
except UnicodeEncodeError:
# This is for the benefit of 1RXS1609 which currently has the spectral type K7\pm 1V
# TODO add unicode support and handling for this case / ammend the target
return False
# some initial cases
if classString == '' or classString == 'nan':
return False
possNumbers = range(10)
possLType = ('III', 'II', 'Iab', 'Ia0', 'Ia', 'Ib', 'IV', 'V') # in order of unique matches
# remove spaces, remove slashes
classString = classString.replace(' ', '')
classString = classString.replace('-', '/')
classString = classString.replace('\\', '/')
classString = classString.split('/')[0] # TODO we do not consider slashed classes yet (intemediates)
# check first 3 chars for spectral types
stellarClass = classString[:3]
if stellarClass in _possSpectralClasses:
self.classLetter = stellarClass
elif stellarClass[:2] in _possSpectralClasses: # needed because A5V wouldnt match before
self.classLetter = stellarClass[:2]
elif stellarClass[0] in _possSpectralClasses:
self.classLetter = stellarClass[0]
else:
return False # assume a non standard class and fail
# get number
try:
numIndex = len(self.classLetter)
classNum = int(classString[numIndex])
if classNum in possNumbers:
self.classNumber = int(classNum) # don't consider decimals here, done at the type check
typeString = classString[numIndex+1:]
else:
return False # invalid number received
except IndexError: # reached the end of the string
return True
except ValueError: # i.e its a letter - fail # TODO multi letter checking
typeString = classString[1:]
if typeString == '': # ie there is no more information as in 'A8'
return True
# Now check for a decimal and handle those cases
if typeString[0] == '.':
# handle decimal cases, we check each number in turn, add them as strings and then convert to float and add
# to original number
decimalNumbers = '.'
for number in typeString[1:]:
try:
if int(number) in possNumbers:
decimalNumbers += number
else:
print('Something went wrong in decimal checking') # TODO replace with logging
return False # somethings gone wrong
except ValueError:
break # recevied a non-number (probably L class)
# add decimal to classNum
try:
self.classNumber += float(decimalNumbers)
except ValueError: # probably trying to convert '.' to a float
pass
typeString = typeString[len(decimalNumbers):]
if len(typeString) is 0:
return True
# Handle luminosity class
for possL in possLType: # match each possible case in turn (in order of uniqueness)
Lcase = typeString[:len(possL)] # match from front with length to minimise matching say IV in '<3 CIV'
if possL == Lcase:
self.lumType = possL
return True
if not self.classNumber == '':
return True
else: # if there no number asumme we have a name ie 'Catac. var.'
self.classLetter = ''
self.classNumber = ''
self.lumType = ''
return False | [
"def",
"_parseSpecType",
"(",
"self",
",",
"classString",
")",
":",
"try",
":",
"classString",
"=",
"str",
"(",
"classString",
")",
"except",
"UnicodeEncodeError",
":",
"# This is for the benefit of 1RXS1609 which currently has the spectral type K7\\pm 1V",
"# TODO add unicod... | This class attempts to parse the spectral type. It should probably use more advanced matching use regex | [
"This",
"class",
"attempts",
"to",
"parse",
"the",
"spectral",
"type",
".",
"It",
"should",
"probably",
"use",
"more",
"advanced",
"matching",
"use",
"regex"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L1004-L1093 |
ryanvarley/ExoData | exodata/astroclasses.py | Magnitude.convert | def convert(self, to_mag, from_mag=None):
""" Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga (Kenyon+ 1995)
ReadMe+ftp1995ApJS..101..117K Colors for main-sequence stars
If from_mag isn't specified the program will cycle through provided magnitudes and choose one. Note that all
magnitudes are first converted to V, and then to the requested magnitude.
:param to_mag: magnitude to convert to
:param from_mag: magnitude to convert from
:return:
"""
allowed_mags = "UBVJIHKLMN"
if from_mag:
if to_mag == 'V': # If V mag is requested (1/3) - from mag specified
return self._convert_to_from('V', from_mag)
if from_mag == 'V':
magV = self.magV
else:
magV = self._convert_to_from('V', from_mag)
return self._convert_to_from(to_mag, 'V', magV)
# if we can convert from any magnitude, try V first
elif not isNanOrNone(self.magV):
if to_mag == 'V': # If V mag is requested (2/3) - no need to convert
return self.magV
else:
return self._convert_to_from(to_mag, 'V', self.magV)
else: # Otherwise lets try all other magnitudes in turn
order = "UBJHKLMN" # V is the intermediate step from the others, done by default if possible
for mag_letter in order:
try:
magV = self._convert_to_from('V', mag_letter)
if to_mag == 'V': # If V mag is requested (3/3) - try all other mags to convert
logging.debug('Converted to magV from {0} got {1}'.format(mag_letter, magV))
return magV
else:
mag_val = self._convert_to_from(to_mag, 'V', magV)
logging.debug('Converted to mag{0} from {1} got {2}'.format(to_mag, mag_letter, mag_val))
return mag_val
except ValueError:
continue # this conversion may not be possible, try another
raise ValueError('Could not convert from any provided magnitudes') | python | def convert(self, to_mag, from_mag=None):
""" Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga (Kenyon+ 1995)
ReadMe+ftp1995ApJS..101..117K Colors for main-sequence stars
If from_mag isn't specified the program will cycle through provided magnitudes and choose one. Note that all
magnitudes are first converted to V, and then to the requested magnitude.
:param to_mag: magnitude to convert to
:param from_mag: magnitude to convert from
:return:
"""
allowed_mags = "UBVJIHKLMN"
if from_mag:
if to_mag == 'V': # If V mag is requested (1/3) - from mag specified
return self._convert_to_from('V', from_mag)
if from_mag == 'V':
magV = self.magV
else:
magV = self._convert_to_from('V', from_mag)
return self._convert_to_from(to_mag, 'V', magV)
# if we can convert from any magnitude, try V first
elif not isNanOrNone(self.magV):
if to_mag == 'V': # If V mag is requested (2/3) - no need to convert
return self.magV
else:
return self._convert_to_from(to_mag, 'V', self.magV)
else: # Otherwise lets try all other magnitudes in turn
order = "UBJHKLMN" # V is the intermediate step from the others, done by default if possible
for mag_letter in order:
try:
magV = self._convert_to_from('V', mag_letter)
if to_mag == 'V': # If V mag is requested (3/3) - try all other mags to convert
logging.debug('Converted to magV from {0} got {1}'.format(mag_letter, magV))
return magV
else:
mag_val = self._convert_to_from(to_mag, 'V', magV)
logging.debug('Converted to mag{0} from {1} got {2}'.format(to_mag, mag_letter, mag_val))
return mag_val
except ValueError:
continue # this conversion may not be possible, try another
raise ValueError('Could not convert from any provided magnitudes') | [
"def",
"convert",
"(",
"self",
",",
"to_mag",
",",
"from_mag",
"=",
"None",
")",
":",
"allowed_mags",
"=",
"\"UBVJIHKLMN\"",
"if",
"from_mag",
":",
"if",
"to_mag",
"==",
"'V'",
":",
"# If V mag is requested (1/3) - from mag specified",
"return",
"self",
".",
"_c... | Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga (Kenyon+ 1995)
ReadMe+ftp1995ApJS..101..117K Colors for main-sequence stars
If from_mag isn't specified the program will cycle through provided magnitudes and choose one. Note that all
magnitudes are first converted to V, and then to the requested magnitude.
:param to_mag: magnitude to convert to
:param from_mag: magnitude to convert from
:return: | [
"Converts",
"magnitudes",
"using",
"UBVRIJHKLMNQ",
"photometry",
"in",
"Taurus",
"-",
"Auriga",
"(",
"Kenyon",
"+",
"1995",
")",
"ReadMe",
"+",
"ftp1995ApJS",
"..",
"101",
"..",
"117K",
"Colors",
"for",
"main",
"-",
"sequence",
"stars"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L1151-L1195 |
ryanvarley/ExoData | exodata/astroclasses.py | Magnitude._convert_to_from | def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
""" Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
"""
lumtype = self.spectral_type.lumType
# rounds decimal types, TODO perhaps we should interpolate?
specClass = self.spectral_type.roundedSpecClass
if not specClass: # TODO investigate implications of this
raise ValueError('Can not convert when no spectral class is given')
if lumtype not in ('V', ''):
raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))
if to_mag == 'V':
col, sign = self.column_for_V_conversion[from_mag]
try: # TODO replace with pandas table
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
from_mag_val = self.__dict__['mag'+from_mag] # safer than eval
if isNanOrNone(from_mag_val):
# logger.debug('2 '+from_mag)
raise ValueError('You cannot convert from a magnitude you have not specified in class')
return from_mag_val + (offset*sign)
elif from_mag == 'V':
if fromVMag is None:
# trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While
# this would probably be preferable it is not was was asked and therefore could give unexpected results
raise ValueError('Must give fromVMag, even if it is self.magV')
col, sign = self.column_for_V_conversion[to_mag]
try:
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way
else:
raise ValueError('Can only convert from and to V magnitude. Use .convert() instead') | python | def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
""" Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
"""
lumtype = self.spectral_type.lumType
# rounds decimal types, TODO perhaps we should interpolate?
specClass = self.spectral_type.roundedSpecClass
if not specClass: # TODO investigate implications of this
raise ValueError('Can not convert when no spectral class is given')
if lumtype not in ('V', ''):
raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))
if to_mag == 'V':
col, sign = self.column_for_V_conversion[from_mag]
try: # TODO replace with pandas table
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
from_mag_val = self.__dict__['mag'+from_mag] # safer than eval
if isNanOrNone(from_mag_val):
# logger.debug('2 '+from_mag)
raise ValueError('You cannot convert from a magnitude you have not specified in class')
return from_mag_val + (offset*sign)
elif from_mag == 'V':
if fromVMag is None:
# trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While
# this would probably be preferable it is not was was asked and therefore could give unexpected results
raise ValueError('Must give fromVMag, even if it is self.magV')
col, sign = self.column_for_V_conversion[to_mag]
try:
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way
else:
raise ValueError('Can only convert from and to V magnitude. Use .convert() instead') | [
"def",
"_convert_to_from",
"(",
"self",
",",
"to_mag",
",",
"from_mag",
",",
"fromVMag",
"=",
"None",
")",
":",
"lumtype",
"=",
"self",
".",
"spectral_type",
".",
"lumType",
"# rounds decimal types, TODO perhaps we should interpolate?",
"specClass",
"=",
"self",
"."... | Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag | [
"Converts",
"from",
"or",
"to",
"V",
"mag",
"using",
"the",
"conversion",
"tables"
] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L1197-L1250 |
bovee/Aston | aston/tracefile/agilent_uv.py | AgilentMWD2._get_str | def _get_str(self, f, off):
"""
Convenience function to quickly pull out strings.
"""
f.seek(off)
return f.read(2 * struct.unpack('>B', f.read(1))[0]).decode('utf-16') | python | def _get_str(self, f, off):
"""
Convenience function to quickly pull out strings.
"""
f.seek(off)
return f.read(2 * struct.unpack('>B', f.read(1))[0]).decode('utf-16') | [
"def",
"_get_str",
"(",
"self",
",",
"f",
",",
"off",
")",
":",
"f",
".",
"seek",
"(",
"off",
")",
"return",
"f",
".",
"read",
"(",
"2",
"*",
"struct",
".",
"unpack",
"(",
"'>B'",
",",
"f",
".",
"read",
"(",
"1",
")",
")",
"[",
"0",
"]",
... | Convenience function to quickly pull out strings. | [
"Convenience",
"function",
"to",
"quickly",
"pull",
"out",
"strings",
"."
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/tracefile/agilent_uv.py#L180-L185 |
bovee/Aston | aston/spectra/isotopes.py | delta13c_constants | def delta13c_constants():
"""
Constants for calculating delta13C values from ratios.
From website of Verkouteren & Lee 2001 Anal. Chem.
"""
# possible values for constants (from NIST)
cst = OrderedDict()
cst['Craig'] = {'S13': 0.0112372, 'S18': 0.002079,
'K': 0.008333, 'A': 0.5}
cst['IAEA'] = {'S13': 0.0112372, 'S18': 0.00206716068,
'K': 0.0091993, 'A': 0.5}
cst['Werner'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0093704, 'A': 0.516}
cst['Santrock'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0099235, 'A': 0.516}
cst['Assonov'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0102819162, 'A': 0.528}
cst['Assonov2'] = {'S13': 0.0111802, 'S18': 0.0020052,
'K': 0.0102819162, 'A': 0.528}
cst['Isodat'] = {'S13': 0.0111802, 'S18': 0.0020052,
'K': 0.0099235, 'A': 0.516}
return cst | python | def delta13c_constants():
"""
Constants for calculating delta13C values from ratios.
From website of Verkouteren & Lee 2001 Anal. Chem.
"""
# possible values for constants (from NIST)
cst = OrderedDict()
cst['Craig'] = {'S13': 0.0112372, 'S18': 0.002079,
'K': 0.008333, 'A': 0.5}
cst['IAEA'] = {'S13': 0.0112372, 'S18': 0.00206716068,
'K': 0.0091993, 'A': 0.5}
cst['Werner'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0093704, 'A': 0.516}
cst['Santrock'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0099235, 'A': 0.516}
cst['Assonov'] = {'S13': 0.0112372, 'S18': 0.0020052,
'K': 0.0102819162, 'A': 0.528}
cst['Assonov2'] = {'S13': 0.0111802, 'S18': 0.0020052,
'K': 0.0102819162, 'A': 0.528}
cst['Isodat'] = {'S13': 0.0111802, 'S18': 0.0020052,
'K': 0.0099235, 'A': 0.516}
return cst | [
"def",
"delta13c_constants",
"(",
")",
":",
"# possible values for constants (from NIST)",
"cst",
"=",
"OrderedDict",
"(",
")",
"cst",
"[",
"'Craig'",
"]",
"=",
"{",
"'S13'",
":",
"0.0112372",
",",
"'S18'",
":",
"0.002079",
",",
"'K'",
":",
"0.008333",
",",
... | Constants for calculating delta13C values from ratios.
From website of Verkouteren & Lee 2001 Anal. Chem. | [
"Constants",
"for",
"calculating",
"delta13C",
"values",
"from",
"ratios",
".",
"From",
"website",
"of",
"Verkouteren",
"&",
"Lee",
"2001",
"Anal",
".",
"Chem",
"."
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/spectra/isotopes.py#L10-L31 |
bovee/Aston | aston/spectra/isotopes.py | delta13c_craig | def delta13c_craig(r45sam, r46sam, d13cstd, r45std, r46std,
ks='Craig', d18ostd=23.5):
"""
Algorithm from Craig 1957.
From the original Craig paper, we can set up a pair of equations
and solve for d13C and d18O simultaneously:
d45 * r45 = r13 * d13
+ 0.5 * r17 * d18
d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
+ 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18
where a = r18 + r13 * r17 and b = 1 + r13 + r17
"""
# the constants for the calculations
# originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5
k = delta13c_constants()[ks]
# TODO: not clear why need to multiply by 2?
r13, r18 = k['S13'], 2 * k['S18']
r17 = 2 * (k['K'] * k['S18'] ** k['A'])
a = (r18 + r13 * r17) * (1. + r13 + r17)
# the coefficients for the calculations
eqn_mat = np.array([[r13, 0.5 * r17],
[r13 * ((r17 ** 2 + r17 - r18) / a),
1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)]])
# precalculate the d45 and d46 of the standard versus PDB
r45d45std = (eqn_mat[0, 0] * d13cstd + eqn_mat[0, 1] * d18ostd)
d46std = eqn_mat[1, 0] * d13cstd + eqn_mat[1, 1] * d18ostd
# calculate the d45 and d46 of our sample versus PDB
# in r45d45, r45 of PDB = r13 + r17 of PDB
r45d45 = 1000. * (r45sam / r45std - 1.) * \
(r13 + r17 + 0.001 * r45d45std) + r45d45std
d46 = 1000. * (r46sam / r46std - 1.) * (1. + 0.001 * d46std) + d46std
# solve the system of equations
x = np.linalg.solve(eqn_mat, np.array([r45d45, d46]))
return x[0] | python | def delta13c_craig(r45sam, r46sam, d13cstd, r45std, r46std,
ks='Craig', d18ostd=23.5):
"""
Algorithm from Craig 1957.
From the original Craig paper, we can set up a pair of equations
and solve for d13C and d18O simultaneously:
d45 * r45 = r13 * d13
+ 0.5 * r17 * d18
d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
+ 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18
where a = r18 + r13 * r17 and b = 1 + r13 + r17
"""
# the constants for the calculations
# originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5
k = delta13c_constants()[ks]
# TODO: not clear why need to multiply by 2?
r13, r18 = k['S13'], 2 * k['S18']
r17 = 2 * (k['K'] * k['S18'] ** k['A'])
a = (r18 + r13 * r17) * (1. + r13 + r17)
# the coefficients for the calculations
eqn_mat = np.array([[r13, 0.5 * r17],
[r13 * ((r17 ** 2 + r17 - r18) / a),
1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)]])
# precalculate the d45 and d46 of the standard versus PDB
r45d45std = (eqn_mat[0, 0] * d13cstd + eqn_mat[0, 1] * d18ostd)
d46std = eqn_mat[1, 0] * d13cstd + eqn_mat[1, 1] * d18ostd
# calculate the d45 and d46 of our sample versus PDB
# in r45d45, r45 of PDB = r13 + r17 of PDB
r45d45 = 1000. * (r45sam / r45std - 1.) * \
(r13 + r17 + 0.001 * r45d45std) + r45d45std
d46 = 1000. * (r46sam / r46std - 1.) * (1. + 0.001 * d46std) + d46std
# solve the system of equations
x = np.linalg.solve(eqn_mat, np.array([r45d45, d46]))
return x[0] | [
"def",
"delta13c_craig",
"(",
"r45sam",
",",
"r46sam",
",",
"d13cstd",
",",
"r45std",
",",
"r46std",
",",
"ks",
"=",
"'Craig'",
",",
"d18ostd",
"=",
"23.5",
")",
":",
"# the constants for the calculations",
"# originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5",
... | Algorithm from Craig 1957.
From the original Craig paper, we can set up a pair of equations
and solve for d13C and d18O simultaneously:
d45 * r45 = r13 * d13
+ 0.5 * r17 * d18
d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
+ 1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a) * d18
where a = r18 + r13 * r17 and b = 1 + r13 + r17 | [
"Algorithm",
"from",
"Craig",
"1957",
"."
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/spectra/isotopes.py#L34-L74 |
bovee/Aston | aston/spectra/isotopes.py | delta13c_santrock | def delta13c_santrock(r45sam, r46sam, d13cstd, r45std, r46std,
ks='Santrock', d18ostd=23.5):
"""
Given the measured isotope signals of a sample and a
standard and the delta-13C of that standard, calculate
the delta-13C of the sample.
Algorithm from Santrock, Studley & Hayes 1985 Anal. Chem.
"""
k = delta13c_constants()[ks]
# function for calculating 17R from 18R
def c17(r):
return k['K'] * r ** k['A']
rcpdb, rosmow = k['S13'], k['S18']
# known delta values for the ref peak
r13std = (d13cstd / 1000. + 1) * rcpdb
r18std = (d18ostd / 1000. + 1) * rosmow
# determine the correction factors
c45 = r13std + 2 * c17(r18std)
c46 = c17(r18std) ** 2 + 2 * r13std * c17(r18std) + 2 * r18std
# correct the voltage ratios to ion ratios
r45 = (r45sam / r45std) * c45
r46 = (r46sam / r46std) * c46
def rf(r18):
return -3 * c17(r18) ** 2 + 2 * r45 * c17(r18) + 2 * r18 - r46
# r18 = scipy.optimize.root(rf, r18std).x[0] # use with scipy 0.11.0
r18 = fsolve(rf, r18std)[0]
r13 = r45 - 2 * c17(r18)
return 1000 * (r13 / rcpdb - 1) | python | def delta13c_santrock(r45sam, r46sam, d13cstd, r45std, r46std,
ks='Santrock', d18ostd=23.5):
"""
Given the measured isotope signals of a sample and a
standard and the delta-13C of that standard, calculate
the delta-13C of the sample.
Algorithm from Santrock, Studley & Hayes 1985 Anal. Chem.
"""
k = delta13c_constants()[ks]
# function for calculating 17R from 18R
def c17(r):
return k['K'] * r ** k['A']
rcpdb, rosmow = k['S13'], k['S18']
# known delta values for the ref peak
r13std = (d13cstd / 1000. + 1) * rcpdb
r18std = (d18ostd / 1000. + 1) * rosmow
# determine the correction factors
c45 = r13std + 2 * c17(r18std)
c46 = c17(r18std) ** 2 + 2 * r13std * c17(r18std) + 2 * r18std
# correct the voltage ratios to ion ratios
r45 = (r45sam / r45std) * c45
r46 = (r46sam / r46std) * c46
def rf(r18):
return -3 * c17(r18) ** 2 + 2 * r45 * c17(r18) + 2 * r18 - r46
# r18 = scipy.optimize.root(rf, r18std).x[0] # use with scipy 0.11.0
r18 = fsolve(rf, r18std)[0]
r13 = r45 - 2 * c17(r18)
return 1000 * (r13 / rcpdb - 1) | [
"def",
"delta13c_santrock",
"(",
"r45sam",
",",
"r46sam",
",",
"d13cstd",
",",
"r45std",
",",
"r46std",
",",
"ks",
"=",
"'Santrock'",
",",
"d18ostd",
"=",
"23.5",
")",
":",
"k",
"=",
"delta13c_constants",
"(",
")",
"[",
"ks",
"]",
"# function for calculati... | Given the measured isotope signals of a sample and a
standard and the delta-13C of that standard, calculate
the delta-13C of the sample.
Algorithm from Santrock, Studley & Hayes 1985 Anal. Chem. | [
"Given",
"the",
"measured",
"isotope",
"signals",
"of",
"a",
"sample",
"and",
"a",
"standard",
"and",
"the",
"delta",
"-",
"13C",
"of",
"that",
"standard",
"calculate",
"the",
"delta",
"-",
"13C",
"of",
"the",
"sample",
"."
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/spectra/isotopes.py#L77-L110 |
timdiels/pytil | pytil/pkg_resources.py | resource_copy | def resource_copy(package_or_requirement, resource_name, destination):
'''
Copy file/dir resource to destination.
Parameters
----------
package_or_requirement : str
resource_name : str
destination : ~pathlib.Path
Path to copy to, it must not exist.
'''
args = package_or_requirement, resource_name
if resource_isdir(*args):
destination.mkdir()
for name in resource_listdir(*args):
resource_copy(
package_or_requirement,
str(Path(resource_name) / name),
destination / name
)
else:
with destination.open('wb') as f:
with resource_stream(*args) as source:
shutil.copyfileobj(source, f) | python | def resource_copy(package_or_requirement, resource_name, destination):
'''
Copy file/dir resource to destination.
Parameters
----------
package_or_requirement : str
resource_name : str
destination : ~pathlib.Path
Path to copy to, it must not exist.
'''
args = package_or_requirement, resource_name
if resource_isdir(*args):
destination.mkdir()
for name in resource_listdir(*args):
resource_copy(
package_or_requirement,
str(Path(resource_name) / name),
destination / name
)
else:
with destination.open('wb') as f:
with resource_stream(*args) as source:
shutil.copyfileobj(source, f) | [
"def",
"resource_copy",
"(",
"package_or_requirement",
",",
"resource_name",
",",
"destination",
")",
":",
"args",
"=",
"package_or_requirement",
",",
"resource_name",
"if",
"resource_isdir",
"(",
"*",
"args",
")",
":",
"destination",
".",
"mkdir",
"(",
")",
"fo... | Copy file/dir resource to destination.
Parameters
----------
package_or_requirement : str
resource_name : str
destination : ~pathlib.Path
Path to copy to, it must not exist. | [
"Copy",
"file",
"/",
"dir",
"resource",
"to",
"destination",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/pkg_resources.py#L42-L65 |
bovee/Aston | aston/peak/peak_finding.py | simple_peak_find | def simple_peak_find(s, init_slope=500, start_slope=500, end_slope=200,
min_peak_height=50, max_peak_width=1.5):
"""
Given a Series, return a list of tuples indicating when
peaks start and stop and what their baseline is.
[(t_start, t_end, hints) ...]
"""
point_gap = 10
def slid_win(itr, size=2):
"""Returns a sliding window of size 'size' along itr."""
itr, buf = iter(itr), []
for _ in range(size):
buf += [next(itr)]
for l in itr:
yield buf
buf = buf[1:] + [l]
yield buf
# TODO: check these smoothing defaults
y, t = s.values, s.index.astype(float)
smooth_y = movingaverage(y, 9)
dxdt = np.gradient(smooth_y) / np.gradient(t)
# dxdt = -savitzkygolay(ts, 5, 3, deriv=1).y / np.gradient(t)
init_slopes = np.arange(len(dxdt))[dxdt > init_slope]
if len(init_slopes) == 0:
return []
# get the first points of any "runs" as a peak start
# runs can have a gap of up to 10 points in them
peak_sts = [init_slopes[0]]
peak_sts += [j for i, j in slid_win(init_slopes, 2) if j - i > 10]
peak_sts.sort()
en_slopes = np.arange(len(dxdt))[dxdt < -end_slope]
if len(en_slopes) == 0:
return []
# filter out any lone points farther than 10 away from their neighbors
en_slopes = [en_slopes[0]]
en_slopes += [i[1] for i in slid_win(en_slopes, 3)
if i[1] - i[0] < point_gap or i[2] - i[1] < point_gap]
en_slopes += [en_slopes[-1]]
# get the last points of any "runs" as a peak end
peak_ens = [j for i, j in slid_win(en_slopes[::-1], 2)
if i - j > point_gap] + [en_slopes[-1]]
peak_ens.sort()
# avals = np.arange(len(t))[np.abs(t - 0.675) < 0.25]
# print([i for i in en_slopes if i in avals])
# print([(t[i], i) for i in peak_ens if i in avals])
peak_list = []
pk2 = 0
for pk in peak_sts:
# don't allow overlapping peaks
if pk < pk2:
continue
# track backwards to find the true start
while dxdt[pk] > start_slope and pk > 0:
pk -= 1
# now find where the peak ends
dist_to_end = np.array(peak_ens) - pk
pos_end = pk + dist_to_end[dist_to_end > 0]
for pk2 in pos_end:
if (y[pk2] - y[pk]) / (t[pk2] - t[pk]) > start_slope:
# if the baseline beneath the peak is too large, let's
# keep going to the next dip
peak_list.append({'t0': t[pk], 't1': t[pk2]})
pk = pk2
elif t[pk2] - t[pk] > max_peak_width:
# make sure that peak is short enough
pk2 = pk + np.abs(t[pk:] - t[pk] - max_peak_width).argmin()
break
else:
break
else:
# if no end point is found, the end point
# is the end of the timeseries
pk2 = len(t) - 1
if pk == pk2:
continue
pk_hgt = max(y[pk:pk2]) - min(y[pk:pk2])
if pk_hgt < min_peak_height:
continue
peak_list.append({'t0': t[pk], 't1': t[pk2]})
return peak_list | python | def simple_peak_find(s, init_slope=500, start_slope=500, end_slope=200,
    min_peak_height=50, max_peak_width=1.5):
    """
    Given a Series, find peaks and return a list of dicts indicating
    when each peak starts and stops:
    [{'t0': t_start, 't1': t_end}, ...]
    """
    # maximum index gap allowed inside a single "run" of slope points
    point_gap = 10
    def slid_win(itr, size=2):
        """Returns a sliding window of size 'size' along itr."""
        itr, buf = iter(itr), []
        for _ in range(size):
            buf += [next(itr)]
        for l in itr:
            yield buf
            buf = buf[1:] + [l]
        yield buf
    # TODO: check these smoothing defaults
    # NOTE(review): assumes `s` is a pandas Series with a numeric index
    # (uses .values and .index.astype); `movingaverage` is a module-level
    # helper defined elsewhere in this file -- confirm.
    y, t = s.values, s.index.astype(float)
    smooth_y = movingaverage(y, 9)
    # derivative of the smoothed signal w.r.t. time
    dxdt = np.gradient(smooth_y) / np.gradient(t)
    # dxdt = -savitzkygolay(ts, 5, 3, deriv=1).y / np.gradient(t)
    init_slopes = np.arange(len(dxdt))[dxdt > init_slope]
    if len(init_slopes) == 0:
        return []
    # get the first points of any "runs" as a peak start
    # runs can have a gap of up to 10 points in them
    # NOTE(review): the literal 10 below duplicates point_gap (same
    # value) -- presumably should reference it; confirm before changing.
    peak_sts = [init_slopes[0]]
    peak_sts += [j for i, j in slid_win(init_slopes, 2) if j - i > 10]
    peak_sts.sort()
    en_slopes = np.arange(len(dxdt))[dxdt < -end_slope]
    if len(en_slopes) == 0:
        return []
    # filter out any lone points farther than 10 away from their neighbors
    en_slopes = [en_slopes[0]]
    en_slopes += [i[1] for i in slid_win(en_slopes, 3)
                  if i[1] - i[0] < point_gap or i[2] - i[1] < point_gap]
    en_slopes += [en_slopes[-1]]
    # get the last points of any "runs" as a peak end
    peak_ens = [j for i, j in slid_win(en_slopes[::-1], 2)
                if i - j > point_gap] + [en_slopes[-1]]
    peak_ens.sort()
    # avals = np.arange(len(t))[np.abs(t - 0.675) < 0.25]
    # print([i for i in en_slopes if i in avals])
    # print([(t[i], i) for i in peak_ens if i in avals])
    # pair each candidate start with a suitable end
    peak_list = []
    pk2 = 0
    for pk in peak_sts:
        # don't allow overlapping peaks
        if pk < pk2:
            continue
        # track backwards to find the true start
        while dxdt[pk] > start_slope and pk > 0:
            pk -= 1
        # now find where the peak ends
        dist_to_end = np.array(peak_ens) - pk
        pos_end = pk + dist_to_end[dist_to_end > 0]
        for pk2 in pos_end:
            if (y[pk2] - y[pk]) / (t[pk2] - t[pk]) > start_slope:
                # if the baseline beneath the peak is too large, let's
                # keep going to the next dip
                peak_list.append({'t0': t[pk], 't1': t[pk2]})
                pk = pk2
            elif t[pk2] - t[pk] > max_peak_width:
                # make sure that peak is short enough
                pk2 = pk + np.abs(t[pk:] - t[pk] - max_peak_width).argmin()
                break
            else:
                break
        else:
            # if no end point is found, the end point
            # is the end of the timeseries
            pk2 = len(t) - 1
        if pk == pk2:
            continue
        # reject peaks whose amplitude is below the threshold
        pk_hgt = max(y[pk:pk2]) - min(y[pk:pk2])
        if pk_hgt < min_peak_height:
            continue
        peak_list.append({'t0': t[pk], 't1': t[pk2]})
    return peak_list | [
"def",
"simple_peak_find",
"(",
"s",
",",
"init_slope",
"=",
"500",
",",
"start_slope",
"=",
"500",
",",
"end_slope",
"=",
"200",
",",
"min_peak_height",
"=",
"50",
",",
"max_peak_width",
"=",
"1.5",
")",
":",
"point_gap",
"=",
"10",
"def",
"slid_win",
"... | Given a Series, return a list of tuples indicating when
peaks start and stop and what their baseline is.
[(t_start, t_end, hints) ...] | [
"Given",
"a",
"Series",
"return",
"a",
"list",
"of",
"tuples",
"indicating",
"when",
"peaks",
"start",
"and",
"stop",
"and",
"what",
"their",
"baseline",
"is",
".",
"[",
"(",
"t_start",
"t_end",
"hints",
")",
"...",
"]"
] | train | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/peak/peak_finding.py#L8-L95 |
def walk(zk, path='/'):
    """Recursively yield every znode path at or below ``path``.

    Paths come out pre-order: a node is yielded before its descendants.
    """
    # Fetch children before yielding so a bad path fails on the first
    # next() call, matching the original behavior.
    children = zk.get_children(path)
    yield path
    for name in children:
        child_path = "/%s" % name if path == '/' else "%s/%s" % (path, name)
        for descendant in walk(zk, child_path):
            yield descendant
"def",
"walk",
"(",
"zk",
",",
"path",
"=",
"'/'",
")",
":",
"children",
"=",
"zk",
".",
"get_children",
"(",
"path",
")",
"yield",
"path",
"for",
"child",
"in",
"children",
":",
"if",
"path",
"==",
"'/'",
":",
"subpath",
"=",
"\"/%s\"",
"%",
"chil... | Yields all paths under `path`. | [
"Yields",
"all",
"paths",
"under",
"path",
"."
] | train | https://github.com/mwhooker/jones/blob/121e89572ca063f456b8e94cbb8cbee26c307a8f/jones/zkutil.py#L15-L26 |
def pretty_print_head(dict_, count=10): #TODO only format and rename to pretty_head
    '''
    Pretty print some items of a dict.

    For an unordered dict, ``count`` arbitrary items will be printed.

    Parameters
    ----------
    dict_ : ~typing.Dict
        Dict to print from.
    count : int
        Number of items to print.

    Raises
    ------
    ValueError
        When ``count < 1``.
    '''
    # islice is the stdlib equivalent of the third-party ``toolz.take``:
    # both yield the first ``count`` items of the iterable. Using it drops
    # an external dependency for this function.
    from itertools import islice
    if count < 1:
        raise ValueError('`count` must be at least 1')
    pprint(dict(islice(dict_.items(), count)))
"def",
"pretty_print_head",
"(",
"dict_",
",",
"count",
"=",
"10",
")",
":",
"#TODO only format and rename to pretty_head",
"if",
"count",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'`count` must be at least 1'",
")",
"pprint",
"(",
"dict",
"(",
"take",
"(",
"c... | Pretty print some items of a dict.
For an unordered dict, ``count`` arbitrary items will be printed.
Parameters
----------
dict_ : ~typing.Dict
Dict to print from.
count : int
Number of items to print.
Raises
------
ValueError
When ``count < 1``. | [
"Pretty",
"print",
"some",
"items",
"of",
"a",
"dict",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/dict.py#L26-L46 |
def invert(dict_): #TODO return a MultiDict right away
    '''
    Invert dict by swapping each value with its key.

    Parameters
    ----------
    dict_ : ~typing.Dict[~typing.Hashable, ~typing.Hashable]
        Dict to invert.

    Returns
    -------
    ~typing.Dict[~typing.Hashable, ~typing.Set[~typing.Hashable]]
        Dict mapping each original value to the set of keys that had it.

    Examples
    --------
    >>> invert({1: 2, 3: 4})
    {2: {1}, 4: {3}}

    >>> invert({1: 2, 3: 2, 4: 5})
    {2: {1,3}, 5: {4}}
    '''
    inverted = {}
    for key, value in dict_.items():
        # Multiple keys may share a value, so collect them in a set.
        inverted.setdefault(value, set()).add(key)
    return inverted
"def",
"invert",
"(",
"dict_",
")",
":",
"#TODO return a MultiDict right away",
"result",
"=",
"defaultdict",
"(",
"lambda",
":",
"set",
"(",
")",
")",
"for",
"k",
",",
"val",
"in",
"dict_",
".",
"items",
"(",
")",
":",
"result",
"[",
"val",
"]",
".",
... | Invert dict by swapping each value with its key.
Parameters
----------
dict_ : ~typing.Dict[~typing.Hashable, ~typing.Hashable]
Dict to invert.
Returns
-------
~typing.Dict[~typing.Hashable, ~typing.Set[~typing.Hashable]]
Dict with keys and values swapped.
See also
--------
pytil.multi_dict.MultiDict : Multi-dict view of a ``Dict[Hashable, Set[Hashable]]`` dict.
Notes
-----
If your dict never has 2 keys mapped to the same value, you can convert it
to a ``Dict[Hashable, Hashable]`` dict using::
from pytil.multi_dict import MultiDict
inverted_dict = dict(MultiDict(inverted_dict))
Examples
--------
>>> invert({1: 2, 3: 4})
{2: {1}, 4: {3}}
>>> invert({1: 2, 3: 2, 4: 5})
{2: {1,3}, 5: {4}} | [
"Invert",
"dict",
"by",
"swapping",
"each",
"value",
"with",
"its",
"key",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/dict.py#L70-L107 |
def delete(self, json=None):
    """Send a DELETE request and return the JSON decoded result.

    Args:
        json (dict, optional): Object to encode and send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    # All verbs funnel through ``_call``, which handles the session,
    # SSL verification, and response decoding.
    options = {'url': self.endpoint, 'json': json}
    return self._call('delete', **options)
"def",
"delete",
"(",
"self",
",",
"json",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call",
"(",
"'delete'",
",",
"url",
"=",
"self",
".",
"endpoint",
",",
"json",
"=",
"json",
")"
] | Send a DELETE request and return the JSON decoded result.
Args:
json (dict, optional): Object to encode and send in request.
Returns:
mixed: JSON decoded response data. | [
"Send",
"a",
"DELETE",
"request",
"and",
"return",
"the",
"JSON",
"decoded",
"result",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/request_paginator/__init__.py#L92-L101 |
def get(self, params=None):
    """Send a GET request and return the JSON decoded result.

    Args:
        params (dict, optional): Mapping of parameters to send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    # Fixed docstring: it previously said "POST" (copy/paste error) --
    # this method issues a GET via the shared ``_call`` helper.
    return self._call('get', url=self.endpoint, params=params)
"def",
"get",
"(",
"self",
",",
"params",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call",
"(",
"'get'",
",",
"url",
"=",
"self",
".",
"endpoint",
",",
"params",
"=",
"params",
")"
] | Send a POST request and return the JSON decoded result.
Args:
params (dict, optional): Mapping of parameters to send in request.
Returns:
mixed: JSON decoded response data. | [
"Send",
"a",
"POST",
"request",
"and",
"return",
"the",
"JSON",
"decoded",
"result",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/request_paginator/__init__.py#L103-L112 |
def post(self, json=None):
    """Send a POST request and return the JSON decoded result.

    Args:
        json (dict, optional): Object to encode and send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    # Delegate to ``_call``, the single funnel for HTTP verbs.
    options = {'url': self.endpoint, 'json': json}
    return self._call('post', **options)
"def",
"post",
"(",
"self",
",",
"json",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call",
"(",
"'post'",
",",
"url",
"=",
"self",
".",
"endpoint",
",",
"json",
"=",
"json",
")"
] | Send a POST request and return the JSON decoded result.
Args:
json (dict, optional): Object to encode and send in request.
Returns:
mixed: JSON decoded response data. | [
"Send",
"a",
"POST",
"request",
"and",
"return",
"the",
"JSON",
"decoded",
"result",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/request_paginator/__init__.py#L114-L123 |
def put(self, json=None):
    """Send a PUT request and return the JSON decoded result.

    Args:
        json (dict, optional): Object to encode and send in request.

    Returns:
        mixed: JSON decoded response data.
    """
    # Delegate to ``_call``, the single funnel for HTTP verbs.
    options = {'url': self.endpoint, 'json': json}
    return self._call('put', **options)
"def",
"put",
"(",
"self",
",",
"json",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call",
"(",
"'put'",
",",
"url",
"=",
"self",
".",
"endpoint",
",",
"json",
"=",
"json",
")"
] | Send a PUT request and return the JSON decoded result.
Args:
json (dict, optional): Object to encode and send in request.
Returns:
mixed: JSON decoded response data. | [
"Send",
"a",
"PUT",
"request",
"and",
"return",
"the",
"JSON",
"decoded",
"result",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/request_paginator/__init__.py#L125-L134 |
LasLabs/python-helpscout | helpscout/request_paginator/__init__.py | RequestPaginator._call | def _call(self, method, *args, **kwargs):
"""Call the remote service and return the response data."""
assert self.session
if not kwargs.get('verify'):
kwargs['verify'] = self.SSL_VERIFY
response = self.session.request(method, *args, **kwargs)
response_json = response.text and response.json() or {}
if response.status_code < 200 or response.status_code >= 300:
message = response_json.get('error', response_json.get('message'))
raise HelpScoutRemoteException(response.status_code, message)
self.page_current = response_json.get(self.PAGE_CURRENT, 1)
self.page_total = response_json.get(self.PAGE_TOTAL, 1)
try:
return response_json[self.PAGE_DATA_MULTI]
except KeyError:
pass
try:
return [response_json[self.PAGE_DATA_SINGLE]]
except KeyError:
pass
    return None | python | def _call(self, method, *args, **kwargs):
    """Call the remote service and return the decoded response data.

    Side effects: updates ``self.page_current`` and ``self.page_total``
    from the response on every call.

    Args:
        method (str): HTTP verb to use (e.g. ``'get'``, ``'post'``).
        *args: Positional arguments for ``requests.Session.request``.
        **kwargs: Keyword arguments for ``requests.Session.request``;
            ``verify`` falls back to ``self.SSL_VERIFY`` when falsy/absent.

    Returns:
        mixed: The multi-record payload if present, otherwise a single
        record wrapped in a one-element list, otherwise ``None``.

    Raises:
        HelpScoutRemoteException: When the response status is not 2xx.
    """
    assert self.session
    if not kwargs.get('verify'):
        kwargs['verify'] = self.SSL_VERIFY
    response = self.session.request(method, *args, **kwargs)
    # ``text and json() or {}`` yields {} for an empty body *or* any falsy
    # decoded payload (e.g. [] or 0) -- do not "simplify" to a ternary on
    # ``response.text`` alone; that would change the falsy-payload case.
    response_json = response.text and response.json() or {}
    if response.status_code < 200 or response.status_code >= 300:
        # Non-2xx: surface the server-provided reason, if any.
        message = response_json.get('error', response_json.get('message'))
        raise HelpScoutRemoteException(response.status_code, message)
    self.page_current = response_json.get(self.PAGE_CURRENT, 1)
    self.page_total = response_json.get(self.PAGE_TOTAL, 1)
    # Multi-record payloads take precedence; a single record is wrapped in
    # a list so callers can iterate uniformly.
    try:
        return response_json[self.PAGE_DATA_MULTI]
    except KeyError:
        pass
    try:
        return [response_json[self.PAGE_DATA_SINGLE]]
    except KeyError:
        pass
    return None | [
"def",
"_call",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"session",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'verify'",
")",
":",
"kwargs",
"[",
"'verify'",
"]",
"=",
"self",
".",
"SS... | Call the remote service and return the response data. | [
"Call",
"the",
"remote",
"service",
"and",
"return",
"the",
"response",
"data",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/request_paginator/__init__.py#L136-L164 |
def pad(data, length):
    """Return ``data`` adjusted to exactly ``length`` bytes.

    Longer input is truncated; shorter input is right-padded with NUL
    bytes. Post-condition: ``len(result) == length``.

    :param data: the data byte array to truncate or pad
    :param length: the exact size of the result
    """
    # Truncate first, then ljust pads with NULs only when needed --
    # equivalent to the slice/concatenate branches it replaces.
    return data[:length].ljust(length, b"\0")
"def",
"pad",
"(",
"data",
",",
"length",
")",
":",
"if",
"(",
"len",
"(",
"data",
")",
">",
"length",
")",
":",
"return",
"data",
"[",
"0",
":",
"length",
"]",
"else",
":",
"return",
"data",
"+",
"b\"\\0\"",
"*",
"(",
"length",
"-",
"len",
"("... | This function returns a padded version of the input data to the
given length. this function will shorten the given data to the length
specified if necessary. post-condition: len(data) = length
:param data: the data byte array to pad
:param length: the length to pad the array to | [
"This",
"function",
"returns",
"a",
"padded",
"version",
"of",
"the",
"input",
"data",
"to",
"the",
"given",
"length",
".",
"this",
"function",
"will",
"shorten",
"the",
"given",
"data",
"to",
"the",
"length",
"specified",
"if",
"necessary",
".",
"post",
"... | train | https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/util.py#L53-L64 |
def eval(self, x):
    """Evaluate the keyed PRF at ``x``.

    Repeatedly hashes ``x + nonce`` and encrypts the padded digest until
    the masked result falls below ``self.range`` (rejection sampling).

    :param x: the input, as a Long
    :return: a pseudo-random int in ``[0, self.range)``
    """
    aes = AES.new(self.key, AES.MODE_CFB, "\0" * AES.block_size)
    # BUG FIX: ``nonce`` was previously re-initialized to 0 inside the
    # loop, making ``nonce += 1`` dead code; rejected samples were only
    # re-randomized by the CFB cipher's internal stream state. Hoisting
    # the initialization restores the intended nonce-driven retry.
    # NOTE(review): this changes output on the (rare) retry path --
    # confirm no stored challenge data depends on the old behavior.
    nonce = 0
    while True:
        data = KeyedPRF.pad(SHA256.new(str(x + nonce).encode()).digest(),
                            (number.size(self.range) + 7) // 8)
        num = self.mask & number.bytes_to_long(aes.encrypt(data))
        if (num < self.range):
            return num
        nonce += 1
"def",
"eval",
"(",
"self",
",",
"x",
")",
":",
"aes",
"=",
"AES",
".",
"new",
"(",
"self",
".",
"key",
",",
"AES",
".",
"MODE_CFB",
",",
"\"\\0\"",
"*",
"AES",
".",
"block_size",
")",
"while",
"True",
":",
"nonce",
"=",
"0",
"data",
"=",
"Keye... | This method returns the evaluation of the function with input x
:param x: this is the input as a Long | [
"This",
"method",
"returns",
"the",
"evaluation",
"of",
"the",
"function",
"with",
"input",
"x"
] | train | https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/util.py#L83-L96 |
timdiels/pytil | pytil/set.py | _locate_bin | def _locate_bin(bins, n):
"""
Find the bin where list n has ended up: Follow bin references until
we find a bin that has not moved.
"""
while bins[n] != n:
n = bins[n]
return n | python | def _locate_bin(bins, n):
"""
Find the bin where list n has ended up: Follow bin references until
we find a bin that has not moved.
"""
while bins[n] != n:
n = bins[n]
return n | [
"def",
"_locate_bin",
"(",
"bins",
",",
"n",
")",
":",
"while",
"bins",
"[",
"n",
"]",
"!=",
"n",
":",
"n",
"=",
"bins",
"[",
"n",
"]",
"return",
"n"
] | Find the bin where list n has ended up: Follow bin references until
we find a bin that has not moved. | [
"Find",
"the",
"bin",
"where",
"list",
"n",
"has",
"ended",
"up",
":",
"Follow",
"bin",
"references",
"until",
"we",
"find",
"a",
"bin",
"that",
"has",
"not",
"moved",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/set.py#L22-L29 |
timdiels/pytil | pytil/set.py | merge_by_overlap | def merge_by_overlap(sets):
'''
Of a list of sets, merge those that overlap, in place.
The result isn't necessarily a subsequence of the original ``sets``.
Parameters
----------
sets : ~typing.Sequence[~typing.Set[~typing.Any]]
Sets of which to merge those that overlap. Empty sets are ignored.
Notes
-----
Implementation is based on `this StackOverflow answer`_. It outperforms all
other algorithms in the thread (visited at dec 2015) on python3.4 using a
wide range of inputs.
.. _this StackOverflow answer: http://stackoverflow.com/a/9453249/1031434
Examples
--------
>>> merge_by_overlap([{1,2}, set(), {2,3}, {4,5,6}, {6,7}])
[{1,2,3}, {4,5,6,7}]
'''
data = sets
bins = list(range(len(data))) # Initialize each bin[n] == n
nums = dict()
for r, row in enumerate(data):
if not row:
data[r] = None
else:
for num in row:
if num not in nums:
# New number: tag it with a pointer to this row's bin
nums[num] = r
continue
else:
dest = _locate_bin(bins, nums[num])
if dest == r:
continue # already in the same bin
if dest > r:
dest, r = r, dest # always merge into the smallest bin
data[dest].update(data[r])
data[r] = None
# Update our indices to reflect the move
bins[r] = dest
r = dest
# Remove empty bins
for i in reversed(range(len(data))):
if not data[i]:
            del data[i] | python | def merge_by_overlap(sets):
    '''
    Of a list of sets, merge those that overlap, in place.

    Returns ``None``; ``sets`` is mutated. The result isn't necessarily a
    subsequence of the original ``sets``.

    Parameters
    ----------
    sets : ~typing.Sequence[~typing.Set[~typing.Any]]
        Sets of which to merge those that overlap. Empty sets are ignored.

    Notes
    -----
    Implementation is based on `this StackOverflow answer`_. It outperforms all
    other algorithms in the thread (visited at dec 2015) on python3.4 using a
    wide range of inputs.

    .. _this StackOverflow answer: http://stackoverflow.com/a/9453249/1031434

    Examples
    --------
    >>> sets = [{1,2}, set(), {2,3}, {4,5,6}, {6,7}]
    >>> merge_by_overlap(sets)
    >>> sets
    [{1, 2, 3}, {4, 5, 6, 7}]
    '''
    data = sets
    # bins[n] forwards bin n to the bin it was merged into; a self-reference
    # marks a live bin (union-find without rank/compression).
    bins = list(range(len(data))) # Initialize each bin[n] == n
    # nums maps each element to (a bin reachable from) the row that owns it.
    nums = dict()
    for r, row in enumerate(data):
        if not row:
            data[r] = None
        else:
            for num in row:
                if num not in nums:
                    # New number: tag it with a pointer to this row's bin
                    nums[num] = r
                    continue
                else:
                    dest = _locate_bin(bins, nums[num])
                    if dest == r:
                        continue # already in the same bin
                    if dest > r:
                        dest, r = r, dest # always merge into the smallest bin
                    data[dest].update(data[r])
                    data[r] = None
                    # Update our indices to reflect the move
                    bins[r] = dest
                    # Rebinding ``r`` makes the remaining numbers of this
                    # row tag into the merged (smaller-index) bin.
                    r = dest
    # Remove empty bins
    # (merged-away rows were set to None above; both None and originally
    # empty sets are falsy and get deleted here, in place.)
    for i in reversed(range(len(data))):
        if not data[i]:
            del data[i] | [
"def",
"merge_by_overlap",
"(",
"sets",
")",
":",
"data",
"=",
"sets",
"bins",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"data",
")",
")",
")",
"# Initialize each bin[n] == n",
"nums",
"=",
"dict",
"(",
")",
"for",
"r",
",",
"row",
"in",
"enumerate",... | Of a list of sets, merge those that overlap, in place.
The result isn't necessarily a subsequence of the original ``sets``.
Parameters
----------
sets : ~typing.Sequence[~typing.Set[~typing.Any]]
Sets of which to merge those that overlap. Empty sets are ignored.
Notes
-----
Implementation is based on `this StackOverflow answer`_. It outperforms all
other algorithms in the thread (visited at dec 2015) on python3.4 using a
wide range of inputs.
.. _this StackOverflow answer: http://stackoverflow.com/a/9453249/1031434
Examples
--------
>>> merge_by_overlap([{1,2}, set(), {2,3}, {4,5,6}, {6,7}])
[{1,2,3}, {4,5,6,7}] | [
"Of",
"a",
"list",
"of",
"sets",
"merge",
"those",
"that",
"overlap",
"in",
"place",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/set.py#L34-L88 |
def auth_proxy(self, method):
    """Authentication proxy for API requests.

    This is required because the API objects are naive of ``HelpScout``,
    so they would otherwise be unauthenticated.

    Args:
        method (callable): A method call that should be authenticated. It
            should accept a ``requests.Session`` as its first parameter,
            which should be used for the actual API call.

    Returns:
        callable: A wrapper that invokes ``method`` with
            ``self.session`` prepended to the caller's arguments and
            returns its result.
    """
    def _authenticated(*args, **kwargs):
        """Forward the call to ``method`` with the session injected.

        ``self.session`` is read at call time, so the wrapper always
        uses the current session.
        """
        return method(self.session, *args, **kwargs)
    return _authenticated
"def",
"auth_proxy",
"(",
"self",
",",
"method",
")",
":",
"def",
"_proxy",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"The actual proxy, which instantiates and authenticates the API.\n\n Args:\n *args (mixed): Args to send to class ins... | Authentication proxy for API requests.
This is required because the API objects are naive of ``HelpScout``,
so they would otherwise be unauthenticated.
Args:
method (callable): A method call that should be authenticated. It
should accept a ``requests.Session`` as its first parameter,
which should be used for the actual API call.
Returns:
mixed: The results of the authenticated callable. | [
"Authentication",
"proxy",
"for",
"API",
"requests",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/auth_proxy.py#L47-L73 |
def find_in_mailbox(cls, session, mailbox_or_id):
    """Get the users that are associated to a Mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox_or_id (MailboxRef or int): Mailbox or the ID of the
            mailbox to get the users for.

    Returns:
        RequestPaginator(output_type=helpscout.models.User): Users
            iterator.
    """
    # Accept either a Mailbox-like object (has ``id``) or a bare ID.
    mailbox_id = getattr(mailbox_or_id, 'id', mailbox_or_id)
    endpoint = '/mailboxes/%d/users.json' % mailbox_id
    return cls(endpoint, session=session)
"def",
"find_in_mailbox",
"(",
"cls",
",",
"session",
",",
"mailbox_or_id",
")",
":",
"if",
"hasattr",
"(",
"mailbox_or_id",
",",
"'id'",
")",
":",
"mailbox_or_id",
"=",
"mailbox_or_id",
".",
"id",
"return",
"cls",
"(",
"'/mailboxes/%d/users.json'",
"%",
"mail... | Get the users that are associated to a Mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox_or_id (MailboxRef or int): Mailbox of the ID of the
mailbox to get the folders for.
Returns:
RequestPaginator(output_type=helpscout.models.User): Users
iterator. | [
"Get",
"the",
"users",
"that",
"are",
"associated",
"to",
"a",
"Mailbox",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/users.py#L50-L67 |
jic-dtool/dtoolcore | dtoolcore/filehasher.py | _hash_the_file | def _hash_the_file(hasher, filename):
"""Helper function for creating hash functions.
See implementation of :func:`dtoolcore.filehasher.shasum`
for more usage details.
"""
BUF_SIZE = 65536
with open(filename, 'rb') as f:
buf = f.read(BUF_SIZE)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(BUF_SIZE)
return hasher | python | def _hash_the_file(hasher, filename):
"""Helper function for creating hash functions.
See implementation of :func:`dtoolcore.filehasher.shasum`
for more usage details.
"""
BUF_SIZE = 65536
with open(filename, 'rb') as f:
buf = f.read(BUF_SIZE)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(BUF_SIZE)
return hasher | [
"def",
"_hash_the_file",
"(",
"hasher",
",",
"filename",
")",
":",
"BUF_SIZE",
"=",
"65536",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"buf",
"=",
"f",
".",
"read",
"(",
"BUF_SIZE",
")",
"while",
"len",
"(",
"buf",
")",
">"... | Helper function for creating hash functions.
See implementation of :func:`dtoolcore.filehasher.shasum`
for more usage details. | [
"Helper",
"function",
"for",
"creating",
"hash",
"functions",
"."
] | train | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/filehasher.py#L17-L29 |
def enable_precompute(panel):
  """Schedule a precompute task for `panel` and return its task id.

  Raises:
    RuntimeError: When the scheduler reports a non-success status.
  """
  source = panel['data_source']
  use_metis = source['source_type'] == 'querybuilder'
  if use_metis:
    query = source['query']
  else:
    query = "u'''%s'''" % source['code']
  precompute = source['precompute']
  timeframe = source['timeframe']
  width_cfg = precompute['bucket_width']
  bucket_width_seconds = get_seconds(width_cfg['value'],
                                     width_cfg['scale']['name'])
  mode = timeframe['mode']['value']
  if mode == 'recent':
    untrusted_cfg = precompute['untrusted_time']
    untrusted_time_seconds = get_seconds(untrusted_cfg['value'],
                                         untrusted_cfg['scale']['name'])
    # Recompute once per bucket.
    interval = bucket_width_seconds
  elif mode == 'range':
    untrusted_time_seconds = 0
    # A fixed range only needs a single run; interval 0 means run once.
    interval = 0
  task_code = PRECOMPUTE_INITIALIZATION_CODE % (query, timeframe,
                                                bucket_width_seconds,
                                                untrusted_time_seconds,
                                                use_metis)
  result = scheduler_client.schedule(task_code, interval)
  if result['status'] != 'success':
    raise RuntimeError(result.get('reason'))
  return result['id']
"def",
"enable_precompute",
"(",
"panel",
")",
":",
"use_metis",
"=",
"panel",
"[",
"'data_source'",
"]",
"[",
"'source_type'",
"]",
"==",
"'querybuilder'",
"if",
"use_metis",
":",
"query",
"=",
"panel",
"[",
"'data_source'",
"]",
"[",
"'query'",
"]",
"else"... | Schedule a precompute task for `panel` | [
"Schedule",
"a",
"precompute",
"task",
"for",
"panel"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L256-L289 |
Locu/chronology | jia/jia/compute.py | disable_precompute | def disable_precompute(panel):
"""Cancel precomputation for `panel`"""
task_id = panel['data_source']['precompute']['task_id']
result = scheduler_client.cancel(task_id)
if result['status'] != 'success':
raise RuntimeError(result.get('reason')) | python | def disable_precompute(panel):
"""Cancel precomputation for `panel`"""
task_id = panel['data_source']['precompute']['task_id']
result = scheduler_client.cancel(task_id)
if result['status'] != 'success':
raise RuntimeError(result.get('reason')) | [
"def",
"disable_precompute",
"(",
"panel",
")",
":",
"task_id",
"=",
"panel",
"[",
"'data_source'",
"]",
"[",
"'precompute'",
"]",
"[",
"'task_id'",
"]",
"result",
"=",
"scheduler_client",
".",
"cancel",
"(",
"task_id",
")",
"if",
"result",
"[",
"'status'",
... | Cancel precomputation for `panel` | [
"Cancel",
"precomputation",
"for",
"panel"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L292-L297 |
Locu/chronology | jia/jia/compute.py | QueryCompute._get_timeframe_bounds | def _get_timeframe_bounds(self, timeframe, bucket_width):
"""
Get a `bucket_width` aligned `start_time` and `end_time` from a
`timeframe` dict
"""
if bucket_width:
bucket_width_seconds = bucket_width
bucket_width = epoch_time_to_kronos_time(bucket_width)
# TODO(derek): Potential optimization by setting the end_time equal to the
# untrusted_time if end_time > untrusted_time and the results are not being
# output to the user (only for caching)
if timeframe['mode']['value'] == 'recent':
# Set end_time equal to now and align to bucket width
end_time = kronos_time_now()
original_end_time = end_time
duration = get_seconds(timeframe['value'], timeframe['scale']['name'])
duration = epoch_time_to_kronos_time(duration)
start_time = original_end_time - duration
if bucket_width:
# Align values to the bucket width
# TODO(derek): Warn the user that the timeframe has been altered to fit
# the bucket width
if (end_time % bucket_width) != 0:
end_time += bucket_width - (end_time % bucket_width)
if (start_time % bucket_width) != 0:
start_time -= (start_time % bucket_width)
start = kronos_time_to_datetime(start_time)
end = kronos_time_to_datetime(end_time)
elif timeframe['mode']['value'] == 'range':
end = datetime.datetime.strptime(timeframe['to'], DT_FORMAT)
end_seconds = datetime_to_epoch_time(end)
start = datetime.datetime.strptime(timeframe['from'], DT_FORMAT)
start_seconds = datetime_to_epoch_time(start)
if bucket_width:
# Align values to the bucket width
# TODO(derek): Warn the user that the timeframe has been altered to fit
# the bucket width
start_bump = start_seconds % bucket_width_seconds
start -= datetime.timedelta(seconds=start_bump)
if (end_seconds % bucket_width_seconds) != 0:
end_bump = bucket_width_seconds - (end_seconds % bucket_width_seconds)
end += datetime.timedelta(seconds=end_bump)
else:
raise ValueError("Timeframe mode must be 'recent' or 'range'")
return start, end | python | def _get_timeframe_bounds(self, timeframe, bucket_width):
"""
Get a `bucket_width` aligned `start_time` and `end_time` from a
`timeframe` dict
"""
if bucket_width:
bucket_width_seconds = bucket_width
bucket_width = epoch_time_to_kronos_time(bucket_width)
# TODO(derek): Potential optimization by setting the end_time equal to the
# untrusted_time if end_time > untrusted_time and the results are not being
# output to the user (only for caching)
if timeframe['mode']['value'] == 'recent':
# Set end_time equal to now and align to bucket width
end_time = kronos_time_now()
original_end_time = end_time
duration = get_seconds(timeframe['value'], timeframe['scale']['name'])
duration = epoch_time_to_kronos_time(duration)
start_time = original_end_time - duration
if bucket_width:
# Align values to the bucket width
# TODO(derek): Warn the user that the timeframe has been altered to fit
# the bucket width
if (end_time % bucket_width) != 0:
end_time += bucket_width - (end_time % bucket_width)
if (start_time % bucket_width) != 0:
start_time -= (start_time % bucket_width)
start = kronos_time_to_datetime(start_time)
end = kronos_time_to_datetime(end_time)
elif timeframe['mode']['value'] == 'range':
end = datetime.datetime.strptime(timeframe['to'], DT_FORMAT)
end_seconds = datetime_to_epoch_time(end)
start = datetime.datetime.strptime(timeframe['from'], DT_FORMAT)
start_seconds = datetime_to_epoch_time(start)
if bucket_width:
# Align values to the bucket width
# TODO(derek): Warn the user that the timeframe has been altered to fit
# the bucket width
start_bump = start_seconds % bucket_width_seconds
start -= datetime.timedelta(seconds=start_bump)
if (end_seconds % bucket_width_seconds) != 0:
end_bump = bucket_width_seconds - (end_seconds % bucket_width_seconds)
end += datetime.timedelta(seconds=end_bump)
else:
raise ValueError("Timeframe mode must be 'recent' or 'range'")
return start, end | [
"def",
"_get_timeframe_bounds",
"(",
"self",
",",
"timeframe",
",",
"bucket_width",
")",
":",
"if",
"bucket_width",
":",
"bucket_width_seconds",
"=",
"bucket_width",
"bucket_width",
"=",
"epoch_time_to_kronos_time",
"(",
"bucket_width",
")",
"# TODO(derek): Potential opti... | Get a `bucket_width` aligned `start_time` and `end_time` from a
`timeframe` dict | [
"Get",
"a",
"bucket_width",
"aligned",
"start_time",
"and",
"end_time",
"from",
"a",
"timeframe",
"dict"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L129-L180 |
Locu/chronology | jia/jia/compute.py | QueryCompute._run_query | def _run_query(self, start_time, end_time, unique_id=None):
"""Executes a Python query string and returns events
Acts as a wrapper around exec that injects necessary local variables into
the scope of the user-provided query blob.
:param start_time: Python datetime to be injected into query
:param end_time: Python datetime to be injected into query
:param unique_id: An unused flag that allows the scheduler to hash this
function uniquely based on its args when it passes through
"""
# XXX(derek): DEPRECATION WARNING
# Use of the implicit Kronos client in pycode queries is deprecated
client = KronosClient(self._app.config['KRONOS_URL'],
namespace=self._app.config['KRONOS_NAMESPACE'],
blocking=False,
sleep_block=0.2)
locals_dict = {
'kronos_client': client,
'events': [],
'start_time': start_time,
'end_time': end_time,
}
try:
exec self._query in {}, locals_dict # No globals.
except:
_, exception, tb = sys.exc_info()
raise PyCodeError(exception, traceback.format_tb(tb))
# Retrieve the `events` variable as computed by the pycode.
events = locals_dict.get('events', [])
return events | python | def _run_query(self, start_time, end_time, unique_id=None):
"""Executes a Python query string and returns events
Acts as a wrapper around exec that injects necessary local variables into
the scope of the user-provided query blob.
:param start_time: Python datetime to be injected into query
:param end_time: Python datetime to be injected into query
:param unique_id: An unused flag that allows the scheduler to hash this
function uniquely based on its args when it passes through
"""
# XXX(derek): DEPRECATION WARNING
# Use of the implicit Kronos client in pycode queries is deprecated
client = KronosClient(self._app.config['KRONOS_URL'],
namespace=self._app.config['KRONOS_NAMESPACE'],
blocking=False,
sleep_block=0.2)
locals_dict = {
'kronos_client': client,
'events': [],
'start_time': start_time,
'end_time': end_time,
}
try:
exec self._query in {}, locals_dict # No globals.
except:
_, exception, tb = sys.exc_info()
raise PyCodeError(exception, traceback.format_tb(tb))
# Retrieve the `events` variable as computed by the pycode.
events = locals_dict.get('events', [])
return events | [
"def",
"_run_query",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"unique_id",
"=",
"None",
")",
":",
"# XXX(derek): DEPRECATION WARNING",
"# Use of the implicit Kronos client in pycode queries is deprecated",
"client",
"=",
"KronosClient",
"(",
"self",
".",
"_ap... | Executes a Python query string and returns events
Acts as a wrapper around exec that injects necessary local variables into
the scope of the user-provided query blob.
:param start_time: Python datetime to be injected into query
:param end_time: Python datetime to be injected into query
:param unique_id: An unused flag that allows the scheduler to hash this
function uniquely based on its args when it passes through | [
"Executes",
"a",
"Python",
"query",
"string",
"and",
"returns",
"events"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L182-L214 |
Locu/chronology | jia/jia/compute.py | QueryCompute.compute | def compute(self, use_cache=True):
"""Call a user defined query and return events with optional help from
the cache.
:param use_cache: Specifies whether the cache should be used when possible
"""
if use_cache:
if not self._bucket_width:
raise ValueError('QueryCompute must be initialized with a bucket_width'
' to use caching features.')
return list(self._query_cache.retrieve_interval(self._start_time,
self._end_time,
compute_missing=True))
else:
if self._metis:
return self._run_metis(self._start_time, self._end_time)
else:
return self._run_query(self._start_time, self._end_time) | python | def compute(self, use_cache=True):
"""Call a user defined query and return events with optional help from
the cache.
:param use_cache: Specifies whether the cache should be used when possible
"""
if use_cache:
if not self._bucket_width:
raise ValueError('QueryCompute must be initialized with a bucket_width'
' to use caching features.')
return list(self._query_cache.retrieve_interval(self._start_time,
self._end_time,
compute_missing=True))
else:
if self._metis:
return self._run_metis(self._start_time, self._end_time)
else:
return self._run_query(self._start_time, self._end_time) | [
"def",
"compute",
"(",
"self",
",",
"use_cache",
"=",
"True",
")",
":",
"if",
"use_cache",
":",
"if",
"not",
"self",
".",
"_bucket_width",
":",
"raise",
"ValueError",
"(",
"'QueryCompute must be initialized with a bucket_width'",
"' to use caching features.'",
")",
... | Call a user defined query and return events with optional help from
the cache.
:param use_cache: Specifies whether the cache should be used when possible | [
"Call",
"a",
"user",
"defined",
"query",
"and",
"return",
"events",
"with",
"optional",
"help",
"from",
"the",
"cache",
"."
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L223-L240 |
Locu/chronology | jia/jia/compute.py | QueryCompute.cache | def cache(self):
"""Call a user defined query and cache the results"""
if not self._bucket_width or self._untrusted_time is None:
raise ValueError('QueryCompute must be initialized with a bucket_width '
'and an untrusted_time in order to write to the cache.')
now = datetime.datetime.now()
untrusted_time = now - datetime.timedelta(seconds=self._untrusted_time)
list(self._query_cache.compute_and_cache_missing_buckets(
self._start_time,
self._end_time,
untrusted_time)) | python | def cache(self):
"""Call a user defined query and cache the results"""
if not self._bucket_width or self._untrusted_time is None:
raise ValueError('QueryCompute must be initialized with a bucket_width '
'and an untrusted_time in order to write to the cache.')
now = datetime.datetime.now()
untrusted_time = now - datetime.timedelta(seconds=self._untrusted_time)
list(self._query_cache.compute_and_cache_missing_buckets(
self._start_time,
self._end_time,
untrusted_time)) | [
"def",
"cache",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_bucket_width",
"or",
"self",
".",
"_untrusted_time",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'QueryCompute must be initialized with a bucket_width '",
"'and an untrusted_time in order to write to t... | Call a user defined query and cache the results | [
"Call",
"a",
"user",
"defined",
"query",
"and",
"cache",
"the",
"results"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/jia/compute.py#L242-L253 |
LasLabs/python-helpscout | helpscout/apis/customers.py | Customers.list | def list(cls, session, first_name=None, last_name=None, email=None,
modified_since=None):
"""List the customers.
Customers can be filtered on any combination of first name, last name,
email, and modifiedSince.
Args:
session (requests.sessions.Session): Authenticated session.
first_name (str, optional): First name of customer.
last_name (str, optional): Last name of customer.
email (str, optional): Email address of customer.
modified_since (datetime.datetime, optional): If modified after
this date.
Returns:
RequestPaginator(output_type=helpscout.models.Customer): Customers
iterator.
"""
return super(Customers, cls).list(
session,
data=cls.__object__.get_non_empty_vals({
'firstName': first_name,
'lastName': last_name,
'email': email,
'modifiedSince': modified_since,
})
) | python | def list(cls, session, first_name=None, last_name=None, email=None,
modified_since=None):
"""List the customers.
Customers can be filtered on any combination of first name, last name,
email, and modifiedSince.
Args:
session (requests.sessions.Session): Authenticated session.
first_name (str, optional): First name of customer.
last_name (str, optional): Last name of customer.
email (str, optional): Email address of customer.
modified_since (datetime.datetime, optional): If modified after
this date.
Returns:
RequestPaginator(output_type=helpscout.models.Customer): Customers
iterator.
"""
return super(Customers, cls).list(
session,
data=cls.__object__.get_non_empty_vals({
'firstName': first_name,
'lastName': last_name,
'email': email,
'modifiedSince': modified_since,
})
) | [
"def",
"list",
"(",
"cls",
",",
"session",
",",
"first_name",
"=",
"None",
",",
"last_name",
"=",
"None",
",",
"email",
"=",
"None",
",",
"modified_since",
"=",
"None",
")",
":",
"return",
"super",
"(",
"Customers",
",",
"cls",
")",
".",
"list",
"(",... | List the customers.
Customers can be filtered on any combination of first name, last name,
email, and modifiedSince.
Args:
session (requests.sessions.Session): Authenticated session.
first_name (str, optional): First name of customer.
last_name (str, optional): Last name of customer.
email (str, optional): Email address of customer.
modified_since (datetime.datetime, optional): If modified after
this date.
Returns:
RequestPaginator(output_type=helpscout.models.Customer): Customers
iterator. | [
"List",
"the",
"customers",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/customers.py#L38-L65 |
LasLabs/python-helpscout | helpscout/apis/customers.py | Customers.search | def search(cls, session, queries):
"""Search for a customer given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
"""
return super(Customers, cls).search(session, queries, SearchCustomer) | python | def search(cls, session, queries):
"""Search for a customer given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
"""
return super(Customers, cls).search(session, queries, SearchCustomer) | [
"def",
"search",
"(",
"cls",
",",
"session",
",",
"queries",
")",
":",
"return",
"super",
"(",
"Customers",
",",
"cls",
")",
".",
"search",
"(",
"session",
",",
"queries",
",",
"SearchCustomer",
")"
] | Search for a customer given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator. | [
"Search",
"for",
"a",
"customer",
"given",
"a",
"domain",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/customers.py#L68-L84 |
Locu/chronology | jia/scheduler/auth.py | create_token | def create_token(key, payload):
"""Auth token generator
payload should be a json encodable data structure
"""
token = hmac.new(key)
token.update(json.dumps(payload))
return token.hexdigest() | python | def create_token(key, payload):
"""Auth token generator
payload should be a json encodable data structure
"""
token = hmac.new(key)
token.update(json.dumps(payload))
return token.hexdigest() | [
"def",
"create_token",
"(",
"key",
",",
"payload",
")",
":",
"token",
"=",
"hmac",
".",
"new",
"(",
"key",
")",
"token",
".",
"update",
"(",
"json",
".",
"dumps",
"(",
"payload",
")",
")",
"return",
"token",
".",
"hexdigest",
"(",
")"
] | Auth token generator
payload should be a json encodable data structure | [
"Auth",
"token",
"generator"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/auth.py#L5-L12 |
Locu/chronology | jia/scheduler/__init__.py | get_app | def get_app(settings_file=None):
"""Get scheduler app singleton
The app configuration is performed when the function is run for the first
time.
Because the scheduler is a threaded enviroment, it is important that this
function be thread-safe. The scheduler instance is not created until the
`commands/runscheduler.py` script is executed, and this function is first
invoked by the scheduler.py management script. In other words, this function
is guaranteed to run to completion (by the management script) before the
scheduler thread is spawned. Should that ever change, locks would need to be
added here.
"""
global _APP
if _APP:
return _APP
_APP = Flask(__name__)
db.init_app(_APP)
migrate = Migrate(_APP, db, directory='scheduler/migrations')
_APP.config.from_pyfile('../jia/conf/default_settings.py')
if settings_file:
if not settings_file.startswith('/'):
settings_file = os.path.join(os.pardir, settings_file)
_APP.config.from_pyfile(settings_file, silent=True)
_APP.config.update(PORT=_APP.config['SCHEDULER_PORT'])
_APP.config.update(SQLALCHEMY_DATABASE_URI=_APP.config['SCHEDULER_DATABASE_URI'])
_APP.secret_key = _APP.config['SECRET_KEY']
from scheduler.views import scheduler
_APP.register_blueprint(scheduler)
return _APP | python | def get_app(settings_file=None):
"""Get scheduler app singleton
The app configuration is performed when the function is run for the first
time.
Because the scheduler is a threaded enviroment, it is important that this
function be thread-safe. The scheduler instance is not created until the
`commands/runscheduler.py` script is executed, and this function is first
invoked by the scheduler.py management script. In other words, this function
is guaranteed to run to completion (by the management script) before the
scheduler thread is spawned. Should that ever change, locks would need to be
added here.
"""
global _APP
if _APP:
return _APP
_APP = Flask(__name__)
db.init_app(_APP)
migrate = Migrate(_APP, db, directory='scheduler/migrations')
_APP.config.from_pyfile('../jia/conf/default_settings.py')
if settings_file:
if not settings_file.startswith('/'):
settings_file = os.path.join(os.pardir, settings_file)
_APP.config.from_pyfile(settings_file, silent=True)
_APP.config.update(PORT=_APP.config['SCHEDULER_PORT'])
_APP.config.update(SQLALCHEMY_DATABASE_URI=_APP.config['SCHEDULER_DATABASE_URI'])
_APP.secret_key = _APP.config['SECRET_KEY']
from scheduler.views import scheduler
_APP.register_blueprint(scheduler)
return _APP | [
"def",
"get_app",
"(",
"settings_file",
"=",
"None",
")",
":",
"global",
"_APP",
"if",
"_APP",
":",
"return",
"_APP",
"_APP",
"=",
"Flask",
"(",
"__name__",
")",
"db",
".",
"init_app",
"(",
"_APP",
")",
"migrate",
"=",
"Migrate",
"(",
"_APP",
",",
"d... | Get scheduler app singleton
The app configuration is performed when the function is run for the first
time.
Because the scheduler is a threaded enviroment, it is important that this
function be thread-safe. The scheduler instance is not created until the
`commands/runscheduler.py` script is executed, and this function is first
invoked by the scheduler.py management script. In other words, this function
is guaranteed to run to completion (by the management script) before the
scheduler thread is spawned. Should that ever change, locks would need to be
added here. | [
"Get",
"scheduler",
"app",
"singleton"
] | train | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/jia/scheduler/__init__.py#L14-L49 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.create | def create(cls, session, record, imported=False, auto_reply=False):
"""Create a conversation.
Please note that conversation cannot be created with more than 100
threads, if attempted the API will respond with HTTP 412.
Args:
session (requests.sessions.Session): Authenticated session.
record (helpscout.models.Conversation): The conversation
to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
auto_reply (bool): The ``auto_reply`` request parameter enables
auto replies to be sent when a conversation is created via the
API. When ``auto_reply`` is set to ``True``, an auto reply will
be sent as long as there is at least one ``customer`` thread in
the conversation.
Returns:
helpscout.models.Conversation: Newly created conversation.
"""
return super(Conversations, cls).create(
session,
record,
imported=imported,
auto_reply=auto_reply,
) | python | def create(cls, session, record, imported=False, auto_reply=False):
"""Create a conversation.
Please note that conversation cannot be created with more than 100
threads, if attempted the API will respond with HTTP 412.
Args:
session (requests.sessions.Session): Authenticated session.
record (helpscout.models.Conversation): The conversation
to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
auto_reply (bool): The ``auto_reply`` request parameter enables
auto replies to be sent when a conversation is created via the
API. When ``auto_reply`` is set to ``True``, an auto reply will
be sent as long as there is at least one ``customer`` thread in
the conversation.
Returns:
helpscout.models.Conversation: Newly created conversation.
"""
return super(Conversations, cls).create(
session,
record,
imported=imported,
auto_reply=auto_reply,
) | [
"def",
"create",
"(",
"cls",
",",
"session",
",",
"record",
",",
"imported",
"=",
"False",
",",
"auto_reply",
"=",
"False",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"create",
"(",
"session",
",",
"record",
",",
"imported... | Create a conversation.
Please note that conversation cannot be created with more than 100
threads, if attempted the API will respond with HTTP 412.
Args:
session (requests.sessions.Session): Authenticated session.
record (helpscout.models.Conversation): The conversation
to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
auto_reply (bool): The ``auto_reply`` request parameter enables
auto replies to be sent when a conversation is created via the
API. When ``auto_reply`` is set to ``True``, an auto reply will
be sent as long as there is at least one ``customer`` thread in
the conversation.
Returns:
helpscout.models.Conversation: Newly created conversation. | [
"Create",
"a",
"conversation",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L61-L90 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.create_attachment | def create_attachment(cls, session, attachment):
"""Create an attachment.
An attachment must be sent to the API before it can be used in a
thread. Use this method to create the attachment, then use the
resulting hash when creating a thread.
Note that HelpScout only supports attachments of 10MB or lower.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to be
created.
Returns:
helpscout.models.Attachment: The newly created attachment (hash
property only). Use this hash when associating the attachment with
a new thread.
"""
return super(Conversations, cls).create(
session,
attachment,
endpoint_override='/attachments.json',
out_type=Attachment,
) | python | def create_attachment(cls, session, attachment):
"""Create an attachment.
An attachment must be sent to the API before it can be used in a
thread. Use this method to create the attachment, then use the
resulting hash when creating a thread.
Note that HelpScout only supports attachments of 10MB or lower.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to be
created.
Returns:
helpscout.models.Attachment: The newly created attachment (hash
property only). Use this hash when associating the attachment with
a new thread.
"""
return super(Conversations, cls).create(
session,
attachment,
endpoint_override='/attachments.json',
out_type=Attachment,
) | [
"def",
"create_attachment",
"(",
"cls",
",",
"session",
",",
"attachment",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"create",
"(",
"session",
",",
"attachment",
",",
"endpoint_override",
"=",
"'/attachments.json'",
",",
"out_typ... | Create an attachment.
An attachment must be sent to the API before it can be used in a
thread. Use this method to create the attachment, then use the
resulting hash when creating a thread.
Note that HelpScout only supports attachments of 10MB or lower.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to be
created.
Returns:
helpscout.models.Attachment: The newly created attachment (hash
property only). Use this hash when associating the attachment with
a new thread. | [
"Create",
"an",
"attachment",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L93-L117 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.create_thread | def create_thread(cls, session, conversation, thread, imported=False):
"""Create a conversation thread.
Please note that threads cannot be added to conversations with 100
threads (or more), if attempted the API will respond with HTTP 412.
Args:
conversation (helpscout.models.Conversation): The conversation
that the thread is being added to.
session (requests.sessions.Session): Authenticated session.
thread (helpscout.models.Thread): The thread to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
Returns:
helpscout.models.Conversation: Conversation including newly created
thread.
"""
return super(Conversations, cls).create(
session,
thread,
endpoint_override='/conversations/%s.json' % conversation.id,
imported=imported,
) | python | def create_thread(cls, session, conversation, thread, imported=False):
"""Create a conversation thread.
Please note that threads cannot be added to conversations with 100
threads (or more), if attempted the API will respond with HTTP 412.
Args:
conversation (helpscout.models.Conversation): The conversation
that the thread is being added to.
session (requests.sessions.Session): Authenticated session.
thread (helpscout.models.Thread): The thread to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
Returns:
helpscout.models.Conversation: Conversation including newly created
thread.
"""
return super(Conversations, cls).create(
session,
thread,
endpoint_override='/conversations/%s.json' % conversation.id,
imported=imported,
) | [
"def",
"create_thread",
"(",
"cls",
",",
"session",
",",
"conversation",
",",
"thread",
",",
"imported",
"=",
"False",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"create",
"(",
"session",
",",
"thread",
",",
"endpoint_override... | Create a conversation thread.
Please note that threads cannot be added to conversations with 100
threads (or more), if attempted the API will respond with HTTP 412.
Args:
conversation (helpscout.models.Conversation): The conversation
that the thread is being added to.
session (requests.sessions.Session): Authenticated session.
thread (helpscout.models.Thread): The thread to be created.
imported (bool, optional): The ``imported`` request parameter
enables conversations to be created for historical purposes (i.e.
if moving from a different platform, you can import your
history). When ``imported`` is set to ``True``, no outgoing
emails or notifications will be generated.
Returns:
helpscout.models.Conversation: Conversation including newly created
thread. | [
"Create",
"a",
"conversation",
"thread",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L120-L146 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.delete_attachment | def delete_attachment(cls, session, attachment):
"""Delete an attachment.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to
be deleted.
Returns:
NoneType: Nothing.
"""
return super(Conversations, cls).delete(
session,
attachment,
endpoint_override='/attachments/%s.json' % attachment.id,
out_type=Attachment,
) | python | def delete_attachment(cls, session, attachment):
"""Delete an attachment.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to
be deleted.
Returns:
NoneType: Nothing.
"""
return super(Conversations, cls).delete(
session,
attachment,
endpoint_override='/attachments/%s.json' % attachment.id,
out_type=Attachment,
) | [
"def",
"delete_attachment",
"(",
"cls",
",",
"session",
",",
"attachment",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"delete",
"(",
"session",
",",
"attachment",
",",
"endpoint_override",
"=",
"'/attachments/%s.json'",
"%",
"atta... | Delete an attachment.
Args:
session (requests.sessions.Session): Authenticated session.
attachment (helpscout.models.Attachment): The attachment to
be deleted.
Returns:
NoneType: Nothing. | [
"Delete",
"an",
"attachment",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L149-L165 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.find_customer | def find_customer(cls, session, mailbox, customer):
"""Return conversations for a specific customer in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
customer (helpscout.models.Customer): Customer to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/customers/%s/conversations.json' % (
mailbox.id, customer.id,
),
session=session,
) | python | def find_customer(cls, session, mailbox, customer):
"""Return conversations for a specific customer in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
customer (helpscout.models.Customer): Customer to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/customers/%s/conversations.json' % (
mailbox.id, customer.id,
),
session=session,
) | [
"def",
"find_customer",
"(",
"cls",
",",
"session",
",",
"mailbox",
",",
"customer",
")",
":",
"return",
"cls",
"(",
"'/mailboxes/%d/customers/%s/conversations.json'",
"%",
"(",
"mailbox",
".",
"id",
",",
"customer",
".",
"id",
",",
")",
",",
"session",
"=",... | Return conversations for a specific customer in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
customer (helpscout.models.Customer): Customer to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator. | [
"Return",
"conversations",
"for",
"a",
"specific",
"customer",
"in",
"a",
"mailbox",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L168-L185 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.find_user | def find_user(cls, session, mailbox, user):
"""Return conversations for a specific user in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
user (helpscout.models.User): User to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/users/%s/conversations.json' % (
mailbox.id, user.id,
),
session=session,
) | python | def find_user(cls, session, mailbox, user):
"""Return conversations for a specific user in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
user (helpscout.models.User): User to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/users/%s/conversations.json' % (
mailbox.id, user.id,
),
session=session,
) | [
"def",
"find_user",
"(",
"cls",
",",
"session",
",",
"mailbox",
",",
"user",
")",
":",
"return",
"cls",
"(",
"'/mailboxes/%d/users/%s/conversations.json'",
"%",
"(",
"mailbox",
".",
"id",
",",
"user",
".",
"id",
",",
")",
",",
"session",
"=",
"session",
... | Return conversations for a specific user in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to search.
user (helpscout.models.User): User to search for.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator. | [
"Return",
"conversations",
"for",
"a",
"specific",
"user",
"in",
"a",
"mailbox",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L188-L205 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.get_attachment_data | def get_attachment_data(cls, session, attachment_id):
"""Return a specific attachment's data.
Args:
session (requests.sessions.Session): Authenticated session.
attachment_id (int): The ID of the attachment from which to get
data.
Returns:
helpscout.models.AttachmentData: An attachment data singleton, if
existing. Otherwise ``None``.
"""
return cls(
'/attachments/%d/data.json' % attachment_id,
singleton=True,
session=session,
out_type=AttachmentData,
) | python | def get_attachment_data(cls, session, attachment_id):
"""Return a specific attachment's data.
Args:
session (requests.sessions.Session): Authenticated session.
attachment_id (int): The ID of the attachment from which to get
data.
Returns:
helpscout.models.AttachmentData: An attachment data singleton, if
existing. Otherwise ``None``.
"""
return cls(
'/attachments/%d/data.json' % attachment_id,
singleton=True,
session=session,
out_type=AttachmentData,
) | [
"def",
"get_attachment_data",
"(",
"cls",
",",
"session",
",",
"attachment_id",
")",
":",
"return",
"cls",
"(",
"'/attachments/%d/data.json'",
"%",
"attachment_id",
",",
"singleton",
"=",
"True",
",",
"session",
"=",
"session",
",",
"out_type",
"=",
"AttachmentD... | Return a specific attachment's data.
Args:
session (requests.sessions.Session): Authenticated session.
attachment_id (int): The ID of the attachment from which to get
data.
Returns:
helpscout.models.AttachmentData: An attachment data singleton, if
existing. Otherwise ``None``. | [
"Return",
"a",
"specific",
"attachment",
"s",
"data",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L208-L225 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.list | def list(cls, session, mailbox):
"""Return conversations in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
endpoint = '/mailboxes/%d/conversations.json' % mailbox.id
return super(Conversations, cls).list(session, endpoint) | python | def list(cls, session, mailbox):
"""Return conversations in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
endpoint = '/mailboxes/%d/conversations.json' % mailbox.id
return super(Conversations, cls).list(session, endpoint) | [
"def",
"list",
"(",
"cls",
",",
"session",
",",
"mailbox",
")",
":",
"endpoint",
"=",
"'/mailboxes/%d/conversations.json'",
"%",
"mailbox",
".",
"id",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"list",
"(",
"session",
",",
"endpoint",
"... | Return conversations in a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator. | [
"Return",
"conversations",
"in",
"a",
"mailbox",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L228-L240 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.list_folder | def list_folder(cls, session, mailbox, folder):
"""Return conversations in a specific folder of a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
folder (helpscout.models.Folder): Folder to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/folders/%s/conversations.json' % (
mailbox.id, folder.id,
),
session=session,
) | python | def list_folder(cls, session, mailbox, folder):
"""Return conversations in a specific folder of a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
folder (helpscout.models.Folder): Folder to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator.
"""
return cls(
'/mailboxes/%d/folders/%s/conversations.json' % (
mailbox.id, folder.id,
),
session=session,
) | [
"def",
"list_folder",
"(",
"cls",
",",
"session",
",",
"mailbox",
",",
"folder",
")",
":",
"return",
"cls",
"(",
"'/mailboxes/%d/folders/%s/conversations.json'",
"%",
"(",
"mailbox",
".",
"id",
",",
"folder",
".",
"id",
",",
")",
",",
"session",
"=",
"sess... | Return conversations in a specific folder of a mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
folder (helpscout.models.Folder): Folder to list.
Returns:
RequestPaginator(output_type=helpscout.models.Conversation):
Conversations iterator. | [
"Return",
"conversations",
"in",
"a",
"specific",
"folder",
"of",
"a",
"mailbox",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L243-L260 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.search | def search(cls, session, queries):
"""Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
"""
return super(Conversations, cls).search(
session, queries, SearchConversation,
) | python | def search(cls, session, queries):
"""Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator.
"""
return super(Conversations, cls).search(
session, queries, SearchConversation,
) | [
"def",
"search",
"(",
"cls",
",",
"session",
",",
"queries",
")",
":",
"return",
"super",
"(",
"Conversations",
",",
"cls",
")",
".",
"search",
"(",
"session",
",",
"queries",
",",
"SearchConversation",
",",
")"
] | Search for a conversation given a domain.
Args:
session (requests.sessions.Session): Authenticated session.
queries (helpscout.models.Domain or iter): The queries for the
domain. If a ``Domain`` object is provided, it will simply be
returned. Otherwise, a ``Domain`` object will be generated
from the complex queries. In this case, the queries should
conform to the interface in
:func:`helpscout.domain.Domain.from_tuple`.
Returns:
RequestPaginator(output_type=helpscout.models.SearchCustomer):
SearchCustomer iterator. | [
"Search",
"for",
"a",
"conversation",
"given",
"a",
"domain",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L263-L281 |
LasLabs/python-helpscout | helpscout/apis/conversations.py | Conversations.update_thread | def update_thread(cls, session, conversation, thread):
"""Update a thread.
Args:
session (requests.sessions.Session): Authenticated session.
conversation (helpscout.models.Conversation): The conversation
that the thread belongs to.
thread (helpscout.models.Thread): The thread to be updated.
Returns:
helpscout.models.Conversation: Conversation including freshly
updated thread.
"""
data = thread.to_api()
data['reload'] = True
return cls(
'/conversations/%s/threads/%d.json' % (
conversation.id, thread.id,
),
data=data,
request_type=RequestPaginator.PUT,
singleton=True,
session=session,
) | python | def update_thread(cls, session, conversation, thread):
"""Update a thread.
Args:
session (requests.sessions.Session): Authenticated session.
conversation (helpscout.models.Conversation): The conversation
that the thread belongs to.
thread (helpscout.models.Thread): The thread to be updated.
Returns:
helpscout.models.Conversation: Conversation including freshly
updated thread.
"""
data = thread.to_api()
data['reload'] = True
return cls(
'/conversations/%s/threads/%d.json' % (
conversation.id, thread.id,
),
data=data,
request_type=RequestPaginator.PUT,
singleton=True,
session=session,
) | [
"def",
"update_thread",
"(",
"cls",
",",
"session",
",",
"conversation",
",",
"thread",
")",
":",
"data",
"=",
"thread",
".",
"to_api",
"(",
")",
"data",
"[",
"'reload'",
"]",
"=",
"True",
"return",
"cls",
"(",
"'/conversations/%s/threads/%d.json'",
"%",
"... | Update a thread.
Args:
session (requests.sessions.Session): Authenticated session.
conversation (helpscout.models.Conversation): The conversation
that the thread belongs to.
thread (helpscout.models.Thread): The thread to be updated.
Returns:
helpscout.models.Conversation: Conversation including freshly
updated thread. | [
"Update",
"a",
"thread",
"."
] | train | https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L284-L307 |
timdiels/pytil | pytil/logging.py | set_level | def set_level(logger, level):
'''
Temporarily change log level of logger.
Parameters
----------
logger : str or ~logging.Logger
Logger name or logger whose log level to change.
level : int
Log level to set.
Examples
--------
>>> with set_level('sqlalchemy.engine', logging.INFO):
... pass # sqlalchemy log level is set to INFO in this block
'''
if isinstance(logger, str):
logger = logging.getLogger(logger)
original = logger.level
logger.setLevel(level)
try:
yield
finally:
logger.setLevel(original) | python | def set_level(logger, level):
'''
Temporarily change log level of logger.
Parameters
----------
logger : str or ~logging.Logger
Logger name or logger whose log level to change.
level : int
Log level to set.
Examples
--------
>>> with set_level('sqlalchemy.engine', logging.INFO):
... pass # sqlalchemy log level is set to INFO in this block
'''
if isinstance(logger, str):
logger = logging.getLogger(logger)
original = logger.level
logger.setLevel(level)
try:
yield
finally:
logger.setLevel(original) | [
"def",
"set_level",
"(",
"logger",
",",
"level",
")",
":",
"if",
"isinstance",
"(",
"logger",
",",
"str",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger",
")",
"original",
"=",
"logger",
".",
"level",
"logger",
".",
"setLevel",
"(",... | Temporarily change log level of logger.
Parameters
----------
logger : str or ~logging.Logger
Logger name or logger whose log level to change.
level : int
Log level to set.
Examples
--------
>>> with set_level('sqlalchemy.engine', logging.INFO):
... pass # sqlalchemy log level is set to INFO in this block | [
"Temporarily",
"change",
"log",
"level",
"of",
"logger",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/logging.py#L26-L49 |
timdiels/pytil | pytil/logging.py | configure | def configure(log_file):
'''
Configure root logger to log INFO to stderr and DEBUG to log file.
The log file is appended to. Stderr uses a terse format, while the log file
uses a verbose unambiguous format.
Root level is set to INFO.
Parameters
----------
log_file : ~pathlib.Path
File to log to.
Returns
-------
~typing.Tuple[~logging.StreamHandler, ~logging.FileHandler]
Stderr and file handler respectively.
'''
# Note: do not use logging.basicConfig as it does not play along with caplog in testing
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
# log info to stderr in terse format
stderr_handler = logging.StreamHandler() # to stderr
stderr_handler.setLevel(logging.INFO)
stderr_handler.setFormatter(logging.Formatter('{levelname[0]}: {message}', style='{'))
root_logger.addHandler(stderr_handler)
# log debug to file in full format
file_handler = logging.FileHandler(str(log_file))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('{levelname[0]} {asctime} {name} ({module}:{lineno}):\n{message}\n', style='{'))
root_logger.addHandler(file_handler)
return stderr_handler, file_handler | python | def configure(log_file):
'''
Configure root logger to log INFO to stderr and DEBUG to log file.
The log file is appended to. Stderr uses a terse format, while the log file
uses a verbose unambiguous format.
Root level is set to INFO.
Parameters
----------
log_file : ~pathlib.Path
File to log to.
Returns
-------
~typing.Tuple[~logging.StreamHandler, ~logging.FileHandler]
Stderr and file handler respectively.
'''
# Note: do not use logging.basicConfig as it does not play along with caplog in testing
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
# log info to stderr in terse format
stderr_handler = logging.StreamHandler() # to stderr
stderr_handler.setLevel(logging.INFO)
stderr_handler.setFormatter(logging.Formatter('{levelname[0]}: {message}', style='{'))
root_logger.addHandler(stderr_handler)
# log debug to file in full format
file_handler = logging.FileHandler(str(log_file))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('{levelname[0]} {asctime} {name} ({module}:{lineno}):\n{message}\n', style='{'))
root_logger.addHandler(file_handler)
return stderr_handler, file_handler | [
"def",
"configure",
"(",
"log_file",
")",
":",
"# Note: do not use logging.basicConfig as it does not play along with caplog in testing",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"root_logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"# log in... | Configure root logger to log INFO to stderr and DEBUG to log file.
The log file is appended to. Stderr uses a terse format, while the log file
uses a verbose unambiguous format.
Root level is set to INFO.
Parameters
----------
log_file : ~pathlib.Path
File to log to.
Returns
-------
~typing.Tuple[~logging.StreamHandler, ~logging.FileHandler]
Stderr and file handler respectively. | [
"Configure",
"root",
"logger",
"to",
"log",
"INFO",
"to",
"stderr",
"and",
"DEBUG",
"to",
"log",
"file",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/logging.py#L51-L86 |
Knoema/knoema-python-driver | knoema/data_reader.py | MnemonicsDataReader.get_pandasframe | def get_pandasframe(self):
"""The method loads data from dataset"""
if self.dataset:
self._load_dimensions()
return self._get_pandasframe_one_dataset()
return self._get_pandasframe_across_datasets() | python | def get_pandasframe(self):
"""The method loads data from dataset"""
if self.dataset:
self._load_dimensions()
return self._get_pandasframe_one_dataset()
return self._get_pandasframe_across_datasets() | [
"def",
"get_pandasframe",
"(",
"self",
")",
":",
"if",
"self",
".",
"dataset",
":",
"self",
".",
"_load_dimensions",
"(",
")",
"return",
"self",
".",
"_get_pandasframe_one_dataset",
"(",
")",
"return",
"self",
".",
"_get_pandasframe_across_datasets",
"(",
")"
] | The method loads data from dataset | [
"The",
"method",
"loads",
"data",
"from",
"dataset"
] | train | https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/data_reader.py#L443-L448 |
Knoema/knoema-python-driver | knoema/data_reader.py | KnoemaSeries.add_value | def add_value(self, value, index_point):
"""The function is addeing new value to provied index. If index does not exist"""
if index_point not in self.index:
self.values.append(value)
self.index.append(index_point) | python | def add_value(self, value, index_point):
"""The function is addeing new value to provied index. If index does not exist"""
if index_point not in self.index:
self.values.append(value)
self.index.append(index_point) | [
"def",
"add_value",
"(",
"self",
",",
"value",
",",
"index_point",
")",
":",
"if",
"index_point",
"not",
"in",
"self",
".",
"index",
":",
"self",
".",
"values",
".",
"append",
"(",
"value",
")",
"self",
".",
"index",
".",
"append",
"(",
"index_point",
... | The function is addeing new value to provied index. If index does not exist | [
"The",
"function",
"is",
"addeing",
"new",
"value",
"to",
"provied",
"index",
".",
"If",
"index",
"does",
"not",
"exist"
] | train | https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/data_reader.py#L459-L463 |
Knoema/knoema-python-driver | knoema/data_reader.py | KnoemaSeries.get_pandas_series | def get_pandas_series(self):
"""The function creates pandas series based on index and values"""
return pandas.Series(self.values, self.index, name=self.name) | python | def get_pandas_series(self):
"""The function creates pandas series based on index and values"""
return pandas.Series(self.values, self.index, name=self.name) | [
"def",
"get_pandas_series",
"(",
"self",
")",
":",
"return",
"pandas",
".",
"Series",
"(",
"self",
".",
"values",
",",
"self",
".",
"index",
",",
"name",
"=",
"self",
".",
"name",
")"
] | The function creates pandas series based on index and values | [
"The",
"function",
"creates",
"pandas",
"series",
"based",
"on",
"index",
"and",
"values"
] | train | https://github.com/Knoema/knoema-python-driver/blob/e98b13db3e4df51c208c272e2977bfbe4c6e5532/knoema/data_reader.py#L465-L467 |
timdiels/pytil | pytil/path.py | remove | def remove(path, force=False):
'''
Remove file or directory (recursively), if it exists.
On NFS file systems, if a directory contains :file:`.nfs*` temporary files
(sometimes created when deleting a file), it waits for them to go away.
Parameters
----------
path : ~pathlib.Path
Path to remove.
force : bool
If True, will remove files and directories even if they are read-only
(as if first doing ``chmod -R +w``).
'''
if not path.exists():
return
else:
if force:
with suppress(FileNotFoundError):
chmod(path, 0o700, '+', recursive=True)
if path.is_dir() and not path.is_symlink():
# Note: shutil.rmtree did not handle NFS well
# First remove all files
for dir_, dirs, files in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
for file in files:
with suppress(FileNotFoundError):
(dir_ / file).unlink()
for file in dirs: # Note: os.walk treats symlinks to directories as directories
file = dir_ / file
if file.is_symlink():
with suppress(FileNotFoundError):
file.unlink()
# Now remove all dirs, being careful of any lingering .nfs* files
for dir_, _, _ in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
with suppress(FileNotFoundError):
# wait for .nfs* files
children = list(dir_.iterdir())
while children:
# only wait for nfs temporary files
if any(not child.name.startswith('.nfs') for child in children):
dir_.rmdir() # raises dir not empty
# wait and go again
time.sleep(.1)
children = list(dir_.iterdir())
# rm
dir_.rmdir()
else:
with suppress(FileNotFoundError):
path.unlink() | python | def remove(path, force=False):
'''
Remove file or directory (recursively), if it exists.
On NFS file systems, if a directory contains :file:`.nfs*` temporary files
(sometimes created when deleting a file), it waits for them to go away.
Parameters
----------
path : ~pathlib.Path
Path to remove.
force : bool
If True, will remove files and directories even if they are read-only
(as if first doing ``chmod -R +w``).
'''
if not path.exists():
return
else:
if force:
with suppress(FileNotFoundError):
chmod(path, 0o700, '+', recursive=True)
if path.is_dir() and not path.is_symlink():
# Note: shutil.rmtree did not handle NFS well
# First remove all files
for dir_, dirs, files in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
for file in files:
with suppress(FileNotFoundError):
(dir_ / file).unlink()
for file in dirs: # Note: os.walk treats symlinks to directories as directories
file = dir_ / file
if file.is_symlink():
with suppress(FileNotFoundError):
file.unlink()
# Now remove all dirs, being careful of any lingering .nfs* files
for dir_, _, _ in os.walk(str(path), topdown=False): # bottom-up walk
dir_ = Path(dir_)
with suppress(FileNotFoundError):
# wait for .nfs* files
children = list(dir_.iterdir())
while children:
# only wait for nfs temporary files
if any(not child.name.startswith('.nfs') for child in children):
dir_.rmdir() # raises dir not empty
# wait and go again
time.sleep(.1)
children = list(dir_.iterdir())
# rm
dir_.rmdir()
else:
with suppress(FileNotFoundError):
path.unlink() | [
"def",
"remove",
"(",
"path",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"path",
".",
"exists",
"(",
")",
":",
"return",
"else",
":",
"if",
"force",
":",
"with",
"suppress",
"(",
"FileNotFoundError",
")",
":",
"chmod",
"(",
"path",
",",
"0o... | Remove file or directory (recursively), if it exists.
On NFS file systems, if a directory contains :file:`.nfs*` temporary files
(sometimes created when deleting a file), it waits for them to go away.
Parameters
----------
path : ~pathlib.Path
Path to remove.
force : bool
If True, will remove files and directories even if they are read-only
(as if first doing ``chmod -R +w``). | [
"Remove",
"file",
"or",
"directory",
"(",
"recursively",
")",
"if",
"it",
"exists",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L33-L89 |
timdiels/pytil | pytil/path.py | chmod | def chmod(path, mode, operator='=', recursive=False):
'''
Change file mode bits.
When recursively chmodding a directory, executable bits in ``mode`` are
ignored when applying to a regular file. E.g. ``chmod(path, mode=0o777,
recursive=True)`` would apply ``mode=0o666`` to regular files.
Symlinks are ignored.
Parameters
----------
path : ~pathlib.Path
Path to chmod.
mode : int
Mode bits to apply, e.g. ``0o777``.
operator : str
How to apply the mode bits to the file, one of:
'='
Replace mode with given mode.
'+'
Add to current mode.
'-'
Subtract from current mode.
recursive : bool
Whether to chmod recursively.
'''
if mode > 0o777 and operator != '=':
raise ValueError('Special bits (i.e. >0o777) only supported when using "=" operator')
# first chmod path
if operator == '+':
mode_ = path.stat().st_mode | mode
elif operator == '-':
mode_ = path.stat().st_mode & ~mode
else:
mode_ = mode
if path.is_symlink():
# Do not chmod or follow symlinks
return
path.chmod(mode_)
# then its children
def chmod_children(parent, files, mode_mask, operator):
for file in files:
with suppress(FileNotFoundError):
file = parent / file
if not file.is_symlink():
chmod(file, mode & mode_mask, operator)
if recursive and path.is_dir():
for parent, dirs, files in os.walk(str(path)):
parent = Path(parent)
chmod_children(parent, dirs, 0o777777, operator)
chmod_children(parent, files, 0o777666, operator) | python | def chmod(path, mode, operator='=', recursive=False):
'''
Change file mode bits.
When recursively chmodding a directory, executable bits in ``mode`` are
ignored when applying to a regular file. E.g. ``chmod(path, mode=0o777,
recursive=True)`` would apply ``mode=0o666`` to regular files.
Symlinks are ignored.
Parameters
----------
path : ~pathlib.Path
Path to chmod.
mode : int
Mode bits to apply, e.g. ``0o777``.
operator : str
How to apply the mode bits to the file, one of:
'='
Replace mode with given mode.
'+'
Add to current mode.
'-'
Subtract from current mode.
recursive : bool
Whether to chmod recursively.
'''
if mode > 0o777 and operator != '=':
raise ValueError('Special bits (i.e. >0o777) only supported when using "=" operator')
# first chmod path
if operator == '+':
mode_ = path.stat().st_mode | mode
elif operator == '-':
mode_ = path.stat().st_mode & ~mode
else:
mode_ = mode
if path.is_symlink():
# Do not chmod or follow symlinks
return
path.chmod(mode_)
# then its children
def chmod_children(parent, files, mode_mask, operator):
for file in files:
with suppress(FileNotFoundError):
file = parent / file
if not file.is_symlink():
chmod(file, mode & mode_mask, operator)
if recursive and path.is_dir():
for parent, dirs, files in os.walk(str(path)):
parent = Path(parent)
chmod_children(parent, dirs, 0o777777, operator)
chmod_children(parent, files, 0o777666, operator) | [
"def",
"chmod",
"(",
"path",
",",
"mode",
",",
"operator",
"=",
"'='",
",",
"recursive",
"=",
"False",
")",
":",
"if",
"mode",
">",
"0o777",
"and",
"operator",
"!=",
"'='",
":",
"raise",
"ValueError",
"(",
"'Special bits (i.e. >0o777) only supported when using... | Change file mode bits.
When recursively chmodding a directory, executable bits in ``mode`` are
ignored when applying to a regular file. E.g. ``chmod(path, mode=0o777,
recursive=True)`` would apply ``mode=0o666`` to regular files.
Symlinks are ignored.
Parameters
----------
path : ~pathlib.Path
Path to chmod.
mode : int
Mode bits to apply, e.g. ``0o777``.
operator : str
How to apply the mode bits to the file, one of:
'='
Replace mode with given mode.
'+'
Add to current mode.
'-'
Subtract from current mode.
recursive : bool
Whether to chmod recursively. | [
"Change",
"file",
"mode",
"bits",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L91-L146 |
timdiels/pytil | pytil/path.py | TemporaryDirectory | def TemporaryDirectory(suffix=None, prefix=None, dir=None, on_error='ignore'): # @ReservedAssignment
'''
An extension to `tempfile.TemporaryDirectory`.
Unlike with `python:tempfile`, a :py:class:`~pathlib.Path` is yielded on
``__enter__``, not a `str`.
Parameters
----------
suffix : str
See `tempfile.TemporaryDirectory`.
prefix : str
See `tempfile.TemporaryDirectory`.
dir : ~pathlib.Path
See `tempfile.TemporaryDirectory`, but pass a :py:class:`~pathlib.Path` instead.
on_error : str
Handling of failure to delete directory (happens frequently on NFS), one of:
raise
Raise exception on failure.
ignore
Fail silently.
'''
if dir:
dir = str(dir) # @ReservedAssignment
temp_dir = tempfile.TemporaryDirectory(suffix, prefix, dir)
try:
yield Path(temp_dir.name)
finally:
try:
temp_dir.cleanup()
except OSError as ex:
print(ex)
# Suppress relevant errors if ignoring failed delete
if on_error != 'ignore' or ex.errno != errno.ENOTEMPTY:
raise | python | def TemporaryDirectory(suffix=None, prefix=None, dir=None, on_error='ignore'): # @ReservedAssignment
'''
An extension to `tempfile.TemporaryDirectory`.
Unlike with `python:tempfile`, a :py:class:`~pathlib.Path` is yielded on
``__enter__``, not a `str`.
Parameters
----------
suffix : str
See `tempfile.TemporaryDirectory`.
prefix : str
See `tempfile.TemporaryDirectory`.
dir : ~pathlib.Path
See `tempfile.TemporaryDirectory`, but pass a :py:class:`~pathlib.Path` instead.
on_error : str
Handling of failure to delete directory (happens frequently on NFS), one of:
raise
Raise exception on failure.
ignore
Fail silently.
'''
if dir:
dir = str(dir) # @ReservedAssignment
temp_dir = tempfile.TemporaryDirectory(suffix, prefix, dir)
try:
yield Path(temp_dir.name)
finally:
try:
temp_dir.cleanup()
except OSError as ex:
print(ex)
# Suppress relevant errors if ignoring failed delete
if on_error != 'ignore' or ex.errno != errno.ENOTEMPTY:
raise | [
"def",
"TemporaryDirectory",
"(",
"suffix",
"=",
"None",
",",
"prefix",
"=",
"None",
",",
"dir",
"=",
"None",
",",
"on_error",
"=",
"'ignore'",
")",
":",
"# @ReservedAssignment",
"if",
"dir",
":",
"dir",
"=",
"str",
"(",
"dir",
")",
"# @ReservedAssignment"... | An extension to `tempfile.TemporaryDirectory`.
Unlike with `python:tempfile`, a :py:class:`~pathlib.Path` is yielded on
``__enter__``, not a `str`.
Parameters
----------
suffix : str
See `tempfile.TemporaryDirectory`.
prefix : str
See `tempfile.TemporaryDirectory`.
dir : ~pathlib.Path
See `tempfile.TemporaryDirectory`, but pass a :py:class:`~pathlib.Path` instead.
on_error : str
Handling of failure to delete directory (happens frequently on NFS), one of:
raise
Raise exception on failure.
ignore
Fail silently. | [
"An",
"extension",
"to",
"tempfile",
".",
"TemporaryDirectory",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L149-L184 |
timdiels/pytil | pytil/path.py | hash | def hash(path, hash_function=hashlib.sha512): # @ReservedAssignment
'''
Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored.
'''
hash_ = hash_function()
if path.is_dir():
for directory, directories, files in os.walk(str(path), topdown=True):
# Note:
# - directory: path to current directory in walk relative to current working direcotry
# - directories/files: dir/file names
# Note: file names can contain nearly any character (even newlines).
# hash like (ignore the whitespace):
#
# h(relative-dir-path)
# h(dir_name)
# h(dir_name2)
# ,
# h(file_name) h(file_content)
# h(file_name2) h(file_content2)
# ;
# h(relative-dir-path2)
# ...
hash_.update(hash_function(str(Path(directory).relative_to(path)).encode()).digest())
for name in sorted(directories):
hash_.update(hash_function(name.encode()).digest())
hash_.update(b',')
for name in sorted(files):
hash_.update(hash_function(name.encode()).digest())
hash_.update(hash(Path(directory) / name).digest())
hash_.update(b';')
else:
with path.open('rb') as f:
while True:
buffer = f.read(65536)
if not buffer:
break
hash_.update(buffer)
return hash_ | python | def hash(path, hash_function=hashlib.sha512): # @ReservedAssignment
'''
Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored.
'''
hash_ = hash_function()
if path.is_dir():
for directory, directories, files in os.walk(str(path), topdown=True):
# Note:
# - directory: path to current directory in walk relative to current working direcotry
# - directories/files: dir/file names
# Note: file names can contain nearly any character (even newlines).
# hash like (ignore the whitespace):
#
# h(relative-dir-path)
# h(dir_name)
# h(dir_name2)
# ,
# h(file_name) h(file_content)
# h(file_name2) h(file_content2)
# ;
# h(relative-dir-path2)
# ...
hash_.update(hash_function(str(Path(directory).relative_to(path)).encode()).digest())
for name in sorted(directories):
hash_.update(hash_function(name.encode()).digest())
hash_.update(b',')
for name in sorted(files):
hash_.update(hash_function(name.encode()).digest())
hash_.update(hash(Path(directory) / name).digest())
hash_.update(b';')
else:
with path.open('rb') as f:
while True:
buffer = f.read(65536)
if not buffer:
break
hash_.update(buffer)
return hash_ | [
"def",
"hash",
"(",
"path",
",",
"hash_function",
"=",
"hashlib",
".",
"sha512",
")",
":",
"# @ReservedAssignment",
"hash_",
"=",
"hash_function",
"(",
")",
"if",
"path",
".",
"is_dir",
"(",
")",
":",
"for",
"directory",
",",
"directories",
",",
"files",
... | Hash file or directory.
Parameters
----------
path : ~pathlib.Path
File or directory to hash.
hash_function : ~typing.Callable[[], hash object]
Function which creates a hashlib hash object when called. Defaults to
``hashlib.sha512``.
Returns
-------
hash object
hashlib hash object of file/directory contents. File/directory stat data
is ignored. The directory digest covers file/directory contents and
their location relative to the directory being digested. The directory
name itself is ignored. | [
"Hash",
"file",
"or",
"directory",
"."
] | train | https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L188-L243 |
DataKitchen/DKCloudCommand | DKCloudCommand/modules/DKCloudAPIMock.py | DKCloudAPIMock.delete_orderrun | def delete_orderrun(self, orderrun_id):
"""
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if orderrun_id == 'good':
rc.set(rc.DK_SUCCESS, None, None)
else:
rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
return rc | python | def delete_orderrun(self, orderrun_id):
"""
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if orderrun_id == 'good':
rc.set(rc.DK_SUCCESS, None, None)
else:
rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
return rc | [
"def",
"delete_orderrun",
"(",
"self",
",",
"orderrun_id",
")",
":",
"rc",
"=",
"DKReturnCode",
"(",
")",
"if",
"orderrun_id",
"==",
"'good'",
":",
"rc",
".",
"set",
"(",
"rc",
".",
"DK_SUCCESS",
",",
"None",
",",
"None",
")",
"else",
":",
"rc",
".",... | :param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode | [
":",
"param",
"self",
":",
"self",
":",
"param",
"orderrun_id",
":",
"string",
";",
"good",
"return",
"a",
"good",
"value",
";",
"bad",
"return",
"a",
"bad",
"value",
":",
"rtype",
":",
"DKReturnCode"
] | train | https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudAPIMock.py#L37-L48 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.