repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
usc-isi-i2/etk | etk/knowledge_graph_schema.py | KGSchema.is_date | def is_date(v) -> (bool, date):
"""
Boolean function for checking if v is a date
Args:
v:
Returns: bool
"""
if isinstance(v, date):
return True, v
try:
reg = r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T' \
r'([0-5][0-9])(?::([0-5][0-9])(?::([0-5][0-9]))?)?)?)?)?$'
match = re.match(reg, v)
if match:
groups = match.groups()
patterns = ['%Y', '%m', '%d', '%H', '%M', '%S']
d = datetime.strptime('-'.join([x for x in groups if x]),
'-'.join([patterns[i] for i in range(len(patterns)) if groups[i]]))
return True, d
except:
pass
return False, v | python | def is_date(v) -> (bool, date):
"""
Boolean function for checking if v is a date
Args:
v:
Returns: bool
"""
if isinstance(v, date):
return True, v
try:
reg = r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T' \
r'([0-5][0-9])(?::([0-5][0-9])(?::([0-5][0-9]))?)?)?)?)?$'
match = re.match(reg, v)
if match:
groups = match.groups()
patterns = ['%Y', '%m', '%d', '%H', '%M', '%S']
d = datetime.strptime('-'.join([x for x in groups if x]),
'-'.join([patterns[i] for i in range(len(patterns)) if groups[i]]))
return True, d
except:
pass
return False, v | [
"def",
"is_date",
"(",
"v",
")",
"->",
"(",
"bool",
",",
"date",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"date",
")",
":",
"return",
"True",
",",
"v",
"try",
":",
"reg",
"=",
"r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T'",
"r'([0-... | Boolean function for checking if v is a date
Args:
v:
Returns: bool | [
"Boolean",
"function",
"for",
"checking",
"if",
"v",
"is",
"a",
"date"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L171-L194 | train | 34,200 |
usc-isi-i2/etk | etk/knowledge_graph_schema.py | KGSchema.is_location | def is_location(v) -> (bool, str):
"""
Boolean function for checking if v is a location format
Args:
v:
Returns: bool
"""
def convert2float(value):
try:
float_num = float(value)
return float_num
except ValueError:
return False
if not isinstance(v, str):
return False, v
split_lst = v.split(":")
if len(split_lst) != 5:
return False, v
if convert2float(split_lst[3]):
longitude = abs(convert2float(split_lst[3]))
if longitude > 90:
return False, v
if convert2float(split_lst[4]):
latitude = abs(convert2float(split_lst[3]))
if latitude > 180:
return False, v
return True, v | python | def is_location(v) -> (bool, str):
"""
Boolean function for checking if v is a location format
Args:
v:
Returns: bool
"""
def convert2float(value):
try:
float_num = float(value)
return float_num
except ValueError:
return False
if not isinstance(v, str):
return False, v
split_lst = v.split(":")
if len(split_lst) != 5:
return False, v
if convert2float(split_lst[3]):
longitude = abs(convert2float(split_lst[3]))
if longitude > 90:
return False, v
if convert2float(split_lst[4]):
latitude = abs(convert2float(split_lst[3]))
if latitude > 180:
return False, v
return True, v | [
"def",
"is_location",
"(",
"v",
")",
"->",
"(",
"bool",
",",
"str",
")",
":",
"def",
"convert2float",
"(",
"value",
")",
":",
"try",
":",
"float_num",
"=",
"float",
"(",
"value",
")",
"return",
"float_num",
"except",
"ValueError",
":",
"return",
"False... | Boolean function for checking if v is a location format
Args:
v:
Returns: bool | [
"Boolean",
"function",
"for",
"checking",
"if",
"v",
"is",
"a",
"location",
"format"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L197-L227 | train | 34,201 |
usc-isi-i2/etk | etk/document.py | Document.select_segments | def select_segments(self, jsonpath: str) -> List[Segment]:
"""
Dereferences the json_path inside the document and returns the selected elements.
This method should compile and cache the compiled json_path in case the same path
is reused by multiple extractors.
Args:
jsonpath (str): a valid JSON path.
Returns: A list of Segments object that contains the elements selected by the json path.
"""
path = self.etk.parse_json_path(jsonpath)
matches = path.find(self.cdr_document)
segments = list()
for a_match in matches:
this_segment = Segment(str(a_match.full_path), a_match.value, self)
segments.append(this_segment)
return segments | python | def select_segments(self, jsonpath: str) -> List[Segment]:
"""
Dereferences the json_path inside the document and returns the selected elements.
This method should compile and cache the compiled json_path in case the same path
is reused by multiple extractors.
Args:
jsonpath (str): a valid JSON path.
Returns: A list of Segments object that contains the elements selected by the json path.
"""
path = self.etk.parse_json_path(jsonpath)
matches = path.find(self.cdr_document)
segments = list()
for a_match in matches:
this_segment = Segment(str(a_match.full_path), a_match.value, self)
segments.append(this_segment)
return segments | [
"def",
"select_segments",
"(",
"self",
",",
"jsonpath",
":",
"str",
")",
"->",
"List",
"[",
"Segment",
"]",
":",
"path",
"=",
"self",
".",
"etk",
".",
"parse_json_path",
"(",
"jsonpath",
")",
"matches",
"=",
"path",
".",
"find",
"(",
"self",
".",
"cd... | Dereferences the json_path inside the document and returns the selected elements.
This method should compile and cache the compiled json_path in case the same path
is reused by multiple extractors.
Args:
jsonpath (str): a valid JSON path.
Returns: A list of Segments object that contains the elements selected by the json path. | [
"Dereferences",
"the",
"json_path",
"inside",
"the",
"document",
"and",
"returns",
"the",
"selected",
"elements",
".",
"This",
"method",
"should",
"compile",
"and",
"cache",
"the",
"compiled",
"json_path",
"in",
"case",
"the",
"same",
"path",
"is",
"reused",
"... | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/document.py#L81-L100 | train | 34,202 |
usc-isi-i2/etk | examples/elicit_gdelt_mapping/elicit_gdelt_module.py | GdeltModule.attribute_value | def attribute_value(self, doc: Document, attribute_name: str):
"""
Access data using attribute name rather than the numeric indices
Returns: the value for the attribute
"""
return doc.cdr_document.get(self.header_translation_table[attribute_name]) | python | def attribute_value(self, doc: Document, attribute_name: str):
"""
Access data using attribute name rather than the numeric indices
Returns: the value for the attribute
"""
return doc.cdr_document.get(self.header_translation_table[attribute_name]) | [
"def",
"attribute_value",
"(",
"self",
",",
"doc",
":",
"Document",
",",
"attribute_name",
":",
"str",
")",
":",
"return",
"doc",
".",
"cdr_document",
".",
"get",
"(",
"self",
".",
"header_translation_table",
"[",
"attribute_name",
"]",
")"
] | Access data using attribute name rather than the numeric indices
Returns: the value for the attribute | [
"Access",
"data",
"using",
"attribute",
"name",
"rather",
"than",
"the",
"numeric",
"indices"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/examples/elicit_gdelt_mapping/elicit_gdelt_module.py#L120-L127 | train | 34,203 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | all_cities | def all_cities():
"""
Get a list of all Backpage city names.
Returns:
list of city names as Strings
"""
cities = []
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
cities.append(row[0])
cities.sort()
return cities | python | def all_cities():
"""
Get a list of all Backpage city names.
Returns:
list of city names as Strings
"""
cities = []
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
cities.append(row[0])
cities.sort()
return cities | [
"def",
"all_cities",
"(",
")",
":",
"cities",
"=",
"[",
"]",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/CityPops.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":",
"reader",
"="... | Get a list of all Backpage city names.
Returns:
list of city names as Strings | [
"Get",
"a",
"list",
"of",
"all",
"Backpage",
"city",
"names",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L16-L30 | train | 34,204 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | city_nums | def city_nums():
"""
Get a dictionary of Backpage city names mapped to their 'legend' value.
Returns:
dictionary of Backpage city names mapped to their numeric value
"""
city_nums = {}
first_row = 1
num = 0
fname = pkg_resources.resource_filename(__name__, 'resources/Distance_Matrix.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
if first_row == 1:
first_row = 0
else:
city_nums[row[0]] = num
num = num + 1
return city_nums | python | def city_nums():
"""
Get a dictionary of Backpage city names mapped to their 'legend' value.
Returns:
dictionary of Backpage city names mapped to their numeric value
"""
city_nums = {}
first_row = 1
num = 0
fname = pkg_resources.resource_filename(__name__, 'resources/Distance_Matrix.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
if first_row == 1:
first_row = 0
else:
city_nums[row[0]] = num
num = num + 1
return city_nums | [
"def",
"city_nums",
"(",
")",
":",
"city_nums",
"=",
"{",
"}",
"first_row",
"=",
"1",
"num",
"=",
"0",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/Distance_Matrix.csv'",
")",
"with",
"open",
"(",
"fname",
",",
... | Get a dictionary of Backpage city names mapped to their 'legend' value.
Returns:
dictionary of Backpage city names mapped to their numeric value | [
"Get",
"a",
"dictionary",
"of",
"Backpage",
"city",
"names",
"mapped",
"to",
"their",
"legend",
"value",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L86-L106 | train | 34,205 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | date_clean | def date_clean(date, dashboard_style=False):
"""
Clean the numerical date value in order to present it.
Args:
boo: numerical date (20160205)
Returns:
Stringified version of the input date ("2016-02-05")
"""
if dashboard_style:
dt = str(date)
out = dt[4:6] + '/' + dt[6:] + '/' + dt[:4]
else:
dt = str(date)
out = dt[:4] + '-' + dt[4:6] + '-' + dt[6:]
return out | python | def date_clean(date, dashboard_style=False):
"""
Clean the numerical date value in order to present it.
Args:
boo: numerical date (20160205)
Returns:
Stringified version of the input date ("2016-02-05")
"""
if dashboard_style:
dt = str(date)
out = dt[4:6] + '/' + dt[6:] + '/' + dt[:4]
else:
dt = str(date)
out = dt[:4] + '-' + dt[4:6] + '-' + dt[6:]
return out | [
"def",
"date_clean",
"(",
"date",
",",
"dashboard_style",
"=",
"False",
")",
":",
"if",
"dashboard_style",
":",
"dt",
"=",
"str",
"(",
"date",
")",
"out",
"=",
"dt",
"[",
"4",
":",
"6",
"]",
"+",
"'/'",
"+",
"dt",
"[",
"6",
":",
"]",
"+",
"'/'"... | Clean the numerical date value in order to present it.
Args:
boo: numerical date (20160205)
Returns:
Stringified version of the input date ("2016-02-05") | [
"Clean",
"the",
"numerical",
"date",
"value",
"in",
"order",
"to",
"present",
"it",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L118-L133 | train | 34,206 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | date_range | def date_range(start, end, boo):
"""
Return list of dates within a specified range, inclusive.
Args:
start: earliest date to include, String ("2015-11-25")
end: latest date to include, String ("2015-12-01")
boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings
"""
earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')
latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')
num_days = (latest - earliest).days + 1
all_days = [latest - timedelta(days=x) for x in range(num_days)]
all_days.reverse()
output = []
if boo:
# Return as Integer, yyyymmdd
for d in all_days:
output.append(int(str(d).replace('-', '')[:8]))
else:
# Return as String, yyyy-mm-dd
for d in all_days:
output.append(str(d)[:10])
return output | python | def date_range(start, end, boo):
"""
Return list of dates within a specified range, inclusive.
Args:
start: earliest date to include, String ("2015-11-25")
end: latest date to include, String ("2015-12-01")
boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings
"""
earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')
latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')
num_days = (latest - earliest).days + 1
all_days = [latest - timedelta(days=x) for x in range(num_days)]
all_days.reverse()
output = []
if boo:
# Return as Integer, yyyymmdd
for d in all_days:
output.append(int(str(d).replace('-', '')[:8]))
else:
# Return as String, yyyy-mm-dd
for d in all_days:
output.append(str(d)[:10])
return output | [
"def",
"date_range",
"(",
"start",
",",
"end",
",",
"boo",
")",
":",
"earliest",
"=",
"datetime",
".",
"strptime",
"(",
"start",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
",",
"'%Y %m %d'",
")",
"latest",
"=",
"datetime",
".",
"strptime",
"(",
"en... | Return list of dates within a specified range, inclusive.
Args:
start: earliest date to include, String ("2015-11-25")
end: latest date to include, String ("2015-12-01")
boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings | [
"Return",
"list",
"of",
"dates",
"within",
"a",
"specified",
"range",
"inclusive",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L145-L172 | train | 34,207 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | ethnicities_clean | def ethnicities_clean():
""" Get dictionary of unformatted ethnicity types mapped to clean corresponding ethnicity strings """
eths_clean = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Ethnicity_Groups.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
first = []
for row in reader:
if first:
for i in range(len(first)):
if first[i] and row[i]:
eths_clean[first[i]] = row[i]
first = []
else:
first = deepcopy(row)
return eths_clean | python | def ethnicities_clean():
""" Get dictionary of unformatted ethnicity types mapped to clean corresponding ethnicity strings """
eths_clean = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Ethnicity_Groups.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
first = []
for row in reader:
if first:
for i in range(len(first)):
if first[i] and row[i]:
eths_clean[first[i]] = row[i]
first = []
else:
first = deepcopy(row)
return eths_clean | [
"def",
"ethnicities_clean",
"(",
")",
":",
"eths_clean",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/Ethnicity_Groups.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":"... | Get dictionary of unformatted ethnicity types mapped to clean corresponding ethnicity strings | [
"Get",
"dictionary",
"of",
"unformatted",
"ethnicity",
"types",
"mapped",
"to",
"clean",
"corresponding",
"ethnicity",
"strings"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L264-L280 | train | 34,208 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | formal_cities | def formal_cities(reverse=False):
"""
Get a dictionary that maps all Backpage city names to their presentable, formal names.
Returns:
dictionary of Backpage city names mapped to formal city names
"""
output = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Formal_City_Name_Pairs.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
if not reverse:
# Informal to formal
output[row[0]] = row[1]
else:
# Formal to informal
output[row[1]] = row[0]
return output | python | def formal_cities(reverse=False):
"""
Get a dictionary that maps all Backpage city names to their presentable, formal names.
Returns:
dictionary of Backpage city names mapped to formal city names
"""
output = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Formal_City_Name_Pairs.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
if not reverse:
# Informal to formal
output[row[0]] = row[1]
else:
# Formal to informal
output[row[1]] = row[0]
return output | [
"def",
"formal_cities",
"(",
"reverse",
"=",
"False",
")",
":",
"output",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/Formal_City_Name_Pairs.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")... | Get a dictionary that maps all Backpage city names to their presentable, formal names.
Returns:
dictionary of Backpage city names mapped to formal city names | [
"Get",
"a",
"dictionary",
"that",
"maps",
"all",
"Backpage",
"city",
"names",
"to",
"their",
"presentable",
"formal",
"names",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L295-L313 | train | 34,209 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | get_lats | def get_lats():
"""
Get a dictionary that maps Backpage city names to their respective latitudes.
Returns:
dictionary that maps city names (Strings) to latitudes (Floats)
"""
lats = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')
with open(fname, 'rb') as csvfile:
# Read latitude/longitude coordinates
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
word = row[0].lower()
word = re.sub(' ', '', word)
lats[word] = float(row[1])
return lats | python | def get_lats():
"""
Get a dictionary that maps Backpage city names to their respective latitudes.
Returns:
dictionary that maps city names (Strings) to latitudes (Floats)
"""
lats = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')
with open(fname, 'rb') as csvfile:
# Read latitude/longitude coordinates
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
word = row[0].lower()
word = re.sub(' ', '', word)
lats[word] = float(row[1])
return lats | [
"def",
"get_lats",
"(",
")",
":",
"lats",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/Latitudes-Longitudes.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rb'",
")",
"as",
"csvfile",
":",
"# Read... | Get a dictionary that maps Backpage city names to their respective latitudes.
Returns:
dictionary that maps city names (Strings) to latitudes (Floats) | [
"Get",
"a",
"dictionary",
"that",
"maps",
"Backpage",
"city",
"names",
"to",
"their",
"respective",
"latitudes",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L376-L393 | train | 34,210 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | get_longs | def get_longs():
"""
Get a dictionary that maps Backpage city names to their respective longitudes.
Returns:
dictionary that maps city names (Strings) to longitudes (Floats)
"""
longs = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')
with open(fname, 'rb') as csvfile:
# Read latitude/longitude coordinates
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
word = row[0].lower()
word = re.sub(' ', '', word)
longs[word] = float(row[2])
return longs | python | def get_longs():
"""
Get a dictionary that maps Backpage city names to their respective longitudes.
Returns:
dictionary that maps city names (Strings) to longitudes (Floats)
"""
longs = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Latitudes-Longitudes.csv')
with open(fname, 'rb') as csvfile:
# Read latitude/longitude coordinates
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
word = row[0].lower()
word = re.sub(' ', '', word)
longs[word] = float(row[2])
return longs | [
"def",
"get_longs",
"(",
")",
":",
"longs",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/Latitudes-Longitudes.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rb'",
")",
"as",
"csvfile",
":",
"# Re... | Get a dictionary that maps Backpage city names to their respective longitudes.
Returns:
dictionary that maps city names (Strings) to longitudes (Floats) | [
"Get",
"a",
"dictionary",
"that",
"maps",
"Backpage",
"city",
"names",
"to",
"their",
"respective",
"longitudes",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L396-L412 | train | 34,211 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | get_regions | def get_regions():
"""
Get a dictionary of state names mapped to their respective region numeric values.
New England -> 0
Mid Atlantic -> 1
Midwest East -> 2
Midwest West -> 3
Southeast -> 4
Southwest -> 5
Mountain West -> 6
Pacific -> 7
Alaska -> 8
Hawaii -> 9
Returns:
dictionary of state names mapped to region numbers
"""
new_england = ['Connecticut', 'Maine', 'Massachusetts', 'New Hampshire', 'Rhode Island', 'Vermont']
mid_atlantic = ['New Jersey', 'New York', 'Pennsylvania', 'Delaware', 'Maryland', 'District of Columbia']
midwest_east = ['Illinois', 'Indiana', 'Michigan', 'Ohio', 'Wisconsin']
midwest_west = ['Iowa', 'Kansas', 'Minnesota', 'Missouri', 'Nebraska', 'North Dakota', 'South Dakota']
southeast = ['Florida', 'Georgia', 'South Carolina', 'Virginia', 'Alabama', 'Kentucky', 'Mississippi', 'Tennessee', 'Arkansas', 'Louisiana', 'West Virginia', 'North Carolina']
southwest = ['Texas', 'Oklahoma', 'New Mexico', 'Arizona']
mtn_west = ['Montana', 'Idaho', 'Wyoming', 'Colorado', 'Nevada', 'Utah']
pacific = ['Washington', 'Oregon', 'California']
alaska = ['Alaska']
hawaii = ['Hawaii']
regions = []
regions.append(new_england)
regions.append(mid_atlantic)
regions.append(midwest_east)
regions.append(midwest_west)
regions.append(southeast)
regions.append(southwest)
regions.append(mtn_west)
regions.append(pacific)
regions.append(alaska)
regions.append(hawaii)
# Map each state to its region number
output = {}
for i in range(len(regions)):
states = regions[i]
for j in range(len(states)):
output[states[j]] = i
return output | python | def get_regions():
"""
Get a dictionary of state names mapped to their respective region numeric values.
New England -> 0
Mid Atlantic -> 1
Midwest East -> 2
Midwest West -> 3
Southeast -> 4
Southwest -> 5
Mountain West -> 6
Pacific -> 7
Alaska -> 8
Hawaii -> 9
Returns:
dictionary of state names mapped to region numbers
"""
new_england = ['Connecticut', 'Maine', 'Massachusetts', 'New Hampshire', 'Rhode Island', 'Vermont']
mid_atlantic = ['New Jersey', 'New York', 'Pennsylvania', 'Delaware', 'Maryland', 'District of Columbia']
midwest_east = ['Illinois', 'Indiana', 'Michigan', 'Ohio', 'Wisconsin']
midwest_west = ['Iowa', 'Kansas', 'Minnesota', 'Missouri', 'Nebraska', 'North Dakota', 'South Dakota']
southeast = ['Florida', 'Georgia', 'South Carolina', 'Virginia', 'Alabama', 'Kentucky', 'Mississippi', 'Tennessee', 'Arkansas', 'Louisiana', 'West Virginia', 'North Carolina']
southwest = ['Texas', 'Oklahoma', 'New Mexico', 'Arizona']
mtn_west = ['Montana', 'Idaho', 'Wyoming', 'Colorado', 'Nevada', 'Utah']
pacific = ['Washington', 'Oregon', 'California']
alaska = ['Alaska']
hawaii = ['Hawaii']
regions = []
regions.append(new_england)
regions.append(mid_atlantic)
regions.append(midwest_east)
regions.append(midwest_west)
regions.append(southeast)
regions.append(southwest)
regions.append(mtn_west)
regions.append(pacific)
regions.append(alaska)
regions.append(hawaii)
# Map each state to its region number
output = {}
for i in range(len(regions)):
states = regions[i]
for j in range(len(states)):
output[states[j]] = i
return output | [
"def",
"get_regions",
"(",
")",
":",
"new_england",
"=",
"[",
"'Connecticut'",
",",
"'Maine'",
",",
"'Massachusetts'",
",",
"'New Hampshire'",
",",
"'Rhode Island'",
",",
"'Vermont'",
"]",
"mid_atlantic",
"=",
"[",
"'New Jersey'",
",",
"'New York'",
",",
"'Penns... | Get a dictionary of state names mapped to their respective region numeric values.
New England -> 0
Mid Atlantic -> 1
Midwest East -> 2
Midwest West -> 3
Southeast -> 4
Southwest -> 5
Mountain West -> 6
Pacific -> 7
Alaska -> 8
Hawaii -> 9
Returns:
dictionary of state names mapped to region numbers | [
"Get",
"a",
"dictionary",
"of",
"state",
"names",
"mapped",
"to",
"their",
"respective",
"region",
"numeric",
"values",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L445-L492 | train | 34,212 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | populations | def populations():
"""
Get a dictionary of Backpage city names mapped to their citizen populations.
Returns:
dictionary of Backpage city names mapped to their populations (integers)
"""
city_pops = {}
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
city_pops[row[0]] = int(row[1])
return city_pops | python | def populations():
"""
Get a dictionary of Backpage city names mapped to their citizen populations.
Returns:
dictionary of Backpage city names mapped to their populations (integers)
"""
city_pops = {}
fname = pkg_resources.resource_filename(__name__, 'resources/CityPops.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
city_pops[row[0]] = int(row[1])
return city_pops | [
"def",
"populations",
"(",
")",
":",
"city_pops",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/CityPops.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":",
"reader",
... | Get a dictionary of Backpage city names mapped to their citizen populations.
Returns:
dictionary of Backpage city names mapped to their populations (integers) | [
"Get",
"a",
"dictionary",
"of",
"Backpage",
"city",
"names",
"mapped",
"to",
"their",
"citizen",
"populations",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L495-L508 | train | 34,213 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | state_names | def state_names():
""" Get the set of all US state names """
names = set()
fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
names.add(row[0])
return names | python | def state_names():
""" Get the set of all US state names """
names = set()
fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
names.add(row[0])
return names | [
"def",
"state_names",
"(",
")",
":",
"names",
"=",
"set",
"(",
")",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/States.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":",
"reader"... | Get the set of all US state names | [
"Get",
"the",
"set",
"of",
"all",
"US",
"state",
"names"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L540-L549 | train | 34,214 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | state_nums | def state_nums():
"""
Get a dictionary of state names mapped to their 'legend' value.
Returns:
dictionary of state names mapped to their numeric value
"""
st_nums = {}
fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
i = 0
for row in reader:
st_nums[row[0]] = i
i = i + 1
return st_nums | python | def state_nums():
"""
Get a dictionary of state names mapped to their 'legend' value.
Returns:
dictionary of state names mapped to their numeric value
"""
st_nums = {}
fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
i = 0
for row in reader:
st_nums[row[0]] = i
i = i + 1
return st_nums | [
"def",
"state_nums",
"(",
")",
":",
"st_nums",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/States.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":",
"reader",
"=",... | Get a dictionary of state names mapped to their 'legend' value.
Returns:
dictionary of state names mapped to their numeric value | [
"Get",
"a",
"dictionary",
"of",
"state",
"names",
"mapped",
"to",
"their",
"legend",
"value",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L552-L567 | train | 34,215 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | states | def states():
"""
Get a dictionary of Backpage city names mapped to their respective states.
Returns:
dictionary of Backpage city names mapped to their states
"""
states = {}
fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
states[row[0]] = row[1]
return states | python | def states():
"""
Get a dictionary of Backpage city names mapped to their respective states.
Returns:
dictionary of Backpage city names mapped to their states
"""
states = {}
fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
for row in reader:
states[row[0]] = row[1]
return states | [
"def",
"states",
"(",
")",
":",
"states",
"=",
"{",
"}",
"fname",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"__name__",
",",
"'resources/City_State_Pairs.csv'",
")",
"with",
"open",
"(",
"fname",
",",
"'rU'",
")",
"as",
"csvfile",
":",
"reader",
... | Get a dictionary of Backpage city names mapped to their respective states.
Returns:
dictionary of Backpage city names mapped to their states | [
"Get",
"a",
"dictionary",
"of",
"Backpage",
"city",
"names",
"mapped",
"to",
"their",
"respective",
"states",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L583-L597 | train | 34,216 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/utils.py | today | def today(boo):
"""
Return today's date as either a String or a Number, as specified by the User.
Args:
boo: if true, function returns Number (20151230); if false, returns String ("2015-12-30")
Returns:
either a Number or a string, dependent upon the user's input
"""
tod = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')
if boo:
return int(str(tod).replace('-', '')[:8])
else:
return str(tod)[:10] | python | def today(boo):
"""
Return today's date as either a String or a Number, as specified by the User.
Args:
boo: if true, function returns Number (20151230); if false, returns String ("2015-12-30")
Returns:
either a Number or a string, dependent upon the user's input
"""
tod = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')
if boo:
return int(str(tod).replace('-', '')[:8])
else:
return str(tod)[:10] | [
"def",
"today",
"(",
"boo",
")",
":",
"tod",
"=",
"datetime",
".",
"strptime",
"(",
"datetime",
".",
"today",
"(",
")",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"' '",
")",
",",
"'%Y %m %d'",
")",
"if",
... | Return today's date as either a String or a Number, as specified by the User.
Args:
boo: if true, function returns Number (20151230); if false, returns String ("2015-12-30")
Returns:
either a Number or a string, dependent upon the user's input | [
"Return",
"today",
"s",
"date",
"as",
"either",
"a",
"String",
"or",
"a",
"Number",
"as",
"specified",
"by",
"the",
"User",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/utils.py#L615-L628 | train | 34,217 |
usc-isi-i2/etk | etk/extractors/glossary_extractor.py | GlossaryExtractor._generate_ngrams_with_context | def _generate_ngrams_with_context(self, tokens: List[Token]) -> chain:
"""Generates the 1-gram to n-grams tuples of the list of tokens"""
chained_ngrams_iter = self._generate_ngrams_with_context_helper(iter(tokens), 1)
for n in range(2, self._ngrams + 1):
ngrams_iter = tee(tokens, n)
for j in range(1, n):
for k in range(j):
next(ngrams_iter[j], None)
ngrams_iter_with_context = self._generate_ngrams_with_context_helper(zip(*ngrams_iter), n)
chained_ngrams_iter = chain(chained_ngrams_iter, ngrams_iter_with_context)
return chained_ngrams_iter | python | def _generate_ngrams_with_context(self, tokens: List[Token]) -> chain:
"""Generates the 1-gram to n-grams tuples of the list of tokens"""
chained_ngrams_iter = self._generate_ngrams_with_context_helper(iter(tokens), 1)
for n in range(2, self._ngrams + 1):
ngrams_iter = tee(tokens, n)
for j in range(1, n):
for k in range(j):
next(ngrams_iter[j], None)
ngrams_iter_with_context = self._generate_ngrams_with_context_helper(zip(*ngrams_iter), n)
chained_ngrams_iter = chain(chained_ngrams_iter, ngrams_iter_with_context)
return chained_ngrams_iter | [
"def",
"_generate_ngrams_with_context",
"(",
"self",
",",
"tokens",
":",
"List",
"[",
"Token",
"]",
")",
"->",
"chain",
":",
"chained_ngrams_iter",
"=",
"self",
".",
"_generate_ngrams_with_context_helper",
"(",
"iter",
"(",
"tokens",
")",
",",
"1",
")",
"for",... | Generates the 1-gram to n-grams tuples of the list of tokens | [
"Generates",
"the",
"1",
"-",
"gram",
"to",
"n",
"-",
"grams",
"tuples",
"of",
"the",
"list",
"of",
"tokens"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/glossary_extractor.py#L86-L96 | train | 34,218 |
usc-isi-i2/etk | etk/extractors/glossary_extractor.py | GlossaryExtractor._populate_trie | def _populate_trie(self, values: List[str]) -> CharTrie:
"""Takes a list and inserts its elements into a new trie and returns it"""
if self._default_tokenizer:
return reduce(self._populate_trie_reducer, iter(values), CharTrie())
return reduce(self._populate_trie_reducer_regex, iter(values), CharTrie()) | python | def _populate_trie(self, values: List[str]) -> CharTrie:
"""Takes a list and inserts its elements into a new trie and returns it"""
if self._default_tokenizer:
return reduce(self._populate_trie_reducer, iter(values), CharTrie())
return reduce(self._populate_trie_reducer_regex, iter(values), CharTrie()) | [
"def",
"_populate_trie",
"(",
"self",
",",
"values",
":",
"List",
"[",
"str",
"]",
")",
"->",
"CharTrie",
":",
"if",
"self",
".",
"_default_tokenizer",
":",
"return",
"reduce",
"(",
"self",
".",
"_populate_trie_reducer",
",",
"iter",
"(",
"values",
")",
... | Takes a list and inserts its elements into a new trie and returns it | [
"Takes",
"a",
"list",
"and",
"inserts",
"its",
"elements",
"into",
"a",
"new",
"trie",
"and",
"returns",
"it"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/glossary_extractor.py#L98-L102 | train | 34,219 |
usc-isi-i2/etk | etk/extractors/glossary_extractor.py | GlossaryExtractor._generate_ngrams_with_context_helper | def _generate_ngrams_with_context_helper(ngrams_iter: iter, ngrams_len: int) -> map:
"""Updates the end index"""
return map(lambda term: (term[1], term[0], term[0] + ngrams_len), enumerate(ngrams_iter)) | python | def _generate_ngrams_with_context_helper(ngrams_iter: iter, ngrams_len: int) -> map:
"""Updates the end index"""
return map(lambda term: (term[1], term[0], term[0] + ngrams_len), enumerate(ngrams_iter)) | [
"def",
"_generate_ngrams_with_context_helper",
"(",
"ngrams_iter",
":",
"iter",
",",
"ngrams_len",
":",
"int",
")",
"->",
"map",
":",
"return",
"map",
"(",
"lambda",
"term",
":",
"(",
"term",
"[",
"1",
"]",
",",
"term",
"[",
"0",
"]",
",",
"term",
"[",... | Updates the end index | [
"Updates",
"the",
"end",
"index"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/glossary_extractor.py#L137-L139 | train | 34,220 |
usc-isi-i2/etk | etk/extractors/glossary_extractor.py | GlossaryExtractor._combine_ngrams | def _combine_ngrams(ngrams, joiner) -> str:
"""Construct keys for checking in trie"""
if isinstance(ngrams, str):
return ngrams
else:
combined = joiner.join(ngrams)
return combined | python | def _combine_ngrams(ngrams, joiner) -> str:
"""Construct keys for checking in trie"""
if isinstance(ngrams, str):
return ngrams
else:
combined = joiner.join(ngrams)
return combined | [
"def",
"_combine_ngrams",
"(",
"ngrams",
",",
"joiner",
")",
"->",
"str",
":",
"if",
"isinstance",
"(",
"ngrams",
",",
"str",
")",
":",
"return",
"ngrams",
"else",
":",
"combined",
"=",
"joiner",
".",
"join",
"(",
"ngrams",
")",
"return",
"combined"
] | Construct keys for checking in trie | [
"Construct",
"keys",
"for",
"checking",
"in",
"trie"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/glossary_extractor.py#L142-L148 | train | 34,221 |
usc-isi-i2/etk | etk/extractors/spacy_rule_extractor.py | SpacyRuleExtractor.extract | def extract(self, text: str) -> List[Extraction]:
"""
Extract from text
Args:
text (str): input str to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches.
"""
doc = self._tokenizer.tokenize_to_spacy_doc(text)
self._load_matcher()
matches = [x for x in self._matcher(doc) if x[1] != x[2]]
pos_filtered_matches = []
neg_filtered_matches = []
for idx, start, end in matches:
span_doc = self._tokenizer.tokenize_to_spacy_doc(doc[start:end].text)
this_spacy_rule = self._matcher.get(idx)
relations = self._find_relation(span_doc, this_spacy_rule)
rule_id, _ = self._hash_map[idx]
this_rule = self._rule_lst[rule_id]
if self._filter_match(doc[start:end], relations, this_rule.patterns):
value = self._form_output(doc[start:end], this_rule.output_format, relations, this_rule.patterns)
if this_rule.polarity:
pos_filtered_matches.append((start, end, value, rule_id, relations))
else:
neg_filtered_matches.append((start, end, value, rule_id, relations))
return_lst = []
if pos_filtered_matches:
longest_lst_pos = self._get_longest(pos_filtered_matches)
if neg_filtered_matches:
longest_lst_neg = self._get_longest(neg_filtered_matches)
return_lst = self._reject_neg(longest_lst_pos, longest_lst_neg)
else:
return_lst = longest_lst_pos
extractions = []
for (start, end, value, rule_id, relation) in return_lst:
this_extraction = Extraction(value=value,
extractor_name=self.name,
start_token=start,
end_token=end,
start_char=doc[start].idx,
end_char=doc[end-1].idx+len(doc[end-1]),
rule_id=rule_id.split("rule_id##")[0],
match_mapping=relation)
extractions.append(this_extraction)
return extractions | python | def extract(self, text: str) -> List[Extraction]:
"""
Extract from text
Args:
text (str): input str to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches.
"""
doc = self._tokenizer.tokenize_to_spacy_doc(text)
self._load_matcher()
matches = [x for x in self._matcher(doc) if x[1] != x[2]]
pos_filtered_matches = []
neg_filtered_matches = []
for idx, start, end in matches:
span_doc = self._tokenizer.tokenize_to_spacy_doc(doc[start:end].text)
this_spacy_rule = self._matcher.get(idx)
relations = self._find_relation(span_doc, this_spacy_rule)
rule_id, _ = self._hash_map[idx]
this_rule = self._rule_lst[rule_id]
if self._filter_match(doc[start:end], relations, this_rule.patterns):
value = self._form_output(doc[start:end], this_rule.output_format, relations, this_rule.patterns)
if this_rule.polarity:
pos_filtered_matches.append((start, end, value, rule_id, relations))
else:
neg_filtered_matches.append((start, end, value, rule_id, relations))
return_lst = []
if pos_filtered_matches:
longest_lst_pos = self._get_longest(pos_filtered_matches)
if neg_filtered_matches:
longest_lst_neg = self._get_longest(neg_filtered_matches)
return_lst = self._reject_neg(longest_lst_pos, longest_lst_neg)
else:
return_lst = longest_lst_pos
extractions = []
for (start, end, value, rule_id, relation) in return_lst:
this_extraction = Extraction(value=value,
extractor_name=self.name,
start_token=start,
end_token=end,
start_char=doc[start].idx,
end_char=doc[end-1].idx+len(doc[end-1]),
rule_id=rule_id.split("rule_id##")[0],
match_mapping=relation)
extractions.append(this_extraction)
return extractions | [
"def",
"extract",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"doc",
"=",
"self",
".",
"_tokenizer",
".",
"tokenize_to_spacy_doc",
"(",
"text",
")",
"self",
".",
"_load_matcher",
"(",
")",
"matches",
"=",
"[",
... | Extract from text
Args:
text (str): input str to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches. | [
"Extract",
"from",
"text"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_rule_extractor.py#L134-L185 | train | 34,222 |
usc-isi-i2/etk | etk/extractors/spacy_rule_extractor.py | SpacyRuleExtractor._load_matcher | def _load_matcher(self) -> None:
"""
Add constructed spacy rule to Matcher
"""
for id_key in self._rule_lst:
if self._rule_lst[id_key].active:
pattern_lst = [a_pattern.spacy_token_lst for a_pattern in self._rule_lst[id_key].patterns]
for spacy_rule_id, spacy_rule in enumerate(itertools.product(*pattern_lst)):
self._matcher.add(self._construct_key(id_key, spacy_rule_id), None, list(spacy_rule)) | python | def _load_matcher(self) -> None:
"""
Add constructed spacy rule to Matcher
"""
for id_key in self._rule_lst:
if self._rule_lst[id_key].active:
pattern_lst = [a_pattern.spacy_token_lst for a_pattern in self._rule_lst[id_key].patterns]
for spacy_rule_id, spacy_rule in enumerate(itertools.product(*pattern_lst)):
self._matcher.add(self._construct_key(id_key, spacy_rule_id), None, list(spacy_rule)) | [
"def",
"_load_matcher",
"(",
"self",
")",
"->",
"None",
":",
"for",
"id_key",
"in",
"self",
".",
"_rule_lst",
":",
"if",
"self",
".",
"_rule_lst",
"[",
"id_key",
"]",
".",
"active",
":",
"pattern_lst",
"=",
"[",
"a_pattern",
".",
"spacy_token_lst",
"for"... | Add constructed spacy rule to Matcher | [
"Add",
"constructed",
"spacy",
"rule",
"to",
"Matcher"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_rule_extractor.py#L187-L196 | train | 34,223 |
usc-isi-i2/etk | etk/utilities.py | Utility.make_json_serializable | def make_json_serializable(doc: Dict):
"""
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None
"""
for k, v in doc.items():
if isinstance(v, datetime.date):
doc[k] = v.strftime("%Y-%m-%d")
elif isinstance(v, datetime.datetime):
doc[k] = v.isoformat() | python | def make_json_serializable(doc: Dict):
"""
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None
"""
for k, v in doc.items():
if isinstance(v, datetime.date):
doc[k] = v.strftime("%Y-%m-%d")
elif isinstance(v, datetime.datetime):
doc[k] = v.isoformat() | [
"def",
"make_json_serializable",
"(",
"doc",
":",
"Dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"doc",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"datetime",
".",
"date",
")",
":",
"doc",
"[",
"k",
"]",
"=",
"v",
".",
"strf... | Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None | [
"Make",
"the",
"document",
"JSON",
"serializable",
".",
"This",
"is",
"a",
"poor",
"man",
"s",
"implementation",
"that",
"handles",
"dates",
"and",
"nothing",
"else",
".",
"This",
"method",
"modifies",
"the",
"given",
"document",
"in",
"place",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/utilities.py#L11-L26 | train | 34,224 |
usc-isi-i2/etk | etk/black_list_filter.py | BlackListFilter.filter | def filter(self, extractions, case_sensitive=False) -> List[Extraction]:
"""filters out the extraction if extracted value is in the blacklist"""
filtered_extractions = []
if not isinstance(extractions, list):
extractions = [extractions]
for extraction in extractions:
if case_sensitive:
try:
if extraction.value.lower() not in self.black_list:
filtered_extractions.append(extraction)
except Exception as e:
print('Error in BlackListFilter: {} while filtering out extraction: {}'.format(e, extraction.value))
# most likely it s a unicode character which is messing things up, return it
filtered_extractions.append(extraction)
else:
if extraction.value not in self.black_list:
filtered_extractions.append(extraction)
return filtered_extractions | python | def filter(self, extractions, case_sensitive=False) -> List[Extraction]:
"""filters out the extraction if extracted value is in the blacklist"""
filtered_extractions = []
if not isinstance(extractions, list):
extractions = [extractions]
for extraction in extractions:
if case_sensitive:
try:
if extraction.value.lower() not in self.black_list:
filtered_extractions.append(extraction)
except Exception as e:
print('Error in BlackListFilter: {} while filtering out extraction: {}'.format(e, extraction.value))
# most likely it s a unicode character which is messing things up, return it
filtered_extractions.append(extraction)
else:
if extraction.value not in self.black_list:
filtered_extractions.append(extraction)
return filtered_extractions | [
"def",
"filter",
"(",
"self",
",",
"extractions",
",",
"case_sensitive",
"=",
"False",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"filtered_extractions",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"extractions",
",",
"list",
")",
":",
"extractions... | filters out the extraction if extracted value is in the blacklist | [
"filters",
"out",
"the",
"extraction",
"if",
"extracted",
"value",
"is",
"in",
"the",
"blacklist"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/black_list_filter.py#L9-L27 | train | 34,225 |
usc-isi-i2/etk | etk/ontology_api.py | rdf_generation | def rdf_generation(kg_object) -> str:
"""
Convert input knowledge graph object into n-triples RDF
:param kg_object: str, dict, or json object
:return: n-triples RDF in str
"""
import json
if isinstance(kg_object, dict):
kg_object = json.dumps(kg_object)
g = Graph()
g.parse(data=kg_object, format='json-ld')
return g.serialize(format='nt').decode('utf-8') | python | def rdf_generation(kg_object) -> str:
"""
Convert input knowledge graph object into n-triples RDF
:param kg_object: str, dict, or json object
:return: n-triples RDF in str
"""
import json
if isinstance(kg_object, dict):
kg_object = json.dumps(kg_object)
g = Graph()
g.parse(data=kg_object, format='json-ld')
return g.serialize(format='nt').decode('utf-8') | [
"def",
"rdf_generation",
"(",
"kg_object",
")",
"->",
"str",
":",
"import",
"json",
"if",
"isinstance",
"(",
"kg_object",
",",
"dict",
")",
":",
"kg_object",
"=",
"json",
".",
"dumps",
"(",
"kg_object",
")",
"g",
"=",
"Graph",
"(",
")",
"g",
".",
"pa... | Convert input knowledge graph object into n-triples RDF
:param kg_object: str, dict, or json object
:return: n-triples RDF in str | [
"Convert",
"input",
"knowledge",
"graph",
"object",
"into",
"n",
"-",
"triples",
"RDF"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_api.py#L829-L842 | train | 34,226 |
usc-isi-i2/etk | etk/ontology_api.py | OntologyDatatypeProperty.is_legal_object | def is_legal_object(self, data_type: str) -> bool:
"""
Do data_type validation according to the rules of the XML xsd schema.
Args:
data_type:
Returns:
"""
data_type = str(data_type)
ranges = self.included_ranges()
return not ranges or data_type in ranges or self.super_properties() and \
any(x.is_legal_object(data_type) for x in self.super_properties()) | python | def is_legal_object(self, data_type: str) -> bool:
"""
Do data_type validation according to the rules of the XML xsd schema.
Args:
data_type:
Returns:
"""
data_type = str(data_type)
ranges = self.included_ranges()
return not ranges or data_type in ranges or self.super_properties() and \
any(x.is_legal_object(data_type) for x in self.super_properties()) | [
"def",
"is_legal_object",
"(",
"self",
",",
"data_type",
":",
"str",
")",
"->",
"bool",
":",
"data_type",
"=",
"str",
"(",
"data_type",
")",
"ranges",
"=",
"self",
".",
"included_ranges",
"(",
")",
"return",
"not",
"ranges",
"or",
"data_type",
"in",
"ran... | Do data_type validation according to the rules of the XML xsd schema.
Args:
data_type:
Returns: | [
"Do",
"data_type",
"validation",
"according",
"to",
"the",
"rules",
"of",
"the",
"XML",
"xsd",
"schema",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_api.py#L242-L255 | train | 34,227 |
usc-isi-i2/etk | etk/ontology_api.py | Ontology.get_entity | def get_entity(self, uri: str) -> OntologyClass:
"""
Find an ontology entity based on URI
:param uri: URIRef or str
:return: the OntologyEntity having the specified uri, or None
"""
return self.entities.get(str(uri), None) | python | def get_entity(self, uri: str) -> OntologyClass:
"""
Find an ontology entity based on URI
:param uri: URIRef or str
:return: the OntologyEntity having the specified uri, or None
"""
return self.entities.get(str(uri), None) | [
"def",
"get_entity",
"(",
"self",
",",
"uri",
":",
"str",
")",
"->",
"OntologyClass",
":",
"return",
"self",
".",
"entities",
".",
"get",
"(",
"str",
"(",
"uri",
")",
",",
"None",
")"
] | Find an ontology entity based on URI
:param uri: URIRef or str
:return: the OntologyEntity having the specified uri, or None | [
"Find",
"an",
"ontology",
"entity",
"based",
"on",
"URI"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_api.py#L599-L606 | train | 34,228 |
usc-isi-i2/etk | etk/ontology_api.py | Ontology.merge_with_master_config | def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:
"""
Merge current ontology with input master config.
:param config: master config, should be str or dict
:param defaults: a dict that sets default color and icon
:param delete_orphan_fields: if a property doesn't exist in the ontology then delete it
:return: merged master config in dict
"""
if isinstance(config, str):
import json
config = json.loads(config)
properties = self.all_properties()
config['fields'] = config.get('fields', dict())
fields = config['fields']
d_color = defaults.get('color', 'white')
d_icon = defaults.get('icon', 'icons:default')
if delete_orphan_fields:
exist = {p.name() for p in properties}
unexist = set(fields.keys()) - exist
for name in unexist:
del fields[name]
for p in properties:
field = fields.get(p.name(), {'show_in_search': False,
'combine_fields': False,
'number_of_rules': 0,
'glossaries': [],
'use_in_network_search': False,
'case_sensitive': False,
'show_as_link': 'text',
'blacklists': [],
'show_in_result': 'no',
'rule_extractor_enabled': False,
'search_importance': 1,
'group_name': '',
'show_in_facets': False,
'predefined_extractor': 'none',
'rule_extraction_target': ''})
config['fields'][p.name()] = field
field['screen_label'] = ' '.join(p.label())
field['description'] = '\n'.join(p.definition())
field['name'] = p.name()
# color
if 'color' not in field:
color = self.__merge_close_ancestor_color(p, fields, attr='color')
field['color'] = color if color else d_color
# icon
if 'icon' not in field:
icon = self.__merge_close_ancestor_color(p, fields, attr='icon')
field['icon'] = icon if icon else d_icon
# type
if isinstance(p, OntologyObjectProperty):
field['type'] = 'kg_id'
else:
try:
field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))
except StopIteration:
field['type'] = None
return config | python | def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:
"""
Merge current ontology with input master config.
:param config: master config, should be str or dict
:param defaults: a dict that sets default color and icon
:param delete_orphan_fields: if a property doesn't exist in the ontology then delete it
:return: merged master config in dict
"""
if isinstance(config, str):
import json
config = json.loads(config)
properties = self.all_properties()
config['fields'] = config.get('fields', dict())
fields = config['fields']
d_color = defaults.get('color', 'white')
d_icon = defaults.get('icon', 'icons:default')
if delete_orphan_fields:
exist = {p.name() for p in properties}
unexist = set(fields.keys()) - exist
for name in unexist:
del fields[name]
for p in properties:
field = fields.get(p.name(), {'show_in_search': False,
'combine_fields': False,
'number_of_rules': 0,
'glossaries': [],
'use_in_network_search': False,
'case_sensitive': False,
'show_as_link': 'text',
'blacklists': [],
'show_in_result': 'no',
'rule_extractor_enabled': False,
'search_importance': 1,
'group_name': '',
'show_in_facets': False,
'predefined_extractor': 'none',
'rule_extraction_target': ''})
config['fields'][p.name()] = field
field['screen_label'] = ' '.join(p.label())
field['description'] = '\n'.join(p.definition())
field['name'] = p.name()
# color
if 'color' not in field:
color = self.__merge_close_ancestor_color(p, fields, attr='color')
field['color'] = color if color else d_color
# icon
if 'icon' not in field:
icon = self.__merge_close_ancestor_color(p, fields, attr='icon')
field['icon'] = icon if icon else d_icon
# type
if isinstance(p, OntologyObjectProperty):
field['type'] = 'kg_id'
else:
try:
field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))
except StopIteration:
field['type'] = None
return config | [
"def",
"merge_with_master_config",
"(",
"self",
",",
"config",
",",
"defaults",
"=",
"{",
"}",
",",
"delete_orphan_fields",
"=",
"False",
")",
"->",
"dict",
":",
"if",
"isinstance",
"(",
"config",
",",
"str",
")",
":",
"import",
"json",
"config",
"=",
"j... | Merge current ontology with input master config.
:param config: master config, should be str or dict
:param defaults: a dict that sets default color and icon
:param delete_orphan_fields: if a property doesn't exist in the ontology then delete it
:return: merged master config in dict | [
"Merge",
"current",
"ontology",
"with",
"input",
"master",
"config",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/ontology_api.py#L618-L680 | train | 34,229 |
usc-isi-i2/etk | etk/data_extractors/htiExtractors/misc.py | phone_text_subs | def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
Magnitude = {
'thousand': 000,
'million': 000000,
}
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output | python | def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
Magnitude = {
'thousand': 000,
'million': 000000,
}
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output | [
"def",
"phone_text_subs",
"(",
")",
":",
"Small",
"=",
"{",
"'zero'",
":",
"0",
",",
"'zer0'",
":",
"0",
",",
"'one'",
":",
"1",
",",
"'two'",
":",
"2",
",",
"'three'",
":",
"3",
",",
"'four'",
":",
"4",
",",
"'fuor'",
":",
"4",
",",
"'five'",
... | Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers | [
"Gets",
"a",
"dictionary",
"of",
"dictionaries",
"that",
"each",
"contain",
"alphabetic",
"number",
"manifestations",
"mapped",
"to",
"their",
"actual",
"Number",
"value",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/misc.py#L26-L89 | train | 34,230 |
usc-isi-i2/etk | etk/extractors/sentence_extractor.py | SentenceExtractor.extract | def extract(self, text: str) -> List[Extraction]:
"""
Splits text by sentences.
Args:
text (str): Input text to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches.
"""
doc = self._parser(text)
extractions = list()
for sent in doc.sents:
this_extraction = Extraction(value=sent.text,
extractor_name=self.name,
start_token=sent[0],
end_token=sent[-1],
start_char=sent.text[0],
end_char=sent.text[-1])
extractions.append(this_extraction)
return extractions | python | def extract(self, text: str) -> List[Extraction]:
"""
Splits text by sentences.
Args:
text (str): Input text to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches.
"""
doc = self._parser(text)
extractions = list()
for sent in doc.sents:
this_extraction = Extraction(value=sent.text,
extractor_name=self.name,
start_token=sent[0],
end_token=sent[-1],
start_char=sent.text[0],
end_char=sent.text[-1])
extractions.append(this_extraction)
return extractions | [
"def",
"extract",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"doc",
"=",
"self",
".",
"_parser",
"(",
"text",
")",
"extractions",
"=",
"list",
"(",
")",
"for",
"sent",
"in",
"doc",
".",
"sents",
":",
"th... | Splits text by sentences.
Args:
text (str): Input text to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches. | [
"Splits",
"text",
"by",
"sentences",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/sentence_extractor.py#L57-L80 | train | 34,231 |
usc-isi-i2/etk | etk/extractors/table_extractor.py | TableExtraction.gen_html | def gen_html(row_list):
""" Return html table string from a list of data rows """
table = "<table>"
for row in row_list:
table += "<tr>"
cells = row["cells"]
for c in cells:
t = c['cell'] if c else ''
table += t
table += "</tr>"
table += "</table>"
return table | python | def gen_html(row_list):
""" Return html table string from a list of data rows """
table = "<table>"
for row in row_list:
table += "<tr>"
cells = row["cells"]
for c in cells:
t = c['cell'] if c else ''
table += t
table += "</tr>"
table += "</table>"
return table | [
"def",
"gen_html",
"(",
"row_list",
")",
":",
"table",
"=",
"\"<table>\"",
"for",
"row",
"in",
"row_list",
":",
"table",
"+=",
"\"<tr>\"",
"cells",
"=",
"row",
"[",
"\"cells\"",
"]",
"for",
"c",
"in",
"cells",
":",
"t",
"=",
"c",
"[",
"'cell'",
"]",
... | Return html table string from a list of data rows | [
"Return",
"html",
"table",
"string",
"from",
"a",
"list",
"of",
"data",
"rows"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/table_extractor.py#L463-L474 | train | 34,232 |
usc-isi-i2/etk | etk/extractors/html_content_extractor.py | HTMLContentExtractor.extract | def extract(self, html_text: str, strategy: Strategy=Strategy.ALL_TEXT) \
-> List[Extraction]:
"""
Extracts text from an HTML page using a variety of strategies
Args:
html_text (str): html page in string
strategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of
Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED
Returns:
List[Extraction]: typically a singleton list with the extracted text
"""
# Empty/None input short-circuits to an empty result (final else branch).
if html_text:
if strategy == Strategy.ALL_TEXT:
soup = BeautifulSoup(html_text, 'html.parser')
# All text nodes in the document, visible or not.
texts = soup.findAll(text=True)
# _tag_visible (defined elsewhere) presumably drops script/style/comment nodes -- TODO confirm.
visible_texts = filter(self._tag_visible, texts)
all_text = u" ".join(t.strip() for t in visible_texts)
return [Extraction(all_text, self.name)]
else:
# NOTE(review): recallPriority appears to trade precision for recall in the
# readability Document -- confirm against the readability fork in use.
relax = strategy == Strategy.MAIN_CONTENT_RELAXED
readable = Document(html_text, recallPriority=relax).summary(html_partial=False)
# Re-parse the cleaned article HTML and join its text fragments.
clean_text = BeautifulSoup(readable.encode('utf-8'), 'lxml').strings
readability_text = ' '.join(clean_text)
return [Extraction(readability_text, self.name)]
else:
return [] | python | def extract(self, html_text: str, strategy: Strategy=Strategy.ALL_TEXT) \
-> List[Extraction]:
"""
Extracts text from an HTML page using a variety of strategies
Args:
html_text (str): html page in string
strategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of
Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED
Returns:
List[Extraction]: typically a singleton list with the extracted text
"""
if html_text:
if strategy == Strategy.ALL_TEXT:
soup = BeautifulSoup(html_text, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(self._tag_visible, texts)
all_text = u" ".join(t.strip() for t in visible_texts)
return [Extraction(all_text, self.name)]
else:
relax = strategy == Strategy.MAIN_CONTENT_RELAXED
readable = Document(html_text, recallPriority=relax).summary(html_partial=False)
clean_text = BeautifulSoup(readable.encode('utf-8'), 'lxml').strings
readability_text = ' '.join(clean_text)
return [Extraction(readability_text, self.name)]
else:
return [] | [
"def",
"extract",
"(",
"self",
",",
"html_text",
":",
"str",
",",
"strategy",
":",
"Strategy",
"=",
"Strategy",
".",
"ALL_TEXT",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"if",
"html_text",
":",
"if",
"strategy",
"==",
"Strategy",
".",
"ALL_TEXT",
... | Extracts text from an HTML page using a variety of strategies
Args:
html_text (str): html page in string
strategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of
Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED
Returns:
List[Extraction]: typically a singleton list with the extracted text | [
"Extracts",
"text",
"from",
"an",
"HTML",
"page",
"using",
"a",
"variety",
"of",
"strategies"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/html_content_extractor.py#L41-L69 | train | 34,233 |
def clean_part_ethn(body):
    """Normalize a text blob so ethnicity terms can be parsed reliably.

    Strips phrases that would trigger false positives, folds Indian regional
    demonyms into 'indian', rewrites a handful of aliased or letter-spaced
    spellings, and finally applies the module-level ``eth_subs`` substitution
    table (e.g. "china" -> "chinese").  Returns the translated string.
    """
    # Phrases that would otherwise look like ethnicity mentions.
    false_positive_patterns = (
        r'black ?or ?african', r'african ?or ?black', r'no ?black',
        r'no ?african', r'no ?aa', r'white ?men', r'white ?gentlemen',
        r'no ?spanish', r'speak ?spanish', r'black ?(guys|men|hair|client)',
        r'dark ?hair', r'(dark ?)?brown ?hair', r'white ?tie',
    )
    # Regional/ethnic group names folded into the umbrella term 'indian'.
    indian_terms = (
        'awadhi', 'badhi', 'bhutia', 'garhwali', 'halbi', 'kamboj',
        'bhattarai', 'bhotiya', 'pardeshi', 'bengali', 'madra', 'tamil',
        'rajasthani', 'adivasi',
    )
    for pattern in false_positive_patterns:
        body = re.sub(pattern, '', body)
    for term in indian_terms:
        body = body.replace(term, 'indian')
    # Aliases and letter-spaced spellings.
    body = re.sub(r'hong ?kong', 'chinese', body)
    body = re.sub(r'snow ?bunn(y|ies)', 'white', body)
    body = re.sub(r'a\ss\si\sa\sn', 'asian', body)
    body = re.sub(r'l\sa\st\si\sn\sa', 'latina', body)
    # Standardize remaining variants via the shared substitution table.
    for variant, canonical in eth_subs.items():
        body = body.replace(variant, canonical)
    body = re.sub(r' +', ' ', body)
    return body
"""
Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese")
"""
# patterns that can create false positive situations
patterns_to_remove = [r'black ?or ?african', r'african ?or ?black', r'no ?black', r'no ?african', r'no ?aa', r'white ?men',
r'white ?gentlemen', r'no ?spanish', r'speak ?spanish', r'black ?(guys|men|hair|client)', r'dark ?hair',
r'(dark ?)?brown ?hair', r'white ?tie']
# indian states to convert the term 'indian'
indian_states = ['awadhi', 'badhi', 'bhutia', 'garhwali', 'halbi', 'kamboj', 'bhattarai', 'bhotiya', 'pardeshi',
'bengali', 'madra', 'tamil', 'rajasthani', 'adivasi']
for p in patterns_to_remove:
body = re.sub(p, '', body)
for i in indian_states:
body = body.replace(i, 'indian')
# regex substitutions
body = re.sub(r'hong ?kong', 'chinese', body)
body = re.sub(r'snow ?bunn(y|ies)', 'white', body)
body = re.sub(r'a\ss\si\sa\sn', 'asian', body)
body = re.sub(r'l\sa\st\si\sn\sa', 'latina', body)
# convert many ethnicity variations into standardized ones (e.g. china -> chinese)
for sub in eth_subs:
body = body.replace(sub, eth_subs[sub])
body = re.sub(r' +', ' ', body)
return body | [
"def",
"clean_part_ethn",
"(",
"body",
")",
":",
"# patterns that can create false positive situations",
"patterns_to_remove",
"=",
"[",
"r'black ?or ?african'",
",",
"r'african ?or ?black'",
",",
"r'no ?black'",
",",
"r'no ?african'",
",",
"r'no ?aa'",
",",
"r'white ?men'",
... | Prepare a string to be parsed for ethnicities.
Returns a "translated" string (e.g. all instances of "china" converted to "chinese") | [
"Prepare",
"a",
"string",
"to",
"be",
"parsed",
"for",
"ethnicities",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/parser_helpers.py#L70-L103 | train | 34,234 |
usc-isi-i2/etk | etk/knowledge_graph.py | KnowledgeGraph._add_doc_value | def _add_doc_value(self, field_name: str, jsonpath: str) -> None:
"""
Add a value to knowledge graph by giving a jsonpath
Args:
field_name: str, the KG field to store the extracted value(s) under
jsonpath: str, JSON path evaluated against the source document
Returns: None
Raises:
KgValueError: if any matched value is rejected by the field's schema.
"""
path = self.origin_doc.etk.parse_json_path(jsonpath)
matches = path.find(self.origin_doc.value)
all_valid = True
invalid = []
for a_match in matches:
# If the value is the empty string (or otherwise falsy), we treat it as None and skip it.
if a_match.value:
# Record the concrete JSON path of this match as provenance.
valid = self._add_value(field_name, a_match.value, provenance_path=str(a_match.full_path))
if not valid:
invalid.append(field_name + ":" + str(a_match.value))
all_valid = all_valid and valid
if not all_valid:
raise KgValueError("Some kg value type invalid according to schema: " + json.dumps(invalid)) | python | def _add_doc_value(self, field_name: str, jsonpath: str) -> None:
"""
Add a value to knowledge graph by giving a jsonpath
Args:
field_name: str
jsonpath: str
Returns:
"""
path = self.origin_doc.etk.parse_json_path(jsonpath)
matches = path.find(self.origin_doc.value)
all_valid = True
invalid = []
for a_match in matches:
# If the value is the empty string, we treat is a None.
if a_match.value:
valid = self._add_value(field_name, a_match.value, provenance_path=str(a_match.full_path))
if not valid:
invalid.append(field_name + ":" + str(a_match.value))
all_valid = all_valid and valid
if not all_valid:
raise KgValueError("Some kg value type invalid according to schema: " + json.dumps(invalid)) | [
"def",
"_add_doc_value",
"(",
"self",
",",
"field_name",
":",
"str",
",",
"jsonpath",
":",
"str",
")",
"->",
"None",
":",
"path",
"=",
"self",
".",
"origin_doc",
".",
"etk",
".",
"parse_json_path",
"(",
"jsonpath",
")",
"matches",
"=",
"path",
".",
"fi... | Add a value to knowledge graph by giving a jsonpath
Args:
field_name: str
jsonpath: str
Returns: | [
"Add",
"a",
"value",
"to",
"knowledge",
"graph",
"by",
"giving",
"a",
"jsonpath"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph.py#L109-L132 | train | 34,235 |
usc-isi-i2/etk | etk/knowledge_graph.py | KnowledgeGraph.add_value | def add_value(self, field_name: str, value: object = None, json_path: str = None,
json_path_extraction: str = None, keep_empty: bool = False) -> None:
"""
Add a value to knowledge graph.
Input can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is
called.
If the input is a value, then it is handled
Args:
field_name: str, the field name in the knowledge graph
value: the value to be added to the knowledge graph
json_path: str, if json_path is provided, then get the value at this path in the doc
json_path_extraction: str, provenance path recorded for extracted values
keep_empty: bool, when True empty/whitespace-only strings are kept instead of discarded
Returns: None
"""
def validate(v):
# Accept any non-None value; empty/whitespace-only strings are rejected unless keep_empty is set.
if v is not None:
if isinstance(v, str):
if v.strip() != "" or keep_empty:
return True
else:
return False
else:
return True
return False
self.validate_field(field_name)
if field_name not in self._kg:
self._kg[field_name] = []
if json_path:
# Pull value(s) out of the source document at json_path.
self._add_doc_value(field_name, json_path)
if validate(value):
# Normalize to a list so single values and lists share one code path.
if not isinstance(value, list):
value = [value]
all_valid = True
invalid = []
for a_value in value:
if isinstance(a_value, Extraction):
valid = self._add_single_value(field_name, a_value.value, provenance_path=str(json_path_extraction),
keep_empty=keep_empty)
elif isinstance(a_value, Segment):
valid = self._add_single_value(field_name, a_value.value, provenance_path=a_value.json_path,
keep_empty=keep_empty)
else:
# Plain constants record a "constant" reference type in provenance.
valid = self._add_single_value(field_name, a_value, provenance_path=json_path_extraction,
reference_type="constant", keep_empty=keep_empty)
all_valid = all_valid and valid
if not valid:
invalid.append(field_name + ":" + str(a_value))
if not all_valid:
# NOTE(review): schema violations are only reported, not fatal -- the raise below was deliberately disabled.
print("Some kg value type invalid according to schema:" + json.dumps(invalid))
# raise KgValueError("Some kg value type invalid according to schema")
# IF we did not add any value, remove the empty field we just added to kg
if len(self._kg[field_name]) == 0:
self._kg.pop(field_name) | python | def add_value(self, field_name: str, value: object = None, json_path: str = None,
json_path_extraction: str = None, keep_empty: bool = False) -> None:
"""
Add a value to knowledge graph.
Input can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is
called.
If the input is a value, then it is handled
Args:
field_name: str, the field name in the knowledge graph
value: the value to be added to the knowledge graph
json_path: str, if json_path is provided, then get the value at this path in the doc
json_path_extraction: str,
discard_empty: bool,
Returns:
"""
def validate(v):
if v is not None:
if isinstance(v, str):
if v.strip() != "" or keep_empty:
return True
else:
return False
else:
return True
return False
self.validate_field(field_name)
if field_name not in self._kg:
self._kg[field_name] = []
if json_path:
self._add_doc_value(field_name, json_path)
if validate(value):
if not isinstance(value, list):
value = [value]
all_valid = True
invalid = []
for a_value in value:
if isinstance(a_value, Extraction):
valid = self._add_single_value(field_name, a_value.value, provenance_path=str(json_path_extraction),
keep_empty=keep_empty)
elif isinstance(a_value, Segment):
valid = self._add_single_value(field_name, a_value.value, provenance_path=a_value.json_path,
keep_empty=keep_empty)
else:
valid = self._add_single_value(field_name, a_value, provenance_path=json_path_extraction,
reference_type="constant", keep_empty=keep_empty)
all_valid = all_valid and valid
if not valid:
invalid.append(field_name + ":" + str(a_value))
if not all_valid:
print("Some kg value type invalid according to schema:" + json.dumps(invalid))
# raise KgValueError("Some kg value type invalid according to schema")
# IF we did not add any value, remove the empty field we just added to kg
if len(self._kg[field_name]) == 0:
self._kg.pop(field_name) | [
"def",
"add_value",
"(",
"self",
",",
"field_name",
":",
"str",
",",
"value",
":",
"object",
"=",
"None",
",",
"json_path",
":",
"str",
"=",
"None",
",",
"json_path_extraction",
":",
"str",
"=",
"None",
",",
"keep_empty",
":",
"bool",
"=",
"False",
")"... | Add a value to knowledge graph.
Input can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is
called.
If the input is a value, then it is handled
Args:
field_name: str, the field name in the knowledge graph
value: the value to be added to the knowledge graph
json_path: str, if json_path is provided, then get the value at this path in the doc
json_path_extraction: str,
discard_empty: bool,
Returns: | [
"Add",
"a",
"value",
"to",
"knowledge",
"graph",
".",
"Input",
"can",
"either",
"be",
"a",
"value",
"or",
"a",
"json_path",
".",
"If",
"the",
"input",
"is",
"json_path",
"the",
"helper",
"function",
"_add_doc_value",
"is",
"called",
".",
"If",
"the",
"in... | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph.py#L134-L194 | train | 34,236 |
def get_values(self, field_name: str) -> List[object]:
    """Return every stored value for *field_name* in the knowledge graph.

    Args:
        field_name: name of the KG field to read.

    Returns:
        The list of raw values (the "value" entry of each stored record);
        empty when the field does not pass validation.
    """
    values = []
    if self.validate_field(field_name):
        values = [entry["value"] for entry in self._kg.get(field_name)]
    return values
"""
Get a list of all the values of a field.
Args:
field_name:
Returns: the list of values (not the keys)
"""
result = list()
if self.validate_field(field_name):
for value_key in self._kg.get(field_name):
result.append(value_key["value"])
return result | [
"def",
"get_values",
"(",
"self",
",",
"field_name",
":",
"str",
")",
"->",
"List",
"[",
"object",
"]",
":",
"result",
"=",
"list",
"(",
")",
"if",
"self",
".",
"validate_field",
"(",
"field_name",
")",
":",
"for",
"value_key",
"in",
"self",
".",
"_k... | Get a list of all the values of a field.
Args:
field_name:
Returns: the list of values (not the keys) | [
"Get",
"a",
"list",
"of",
"all",
"the",
"values",
"of",
"a",
"field",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph.py#L207-L221 | train | 34,237 |
usc-isi-i2/etk | etk/knowledge_graph.py | KnowledgeGraph.context_resolve | def context_resolve(self, field_uri: str) -> str:
"""
According to field_uri to add corresponding context and return a resolvable field_name
:param field_uri: full URI of the field to resolve
:return: a field_name that can be resolved with kg's @context
"""
from rdflib.namespace import split_uri
# Lazily create the JSON-LD @context mapping on the KG.
context = self._kg["@context"] = self._kg.get("@context", dict())
nm = self.ontology.g.namespace_manager
# Split the URI into its namespace ("space") and local name.
space, name = split_uri(field_uri)
# NOTE(review): 'None in nm.namespaces()' tests membership among (prefix, ns)
# tuples -- verify this condition detects a default namespace as intended.
if "@vocab" not in context and None in nm.namespaces():
context["@vocab"] = nm.store.prefix(space)
if "@vocab" in context and space == context["@vocab"]:
# case #1, can directly use name
return name
if self.schema.has_field(name):
if name not in context:
# Map the bare field name to its full URI, preferring a bound namespace prefix.
prefix = [x for x in list(self.ontology.g.namespace_manager.namespaces())]
for x, y in prefix:
if space[:-1] == x:
context[name] = str(y) + name
return name
context[name] = field_uri
return name
# Fall back to a CURIE (prefix:name) when the namespace has a registered prefix.
prefix = nm.store.prefix(space)
if prefix:
context[prefix] = space
return nm.qname(field_uri)
return field_uri | python | def context_resolve(self, field_uri: str) -> str:
"""
According to field_uri to add corresponding context and return a resolvable field_name
:param field_uri:
:return: a field_name that can be resolved with kg's @context
"""
from rdflib.namespace import split_uri
context = self._kg["@context"] = self._kg.get("@context", dict())
nm = self.ontology.g.namespace_manager
space, name = split_uri(field_uri)
if "@vocab" not in context and None in nm.namespaces():
context["@vocab"] = nm.store.prefix(space)
if "@vocab" in context and space == context["@vocab"]:
# case #1, can directly use name
return name
if self.schema.has_field(name):
if name not in context:
prefix = [x for x in list(self.ontology.g.namespace_manager.namespaces())]
for x, y in prefix:
if space[:-1] == x:
context[name] = str(y) + name
return name
context[name] = field_uri
return name
prefix = nm.store.prefix(space)
if prefix:
context[prefix] = space
return nm.qname(field_uri)
return field_uri | [
"def",
"context_resolve",
"(",
"self",
",",
"field_uri",
":",
"str",
")",
"->",
"str",
":",
"from",
"rdflib",
".",
"namespace",
"import",
"split_uri",
"context",
"=",
"self",
".",
"_kg",
"[",
"\"@context\"",
"]",
"=",
"self",
".",
"_kg",
".",
"get",
"(... | According to field_uri to add corresponding context and return a resolvable field_name
:param field_uri:
:return: a field_name that can be resolved with kg's @context | [
"According",
"to",
"field_uri",
"to",
"add",
"corresponding",
"context",
"and",
"return",
"a",
"resolvable",
"field_name"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph.py#L274-L303 | train | 34,238 |
def tokenize_to_spacy_doc(self, text: str) -> Doc:
    """Run the spaCy pipeline (parser disabled) over *text* and return the Doc.

    Runs of spaces are collapsed to a single space first unless
    ``keep_multi_space`` is set, and ``custom_token`` post-processing is
    applied to every token.  Used by the spaCy rule extractor.
    """
    if not self.keep_multi_space:
        text = re.sub(' +', ' ', text)
    spacy_doc = self.nlp(text, disable=['parser'])
    for token in spacy_doc:
        self.custom_token(token)
    return spacy_doc
"""
Tokenize the given text, returning a spacy doc. Used for spacy rule extractor
Args:
text (string):
Returns: Doc
"""
if not self.keep_multi_space:
text = re.sub(' +', ' ', text)
doc = self.nlp(text, disable=['parser'])
for a_token in doc:
self.custom_token(a_token)
return doc | [
"def",
"tokenize_to_spacy_doc",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"Doc",
":",
"if",
"not",
"self",
".",
"keep_multi_space",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"' +'",
",",
"' '",
",",
"text",
")",
"doc",
"=",
"self",
".",
"nlp"... | Tokenize the given text, returning a spacy doc. Used for spacy rule extractor
Args:
text (string):
Returns: Doc | [
"Tokenize",
"the",
"given",
"text",
"returning",
"a",
"spacy",
"doc",
".",
"Used",
"for",
"spacy",
"rule",
"extractor"
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/tokenizer.py#L62-L78 | train | 34,239 |
def reconstruct_text(tokens: List[Token]) -> str:
    """Rebuild the original text from *tokens* as faithfully as possible.

    Relies on each token's ``text_with_ws`` attribute (token text plus its
    trailing whitespace), so plain concatenation reproduces source spacing.
    """
    pieces = (tok.text_with_ws for tok in tokens)
    return "".join(pieces)
"""
Given a list of tokens, reconstruct the original text with as much fidelity as possible.
Args:
[tokens]:
Returns: a string.
"""
return "".join([x.text_with_ws for x in tokens]) | [
"def",
"reconstruct_text",
"(",
"tokens",
":",
"List",
"[",
"Token",
"]",
")",
"->",
"str",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"x",
".",
"text_with_ws",
"for",
"x",
"in",
"tokens",
"]",
")"
] | Given a list of tokens, reconstruct the original text with as much fidelity as possible.
Args:
[tokens]:
Returns: a string. | [
"Given",
"a",
"list",
"of",
"tokens",
"reconstruct",
"the",
"original",
"text",
"with",
"as",
"much",
"fidelity",
"as",
"possible",
"."
] | aab077c984ea20f5e8ae33af622fe11d3c4df866 | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/tokenizer.py#L160-L170 | train | 34,240 |
phodge/homely | homely/_engine2.py | Engine._removecleaner | def _removecleaner(self, cleaner):
"""
Remove the cleaner from the list if it already exists. Returns True if
the cleaner was removed.
"""
oldlen = len(self._old_cleaners)
self._old_cleaners = [
oldc for oldc in self._old_cleaners
if not oldc.issame(cleaner)
]
return len(self._old_cleaners) != oldlen | python | def _removecleaner(self, cleaner):
"""
Remove the cleaner from the list if it already exists. Returns True if
the cleaner was removed.
"""
oldlen = len(self._old_cleaners)
self._old_cleaners = [
oldc for oldc in self._old_cleaners
if not oldc.issame(cleaner)
]
return len(self._old_cleaners) != oldlen | [
"def",
"_removecleaner",
"(",
"self",
",",
"cleaner",
")",
":",
"oldlen",
"=",
"len",
"(",
"self",
".",
"_old_cleaners",
")",
"self",
".",
"_old_cleaners",
"=",
"[",
"oldc",
"for",
"oldc",
"in",
"self",
".",
"_old_cleaners",
"if",
"not",
"oldc",
".",
"... | Remove the cleaner from the list if it already exists. Returns True if
the cleaner was removed. | [
"Remove",
"the",
"cleaner",
"from",
"the",
"list",
"if",
"it",
"already",
"exists",
".",
"Returns",
"True",
"if",
"the",
"cleaner",
"was",
"removed",
"."
] | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_engine2.py#L254-L264 | train | 34,241 |
phodge/homely | homely/_cli.py | add | def add(repo_path, dest_path):
'''
Registers a git repository with homely so that it will run its `HOMELY.py`
script on each invocation of `homely update`. `homely add` also immediately
executes a `homely update` so that the dotfiles are installed straight
away. If the git repository is hosted online, a local clone will be created
first.
REPO_PATH
A path to a local git repository, or the URL for a git repository
hosted online. If REPO_PATH is a URL, then it should be in a format
accepted by `git clone`. If REPO_PATH is a URL, you may also specify
DEST_PATH.
DEST_PATH
If REPO_PATH is a URL, then the local clone will be created at
DEST_PATH. If DEST_PATH is omitted then the path to the local clone
will be automatically derived from REPO_PATH.
'''
mkcfgdir()
try:
repo = getrepohandler(repo_path)
except NotARepo as err:
echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path))
sys.exit(1)
# if the repo isn't on disk yet, we'll need to make a local clone of it
if repo.isremote:
localrepo, needpull = addfromremote(repo, dest_path)
elif dest_path:
raise UsageError("DEST_PATH is only for repos hosted online")
else:
# Local repo: derive its id directly; no pull needed afterwards.
try:
repoid = repo.getrepoid()
except RepoHasNoCommitsError as err:
echo("ERROR: {}".format(ERR_NO_COMMITS))
sys.exit(1)
localrepo = RepoInfo(repo, repoid, None)
needpull = False
# if we don't have a local repo, then there is nothing more to do
if not localrepo:
return
# remember this new local repo
with saveconfig(RepoListConfig()) as cfg:
cfg.add_repo(localrepo)
# Run the update immediately; exit non-zero on failure so scripts can detect it.
success = run_update([localrepo], pullfirst=needpull, cancleanup=True)
if not success:
sys.exit(1) | python | def add(repo_path, dest_path):
'''
Registers a git repository with homely so that it will run its `HOMELY.py`
script on each invocation of `homely update`. `homely add` also immediately
executes a `homely update` so that the dotfiles are installed straight
away. If the git repository is hosted online, a local clone will be created
first.
REPO_PATH
A path to a local git repository, or the URL for a git repository
hosted online. If REPO_PATH is a URL, then it should be in a format
accepted by `git clone`. If REPO_PATH is a URL, you may also specify
DEST_PATH.
DEST_PATH
If REPO_PATH is a URL, then the local clone will be created at
DEST_PATH. If DEST_PATH is omitted then the path to the local clone
will be automatically derived from REPO_PATH.
'''
mkcfgdir()
try:
repo = getrepohandler(repo_path)
except NotARepo as err:
echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path))
sys.exit(1)
# if the repo isn't on disk yet, we'll need to make a local clone of it
if repo.isremote:
localrepo, needpull = addfromremote(repo, dest_path)
elif dest_path:
raise UsageError("DEST_PATH is only for repos hosted online")
else:
try:
repoid = repo.getrepoid()
except RepoHasNoCommitsError as err:
echo("ERROR: {}".format(ERR_NO_COMMITS))
sys.exit(1)
localrepo = RepoInfo(repo, repoid, None)
needpull = False
# if we don't have a local repo, then there is nothing more to do
if not localrepo:
return
# remember this new local repo
with saveconfig(RepoListConfig()) as cfg:
cfg.add_repo(localrepo)
success = run_update([localrepo], pullfirst=needpull, cancleanup=True)
if not success:
sys.exit(1) | [
"def",
"add",
"(",
"repo_path",
",",
"dest_path",
")",
":",
"mkcfgdir",
"(",
")",
"try",
":",
"repo",
"=",
"getrepohandler",
"(",
"repo_path",
")",
"except",
"NotARepo",
"as",
"err",
":",
"echo",
"(",
"\"ERROR: {}: {}\"",
".",
"format",
"(",
"ERR_NOT_A_REP... | Registers a git repository with homely so that it will run its `HOMELY.py`
script on each invocation of `homely update`. `homely add` also immediately
executes a `homely update` so that the dotfiles are installed straight
away. If the git repository is hosted online, a local clone will be created
first.
REPO_PATH
A path to a local git repository, or the URL for a git repository
hosted online. If REPO_PATH is a URL, then it should be in a format
accepted by `git clone`. If REPO_PATH is a URL, you may also specify
DEST_PATH.
DEST_PATH
If REPO_PATH is a URL, then the local clone will be created at
DEST_PATH. If DEST_PATH is omitted then the path to the local clone
will be automatically derived from REPO_PATH. | [
"Registers",
"a",
"git",
"repository",
"with",
"homely",
"so",
"that",
"it",
"will",
"run",
"its",
"HOMELY",
".",
"py",
"script",
"on",
"each",
"invocation",
"of",
"homely",
"update",
".",
"homely",
"add",
"also",
"immediately",
"executes",
"a",
"homely",
... | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_cli.py#L72-L120 | train | 34,242 |
phodge/homely | homely/_cli.py | forget | def forget(identifier):
'''
Tells homely to forget about a dotfiles repository that was previously
added. You can then run `homely update` to have homely perform automatic
cleanup of anything that was installed by that dotfiles repo.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. You may specify multiple REPOs to
remove at once.
'''
errors = False
for one in identifier:
# Re-read the config each iteration since earlier removals change it.
cfg = RepoListConfig()
# "ilc" selects the lookup modes for find_by_any -- presumably
# id/local-path/canonical-url; TODO confirm against find_by_any.
info = cfg.find_by_any(one, "ilc")
if not info:
warn("No repos matching %r" % one)
errors = True
continue
# update the config ...
note("Removing record of repo [%s] at %s" % (
info.shortid(), info.localrepo.repo_path))
with saveconfig(RepoListConfig()) as cfg:
cfg.remove_repo(info.repoid)
# if there were errors, then don't try and do an update
if errors:
sys.exit(1) | python | def forget(identifier):
'''
Tells homely to forget about a dotfiles repository that was previously
added. You can then run `homely update` to have homely perform automatic
cleanup of anything that was installed by that dotfiles repo.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. You may specify multiple REPOs to
remove at once.
'''
errors = False
for one in identifier:
cfg = RepoListConfig()
info = cfg.find_by_any(one, "ilc")
if not info:
warn("No repos matching %r" % one)
errors = True
continue
# update the config ...
note("Removing record of repo [%s] at %s" % (
info.shortid(), info.localrepo.repo_path))
with saveconfig(RepoListConfig()) as cfg:
cfg.remove_repo(info.repoid)
# if there were errors, then don't try and do an update
if errors:
sys.exit(1) | [
"def",
"forget",
"(",
"identifier",
")",
":",
"errors",
"=",
"False",
"for",
"one",
"in",
"identifier",
":",
"cfg",
"=",
"RepoListConfig",
"(",
")",
"info",
"=",
"cfg",
".",
"find_by_any",
"(",
"one",
",",
"\"ilc\"",
")",
"if",
"not",
"info",
":",
"w... | Tells homely to forget about a dotfiles repository that was previously
added. You can then run `homely update` to have homely perform automatic
cleanup of anything that was installed by that dotfiles repo.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. You may specify multiple REPOs to
remove at once. | [
"Tells",
"homely",
"to",
"forget",
"about",
"a",
"dotfiles",
"repository",
"that",
"was",
"previously",
"added",
".",
"You",
"can",
"then",
"run",
"homely",
"update",
"to",
"have",
"homely",
"perform",
"automatic",
"cleanup",
"of",
"anything",
"that",
"was",
... | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_cli.py#L146-L174 | train | 34,243 |
phodge/homely | homely/_cli.py | update | def update(identifiers, nopull, only):
'''
Performs a `git pull` in each of the repositories registered with
`homely add`, runs all of their HOMELY.py scripts, and then performs
automatic cleanup as necessary.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. If you specify one or more `REPO`s
then only the HOMELY.py scripts from those repositories will be run,
and automatic cleanup will not be performed (automatic cleanup is only
possible when homely has done an update of all repositories in one go).
If you do not specify a REPO, all repositories' HOMELY.py scripts will
be run.
The --nopull and --only options are useful when you are working on your
HOMELY.py script - the --nopull option stops you from wasting time checking
the internet for the same updates on every run, and the --only option
allows you to execute only the section you are working on.
'''
mkcfgdir()
setallowpull(not nopull)
cfg = RepoListConfig()
if len(identifiers):
# De-duplicate by repoid so the same repo given twice is only updated once.
updatedict = {}
for identifier in identifiers:
repo = cfg.find_by_any(identifier, "ilc")
if repo is None:
hint = "Try running %s add /path/to/this/repo first" % CMD
raise Fatal("Unrecognised repo %s (%s)" % (identifier, hint))
updatedict[repo.repoid] = repo
updatelist = updatedict.values()
# Cleanup is only safe when every registered repo is being updated.
cleanup = len(updatelist) == cfg.repo_count()
else:
updatelist = list(cfg.find_all())
cleanup = True
success = run_update(updatelist,
pullfirst=not nopull,
only=only,
cancleanup=cleanup)
if not success:
sys.exit(1) | python | def update(identifiers, nopull, only):
'''
Performs a `git pull` in each of the repositories registered with
`homely add`, runs all of their HOMELY.py scripts, and then performs
automatic cleanup as necessary.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. If you specify one or more `REPO`s
then only the HOMELY.py scripts from those repositories will be run,
and automatic cleanup will not be performed (automatic cleanup is only
possible when homely has done an update of all repositories in one go).
If you do not specify a REPO, all repositories' HOMELY.py scripts will
be run.
The --nopull and --only options are useful when you are working on your
HOMELY.py script - the --nopull option stops you from wasting time checking
the internet for the same updates on every run, and the --only option
allows you to execute only the section you are working on.
'''
mkcfgdir()
setallowpull(not nopull)
cfg = RepoListConfig()
if len(identifiers):
updatedict = {}
for identifier in identifiers:
repo = cfg.find_by_any(identifier, "ilc")
if repo is None:
hint = "Try running %s add /path/to/this/repo first" % CMD
raise Fatal("Unrecognised repo %s (%s)" % (identifier, hint))
updatedict[repo.repoid] = repo
updatelist = updatedict.values()
cleanup = len(updatelist) == cfg.repo_count()
else:
updatelist = list(cfg.find_all())
cleanup = True
success = run_update(updatelist,
pullfirst=not nopull,
only=only,
cancleanup=cleanup)
if not success:
sys.exit(1) | [
"def",
"update",
"(",
"identifiers",
",",
"nopull",
",",
"only",
")",
":",
"mkcfgdir",
"(",
")",
"setallowpull",
"(",
"not",
"nopull",
")",
"cfg",
"=",
"RepoListConfig",
"(",
")",
"if",
"len",
"(",
"identifiers",
")",
":",
"updatedict",
"=",
"{",
"}",
... | Performs a `git pull` in each of the repositories registered with
`homely add`, runs all of their HOMELY.py scripts, and then performs
automatic cleanup as necessary.
REPO
This should be the path to a local dotfiles repository that has already
been registered using `homely add`. If you specify one or more `REPO`s
then only the HOMELY.py scripts from those repositories will be run,
and automatic cleanup will not be performed (automatic cleanup is only
possible when homely has done an update of all repositories in one go).
If you do not specify a REPO, all repositories' HOMELY.py scripts will
be run.
The --nopull and --only options are useful when you are working on your
HOMELY.py script - the --nopull option stops you from wasting time checking
the internet for the same updates on every run, and the --only option
allows you to execute only the section you are working on. | [
"Performs",
"a",
"git",
"pull",
"in",
"each",
"of",
"the",
"repositories",
"registered",
"with",
"homely",
"add",
"runs",
"all",
"of",
"their",
"HOMELY",
".",
"py",
"scripts",
"and",
"then",
"performs",
"automatic",
"cleanup",
"as",
"necessary",
"."
] | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_cli.py#L185-L227 | train | 34,244 |
phodge/homely | homely/pipinstall.py | pipinstall | def pipinstall(packagename, pips=None, trypips=[], scripts=None):
"""
Install packages from pip.
The primary advantage of using this function is that homely can
automatically remove the package for you when you no longer want it.
package:
The name of the pip package to install
pips:
A list of `pip` executables to install the package with.
`['pip2.7', 'pip3.4']` would install the package using both the `pip2.7`
and `pip3.4` executables. The default is to use `['pip']` as long as you
aren't using `trypips`.
trypips:
This is a supplementary list of `pip` executables that homely will use to
install the package, but no exception will be raised if the `pip`
executables aren't available.
Note that the `pip install ...` commands are run with the `--user` option
so that the packages are installed into your home directory.
"""
# `scripts` is an alternate location for bin scripts. Useful for bad
# platforms that put pip2/pip3 scripts in the same bin dir such that they
# clobber each other.
# FIXME: `scripts` still has the following issues
# - useless if you're specifying multiple pips at once
# - won't do the uninstall/reinstall dance to reinstall something that was
# installed with a different `scripts` path
if scripts is None:
scripts = {}
if pips is None:
pips = [] if len(trypips) else ['pip']
engine = getengine()
for pip in pips:
helper = PIPInstall(packagename,
pip,
mustinstall=True,
scripts=scripts.get(pip))
engine.run(helper)
for pip in trypips:
helper = PIPInstall(packagename,
pip,
mustinstall=False,
scripts=scripts.get(pip))
engine.run(helper) | python | def pipinstall(packagename, pips=None, trypips=[], scripts=None):
"""
Install packages from pip.
The primary advantage of using this function is that homely can
automatically remove the package for you when you no longer want it.
package:
The name of the pip package to install
pips:
A list of `pip` executables to install the package with.
`['pip2.7', 'pip3.4']` would install the package using both the `pip2.7`
and `pip3.4` executables. The default is to use `['pip']` as long as you
aren't using `trypips`.
trypips:
This is a supplementary list of `pip` executables that homely will use to
install the package, but no exception will be raised if the `pip`
executables aren't available.
Note that the `pip install ...` commands are run with the `--user` option
so that the packages are installed into your home directory.
"""
# `scripts` is an alternate location for bin scripts. Useful for bad
# platforms that put pip2/pip3 scripts in the same bin dir such that they
# clobber each other.
# FIXME: `scripts` still has the following issues
# - useless if you're specifying multiple pips at once
# - won't do the uninstall/reinstall dance to reinstall something that was
# installed with a different `scripts` path
if scripts is None:
scripts = {}
if pips is None:
pips = [] if len(trypips) else ['pip']
engine = getengine()
for pip in pips:
helper = PIPInstall(packagename,
pip,
mustinstall=True,
scripts=scripts.get(pip))
engine.run(helper)
for pip in trypips:
helper = PIPInstall(packagename,
pip,
mustinstall=False,
scripts=scripts.get(pip))
engine.run(helper) | [
"def",
"pipinstall",
"(",
"packagename",
",",
"pips",
"=",
"None",
",",
"trypips",
"=",
"[",
"]",
",",
"scripts",
"=",
"None",
")",
":",
"# `scripts` is an alternate location for bin scripts. Useful for bad",
"# platforms that put pip2/pip3 scripts in the same bin dir such th... | Install packages from pip.
The primary advantage of using this function is that homely can
automatically remove the package for you when you no longer want it.
package:
The name of the pip package to install
pips:
A list of `pip` executables to install the package with.
`['pip2.7', 'pip3.4']` would install the package using both the `pip2.7`
and `pip3.4` executables. The default is to use `['pip']` as long as you
aren't using `trypips`.
trypips:
This is a supplementary list of `pip` executables that homely will use to
install the package, but no exception will be raised if the `pip`
executables aren't available.
Note that the `pip install ...` commands are run with the `--user` option
so that the packages are installed into your home directory. | [
"Install",
"packages",
"from",
"pip",
"."
] | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/pipinstall.py#L12-L60 | train | 34,245 |
phodge/homely | homely/_utils.py | getstatus | def getstatus():
"""Get the status of the previous 'homely update', or any 'homely update'
that may be running in another process.
"""
if exists(RUNFILE):
mtime = os.stat(RUNFILE).st_mtime
with open(SECTIONFILE) as f:
section = f.read().strip()
# what section?
return UpdateStatus.RUNNING, mtime, section
if exists(PAUSEFILE):
return UpdateStatus.PAUSED, None, None
mtime = None
if exists(TIMEFILE):
mtime = os.stat(TIMEFILE).st_mtime
if exists(FAILFILE):
if not mtime:
mtime = os.stat(FAILFILE).st_mtime
# TODO: return a different error code when the error was inability to
# contact one or more remote servers
with open(FAILFILE) as f:
content = f.read().strip()
if content == UpdateStatus.NOCONN:
return UpdateStatus.NOCONN, mtime, None
elif content == UpdateStatus.DIRTY:
return UpdateStatus.DIRTY, mtime, None
return UpdateStatus.FAILED, mtime, None
if mtime is None:
return UpdateStatus.NEVER, None, None
return UpdateStatus.OK, mtime, None | python | def getstatus():
"""Get the status of the previous 'homely update', or any 'homely update'
that may be running in another process.
"""
if exists(RUNFILE):
mtime = os.stat(RUNFILE).st_mtime
with open(SECTIONFILE) as f:
section = f.read().strip()
# what section?
return UpdateStatus.RUNNING, mtime, section
if exists(PAUSEFILE):
return UpdateStatus.PAUSED, None, None
mtime = None
if exists(TIMEFILE):
mtime = os.stat(TIMEFILE).st_mtime
if exists(FAILFILE):
if not mtime:
mtime = os.stat(FAILFILE).st_mtime
# TODO: return a different error code when the error was inability to
# contact one or more remote servers
with open(FAILFILE) as f:
content = f.read().strip()
if content == UpdateStatus.NOCONN:
return UpdateStatus.NOCONN, mtime, None
elif content == UpdateStatus.DIRTY:
return UpdateStatus.DIRTY, mtime, None
return UpdateStatus.FAILED, mtime, None
if mtime is None:
return UpdateStatus.NEVER, None, None
return UpdateStatus.OK, mtime, None | [
"def",
"getstatus",
"(",
")",
":",
"if",
"exists",
"(",
"RUNFILE",
")",
":",
"mtime",
"=",
"os",
".",
"stat",
"(",
"RUNFILE",
")",
".",
"st_mtime",
"with",
"open",
"(",
"SECTIONFILE",
")",
"as",
"f",
":",
"section",
"=",
"f",
".",
"read",
"(",
")... | Get the status of the previous 'homely update', or any 'homely update'
that may be running in another process. | [
"Get",
"the",
"status",
"of",
"the",
"previous",
"homely",
"update",
"or",
"any",
"homely",
"update",
"that",
"may",
"be",
"running",
"in",
"another",
"process",
"."
] | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_utils.py#L615-L648 | train | 34,246 |
phodge/homely | homely/_utils.py | RepoListConfig.find_by_any | def find_by_any(self, identifier, how):
"""
how should be a string with any or all of the characters "ilc"
"""
if "i" in how:
match = self.find_by_id(identifier)
if match:
return match
if "l" in how:
match = self.find_by_localpath(identifier)
if match:
return match
if "c" in how:
match = self.find_by_canonical(identifier)
if match:
return match | python | def find_by_any(self, identifier, how):
"""
how should be a string with any or all of the characters "ilc"
"""
if "i" in how:
match = self.find_by_id(identifier)
if match:
return match
if "l" in how:
match = self.find_by_localpath(identifier)
if match:
return match
if "c" in how:
match = self.find_by_canonical(identifier)
if match:
return match | [
"def",
"find_by_any",
"(",
"self",
",",
"identifier",
",",
"how",
")",
":",
"if",
"\"i\"",
"in",
"how",
":",
"match",
"=",
"self",
".",
"find_by_id",
"(",
"identifier",
")",
"if",
"match",
":",
"return",
"match",
"if",
"\"l\"",
"in",
"how",
":",
"mat... | how should be a string with any or all of the characters "ilc" | [
"how",
"should",
"be",
"a",
"string",
"with",
"any",
"or",
"all",
"of",
"the",
"characters",
"ilc"
] | 98ddcf3e4f29b0749645817b4866baaea8376085 | https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_utils.py#L339-L354 | train | 34,247 |
glyph/automat | automat/_discover.py | isOriginalLocation | def isOriginalLocation(attr):
"""
Attempt to discover if this appearance of a PythonAttribute
representing a class refers to the module where that class was
defined.
"""
sourceModule = inspect.getmodule(attr.load())
if sourceModule is None:
return False
currentModule = attr
while not isinstance(currentModule, PythonModule):
currentModule = currentModule.onObject
return currentModule.name == sourceModule.__name__ | python | def isOriginalLocation(attr):
"""
Attempt to discover if this appearance of a PythonAttribute
representing a class refers to the module where that class was
defined.
"""
sourceModule = inspect.getmodule(attr.load())
if sourceModule is None:
return False
currentModule = attr
while not isinstance(currentModule, PythonModule):
currentModule = currentModule.onObject
return currentModule.name == sourceModule.__name__ | [
"def",
"isOriginalLocation",
"(",
"attr",
")",
":",
"sourceModule",
"=",
"inspect",
".",
"getmodule",
"(",
"attr",
".",
"load",
"(",
")",
")",
"if",
"sourceModule",
"is",
"None",
":",
"return",
"False",
"currentModule",
"=",
"attr",
"while",
"not",
"isinst... | Attempt to discover if this appearance of a PythonAttribute
representing a class refers to the module where that class was
defined. | [
"Attempt",
"to",
"discover",
"if",
"this",
"appearance",
"of",
"a",
"PythonAttribute",
"representing",
"a",
"class",
"refers",
"to",
"the",
"module",
"where",
"that",
"class",
"was",
"defined",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_discover.py#L7-L21 | train | 34,248 |
glyph/automat | automat/_core.py | Automaton.initialState | def initialState(self, state):
"""
Set this automaton's initial state. Raises a ValueError if
this automaton already has an initial state.
"""
if self._initialState is not _NO_STATE:
raise ValueError(
"initial state already set to {}".format(self._initialState))
self._initialState = state | python | def initialState(self, state):
"""
Set this automaton's initial state. Raises a ValueError if
this automaton already has an initial state.
"""
if self._initialState is not _NO_STATE:
raise ValueError(
"initial state already set to {}".format(self._initialState))
self._initialState = state | [
"def",
"initialState",
"(",
"self",
",",
"state",
")",
":",
"if",
"self",
".",
"_initialState",
"is",
"not",
"_NO_STATE",
":",
"raise",
"ValueError",
"(",
"\"initial state already set to {}\"",
".",
"format",
"(",
"self",
".",
"_initialState",
")",
")",
"self"... | Set this automaton's initial state. Raises a ValueError if
this automaton already has an initial state. | [
"Set",
"this",
"automaton",
"s",
"initial",
"state",
".",
"Raises",
"a",
"ValueError",
"if",
"this",
"automaton",
"already",
"has",
"an",
"initial",
"state",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L56-L66 | train | 34,249 |
glyph/automat | automat/_core.py | Automaton.addTransition | def addTransition(self, inState, inputSymbol, outState, outputSymbols):
"""
Add the given transition to the outputSymbol. Raise ValueError if
there is already a transition with the same inState and inputSymbol.
"""
# keeping self._transitions in a flat list makes addTransition
# O(n^2), but state machines don't tend to have hundreds of
# transitions.
for (anInState, anInputSymbol, anOutState, _) in self._transitions:
if (anInState == inState and anInputSymbol == inputSymbol):
raise ValueError(
"already have transition from {} via {}".format(inState, inputSymbol))
self._transitions.add(
(inState, inputSymbol, outState, tuple(outputSymbols))
) | python | def addTransition(self, inState, inputSymbol, outState, outputSymbols):
"""
Add the given transition to the outputSymbol. Raise ValueError if
there is already a transition with the same inState and inputSymbol.
"""
# keeping self._transitions in a flat list makes addTransition
# O(n^2), but state machines don't tend to have hundreds of
# transitions.
for (anInState, anInputSymbol, anOutState, _) in self._transitions:
if (anInState == inState and anInputSymbol == inputSymbol):
raise ValueError(
"already have transition from {} via {}".format(inState, inputSymbol))
self._transitions.add(
(inState, inputSymbol, outState, tuple(outputSymbols))
) | [
"def",
"addTransition",
"(",
"self",
",",
"inState",
",",
"inputSymbol",
",",
"outState",
",",
"outputSymbols",
")",
":",
"# keeping self._transitions in a flat list makes addTransition",
"# O(n^2), but state machines don't tend to have hundreds of",
"# transitions.",
"for",
"(",... | Add the given transition to the outputSymbol. Raise ValueError if
there is already a transition with the same inState and inputSymbol. | [
"Add",
"the",
"given",
"transition",
"to",
"the",
"outputSymbol",
".",
"Raise",
"ValueError",
"if",
"there",
"is",
"already",
"a",
"transition",
"with",
"the",
"same",
"inState",
"and",
"inputSymbol",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L69-L83 | train | 34,250 |
glyph/automat | automat/_core.py | Automaton.outputAlphabet | def outputAlphabet(self):
"""
The full set of symbols which can be produced by this automaton.
"""
return set(
chain.from_iterable(
outputSymbols for
(inState, inputSymbol, outState, outputSymbols)
in self._transitions
)
) | python | def outputAlphabet(self):
"""
The full set of symbols which can be produced by this automaton.
"""
return set(
chain.from_iterable(
outputSymbols for
(inState, inputSymbol, outState, outputSymbols)
in self._transitions
)
) | [
"def",
"outputAlphabet",
"(",
"self",
")",
":",
"return",
"set",
"(",
"chain",
".",
"from_iterable",
"(",
"outputSymbols",
"for",
"(",
"inState",
",",
"inputSymbol",
",",
"outState",
",",
"outputSymbols",
")",
"in",
"self",
".",
"_transitions",
")",
")"
] | The full set of symbols which can be produced by this automaton. | [
"The",
"full",
"set",
"of",
"symbols",
"which",
"can",
"be",
"produced",
"by",
"this",
"automaton",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L101-L111 | train | 34,251 |
glyph/automat | automat/_core.py | Automaton.states | def states(self):
"""
All valid states; "Q" in the mathematical description of a state
machine.
"""
return frozenset(
chain.from_iterable(
(inState, outState)
for
(inState, inputSymbol, outState, outputSymbol)
in self._transitions
)
) | python | def states(self):
"""
All valid states; "Q" in the mathematical description of a state
machine.
"""
return frozenset(
chain.from_iterable(
(inState, outState)
for
(inState, inputSymbol, outState, outputSymbol)
in self._transitions
)
) | [
"def",
"states",
"(",
"self",
")",
":",
"return",
"frozenset",
"(",
"chain",
".",
"from_iterable",
"(",
"(",
"inState",
",",
"outState",
")",
"for",
"(",
"inState",
",",
"inputSymbol",
",",
"outState",
",",
"outputSymbol",
")",
"in",
"self",
".",
"_trans... | All valid states; "Q" in the mathematical description of a state
machine. | [
"All",
"valid",
"states",
";",
"Q",
"in",
"the",
"mathematical",
"description",
"of",
"a",
"state",
"machine",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L114-L126 | train | 34,252 |
glyph/automat | automat/_core.py | Transitioner.transition | def transition(self, inputSymbol):
"""
Transition between states, returning any outputs.
"""
outState, outputSymbols = self._automaton.outputForInput(self._state,
inputSymbol)
outTracer = None
if self._tracer:
outTracer = self._tracer(self._state._name(),
inputSymbol._name(),
outState._name())
self._state = outState
return (outputSymbols, outTracer) | python | def transition(self, inputSymbol):
"""
Transition between states, returning any outputs.
"""
outState, outputSymbols = self._automaton.outputForInput(self._state,
inputSymbol)
outTracer = None
if self._tracer:
outTracer = self._tracer(self._state._name(),
inputSymbol._name(),
outState._name())
self._state = outState
return (outputSymbols, outTracer) | [
"def",
"transition",
"(",
"self",
",",
"inputSymbol",
")",
":",
"outState",
",",
"outputSymbols",
"=",
"self",
".",
"_automaton",
".",
"outputForInput",
"(",
"self",
".",
"_state",
",",
"inputSymbol",
")",
"outTracer",
"=",
"None",
"if",
"self",
".",
"_tra... | Transition between states, returning any outputs. | [
"Transition",
"between",
"states",
"returning",
"any",
"outputs",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L153-L165 | train | 34,253 |
glyph/automat | automat/_methodical.py | _getArgSpec | def _getArgSpec(func):
"""
Normalize inspect.ArgSpec across python versions
and convert mutable attributes to immutable types.
:param Callable func: A function.
:return: The function's ArgSpec.
:rtype: ArgSpec
"""
spec = getArgsSpec(func)
return ArgSpec(
args=tuple(spec.args),
varargs=spec.varargs,
varkw=spec.varkw if six.PY3 else spec.keywords,
defaults=spec.defaults if spec.defaults else (),
kwonlyargs=tuple(spec.kwonlyargs) if six.PY3 else (),
kwonlydefaults=(
tuple(spec.kwonlydefaults.items())
if spec.kwonlydefaults else ()
) if six.PY3 else (),
annotations=tuple(spec.annotations.items()) if six.PY3 else (),
) | python | def _getArgSpec(func):
"""
Normalize inspect.ArgSpec across python versions
and convert mutable attributes to immutable types.
:param Callable func: A function.
:return: The function's ArgSpec.
:rtype: ArgSpec
"""
spec = getArgsSpec(func)
return ArgSpec(
args=tuple(spec.args),
varargs=spec.varargs,
varkw=spec.varkw if six.PY3 else spec.keywords,
defaults=spec.defaults if spec.defaults else (),
kwonlyargs=tuple(spec.kwonlyargs) if six.PY3 else (),
kwonlydefaults=(
tuple(spec.kwonlydefaults.items())
if spec.kwonlydefaults else ()
) if six.PY3 else (),
annotations=tuple(spec.annotations.items()) if six.PY3 else (),
) | [
"def",
"_getArgSpec",
"(",
"func",
")",
":",
"spec",
"=",
"getArgsSpec",
"(",
"func",
")",
"return",
"ArgSpec",
"(",
"args",
"=",
"tuple",
"(",
"spec",
".",
"args",
")",
",",
"varargs",
"=",
"spec",
".",
"varargs",
",",
"varkw",
"=",
"spec",
".",
"... | Normalize inspect.ArgSpec across python versions
and convert mutable attributes to immutable types.
:param Callable func: A function.
:return: The function's ArgSpec.
:rtype: ArgSpec | [
"Normalize",
"inspect",
".",
"ArgSpec",
"across",
"python",
"versions",
"and",
"convert",
"mutable",
"attributes",
"to",
"immutable",
"types",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L26-L47 | train | 34,254 |
glyph/automat | automat/_methodical.py | _getArgNames | def _getArgNames(spec):
"""
Get the name of all arguments defined in a function signature.
The name of * and ** arguments is normalized to "*args" and "**kwargs".
:param ArgSpec spec: A function to interrogate for a signature.
:return: The set of all argument names in `func`s signature.
:rtype: Set[str]
"""
return set(
spec.args
+ spec.kwonlyargs
+ (('*args',) if spec.varargs else ())
+ (('**kwargs',) if spec.varkw else ())
+ spec.annotations
) | python | def _getArgNames(spec):
"""
Get the name of all arguments defined in a function signature.
The name of * and ** arguments is normalized to "*args" and "**kwargs".
:param ArgSpec spec: A function to interrogate for a signature.
:return: The set of all argument names in `func`s signature.
:rtype: Set[str]
"""
return set(
spec.args
+ spec.kwonlyargs
+ (('*args',) if spec.varargs else ())
+ (('**kwargs',) if spec.varkw else ())
+ spec.annotations
) | [
"def",
"_getArgNames",
"(",
"spec",
")",
":",
"return",
"set",
"(",
"spec",
".",
"args",
"+",
"spec",
".",
"kwonlyargs",
"+",
"(",
"(",
"'*args'",
",",
")",
"if",
"spec",
".",
"varargs",
"else",
"(",
")",
")",
"+",
"(",
"(",
"'**kwargs'",
",",
")... | Get the name of all arguments defined in a function signature.
The name of * and ** arguments is normalized to "*args" and "**kwargs".
:param ArgSpec spec: A function to interrogate for a signature.
:return: The set of all argument names in `func`s signature.
:rtype: Set[str] | [
"Get",
"the",
"name",
"of",
"all",
"arguments",
"defined",
"in",
"a",
"function",
"signature",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L50-L66 | train | 34,255 |
glyph/automat | automat/_methodical.py | _keywords_only | def _keywords_only(f):
"""
Decorate a function so all its arguments must be passed by keyword.
A useful utility for decorators that take arguments so that they don't
accidentally get passed the thing they're decorating as their first
argument.
Only works for methods right now.
"""
@wraps(f)
def g(self, **kw):
return f(self, **kw)
return g | python | def _keywords_only(f):
"""
Decorate a function so all its arguments must be passed by keyword.
A useful utility for decorators that take arguments so that they don't
accidentally get passed the thing they're decorating as their first
argument.
Only works for methods right now.
"""
@wraps(f)
def g(self, **kw):
return f(self, **kw)
return g | [
"def",
"_keywords_only",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"g",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"return",
"f",
"(",
"self",
",",
"*",
"*",
"kw",
")",
"return",
"g"
] | Decorate a function so all its arguments must be passed by keyword.
A useful utility for decorators that take arguments so that they don't
accidentally get passed the thing they're decorating as their first
argument.
Only works for methods right now. | [
"Decorate",
"a",
"function",
"so",
"all",
"its",
"arguments",
"must",
"be",
"passed",
"by",
"keyword",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L69-L82 | train | 34,256 |
glyph/automat | automat/_methodical.py | _filterArgs | def _filterArgs(args, kwargs, inputSpec, outputSpec):
"""
Filter out arguments that were passed to input that output won't accept.
:param tuple args: The *args that input received.
:param dict kwargs: The **kwargs that input received.
:param ArgSpec inputSpec: The input's arg spec.
:param ArgSpec outputSpec: The output's arg spec.
:return: The args and kwargs that output will accept.
:rtype: Tuple[tuple, dict]
"""
named_args = tuple(zip(inputSpec.args[1:], args))
if outputSpec.varargs:
# Only return all args if the output accepts *args.
return_args = args
else:
# Filter out arguments that don't appear
# in the output's method signature.
return_args = [v for n, v in named_args if n in outputSpec.args]
# Get any of input's default arguments that were not passed.
passed_arg_names = tuple(kwargs)
for name, value in named_args:
passed_arg_names += (name, value)
defaults = zip(inputSpec.args[::-1], inputSpec.defaults[::-1])
full_kwargs = {n: v for n, v in defaults if n not in passed_arg_names}
full_kwargs.update(kwargs)
if outputSpec.varkw:
# Only pass all kwargs if the output method accepts **kwargs.
return_kwargs = full_kwargs
else:
# Filter out names that the output method does not accept.
all_accepted_names = outputSpec.args[1:] + outputSpec.kwonlyargs
return_kwargs = {n: v for n, v in full_kwargs.items()
if n in all_accepted_names}
return return_args, return_kwargs | python | def _filterArgs(args, kwargs, inputSpec, outputSpec):
"""
Filter out arguments that were passed to input that output won't accept.
:param tuple args: The *args that input received.
:param dict kwargs: The **kwargs that input received.
:param ArgSpec inputSpec: The input's arg spec.
:param ArgSpec outputSpec: The output's arg spec.
:return: The args and kwargs that output will accept.
:rtype: Tuple[tuple, dict]
"""
named_args = tuple(zip(inputSpec.args[1:], args))
if outputSpec.varargs:
# Only return all args if the output accepts *args.
return_args = args
else:
# Filter out arguments that don't appear
# in the output's method signature.
return_args = [v for n, v in named_args if n in outputSpec.args]
# Get any of input's default arguments that were not passed.
passed_arg_names = tuple(kwargs)
for name, value in named_args:
passed_arg_names += (name, value)
defaults = zip(inputSpec.args[::-1], inputSpec.defaults[::-1])
full_kwargs = {n: v for n, v in defaults if n not in passed_arg_names}
full_kwargs.update(kwargs)
if outputSpec.varkw:
# Only pass all kwargs if the output method accepts **kwargs.
return_kwargs = full_kwargs
else:
# Filter out names that the output method does not accept.
all_accepted_names = outputSpec.args[1:] + outputSpec.kwonlyargs
return_kwargs = {n: v for n, v in full_kwargs.items()
if n in all_accepted_names}
return return_args, return_kwargs | [
"def",
"_filterArgs",
"(",
"args",
",",
"kwargs",
",",
"inputSpec",
",",
"outputSpec",
")",
":",
"named_args",
"=",
"tuple",
"(",
"zip",
"(",
"inputSpec",
".",
"args",
"[",
"1",
":",
"]",
",",
"args",
")",
")",
"if",
"outputSpec",
".",
"varargs",
":"... | Filter out arguments that were passed to input that output won't accept.
:param tuple args: The *args that input received.
:param dict kwargs: The **kwargs that input received.
:param ArgSpec inputSpec: The input's arg spec.
:param ArgSpec outputSpec: The output's arg spec.
:return: The args and kwargs that output will accept.
:rtype: Tuple[tuple, dict] | [
"Filter",
"out",
"arguments",
"that",
"were",
"passed",
"to",
"input",
"that",
"output",
"won",
"t",
"accept",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L169-L206 | train | 34,257 |
glyph/automat | automat/_methodical.py | MethodicalMachine.state | def state(self, initial=False, terminal=False,
serialized=None):
"""
Declare a state, possibly an initial state or a terminal state.
This is a decorator for methods, but it will modify the method so as
not to be callable any more.
:param bool initial: is this state the initial state?
Only one state on this :class:`automat.MethodicalMachine`
may be an initial state; more than one is an error.
:param bool terminal: Is this state a terminal state?
i.e. a state that the machine can end up in?
(This is purely informational at this point.)
:param Hashable serialized: a serializable value
to be used to represent this state to external systems.
This value should be hashable;
:py:func:`unicode` is a good type to use.
"""
def decorator(stateMethod):
state = MethodicalState(machine=self,
method=stateMethod,
serialized=serialized)
if initial:
self._automaton.initialState = state
return state
return decorator | python | def state(self, initial=False, terminal=False,
serialized=None):
"""
Declare a state, possibly an initial state or a terminal state.
This is a decorator for methods, but it will modify the method so as
not to be callable any more.
:param bool initial: is this state the initial state?
Only one state on this :class:`automat.MethodicalMachine`
may be an initial state; more than one is an error.
:param bool terminal: Is this state a terminal state?
i.e. a state that the machine can end up in?
(This is purely informational at this point.)
:param Hashable serialized: a serializable value
to be used to represent this state to external systems.
This value should be hashable;
:py:func:`unicode` is a good type to use.
"""
def decorator(stateMethod):
state = MethodicalState(machine=self,
method=stateMethod,
serialized=serialized)
if initial:
self._automaton.initialState = state
return state
return decorator | [
"def",
"state",
"(",
"self",
",",
"initial",
"=",
"False",
",",
"terminal",
"=",
"False",
",",
"serialized",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"stateMethod",
")",
":",
"state",
"=",
"MethodicalState",
"(",
"machine",
"=",
"self",
",",
"me... | Declare a state, possibly an initial state or a terminal state.
This is a decorator for methods, but it will modify the method so as
not to be callable any more.
:param bool initial: is this state the initial state?
Only one state on this :class:`automat.MethodicalMachine`
may be an initial state; more than one is an error.
:param bool terminal: Is this state a terminal state?
i.e. a state that the machine can end up in?
(This is purely informational at this point.)
:param Hashable serialized: a serializable value
to be used to represent this state to external systems.
This value should be hashable;
:py:func:`unicode` is a good type to use. | [
"Declare",
"a",
"state",
"possibly",
"an",
"initial",
"state",
"or",
"a",
"terminal",
"state",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L337-L365 | train | 34,258 |
glyph/automat | automat/_methodical.py | MethodicalMachine.input | def input(self):
"""
Declare an input.
This is a decorator for methods.
"""
def decorator(inputMethod):
return MethodicalInput(automaton=self._automaton,
method=inputMethod,
symbol=self._symbol)
return decorator | python | def input(self):
"""
Declare an input.
This is a decorator for methods.
"""
def decorator(inputMethod):
return MethodicalInput(automaton=self._automaton,
method=inputMethod,
symbol=self._symbol)
return decorator | [
"def",
"input",
"(",
"self",
")",
":",
"def",
"decorator",
"(",
"inputMethod",
")",
":",
"return",
"MethodicalInput",
"(",
"automaton",
"=",
"self",
".",
"_automaton",
",",
"method",
"=",
"inputMethod",
",",
"symbol",
"=",
"self",
".",
"_symbol",
")",
"r... | Declare an input.
This is a decorator for methods. | [
"Declare",
"an",
"input",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L369-L379 | train | 34,259 |
glyph/automat | automat/_methodical.py | MethodicalMachine.output | def output(self):
"""
Declare an output.
This is a decorator for methods.
This method will be called when the state machine transitions to this
state as specified in the decorated `output` method.
"""
def decorator(outputMethod):
return MethodicalOutput(machine=self, method=outputMethod)
return decorator | python | def output(self):
"""
Declare an output.
This is a decorator for methods.
This method will be called when the state machine transitions to this
state as specified in the decorated `output` method.
"""
def decorator(outputMethod):
return MethodicalOutput(machine=self, method=outputMethod)
return decorator | [
"def",
"output",
"(",
"self",
")",
":",
"def",
"decorator",
"(",
"outputMethod",
")",
":",
"return",
"MethodicalOutput",
"(",
"machine",
"=",
"self",
",",
"method",
"=",
"outputMethod",
")",
"return",
"decorator"
] | Declare an output.
This is a decorator for methods.
This method will be called when the state machine transitions to this
state as specified in the decorated `output` method. | [
"Declare",
"an",
"output",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_methodical.py#L383-L394 | train | 34,260 |
glyph/automat | automat/_visualize.py | elementMaker | def elementMaker(name, *children, **attrs):
"""
Construct a string from the HTML element description.
"""
formattedAttrs = ' '.join('{}={}'.format(key, _gvquote(str(value)))
for key, value in sorted(attrs.items()))
formattedChildren = ''.join(children)
return u'<{name} {attrs}>{children}</{name}>'.format(
name=name,
attrs=formattedAttrs,
children=formattedChildren) | python | def elementMaker(name, *children, **attrs):
"""
Construct a string from the HTML element description.
"""
formattedAttrs = ' '.join('{}={}'.format(key, _gvquote(str(value)))
for key, value in sorted(attrs.items()))
formattedChildren = ''.join(children)
return u'<{name} {attrs}>{children}</{name}>'.format(
name=name,
attrs=formattedAttrs,
children=formattedChildren) | [
"def",
"elementMaker",
"(",
"name",
",",
"*",
"children",
",",
"*",
"*",
"attrs",
")",
":",
"formattedAttrs",
"=",
"' '",
".",
"join",
"(",
"'{}={}'",
".",
"format",
"(",
"key",
",",
"_gvquote",
"(",
"str",
"(",
"value",
")",
")",
")",
"for",
"key"... | Construct a string from the HTML element description. | [
"Construct",
"a",
"string",
"from",
"the",
"HTML",
"element",
"description",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_visualize.py#L18-L28 | train | 34,261 |
glyph/automat | automat/_visualize.py | tableMaker | def tableMaker(inputLabel, outputLabels, port, _E=elementMaker):
"""
Construct an HTML table to label a state transition.
"""
colspan = {}
if outputLabels:
colspan['colspan'] = str(len(outputLabels))
inputLabelCell = _E("td",
_E("font",
inputLabel,
face="menlo-italic"),
color="purple",
port=port,
**colspan)
pointSize = {"point-size": "9"}
outputLabelCells = [_E("td",
_E("font",
outputLabel,
**pointSize),
color="pink")
for outputLabel in outputLabels]
rows = [_E("tr", inputLabelCell)]
if outputLabels:
rows.append(_E("tr", *outputLabelCells))
return _E("table", *rows) | python | def tableMaker(inputLabel, outputLabels, port, _E=elementMaker):
"""
Construct an HTML table to label a state transition.
"""
colspan = {}
if outputLabels:
colspan['colspan'] = str(len(outputLabels))
inputLabelCell = _E("td",
_E("font",
inputLabel,
face="menlo-italic"),
color="purple",
port=port,
**colspan)
pointSize = {"point-size": "9"}
outputLabelCells = [_E("td",
_E("font",
outputLabel,
**pointSize),
color="pink")
for outputLabel in outputLabels]
rows = [_E("tr", inputLabelCell)]
if outputLabels:
rows.append(_E("tr", *outputLabelCells))
return _E("table", *rows) | [
"def",
"tableMaker",
"(",
"inputLabel",
",",
"outputLabels",
",",
"port",
",",
"_E",
"=",
"elementMaker",
")",
":",
"colspan",
"=",
"{",
"}",
"if",
"outputLabels",
":",
"colspan",
"[",
"'colspan'",
"]",
"=",
"str",
"(",
"len",
"(",
"outputLabels",
")",
... | Construct an HTML table to label a state transition. | [
"Construct",
"an",
"HTML",
"table",
"to",
"label",
"a",
"state",
"transition",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_visualize.py#L31-L60 | train | 34,262 |
glyph/automat | automat/_introspection.py | preserveName | def preserveName(f):
"""
Preserve the name of the given function on the decorated function.
"""
def decorator(decorated):
return copyfunction(decorated,
dict(name=f.__name__), dict(name=f.__name__))
return decorator | python | def preserveName(f):
"""
Preserve the name of the given function on the decorated function.
"""
def decorator(decorated):
return copyfunction(decorated,
dict(name=f.__name__), dict(name=f.__name__))
return decorator | [
"def",
"preserveName",
"(",
"f",
")",
":",
"def",
"decorator",
"(",
"decorated",
")",
":",
"return",
"copyfunction",
"(",
"decorated",
",",
"dict",
"(",
"name",
"=",
"f",
".",
"__name__",
")",
",",
"dict",
"(",
"name",
"=",
"f",
".",
"__name__",
")",... | Preserve the name of the given function on the decorated function. | [
"Preserve",
"the",
"name",
"of",
"the",
"given",
"function",
"on",
"the",
"decorated",
"function",
"."
] | 80c6eb925eeef120443f4f9c81398bea629884b8 | https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_introspection.py#L35-L42 | train | 34,263 |
heroku/kafka-helper | kafka_helper.py | get_kafka_ssl_context | def get_kafka_ssl_context():
"""
Returns an SSL context based on the certificate information in the Kafka config vars.
"""
# NOTE: We assume that Kafka environment variables are present. If using
# Apache Kafka on Heroku, they will be available in your app configuration.
#
# 1. Write the PEM certificates necessary for connecting to the Kafka brokers to physical
# files. The broker connection SSL certs are passed in environment/config variables and
# the python and ssl libraries require them in physical files. The public keys are written
# to short lived NamedTemporaryFile files; the client key is encrypted before writing to
# the short lived NamedTemporaryFile
#
# 2. Create and return an SSLContext for connecting to the Kafka brokers referencing the
# PEM certificates written above
#
# stash the kafka certs in named temporary files for loading into SSLContext. Initialize the
# SSLContext inside the with so when it goes out of scope the files are removed which has them
# existing for the shortest amount of time. As extra caution password
# protect/encrypt the client key
with NamedTemporaryFile(suffix='.crt') as cert_file, \
NamedTemporaryFile(suffix='.key') as key_file, \
NamedTemporaryFile(suffix='.crt') as trust_file:
cert_file.write(os.environ['KAFKA_CLIENT_CERT'].encode('utf-8'))
cert_file.flush()
# setup cryptography to password encrypt/protect the client key so it's not in the clear on
# the filesystem. Use the generated password in the call to load_cert_chain
passwd = standard_b64encode(os.urandom(33))
private_key = serialization.load_pem_private_key(
os.environ['KAFKA_CLIENT_CERT_KEY'].encode('utf-8'),
password=None,
backend=default_backend()
)
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(passwd)
)
key_file.write(pem)
key_file.flush()
trust_file.write(os.environ['KAFKA_TRUSTED_CERT'].encode('utf-8'))
trust_file.flush()
# create an SSLContext for passing into the kafka provider using the create_default_context
# function which creates an SSLContext with protocol set to PROTOCOL_SSLv23, OP_NO_SSLv2,
# and OP_NO_SSLv3 when purpose=SERVER_AUTH.
ssl_context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH, cafile=trust_file.name)
ssl_context.load_cert_chain(cert_file.name, keyfile=key_file.name, password=passwd)
# Intentionally disabling hostname checking. The Kafka cluster runs in the cloud and Apache
# Kafka on Heroku doesn't currently provide stable hostnames. We're pinned to a specific certificate
# for this connection even though the certificate doesn't include host information. We rely
# on the ca trust_cert for this purpose.
ssl_context.check_hostname = False
return ssl_context | python | def get_kafka_ssl_context():
"""
Returns an SSL context based on the certificate information in the Kafka config vars.
"""
# NOTE: We assume that Kafka environment variables are present. If using
# Apache Kafka on Heroku, they will be available in your app configuration.
#
# 1. Write the PEM certificates necessary for connecting to the Kafka brokers to physical
# files. The broker connection SSL certs are passed in environment/config variables and
# the python and ssl libraries require them in physical files. The public keys are written
# to short lived NamedTemporaryFile files; the client key is encrypted before writing to
# the short lived NamedTemporaryFile
#
# 2. Create and return an SSLContext for connecting to the Kafka brokers referencing the
# PEM certificates written above
#
# stash the kafka certs in named temporary files for loading into SSLContext. Initialize the
# SSLContext inside the with so when it goes out of scope the files are removed which has them
# existing for the shortest amount of time. As extra caution password
# protect/encrypt the client key
with NamedTemporaryFile(suffix='.crt') as cert_file, \
NamedTemporaryFile(suffix='.key') as key_file, \
NamedTemporaryFile(suffix='.crt') as trust_file:
cert_file.write(os.environ['KAFKA_CLIENT_CERT'].encode('utf-8'))
cert_file.flush()
# setup cryptography to password encrypt/protect the client key so it's not in the clear on
# the filesystem. Use the generated password in the call to load_cert_chain
passwd = standard_b64encode(os.urandom(33))
private_key = serialization.load_pem_private_key(
os.environ['KAFKA_CLIENT_CERT_KEY'].encode('utf-8'),
password=None,
backend=default_backend()
)
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(passwd)
)
key_file.write(pem)
key_file.flush()
trust_file.write(os.environ['KAFKA_TRUSTED_CERT'].encode('utf-8'))
trust_file.flush()
# create an SSLContext for passing into the kafka provider using the create_default_context
# function which creates an SSLContext with protocol set to PROTOCOL_SSLv23, OP_NO_SSLv2,
# and OP_NO_SSLv3 when purpose=SERVER_AUTH.
ssl_context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH, cafile=trust_file.name)
ssl_context.load_cert_chain(cert_file.name, keyfile=key_file.name, password=passwd)
# Intentionally disabling hostname checking. The Kafka cluster runs in the cloud and Apache
# Kafka on Heroku doesn't currently provide stable hostnames. We're pinned to a specific certificate
# for this connection even though the certificate doesn't include host information. We rely
# on the ca trust_cert for this purpose.
ssl_context.check_hostname = False
return ssl_context | [
"def",
"get_kafka_ssl_context",
"(",
")",
":",
"# NOTE: We assume that Kafka environment variables are present. If using",
"# Apache Kafka on Heroku, they will be available in your app configuration.",
"#",
"# 1. Write the PEM certificates necessary for connecting to the Kafka brokers to physical",
... | Returns an SSL context based on the certificate information in the Kafka config vars. | [
"Returns",
"an",
"SSL",
"context",
"based",
"on",
"the",
"certificate",
"information",
"in",
"the",
"Kafka",
"config",
"vars",
"."
] | 33d809ac4d6e7294ff44f18635d4ea71c882906a | https://github.com/heroku/kafka-helper/blob/33d809ac4d6e7294ff44f18635d4ea71c882906a/kafka_helper.py#L22-L81 | train | 34,264 |
heroku/kafka-helper | kafka_helper.py | get_kafka_producer | def get_kafka_producer(acks='all',
value_serializer=lambda v: json.dumps(v).encode('utf-8')):
"""
Return a KafkaProducer that uses the SSLContext created with create_ssl_context.
"""
producer = KafkaProducer(
bootstrap_servers=get_kafka_brokers(),
security_protocol='SSL',
ssl_context=get_kafka_ssl_context(),
value_serializer=value_serializer,
acks=acks
)
return producer | python | def get_kafka_producer(acks='all',
value_serializer=lambda v: json.dumps(v).encode('utf-8')):
"""
Return a KafkaProducer that uses the SSLContext created with create_ssl_context.
"""
producer = KafkaProducer(
bootstrap_servers=get_kafka_brokers(),
security_protocol='SSL',
ssl_context=get_kafka_ssl_context(),
value_serializer=value_serializer,
acks=acks
)
return producer | [
"def",
"get_kafka_producer",
"(",
"acks",
"=",
"'all'",
",",
"value_serializer",
"=",
"lambda",
"v",
":",
"json",
".",
"dumps",
"(",
"v",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
":",
"producer",
"=",
"KafkaProducer",
"(",
"bootstrap_servers",
"=",
... | Return a KafkaProducer that uses the SSLContext created with create_ssl_context. | [
"Return",
"a",
"KafkaProducer",
"that",
"uses",
"the",
"SSLContext",
"created",
"with",
"create_ssl_context",
"."
] | 33d809ac4d6e7294ff44f18635d4ea71c882906a | https://github.com/heroku/kafka-helper/blob/33d809ac4d6e7294ff44f18635d4ea71c882906a/kafka_helper.py#L98-L112 | train | 34,265 |
heroku/kafka-helper | kafka_helper.py | get_kafka_consumer | def get_kafka_consumer(topic=None,
value_deserializer=lambda v: json.loads(v.decode('utf-8'))):
"""
Return a KafkaConsumer that uses the SSLContext created with create_ssl_context.
"""
# Create the KafkaConsumer connected to the specified brokers. Use the
# SSLContext that is created with create_ssl_context.
consumer = KafkaConsumer(
topic,
bootstrap_servers=get_kafka_brokers(),
security_protocol='SSL',
ssl_context=get_kafka_ssl_context(),
value_deserializer=value_deserializer
)
return consumer | python | def get_kafka_consumer(topic=None,
value_deserializer=lambda v: json.loads(v.decode('utf-8'))):
"""
Return a KafkaConsumer that uses the SSLContext created with create_ssl_context.
"""
# Create the KafkaConsumer connected to the specified brokers. Use the
# SSLContext that is created with create_ssl_context.
consumer = KafkaConsumer(
topic,
bootstrap_servers=get_kafka_brokers(),
security_protocol='SSL',
ssl_context=get_kafka_ssl_context(),
value_deserializer=value_deserializer
)
return consumer | [
"def",
"get_kafka_consumer",
"(",
"topic",
"=",
"None",
",",
"value_deserializer",
"=",
"lambda",
"v",
":",
"json",
".",
"loads",
"(",
"v",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
":",
"# Create the KafkaConsumer connected to the specified brokers. Use the",
... | Return a KafkaConsumer that uses the SSLContext created with create_ssl_context. | [
"Return",
"a",
"KafkaConsumer",
"that",
"uses",
"the",
"SSLContext",
"created",
"with",
"create_ssl_context",
"."
] | 33d809ac4d6e7294ff44f18635d4ea71c882906a | https://github.com/heroku/kafka-helper/blob/33d809ac4d6e7294ff44f18635d4ea71c882906a/kafka_helper.py#L115-L131 | train | 34,266 |
goerz/clusterjob | clusterjob/backends/sge.py | SgeBackend.replace_body_vars | def replace_body_vars(self, body):
"""Given a multiline string that is the body of the job script, replace
the placeholders for environment variables with backend-specific
realizations, and return the modified body. See the `job_vars`
attribute for the mappings that are performed.
"""
for key, val in self.job_vars.items():
body = body.replace(key, val)
return body | python | def replace_body_vars(self, body):
"""Given a multiline string that is the body of the job script, replace
the placeholders for environment variables with backend-specific
realizations, and return the modified body. See the `job_vars`
attribute for the mappings that are performed.
"""
for key, val in self.job_vars.items():
body = body.replace(key, val)
return body | [
"def",
"replace_body_vars",
"(",
"self",
",",
"body",
")",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"job_vars",
".",
"items",
"(",
")",
":",
"body",
"=",
"body",
".",
"replace",
"(",
"key",
",",
"val",
")",
"return",
"body"
] | Given a multiline string that is the body of the job script, replace
the placeholders for environment variables with backend-specific
realizations, and return the modified body. See the `job_vars`
attribute for the mappings that are performed. | [
"Given",
"a",
"multiline",
"string",
"that",
"is",
"the",
"body",
"of",
"the",
"job",
"script",
"replace",
"the",
"placeholders",
"for",
"environment",
"variables",
"with",
"backend",
"-",
"specific",
"realizations",
"and",
"return",
"the",
"modified",
"body",
... | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/backends/sge.py#L157-L165 | train | 34,267 |
goerz/clusterjob | clusterjob/utils.py | set_executable | def set_executable(filename):
"""Set the exectuable bit on the given filename"""
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) | python | def set_executable(filename):
"""Set the exectuable bit on the given filename"""
st = os.stat(filename)
os.chmod(filename, st.st_mode | stat.S_IEXEC) | [
"def",
"set_executable",
"(",
"filename",
")",
":",
"st",
"=",
"os",
".",
"stat",
"(",
"filename",
")",
"os",
".",
"chmod",
"(",
"filename",
",",
"st",
".",
"st_mode",
"|",
"stat",
".",
"S_IEXEC",
")"
] | Set the exectuable bit on the given filename | [
"Set",
"the",
"exectuable",
"bit",
"on",
"the",
"given",
"filename"
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/utils.py#L19-L22 | train | 34,268 |
goerz/clusterjob | clusterjob/utils.py | split_seq | def split_seq(seq, n_chunks):
"""Split the given sequence into `n_chunks`. Suitable for distributing an
array of jobs over a fixed number of workers.
>>> split_seq([1,2,3,4,5,6], 3)
[[1, 2], [3, 4], [5, 6]]
>>> split_seq([1,2,3,4,5,6], 2)
[[1, 2, 3], [4, 5, 6]]
>>> split_seq([1,2,3,4,5,6,7], 3)
[[1, 2], [3, 4, 5], [6, 7]]
"""
newseq = []
splitsize = 1.0/n_chunks*len(seq)
for i in range(n_chunks):
newseq.append(seq[int(round(i*splitsize)):int(round((i+1)*splitsize))])
return newseq | python | def split_seq(seq, n_chunks):
"""Split the given sequence into `n_chunks`. Suitable for distributing an
array of jobs over a fixed number of workers.
>>> split_seq([1,2,3,4,5,6], 3)
[[1, 2], [3, 4], [5, 6]]
>>> split_seq([1,2,3,4,5,6], 2)
[[1, 2, 3], [4, 5, 6]]
>>> split_seq([1,2,3,4,5,6,7], 3)
[[1, 2], [3, 4, 5], [6, 7]]
"""
newseq = []
splitsize = 1.0/n_chunks*len(seq)
for i in range(n_chunks):
newseq.append(seq[int(round(i*splitsize)):int(round((i+1)*splitsize))])
return newseq | [
"def",
"split_seq",
"(",
"seq",
",",
"n_chunks",
")",
":",
"newseq",
"=",
"[",
"]",
"splitsize",
"=",
"1.0",
"/",
"n_chunks",
"*",
"len",
"(",
"seq",
")",
"for",
"i",
"in",
"range",
"(",
"n_chunks",
")",
":",
"newseq",
".",
"append",
"(",
"seq",
... | Split the given sequence into `n_chunks`. Suitable for distributing an
array of jobs over a fixed number of workers.
>>> split_seq([1,2,3,4,5,6], 3)
[[1, 2], [3, 4], [5, 6]]
>>> split_seq([1,2,3,4,5,6], 2)
[[1, 2, 3], [4, 5, 6]]
>>> split_seq([1,2,3,4,5,6,7], 3)
[[1, 2], [3, 4, 5], [6, 7]] | [
"Split",
"the",
"given",
"sequence",
"into",
"n_chunks",
".",
"Suitable",
"for",
"distributing",
"an",
"array",
"of",
"jobs",
"over",
"a",
"fixed",
"number",
"of",
"workers",
"."
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/utils.py#L31-L46 | train | 34,269 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.status | def status(self):
"""Return the job status as one of the codes defined in the
`clusterjob.status` module.
finished, communicate with the cluster to determine the job's status.
"""
if self._status >= COMPLETED:
return self._status
else:
cmd = self.backend.cmd_status(self, finished=False)
response = self._run_cmd(cmd, self.remote, ignore_exit_code=True,
ssh=self.ssh)
status = self.backend.get_status(response, finished=False)
if status is None:
cmd = self.backend.cmd_status(self, finished=True)
response = self._run_cmd(cmd, self.remote,
ignore_exit_code=True, ssh=self.ssh)
status = self.backend.get_status(response, finished=True)
prev_status = self._status
self._status = status
if self._status not in STATUS_CODES:
raise ValueError("Invalid status code %s", self._status)
if prev_status != self._status:
if self._status >= COMPLETED:
self.run_epilogue()
self.dump()
return self._status | python | def status(self):
"""Return the job status as one of the codes defined in the
`clusterjob.status` module.
finished, communicate with the cluster to determine the job's status.
"""
if self._status >= COMPLETED:
return self._status
else:
cmd = self.backend.cmd_status(self, finished=False)
response = self._run_cmd(cmd, self.remote, ignore_exit_code=True,
ssh=self.ssh)
status = self.backend.get_status(response, finished=False)
if status is None:
cmd = self.backend.cmd_status(self, finished=True)
response = self._run_cmd(cmd, self.remote,
ignore_exit_code=True, ssh=self.ssh)
status = self.backend.get_status(response, finished=True)
prev_status = self._status
self._status = status
if self._status not in STATUS_CODES:
raise ValueError("Invalid status code %s", self._status)
if prev_status != self._status:
if self._status >= COMPLETED:
self.run_epilogue()
self.dump()
return self._status | [
"def",
"status",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
">=",
"COMPLETED",
":",
"return",
"self",
".",
"_status",
"else",
":",
"cmd",
"=",
"self",
".",
"backend",
".",
"cmd_status",
"(",
"self",
",",
"finished",
"=",
"False",
")",
"resp... | Return the job status as one of the codes defined in the
`clusterjob.status` module.
finished, communicate with the cluster to determine the job's status. | [
"Return",
"the",
"job",
"status",
"as",
"one",
"of",
"the",
"codes",
"defined",
"in",
"the",
"clusterjob",
".",
"status",
"module",
".",
"finished",
"communicate",
"with",
"the",
"cluster",
"to",
"determine",
"the",
"job",
"s",
"status",
"."
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L927-L952 | train | 34,270 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.dump | def dump(self, cache_file=None):
"""Write dump out to file `cache_file`, defaulting to
``self.cache_file``"""
if cache_file is None:
cache_file = self.cache_file
if cache_file is not None:
self.cache_file = cache_file
with open(cache_file, 'wb') as pickle_fh:
pickle.dump(
(self.remote, self.backend.name, self.max_sleep_interval,
self.job_id, self._status, self.epilogue, self.ssh,
self.scp),
pickle_fh) | python | def dump(self, cache_file=None):
"""Write dump out to file `cache_file`, defaulting to
``self.cache_file``"""
if cache_file is None:
cache_file = self.cache_file
if cache_file is not None:
self.cache_file = cache_file
with open(cache_file, 'wb') as pickle_fh:
pickle.dump(
(self.remote, self.backend.name, self.max_sleep_interval,
self.job_id, self._status, self.epilogue, self.ssh,
self.scp),
pickle_fh) | [
"def",
"dump",
"(",
"self",
",",
"cache_file",
"=",
"None",
")",
":",
"if",
"cache_file",
"is",
"None",
":",
"cache_file",
"=",
"self",
".",
"cache_file",
"if",
"cache_file",
"is",
"not",
"None",
":",
"self",
".",
"cache_file",
"=",
"cache_file",
"with",... | Write dump out to file `cache_file`, defaulting to
``self.cache_file`` | [
"Write",
"dump",
"out",
"to",
"file",
"cache_file",
"defaulting",
"to",
"self",
".",
"cache_file"
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L963-L975 | train | 34,271 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.load | def load(cls, cache_file, backend=None):
"""Instantiate AsyncResult from dumped `cache_file`.
This is the inverse of :meth:`dump`.
Parameters
----------
cache_file: str
Name of file from which the run should be read.
backend: clusterjob.backends.ClusterjobBackend or None
The backend instance for the job. If None, the backend will be
determined by the *name* of the dumped job's backend.
"""
with open(cache_file, 'rb') as pickle_fh:
(remote, backend_name, max_sleep_interval, job_id, status,
epilogue, ssh, scp) = pickle.load(pickle_fh)
if backend is None:
backend = JobScript._backends[backend_name]
ar = cls(backend)
(ar.remote, ar.max_sleep_interval, ar.job_id, ar._status, ar.epilogue,
ar.ssh, ar.scp) \
= (remote, max_sleep_interval, job_id, status, epilogue, ssh, scp)
ar.cache_file = cache_file
return ar | python | def load(cls, cache_file, backend=None):
"""Instantiate AsyncResult from dumped `cache_file`.
This is the inverse of :meth:`dump`.
Parameters
----------
cache_file: str
Name of file from which the run should be read.
backend: clusterjob.backends.ClusterjobBackend or None
The backend instance for the job. If None, the backend will be
determined by the *name* of the dumped job's backend.
"""
with open(cache_file, 'rb') as pickle_fh:
(remote, backend_name, max_sleep_interval, job_id, status,
epilogue, ssh, scp) = pickle.load(pickle_fh)
if backend is None:
backend = JobScript._backends[backend_name]
ar = cls(backend)
(ar.remote, ar.max_sleep_interval, ar.job_id, ar._status, ar.epilogue,
ar.ssh, ar.scp) \
= (remote, max_sleep_interval, job_id, status, epilogue, ssh, scp)
ar.cache_file = cache_file
return ar | [
"def",
"load",
"(",
"cls",
",",
"cache_file",
",",
"backend",
"=",
"None",
")",
":",
"with",
"open",
"(",
"cache_file",
",",
"'rb'",
")",
"as",
"pickle_fh",
":",
"(",
"remote",
",",
"backend_name",
",",
"max_sleep_interval",
",",
"job_id",
",",
"status",... | Instantiate AsyncResult from dumped `cache_file`.
This is the inverse of :meth:`dump`.
Parameters
----------
cache_file: str
Name of file from which the run should be read.
backend: clusterjob.backends.ClusterjobBackend or None
The backend instance for the job. If None, the backend will be
determined by the *name* of the dumped job's backend. | [
"Instantiate",
"AsyncResult",
"from",
"dumped",
"cache_file",
"."
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L978-L1003 | train | 34,272 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.wait | def wait(self, timeout=None):
"""Wait until the result is available or until roughly timeout seconds
pass."""
logger = logging.getLogger(__name__)
if int(self.max_sleep_interval) < int(self._min_sleep_interval):
self.max_sleep_interval = int(self._min_sleep_interval)
t0 = time.time()
sleep_seconds = min(5, self.max_sleep_interval)
status = self.status
prev_status = status
while status < COMPLETED:
logger.debug("sleep for %d seconds", sleep_seconds)
time.sleep(sleep_seconds)
if 2*sleep_seconds <= self.max_sleep_interval:
sleep_seconds *= 2
if timeout is not None:
if int(time.time() - t0) > int(timeout):
return
status = self.status
if status != prev_status:
sleep_seconds = min(5, self.max_sleep_interval)
prev_status = status | python | def wait(self, timeout=None):
"""Wait until the result is available or until roughly timeout seconds
pass."""
logger = logging.getLogger(__name__)
if int(self.max_sleep_interval) < int(self._min_sleep_interval):
self.max_sleep_interval = int(self._min_sleep_interval)
t0 = time.time()
sleep_seconds = min(5, self.max_sleep_interval)
status = self.status
prev_status = status
while status < COMPLETED:
logger.debug("sleep for %d seconds", sleep_seconds)
time.sleep(sleep_seconds)
if 2*sleep_seconds <= self.max_sleep_interval:
sleep_seconds *= 2
if timeout is not None:
if int(time.time() - t0) > int(timeout):
return
status = self.status
if status != prev_status:
sleep_seconds = min(5, self.max_sleep_interval)
prev_status = status | [
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"int",
"(",
"self",
".",
"max_sleep_interval",
")",
"<",
"int",
"(",
"self",
".",
"_min_sleep_interval",
")",
":"... | Wait until the result is available or until roughly timeout seconds
pass. | [
"Wait",
"until",
"the",
"result",
"is",
"available",
"or",
"until",
"roughly",
"timeout",
"seconds",
"pass",
"."
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L1005-L1026 | train | 34,273 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.successful | def successful(self):
"""Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed"""
status = self.status
assert status >= COMPLETED, "status is %s" % status
return (self.status == COMPLETED) | python | def successful(self):
"""Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed"""
status = self.status
assert status >= COMPLETED, "status is %s" % status
return (self.status == COMPLETED) | [
"def",
"successful",
"(",
"self",
")",
":",
"status",
"=",
"self",
".",
"status",
"assert",
"status",
">=",
"COMPLETED",
",",
"\"status is %s\"",
"%",
"status",
"return",
"(",
"self",
".",
"status",
"==",
"COMPLETED",
")"
] | Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed | [
"Return",
"True",
"if",
"the",
"job",
"finished",
"with",
"a",
"COMPLETED",
"status",
"False",
"if",
"it",
"finished",
"with",
"a",
"CANCELLED",
"or",
"FAILED",
"status",
".",
"Raise",
"an",
"AssertionError",
"if",
"the",
"job",
"has",
"not",
"completed"
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L1032-L1038 | train | 34,274 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.cancel | def cancel(self):
"""Instruct the cluster to cancel the running job. Has no effect if
job is not running"""
if self.status > COMPLETED:
return
cmd = self.backend.cmd_cancel(self)
self._run_cmd(cmd, self.remote, ignore_exit_code=True, ssh=self.ssh)
self._status = CANCELLED
self.dump() | python | def cancel(self):
"""Instruct the cluster to cancel the running job. Has no effect if
job is not running"""
if self.status > COMPLETED:
return
cmd = self.backend.cmd_cancel(self)
self._run_cmd(cmd, self.remote, ignore_exit_code=True, ssh=self.ssh)
self._status = CANCELLED
self.dump() | [
"def",
"cancel",
"(",
"self",
")",
":",
"if",
"self",
".",
"status",
">",
"COMPLETED",
":",
"return",
"cmd",
"=",
"self",
".",
"backend",
".",
"cmd_cancel",
"(",
"self",
")",
"self",
".",
"_run_cmd",
"(",
"cmd",
",",
"self",
".",
"remote",
",",
"ig... | Instruct the cluster to cancel the running job. Has no effect if
job is not running | [
"Instruct",
"the",
"cluster",
"to",
"cancel",
"the",
"running",
"job",
".",
"Has",
"no",
"effect",
"if",
"job",
"is",
"not",
"running"
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L1040-L1048 | train | 34,275 |
goerz/clusterjob | clusterjob/__init__.py | AsyncResult.run_epilogue | def run_epilogue(self):
"""Run the epilogue script in the current working directory.
raises:
subprocess.CalledProcessError: if the script does not finish with
exit code zero.
"""
logger = logging.getLogger(__name__)
if self.epilogue is not None:
with tempfile.NamedTemporaryFile('w', delete=False) as epilogue_fh:
epilogue_fh.write(self.epilogue)
tempfilename = epilogue_fh.name
set_executable(tempfilename)
try:
sp.check_output( [tempfilename, ], stderr=sp.STDOUT)
except sp.CalledProcessError as e:
logger.error(dedent(r'''
Epilogue script did not exit cleanly.
CWD: {cwd}
epilogue: ---
{epilogue}
---
response: ---
{response}
---
''').format(cwd=os.getcwd(), epilogue=self.epilogue,
response=e.output))
raise
finally:
os.unlink(tempfilename) | python | def run_epilogue(self):
"""Run the epilogue script in the current working directory.
raises:
subprocess.CalledProcessError: if the script does not finish with
exit code zero.
"""
logger = logging.getLogger(__name__)
if self.epilogue is not None:
with tempfile.NamedTemporaryFile('w', delete=False) as epilogue_fh:
epilogue_fh.write(self.epilogue)
tempfilename = epilogue_fh.name
set_executable(tempfilename)
try:
sp.check_output( [tempfilename, ], stderr=sp.STDOUT)
except sp.CalledProcessError as e:
logger.error(dedent(r'''
Epilogue script did not exit cleanly.
CWD: {cwd}
epilogue: ---
{epilogue}
---
response: ---
{response}
---
''').format(cwd=os.getcwd(), epilogue=self.epilogue,
response=e.output))
raise
finally:
os.unlink(tempfilename) | [
"def",
"run_epilogue",
"(",
"self",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"self",
".",
"epilogue",
"is",
"not",
"None",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'w'",
",",
"delete",
"=",
"False... | Run the epilogue script in the current working directory.
raises:
subprocess.CalledProcessError: if the script does not finish with
exit code zero. | [
"Run",
"the",
"epilogue",
"script",
"in",
"the",
"current",
"working",
"directory",
"."
] | 361760d1a6dd3cbde49c5c2158a3acd0c314a749 | https://github.com/goerz/clusterjob/blob/361760d1a6dd3cbde49c5c2158a3acd0c314a749/clusterjob/__init__.py#L1050-L1079 | train | 34,276 |
kennknowles/python-jsonpath-rw | jsonpath_rw/lexer.py | JsonPathLexer.t_ID | def t_ID(self, t):
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
t.type = self.reserved_words.get(t.value, 'ID')
return t | python | def t_ID(self, t):
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
t.type = self.reserved_words.get(t.value, 'ID')
return t | [
"def",
"t_ID",
"(",
"self",
",",
"t",
")",
":",
"t",
".",
"type",
"=",
"self",
".",
"reserved_words",
".",
"get",
"(",
"t",
".",
"value",
",",
"'ID'",
")",
"return",
"t"
] | r'[a-zA-Z_@][a-zA-Z0-9_@\-]* | [
"r",
"[",
"a",
"-",
"zA",
"-",
"Z_"
] | f615451d7b405e23e0f80b15cad03b1427b0256d | https://github.com/kennknowles/python-jsonpath-rw/blob/f615451d7b405e23e0f80b15cad03b1427b0256d/jsonpath_rw/lexer.py#L63-L66 | train | 34,277 |
kennknowles/python-jsonpath-rw | jsonpath_rw/lexer.py | JsonPathLexer.t_newline | def t_newline(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos | python | def t_newline(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos | [
"def",
"t_newline",
"(",
"self",
",",
"t",
")",
":",
"t",
".",
"lexer",
".",
"lineno",
"+=",
"1",
"t",
".",
"lexer",
".",
"latest_newline",
"=",
"t",
".",
"lexpos"
] | r'\n | [
"r",
"\\",
"n"
] | f615451d7b405e23e0f80b15cad03b1427b0256d | https://github.com/kennknowles/python-jsonpath-rw/blob/f615451d7b405e23e0f80b15cad03b1427b0256d/jsonpath_rw/lexer.py#L159-L162 | train | 34,278 |
kennknowles/python-jsonpath-rw | jsonpath_rw/jsonpath.py | DatumInContext.id_pseudopath | def id_pseudopath(self):
"""
Looks like a path, but with ids stuck in when available
"""
try:
pseudopath = Fields(str(self.value[auto_id_field]))
except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
pseudopath = self.path
if self.context:
return self.context.id_pseudopath.child(pseudopath)
else:
return pseudopath | python | def id_pseudopath(self):
"""
Looks like a path, but with ids stuck in when available
"""
try:
pseudopath = Fields(str(self.value[auto_id_field]))
except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
pseudopath = self.path
if self.context:
return self.context.id_pseudopath.child(pseudopath)
else:
return pseudopath | [
"def",
"id_pseudopath",
"(",
"self",
")",
":",
"try",
":",
"pseudopath",
"=",
"Fields",
"(",
"str",
"(",
"self",
".",
"value",
"[",
"auto_id_field",
"]",
")",
")",
"except",
"(",
"TypeError",
",",
"AttributeError",
",",
"KeyError",
")",
":",
"# This may ... | Looks like a path, but with ids stuck in when available | [
"Looks",
"like",
"a",
"path",
"but",
"with",
"ids",
"stuck",
"in",
"when",
"available"
] | f615451d7b405e23e0f80b15cad03b1427b0256d | https://github.com/kennknowles/python-jsonpath-rw/blob/f615451d7b405e23e0f80b15cad03b1427b0256d/jsonpath_rw/jsonpath.py#L97-L109 | train | 34,279 |
hashedin/jinjasql | jinjasql/core.py | bind | def bind(value, name):
"""A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind
"""
if isinstance(value, Markup):
return value
elif requires_in_clause(value):
raise MissingInClauseException("""Got a list or tuple.
Did you forget to apply '|inclause' to your query?""")
else:
return _bind_param(_thread_local.bind_params, name, value) | python | def bind(value, name):
"""A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind
"""
if isinstance(value, Markup):
return value
elif requires_in_clause(value):
raise MissingInClauseException("""Got a list or tuple.
Did you forget to apply '|inclause' to your query?""")
else:
return _bind_param(_thread_local.bind_params, name, value) | [
"def",
"bind",
"(",
"value",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Markup",
")",
":",
"return",
"value",
"elif",
"requires_in_clause",
"(",
"value",
")",
":",
"raise",
"MissingInClauseException",
"(",
"\"\"\"Got a list or tuple. \n ... | A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind | [
"A",
"filter",
"that",
"prints",
"%s",
"and",
"stores",
"the",
"value",
"in",
"an",
"array",
"so",
"that",
"it",
"can",
"be",
"bound",
"using",
"a",
"prepared",
"statement"
] | c81795b9280f3815f08b3b19df61868c6265bf1b | https://github.com/hashedin/jinjasql/blob/c81795b9280f3815f08b3b19df61868c6265bf1b/jinjasql/core.py#L92-L105 | train | 34,280 |
alecthomas/injector | injector/__init__.py | inject | def inject(function):
"""Decorator declaring parameters to be injected.
eg.
>>> Sizes = Key('sizes')
>>> Names = Key('names')
>>>
>>> class A:
... @inject
... def __init__(self, number: int, name: str, sizes: Sizes):
... print([number, name, sizes])
...
>>> def configure(binder):
... binder.bind(A)
... binder.bind(int, to=123)
... binder.bind(str, to='Bob')
... binder.bind(Sizes, to=[1, 2, 3])
Use the Injector to get a new instance of A:
>>> a = Injector(configure).get(A)
[123, 'Bob', [1, 2, 3]]
.. note::
This decorator is to be used on class constructors. Using it on non-constructor
methods worked in the past but it was an implementation detail rather than
a design decision.
Third party libraries may, however, provide support for injecting dependencies
into non-constructor methods or free functions in one form or another.
"""
try:
bindings = _infer_injected_bindings(function)
except _BindingNotYetAvailable:
bindings = 'deferred'
return method_wrapper(function, bindings) | python | def inject(function):
"""Decorator declaring parameters to be injected.
eg.
>>> Sizes = Key('sizes')
>>> Names = Key('names')
>>>
>>> class A:
... @inject
... def __init__(self, number: int, name: str, sizes: Sizes):
... print([number, name, sizes])
...
>>> def configure(binder):
... binder.bind(A)
... binder.bind(int, to=123)
... binder.bind(str, to='Bob')
... binder.bind(Sizes, to=[1, 2, 3])
Use the Injector to get a new instance of A:
>>> a = Injector(configure).get(A)
[123, 'Bob', [1, 2, 3]]
.. note::
This decorator is to be used on class constructors. Using it on non-constructor
methods worked in the past but it was an implementation detail rather than
a design decision.
Third party libraries may, however, provide support for injecting dependencies
into non-constructor methods or free functions in one form or another.
"""
try:
bindings = _infer_injected_bindings(function)
except _BindingNotYetAvailable:
bindings = 'deferred'
return method_wrapper(function, bindings) | [
"def",
"inject",
"(",
"function",
")",
":",
"try",
":",
"bindings",
"=",
"_infer_injected_bindings",
"(",
"function",
")",
"except",
"_BindingNotYetAvailable",
":",
"bindings",
"=",
"'deferred'",
"return",
"method_wrapper",
"(",
"function",
",",
"bindings",
")"
] | Decorator declaring parameters to be injected.
eg.
>>> Sizes = Key('sizes')
>>> Names = Key('names')
>>>
>>> class A:
... @inject
... def __init__(self, number: int, name: str, sizes: Sizes):
... print([number, name, sizes])
...
>>> def configure(binder):
... binder.bind(A)
... binder.bind(int, to=123)
... binder.bind(str, to='Bob')
... binder.bind(Sizes, to=[1, 2, 3])
Use the Injector to get a new instance of A:
>>> a = Injector(configure).get(A)
[123, 'Bob', [1, 2, 3]]
.. note::
This decorator is to be used on class constructors. Using it on non-constructor
methods worked in the past but it was an implementation detail rather than
a design decision.
Third party libraries may, however, provide support for injecting dependencies
into non-constructor methods or free functions in one form or another. | [
"Decorator",
"declaring",
"parameters",
"to",
"be",
"injected",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L904-L941 | train | 34,281 |
alecthomas/injector | injector/__init__.py | noninjectable | def noninjectable(*args):
"""Mark some parameters as not injectable.
This serves as documentation for people reading the code and will prevent
Injector from ever attempting to provide the parameters.
For example:
>>> class Service:
... pass
...
>>> class SomeClass:
... @inject
... @noninjectable('user_id')
... def __init__(self, service: Service, user_id: int):
... # ...
... pass
:func:`noninjectable` decorations can be stacked on top of
each other and the order in which a function is decorated with
:func:`inject` and :func:`noninjectable`
doesn't matter.
"""
def decorator(function):
argspec = inspect.getfullargspec(inspect.unwrap(function))
for arg in args:
if arg not in argspec.args and arg not in argspec.kwonlyargs:
raise UnknownArgument('Unable to mark unknown argument %s ' 'as non-injectable.' % arg)
existing = getattr(function, '__noninjectables__', set())
merged = existing | set(args)
function.__noninjectables__ = merged
return function
return decorator | python | def noninjectable(*args):
"""Mark some parameters as not injectable.
This serves as documentation for people reading the code and will prevent
Injector from ever attempting to provide the parameters.
For example:
>>> class Service:
... pass
...
>>> class SomeClass:
... @inject
... @noninjectable('user_id')
... def __init__(self, service: Service, user_id: int):
... # ...
... pass
:func:`noninjectable` decorations can be stacked on top of
each other and the order in which a function is decorated with
:func:`inject` and :func:`noninjectable`
doesn't matter.
"""
def decorator(function):
argspec = inspect.getfullargspec(inspect.unwrap(function))
for arg in args:
if arg not in argspec.args and arg not in argspec.kwonlyargs:
raise UnknownArgument('Unable to mark unknown argument %s ' 'as non-injectable.' % arg)
existing = getattr(function, '__noninjectables__', set())
merged = existing | set(args)
function.__noninjectables__ = merged
return function
return decorator | [
"def",
"noninjectable",
"(",
"*",
"args",
")",
":",
"def",
"decorator",
"(",
"function",
")",
":",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"inspect",
".",
"unwrap",
"(",
"function",
")",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",... | Mark some parameters as not injectable.
This serves as documentation for people reading the code and will prevent
Injector from ever attempting to provide the parameters.
For example:
>>> class Service:
... pass
...
>>> class SomeClass:
... @inject
... @noninjectable('user_id')
... def __init__(self, service: Service, user_id: int):
... # ...
... pass
:func:`noninjectable` decorations can be stacked on top of
each other and the order in which a function is decorated with
:func:`inject` and :func:`noninjectable`
doesn't matter. | [
"Mark",
"some",
"parameters",
"as",
"not",
"injectable",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L944-L979 | train | 34,282 |
alecthomas/injector | injector/__init__.py | Binder.bind | def bind(self, interface, to=None, scope=None):
"""Bind an interface to an implementation.
:param interface: Interface or :func:`Key` to bind.
:param to: Instance or class to bind to, or an explicit
:class:`Provider` subclass.
:param scope: Optional :class:`Scope` in which to bind.
"""
if type(interface) is type and issubclass(interface, (BaseMappingKey, BaseSequenceKey)):
return self.multibind(interface, to, scope=scope)
key = BindingKey.create(interface)
self._bindings[key] = self.create_binding(interface, to, scope) | python | def bind(self, interface, to=None, scope=None):
"""Bind an interface to an implementation.
:param interface: Interface or :func:`Key` to bind.
:param to: Instance or class to bind to, or an explicit
:class:`Provider` subclass.
:param scope: Optional :class:`Scope` in which to bind.
"""
if type(interface) is type and issubclass(interface, (BaseMappingKey, BaseSequenceKey)):
return self.multibind(interface, to, scope=scope)
key = BindingKey.create(interface)
self._bindings[key] = self.create_binding(interface, to, scope) | [
"def",
"bind",
"(",
"self",
",",
"interface",
",",
"to",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"if",
"type",
"(",
"interface",
")",
"is",
"type",
"and",
"issubclass",
"(",
"interface",
",",
"(",
"BaseMappingKey",
",",
"BaseSequenceKey",
")"... | Bind an interface to an implementation.
:param interface: Interface or :func:`Key` to bind.
:param to: Instance or class to bind to, or an explicit
:class:`Provider` subclass.
:param scope: Optional :class:`Scope` in which to bind. | [
"Bind",
"an",
"interface",
"to",
"an",
"implementation",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L293-L304 | train | 34,283 |
alecthomas/injector | injector/__init__.py | Binder.multibind | def multibind(self, interface, to, scope=None):
"""Creates or extends a multi-binding.
A multi-binding maps from a key to a sequence, where each element in
the sequence is provided separately.
:param interface: :func:`MappingKey` or :func:`SequenceKey` to bind to.
:param to: Instance, class to bind to, or an explicit :class:`Provider`
subclass. Must provide a sequence.
:param scope: Optional Scope in which to bind.
"""
key = BindingKey.create(interface)
if key not in self._bindings:
if isinstance(interface, dict) or isinstance(interface, type) and issubclass(interface, dict):
provider = MapBindProvider()
else:
provider = MultiBindProvider()
binding = self.create_binding(interface, provider, scope)
self._bindings[key] = binding
else:
binding = self._bindings[key]
provider = binding.provider
assert isinstance(provider, ListOfProviders)
provider.append(self.provider_for(key.interface, to)) | python | def multibind(self, interface, to, scope=None):
"""Creates or extends a multi-binding.
A multi-binding maps from a key to a sequence, where each element in
the sequence is provided separately.
:param interface: :func:`MappingKey` or :func:`SequenceKey` to bind to.
:param to: Instance, class to bind to, or an explicit :class:`Provider`
subclass. Must provide a sequence.
:param scope: Optional Scope in which to bind.
"""
key = BindingKey.create(interface)
if key not in self._bindings:
if isinstance(interface, dict) or isinstance(interface, type) and issubclass(interface, dict):
provider = MapBindProvider()
else:
provider = MultiBindProvider()
binding = self.create_binding(interface, provider, scope)
self._bindings[key] = binding
else:
binding = self._bindings[key]
provider = binding.provider
assert isinstance(provider, ListOfProviders)
provider.append(self.provider_for(key.interface, to)) | [
"def",
"multibind",
"(",
"self",
",",
"interface",
",",
"to",
",",
"scope",
"=",
"None",
")",
":",
"key",
"=",
"BindingKey",
".",
"create",
"(",
"interface",
")",
"if",
"key",
"not",
"in",
"self",
".",
"_bindings",
":",
"if",
"isinstance",
"(",
"inte... | Creates or extends a multi-binding.
A multi-binding maps from a key to a sequence, where each element in
the sequence is provided separately.
:param interface: :func:`MappingKey` or :func:`SequenceKey` to bind to.
:param to: Instance, class to bind to, or an explicit :class:`Provider`
subclass. Must provide a sequence.
:param scope: Optional Scope in which to bind. | [
"Creates",
"or",
"extends",
"a",
"multi",
"-",
"binding",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L306-L329 | train | 34,284 |
alecthomas/injector | injector/__init__.py | Binder.install | def install(self, module):
"""Install a module into this binder.
In this context the module is one of the following:
* function taking the :class:`Binder` as it's only parameter
::
def configure(binder):
bind(str, to='s')
binder.install(configure)
* instance of :class:`Module` (instance of it's subclass counts)
::
class MyModule(Module):
def configure(self, binder):
binder.bind(str, to='s')
binder.install(MyModule())
* subclass of :class:`Module` - the subclass needs to be instantiable so if it
expects any parameters they need to be injected
::
binder.install(MyModule)
"""
if type(module) is type and issubclass(module, Module):
instance = module()
else:
instance = module
instance(self) | python | def install(self, module):
"""Install a module into this binder.
In this context the module is one of the following:
* function taking the :class:`Binder` as it's only parameter
::
def configure(binder):
bind(str, to='s')
binder.install(configure)
* instance of :class:`Module` (instance of it's subclass counts)
::
class MyModule(Module):
def configure(self, binder):
binder.bind(str, to='s')
binder.install(MyModule())
* subclass of :class:`Module` - the subclass needs to be instantiable so if it
expects any parameters they need to be injected
::
binder.install(MyModule)
"""
if type(module) is type and issubclass(module, Module):
instance = module()
else:
instance = module
instance(self) | [
"def",
"install",
"(",
"self",
",",
"module",
")",
":",
"if",
"type",
"(",
"module",
")",
"is",
"type",
"and",
"issubclass",
"(",
"module",
",",
"Module",
")",
":",
"instance",
"=",
"module",
"(",
")",
"else",
":",
"instance",
"=",
"module",
"instanc... | Install a module into this binder.
In this context the module is one of the following:
* function taking the :class:`Binder` as it's only parameter
::
def configure(binder):
bind(str, to='s')
binder.install(configure)
* instance of :class:`Module` (instance of it's subclass counts)
::
class MyModule(Module):
def configure(self, binder):
binder.bind(str, to='s')
binder.install(MyModule())
* subclass of :class:`Module` - the subclass needs to be instantiable so if it
expects any parameters they need to be injected
::
binder.install(MyModule) | [
"Install",
"a",
"module",
"into",
"this",
"binder",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L331-L366 | train | 34,285 |
alecthomas/injector | injector/__init__.py | Injector.get | def get(self, interface: Type[T], scope=None) -> T:
"""Get an instance of the given interface.
.. note::
Although this method is part of :class:`Injector`'s public interface
it's meant to be used in limited set of circumstances.
For example, to create some kind of root object (application object)
of your application (note that only one `get` call is needed,
inside the `Application` class and any of its dependencies
:func:`inject` can and should be used):
.. code-block:: python
class Application:
@inject
def __init__(self, dep1: Dep1, dep2: Dep2):
self.dep1 = dep1
self.dep2 = dep2
def run(self):
self.dep1.something()
injector = Injector(configuration)
application = injector.get(Application)
application.run()
:param interface: Interface whose implementation we want.
:param scope: Class of the Scope in which to resolve.
:returns: An implementation of interface.
"""
key = BindingKey.create(interface)
binding, binder = self.binder.get_binding(None, key)
scope = scope or binding.scope
if isinstance(scope, ScopeDecorator):
scope = scope.scope
# Fetch the corresponding Scope instance from the Binder.
scope_key = BindingKey.create(scope)
scope_binding, _ = binder.get_binding(None, scope_key)
scope_instance = scope_binding.provider.get(self)
log.debug(
'%sInjector.get(%r, scope=%r) using %r', self._log_prefix, interface, scope, binding.provider
)
result = scope_instance.get(key, binding.provider).get(self)
log.debug('%s -> %r', self._log_prefix, result)
return result | python | def get(self, interface: Type[T], scope=None) -> T:
"""Get an instance of the given interface.
.. note::
Although this method is part of :class:`Injector`'s public interface
it's meant to be used in limited set of circumstances.
For example, to create some kind of root object (application object)
of your application (note that only one `get` call is needed,
inside the `Application` class and any of its dependencies
:func:`inject` can and should be used):
.. code-block:: python
class Application:
@inject
def __init__(self, dep1: Dep1, dep2: Dep2):
self.dep1 = dep1
self.dep2 = dep2
def run(self):
self.dep1.something()
injector = Injector(configuration)
application = injector.get(Application)
application.run()
:param interface: Interface whose implementation we want.
:param scope: Class of the Scope in which to resolve.
:returns: An implementation of interface.
"""
key = BindingKey.create(interface)
binding, binder = self.binder.get_binding(None, key)
scope = scope or binding.scope
if isinstance(scope, ScopeDecorator):
scope = scope.scope
# Fetch the corresponding Scope instance from the Binder.
scope_key = BindingKey.create(scope)
scope_binding, _ = binder.get_binding(None, scope_key)
scope_instance = scope_binding.provider.get(self)
log.debug(
'%sInjector.get(%r, scope=%r) using %r', self._log_prefix, interface, scope, binding.provider
)
result = scope_instance.get(key, binding.provider).get(self)
log.debug('%s -> %r', self._log_prefix, result)
return result | [
"def",
"get",
"(",
"self",
",",
"interface",
":",
"Type",
"[",
"T",
"]",
",",
"scope",
"=",
"None",
")",
"->",
"T",
":",
"key",
"=",
"BindingKey",
".",
"create",
"(",
"interface",
")",
"binding",
",",
"binder",
"=",
"self",
".",
"binder",
".",
"g... | Get an instance of the given interface.
.. note::
Although this method is part of :class:`Injector`'s public interface
it's meant to be used in limited set of circumstances.
For example, to create some kind of root object (application object)
of your application (note that only one `get` call is needed,
inside the `Application` class and any of its dependencies
:func:`inject` can and should be used):
.. code-block:: python
class Application:
@inject
def __init__(self, dep1: Dep1, dep2: Dep2):
self.dep1 = dep1
self.dep2 = dep2
def run(self):
self.dep1.something()
injector = Injector(configuration)
application = injector.get(Application)
application.run()
:param interface: Interface whose implementation we want.
:param scope: Class of the Scope in which to resolve.
:returns: An implementation of interface. | [
"Get",
"an",
"instance",
"of",
"the",
"given",
"interface",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L655-L703 | train | 34,286 |
alecthomas/injector | injector/__init__.py | Injector.create_object | def create_object(self, cls: Type[T], additional_kwargs=None) -> T:
"""Create a new instance, satisfying any dependencies on cls."""
additional_kwargs = additional_kwargs or {}
log.debug('%sCreating %r object with %r', self._log_prefix, cls, additional_kwargs)
try:
instance = cls.__new__(cls)
except TypeError as e:
reraise(
e,
CallError(cls, getattr(cls.__new__, '__func__', cls.__new__), (), {}, e, self._stack),
maximum_frames=2,
)
try:
init = cls.__init__
self.call_with_injection(init, self_=instance, kwargs=additional_kwargs)
except TypeError as e:
# The reason why getattr() fallback is used here is that
# __init__.__func__ apparently doesn't exist for Key-type objects
reraise(
e,
CallError(
instance,
getattr(instance.__init__, '__func__', instance.__init__),
(),
additional_kwargs,
e,
self._stack,
),
)
return instance | python | def create_object(self, cls: Type[T], additional_kwargs=None) -> T:
"""Create a new instance, satisfying any dependencies on cls."""
additional_kwargs = additional_kwargs or {}
log.debug('%sCreating %r object with %r', self._log_prefix, cls, additional_kwargs)
try:
instance = cls.__new__(cls)
except TypeError as e:
reraise(
e,
CallError(cls, getattr(cls.__new__, '__func__', cls.__new__), (), {}, e, self._stack),
maximum_frames=2,
)
try:
init = cls.__init__
self.call_with_injection(init, self_=instance, kwargs=additional_kwargs)
except TypeError as e:
# The reason why getattr() fallback is used here is that
# __init__.__func__ apparently doesn't exist for Key-type objects
reraise(
e,
CallError(
instance,
getattr(instance.__init__, '__func__', instance.__init__),
(),
additional_kwargs,
e,
self._stack,
),
)
return instance | [
"def",
"create_object",
"(",
"self",
",",
"cls",
":",
"Type",
"[",
"T",
"]",
",",
"additional_kwargs",
"=",
"None",
")",
"->",
"T",
":",
"additional_kwargs",
"=",
"additional_kwargs",
"or",
"{",
"}",
"log",
".",
"debug",
"(",
"'%sCreating %r object with %r'"... | Create a new instance, satisfying any dependencies on cls. | [
"Create",
"a",
"new",
"instance",
"satisfying",
"any",
"dependencies",
"on",
"cls",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L708-L738 | train | 34,287 |
alecthomas/injector | injector/__init__.py | Injector.call_with_injection | def call_with_injection(self, callable, self_=None, args=(), kwargs={}):
"""Call a callable and provide it's dependencies if needed.
:param self_: Instance of a class callable belongs to if it's a method,
None otherwise.
:param args: Arguments to pass to callable.
:param kwargs: Keyword arguments to pass to callable.
:type callable: callable
:type args: tuple of objects
:type kwargs: dict of string -> object
:return: Value returned by callable.
"""
def _get_callable_bindings(callable):
if not hasattr(callable, '__bindings__'):
return {}
if callable.__bindings__ == 'deferred':
read_and_store_bindings(callable, _infer_injected_bindings(callable))
return callable.__bindings__
bindings = _get_callable_bindings(callable)
noninjectables = getattr(callable, '__noninjectables__', set())
signature = inspect.signature(callable)
full_args = args
if self_ is not None:
full_args = (self_,) + full_args
bound_arguments = signature.bind_partial(*full_args)
needed = dict(
(k, v)
for (k, v) in bindings.items()
if k not in kwargs and k not in noninjectables and k not in bound_arguments.arguments
)
dependencies = self.args_to_inject(
function=callable,
bindings=needed,
owner_key=self_.__class__ if self_ is not None else callable.__module__,
)
dependencies.update(kwargs)
try:
return callable(*full_args, **dependencies)
except TypeError as e:
reraise(e, CallError(self_, callable, args, dependencies, e, self._stack)) | python | def call_with_injection(self, callable, self_=None, args=(), kwargs={}):
"""Call a callable and provide it's dependencies if needed.
:param self_: Instance of a class callable belongs to if it's a method,
None otherwise.
:param args: Arguments to pass to callable.
:param kwargs: Keyword arguments to pass to callable.
:type callable: callable
:type args: tuple of objects
:type kwargs: dict of string -> object
:return: Value returned by callable.
"""
def _get_callable_bindings(callable):
if not hasattr(callable, '__bindings__'):
return {}
if callable.__bindings__ == 'deferred':
read_and_store_bindings(callable, _infer_injected_bindings(callable))
return callable.__bindings__
bindings = _get_callable_bindings(callable)
noninjectables = getattr(callable, '__noninjectables__', set())
signature = inspect.signature(callable)
full_args = args
if self_ is not None:
full_args = (self_,) + full_args
bound_arguments = signature.bind_partial(*full_args)
needed = dict(
(k, v)
for (k, v) in bindings.items()
if k not in kwargs and k not in noninjectables and k not in bound_arguments.arguments
)
dependencies = self.args_to_inject(
function=callable,
bindings=needed,
owner_key=self_.__class__ if self_ is not None else callable.__module__,
)
dependencies.update(kwargs)
try:
return callable(*full_args, **dependencies)
except TypeError as e:
reraise(e, CallError(self_, callable, args, dependencies, e, self._stack)) | [
"def",
"call_with_injection",
"(",
"self",
",",
"callable",
",",
"self_",
"=",
"None",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"def",
"_get_callable_bindings",
"(",
"callable",
")",
":",
"if",
"not",
"hasattr",
"(",
"callabl... | Call a callable and provide it's dependencies if needed.
:param self_: Instance of a class callable belongs to if it's a method,
None otherwise.
:param args: Arguments to pass to callable.
:param kwargs: Keyword arguments to pass to callable.
:type callable: callable
:type args: tuple of objects
:type kwargs: dict of string -> object
:return: Value returned by callable. | [
"Call",
"a",
"callable",
"and",
"provide",
"it",
"s",
"dependencies",
"if",
"needed",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L740-L786 | train | 34,288 |
alecthomas/injector | injector/__init__.py | Injector.args_to_inject | def args_to_inject(self, function, bindings, owner_key):
"""Inject arguments into a function.
:param function: The function.
:param bindings: Map of argument name to binding key to inject.
:param owner_key: A key uniquely identifying the *scope* of this function.
For a method this will be the owning class.
:returns: Dictionary of resolved arguments.
"""
dependencies = {}
key = (owner_key, function, tuple(sorted(bindings.items())))
def repr_key(k):
owner_key, function, bindings = k
return '%s.%s(injecting %s)' % (tuple(map(_describe, k[:2])) + (dict(k[2]),))
log.debug('%sProviding %r for %r', self._log_prefix, bindings, function)
if key in self._stack:
raise CircularDependency(
'circular dependency detected: %s -> %s'
% (' -> '.join(map(repr_key, self._stack)), repr_key(key))
)
self._stack += (key,)
try:
for arg, key in bindings.items():
try:
instance = self.get(key.interface)
except UnsatisfiedRequirement as e:
if not e.args[0]:
e = UnsatisfiedRequirement(owner_key, e.args[1])
raise e
dependencies[arg] = instance
finally:
self._stack = tuple(self._stack[:-1])
return dependencies | python | def args_to_inject(self, function, bindings, owner_key):
"""Inject arguments into a function.
:param function: The function.
:param bindings: Map of argument name to binding key to inject.
:param owner_key: A key uniquely identifying the *scope* of this function.
For a method this will be the owning class.
:returns: Dictionary of resolved arguments.
"""
dependencies = {}
key = (owner_key, function, tuple(sorted(bindings.items())))
def repr_key(k):
owner_key, function, bindings = k
return '%s.%s(injecting %s)' % (tuple(map(_describe, k[:2])) + (dict(k[2]),))
log.debug('%sProviding %r for %r', self._log_prefix, bindings, function)
if key in self._stack:
raise CircularDependency(
'circular dependency detected: %s -> %s'
% (' -> '.join(map(repr_key, self._stack)), repr_key(key))
)
self._stack += (key,)
try:
for arg, key in bindings.items():
try:
instance = self.get(key.interface)
except UnsatisfiedRequirement as e:
if not e.args[0]:
e = UnsatisfiedRequirement(owner_key, e.args[1])
raise e
dependencies[arg] = instance
finally:
self._stack = tuple(self._stack[:-1])
return dependencies | [
"def",
"args_to_inject",
"(",
"self",
",",
"function",
",",
"bindings",
",",
"owner_key",
")",
":",
"dependencies",
"=",
"{",
"}",
"key",
"=",
"(",
"owner_key",
",",
"function",
",",
"tuple",
"(",
"sorted",
"(",
"bindings",
".",
"items",
"(",
")",
")",... | Inject arguments into a function.
:param function: The function.
:param bindings: Map of argument name to binding key to inject.
:param owner_key: A key uniquely identifying the *scope* of this function.
For a method this will be the owning class.
:returns: Dictionary of resolved arguments. | [
"Inject",
"arguments",
"into",
"a",
"function",
"."
] | 07c7200166dcf5abc3bd425607f6c20206b8fe65 | https://github.com/alecthomas/injector/blob/07c7200166dcf5abc3bd425607f6c20206b8fe65/injector/__init__.py#L790-L828 | train | 34,289 |
timkpaine/lantern | lantern/extensions/savehooks/remove_output.py | scrub_output_pre_save | def scrub_output_pre_save(model, **kwargs):
"""scrub output before saving notebooks"""
# only run on notebooks
if model['type'] != 'notebook':
return
# only run on nbformat v4
if model['content']['nbformat'] != 4:
return
for cell in model['content']['cells']:
if cell['cell_type'] != 'code':
continue
cell['outputs'] = []
cell['execution_count'] = None | python | def scrub_output_pre_save(model, **kwargs):
"""scrub output before saving notebooks"""
# only run on notebooks
if model['type'] != 'notebook':
return
# only run on nbformat v4
if model['content']['nbformat'] != 4:
return
for cell in model['content']['cells']:
if cell['cell_type'] != 'code':
continue
cell['outputs'] = []
cell['execution_count'] = None | [
"def",
"scrub_output_pre_save",
"(",
"model",
",",
"*",
"*",
"kwargs",
")",
":",
"# only run on notebooks",
"if",
"model",
"[",
"'type'",
"]",
"!=",
"'notebook'",
":",
"return",
"# only run on nbformat v4",
"if",
"model",
"[",
"'content'",
"]",
"[",
"'nbformat'"... | scrub output before saving notebooks | [
"scrub",
"output",
"before",
"saving",
"notebooks"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/extensions/savehooks/remove_output.py#L1-L14 | train | 34,290 |
timkpaine/lantern | lantern/widgets/variable_inspector.py | VariableInspector.close | def close(self):
"""Close and remove hooks."""
if not self.closed:
self._ipython.events.unregister('post_run_cell', self._fill)
self._box.close()
self.closed = True | python | def close(self):
"""Close and remove hooks."""
if not self.closed:
self._ipython.events.unregister('post_run_cell', self._fill)
self._box.close()
self.closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"closed",
":",
"self",
".",
"_ipython",
".",
"events",
".",
"unregister",
"(",
"'post_run_cell'",
",",
"self",
".",
"_fill",
")",
"self",
".",
"_box",
".",
"close",
"(",
")",
"self",
... | Close and remove hooks. | [
"Close",
"and",
"remove",
"hooks",
"."
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/widgets/variable_inspector.py#L42-L47 | train | 34,291 |
timkpaine/lantern | lantern/widgets/variable_inspector.py | VariableInspector._fill | def _fill(self):
"""Fill self with variable information."""
types_to_exclude = ['module', 'function', 'builtin_function_or_method',
'instance', '_Feature', 'type', 'ufunc']
values = self.namespace.who_ls()
def eval(expr):
return self.namespace.shell.ev(expr)
var = [(v,
type(eval(v)).__name__,
str(_getsizeof(eval(v))),
str(_getshapeof(eval(v))) if _getshapeof(eval(v)) else '',
str(eval(v))[:200])
for v in values if (v not in ['_html', '_nms', 'NamespaceMagics', '_Jupyter']) & (type(eval(v)).__name__ not in types_to_exclude)]
self._table.value = '<div class="rendered_html jp-RenderedHTMLCommon"><table><thead><tr><th>Name</th><th>Type</th><th>Size</th><th>Shape</th><th>Value</th></tr></thead><tr><td>' + \
'</td></tr><tr><td>'.join(['{0}</td><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}'.format(v1, v2, v3, v4, v5) for v1, v2, v3, v4, v5 in var]) + \
'</td></tr></table></div>' | python | def _fill(self):
"""Fill self with variable information."""
types_to_exclude = ['module', 'function', 'builtin_function_or_method',
'instance', '_Feature', 'type', 'ufunc']
values = self.namespace.who_ls()
def eval(expr):
return self.namespace.shell.ev(expr)
var = [(v,
type(eval(v)).__name__,
str(_getsizeof(eval(v))),
str(_getshapeof(eval(v))) if _getshapeof(eval(v)) else '',
str(eval(v))[:200])
for v in values if (v not in ['_html', '_nms', 'NamespaceMagics', '_Jupyter']) & (type(eval(v)).__name__ not in types_to_exclude)]
self._table.value = '<div class="rendered_html jp-RenderedHTMLCommon"><table><thead><tr><th>Name</th><th>Type</th><th>Size</th><th>Shape</th><th>Value</th></tr></thead><tr><td>' + \
'</td></tr><tr><td>'.join(['{0}</td><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}'.format(v1, v2, v3, v4, v5) for v1, v2, v3, v4, v5 in var]) + \
'</td></tr></table></div>' | [
"def",
"_fill",
"(",
"self",
")",
":",
"types_to_exclude",
"=",
"[",
"'module'",
",",
"'function'",
",",
"'builtin_function_or_method'",
",",
"'instance'",
",",
"'_Feature'",
",",
"'type'",
",",
"'ufunc'",
"]",
"values",
"=",
"self",
".",
"namespace",
".",
"... | Fill self with variable information. | [
"Fill",
"self",
"with",
"variable",
"information",
"."
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/widgets/variable_inspector.py#L49-L67 | train | 34,292 |
timkpaine/lantern | lantern/extensions/headless/__init__.py | set_var | def set_var(var, set_='""'):
'''set var outside notebook'''
if isinstance(set_, str):
to_set = json.dumps(set_)
elif isinstance(set_, dict) or isinstance(set_, list):
try:
to_set = json.dumps(set_)
except ValueError:
raise Exception('var not jsonable')
else:
raise Exception('var must be jsonable list or dict')
os.environ['NBCONVERT_' + var] = to_set | python | def set_var(var, set_='""'):
'''set var outside notebook'''
if isinstance(set_, str):
to_set = json.dumps(set_)
elif isinstance(set_, dict) or isinstance(set_, list):
try:
to_set = json.dumps(set_)
except ValueError:
raise Exception('var not jsonable')
else:
raise Exception('var must be jsonable list or dict')
os.environ['NBCONVERT_' + var] = to_set | [
"def",
"set_var",
"(",
"var",
",",
"set_",
"=",
"'\"\"'",
")",
":",
"if",
"isinstance",
"(",
"set_",
",",
"str",
")",
":",
"to_set",
"=",
"json",
".",
"dumps",
"(",
"set_",
")",
"elif",
"isinstance",
"(",
"set_",
",",
"dict",
")",
"or",
"isinstance... | set var outside notebook | [
"set",
"var",
"outside",
"notebook"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/extensions/headless/__init__.py#L23-L35 | train | 34,293 |
timkpaine/lantern | lantern/extensions/headless/__init__.py | get_var | def get_var(var, default='""'):
'''get var inside notebook'''
ret = os.environ.get('NBCONVERT_' + var)
if ret is None:
return default
return json.loads(ret) | python | def get_var(var, default='""'):
'''get var inside notebook'''
ret = os.environ.get('NBCONVERT_' + var)
if ret is None:
return default
return json.loads(ret) | [
"def",
"get_var",
"(",
"var",
",",
"default",
"=",
"'\"\"'",
")",
":",
"ret",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'NBCONVERT_'",
"+",
"var",
")",
"if",
"ret",
"is",
"None",
":",
"return",
"default",
"return",
"json",
".",
"loads",
"(",
"re... | get var inside notebook | [
"get",
"var",
"inside",
"notebook"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/extensions/headless/__init__.py#L38-L43 | train | 34,294 |
timkpaine/lantern | lantern/plotting/plotutils.py | align_yaxis_np | def align_yaxis_np(axes):
"""Align zeros of the two axes, zooming them out by same ratio"""
axes = np.array(axes)
extrema = np.array([ax.get_ylim() for ax in axes])
# reset for divide by zero issues
for i in range(len(extrema)):
if np.isclose(extrema[i, 0], 0.0):
extrema[i, 0] = -1
if np.isclose(extrema[i, 1], 0.0):
extrema[i, 1] = 1
# upper and lower limits
lowers = extrema[:, 0]
uppers = extrema[:, 1]
# if all pos or all neg, don't scale
all_positive = False
all_negative = False
if lowers.min() > 0.0:
all_positive = True
if uppers.max() < 0.0:
all_negative = True
if all_negative or all_positive:
# don't scale
return
# pick "most centered" axis
res = abs(uppers+lowers)
min_index = np.argmin(res)
# scale positive or negative part
multiplier1 = abs(uppers[min_index]/lowers[min_index])
multiplier2 = abs(lowers[min_index]/uppers[min_index])
for i in range(len(extrema)):
# scale positive or negative part based on which induces valid
if i != min_index:
lower_change = extrema[i, 1] * -1*multiplier2
upper_change = extrema[i, 0] * -1*multiplier1
if upper_change < extrema[i, 1]:
extrema[i, 0] = lower_change
else:
extrema[i, 1] = upper_change
# bump by 10% for a margin
extrema[i, 0] *= 1.1
extrema[i, 1] *= 1.1
# set axes limits
[axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))] | python | def align_yaxis_np(axes):
"""Align zeros of the two axes, zooming them out by same ratio"""
axes = np.array(axes)
extrema = np.array([ax.get_ylim() for ax in axes])
# reset for divide by zero issues
for i in range(len(extrema)):
if np.isclose(extrema[i, 0], 0.0):
extrema[i, 0] = -1
if np.isclose(extrema[i, 1], 0.0):
extrema[i, 1] = 1
# upper and lower limits
lowers = extrema[:, 0]
uppers = extrema[:, 1]
# if all pos or all neg, don't scale
all_positive = False
all_negative = False
if lowers.min() > 0.0:
all_positive = True
if uppers.max() < 0.0:
all_negative = True
if all_negative or all_positive:
# don't scale
return
# pick "most centered" axis
res = abs(uppers+lowers)
min_index = np.argmin(res)
# scale positive or negative part
multiplier1 = abs(uppers[min_index]/lowers[min_index])
multiplier2 = abs(lowers[min_index]/uppers[min_index])
for i in range(len(extrema)):
# scale positive or negative part based on which induces valid
if i != min_index:
lower_change = extrema[i, 1] * -1*multiplier2
upper_change = extrema[i, 0] * -1*multiplier1
if upper_change < extrema[i, 1]:
extrema[i, 0] = lower_change
else:
extrema[i, 1] = upper_change
# bump by 10% for a margin
extrema[i, 0] *= 1.1
extrema[i, 1] *= 1.1
# set axes limits
[axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))] | [
"def",
"align_yaxis_np",
"(",
"axes",
")",
":",
"axes",
"=",
"np",
".",
"array",
"(",
"axes",
")",
"extrema",
"=",
"np",
".",
"array",
"(",
"[",
"ax",
".",
"get_ylim",
"(",
")",
"for",
"ax",
"in",
"axes",
"]",
")",
"# reset for divide by zero issues",
... | Align zeros of the two axes, zooming them out by same ratio | [
"Align",
"zeros",
"of",
"the",
"two",
"axes",
"zooming",
"them",
"out",
"by",
"same",
"ratio"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/plotting/plotutils.py#L93-L145 | train | 34,295 |
timkpaine/lantern | experimental/dash/dash-vanguard-report-master/app.py | make_dash_table | def make_dash_table(df):
''' Return a dash definitio of an HTML table for a Pandas dataframe '''
table = []
for index, row in df.iterrows():
html_row = []
for i in range(len(row)):
html_row.append(html.Td([row[i]]))
table.append(html.Tr(html_row))
return table | python | def make_dash_table(df):
''' Return a dash definitio of an HTML table for a Pandas dataframe '''
table = []
for index, row in df.iterrows():
html_row = []
for i in range(len(row)):
html_row.append(html.Td([row[i]]))
table.append(html.Tr(html_row))
return table | [
"def",
"make_dash_table",
"(",
"df",
")",
":",
"table",
"=",
"[",
"]",
"for",
"index",
",",
"row",
"in",
"df",
".",
"iterrows",
"(",
")",
":",
"html_row",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"html_ro... | Return a dash definitio of an HTML table for a Pandas dataframe | [
"Return",
"a",
"dash",
"definitio",
"of",
"an",
"HTML",
"table",
"for",
"a",
"Pandas",
"dataframe"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/experimental/dash/dash-vanguard-report-master/app.py#L33-L41 | train | 34,296 |
timkpaine/lantern | lantern/extensions/savehooks/convert_to_script.py | script_post_save | def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
base, ext = os.path.splitext(os_path)
# py_fname = base + '.py'
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script) | python | def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
base, ext = os.path.splitext(os_path)
# py_fname = base + '.py'
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script) | [
"def",
"script_post_save",
"(",
"model",
",",
"os_path",
",",
"contents_manager",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"nbconvert",
".",
"exporters",
".",
"script",
"import",
"ScriptExporter",
"if",
"model",
"[",
"'type'",
"]",
"!=",
"'notebook'",
":",... | convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script` | [
"convert",
"notebooks",
"to",
"Python",
"script",
"after",
"save",
"with",
"nbconvert"
] | 40a3aad630ab40735a5221a1d96118697317a414 | https://github.com/timkpaine/lantern/blob/40a3aad630ab40735a5221a1d96118697317a414/lantern/extensions/savehooks/convert_to_script.py#L8-L32 | train | 34,297 |
kennethreitz/xerox | xerox/x11.py | copy | def copy(string, xsel=False):
"""Copy given string into system clipboard. If 'xsel' is True, this
will also copy into the primary x selection for middle click."""
try:
_cmd = ["xclip", "-selection", "clipboard"]
subprocess.Popen(_cmd, stdin=subprocess.PIPE).communicate(
string.encode('utf-8'))
if xsel:
_cmd = ["xclip", "-selection", "primary"]
subprocess.Popen(_cmd, stdin=subprocess.PIPE).communicate(
string.encode('utf-8'))
except OSError as why:
raise XclipNotFound | python | def copy(string, xsel=False):
"""Copy given string into system clipboard. If 'xsel' is True, this
will also copy into the primary x selection for middle click."""
try:
_cmd = ["xclip", "-selection", "clipboard"]
subprocess.Popen(_cmd, stdin=subprocess.PIPE).communicate(
string.encode('utf-8'))
if xsel:
_cmd = ["xclip", "-selection", "primary"]
subprocess.Popen(_cmd, stdin=subprocess.PIPE).communicate(
string.encode('utf-8'))
except OSError as why:
raise XclipNotFound | [
"def",
"copy",
"(",
"string",
",",
"xsel",
"=",
"False",
")",
":",
"try",
":",
"_cmd",
"=",
"[",
"\"xclip\"",
",",
"\"-selection\"",
",",
"\"clipboard\"",
"]",
"subprocess",
".",
"Popen",
"(",
"_cmd",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
")",
... | Copy given string into system clipboard. If 'xsel' is True, this
will also copy into the primary x selection for middle click. | [
"Copy",
"given",
"string",
"into",
"system",
"clipboard",
".",
"If",
"xsel",
"is",
"True",
"this",
"will",
"also",
"copy",
"into",
"the",
"primary",
"x",
"selection",
"for",
"middle",
"click",
"."
] | c34d2fb03ff892b31016173c520e784e86cc2d70 | https://github.com/kennethreitz/xerox/blob/c34d2fb03ff892b31016173c520e784e86cc2d70/xerox/x11.py#L10-L22 | train | 34,298 |
kennethreitz/xerox | xerox/__init__.py | main | def main():
""" Entry point for cli. """
if sys.argv[1:]: # called with input arguments
copy(' '.join(sys.argv[1:]))
elif not sys.stdin.isatty(): # piped in input
copy(''.join(sys.stdin.readlines()).rstrip('\n'))
else: # paste output
print(paste()) | python | def main():
""" Entry point for cli. """
if sys.argv[1:]: # called with input arguments
copy(' '.join(sys.argv[1:]))
elif not sys.stdin.isatty(): # piped in input
copy(''.join(sys.stdin.readlines()).rstrip('\n'))
else: # paste output
print(paste()) | [
"def",
"main",
"(",
")",
":",
"if",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
":",
"# called with input arguments",
"copy",
"(",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
")",
"elif",
"not",
"sys",
".",
"stdin",
".",
"... | Entry point for cli. | [
"Entry",
"point",
"for",
"cli",
"."
] | c34d2fb03ff892b31016173c520e784e86cc2d70 | https://github.com/kennethreitz/xerox/blob/c34d2fb03ff892b31016173c520e784e86cc2d70/xerox/__init__.py#L6-L13 | train | 34,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.