repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
data61/clkhash | clkhash/clk.py | chunks | def chunks(seq, chunk_size):
# type: (Sequence[T], int) -> Iterable[Sequence[T]]
""" Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk.
"""
return (seq[i:i + chunk_size] for i in range(0, len(seq), chunk_size)) | python | def chunks(seq, chunk_size):
# type: (Sequence[T], int) -> Iterable[Sequence[T]]
""" Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk.
"""
return (seq[i:i + chunk_size] for i in range(0, len(seq), chunk_size)) | [
"def",
"chunks",
"(",
"seq",
",",
"chunk_size",
")",
":",
"# type: (Sequence[T], int) -> Iterable[Sequence[T]]",
"return",
"(",
"seq",
"[",
"i",
":",
"i",
"+",
"chunk_size",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"seq",
")",
",",
"chu... | Split seq into chunk_size-sized chunks.
:param seq: A sequence to chunk.
:param chunk_size: The size of chunk. | [
"Split",
"seq",
"into",
"chunk_size",
"-",
"sized",
"chunks",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/clk.py#L179-L186 | train | 42,300 |
data61/clkhash | clkhash/randomnames.py | load_csv_data | def load_csv_data(resource_name):
# type: (str) -> List[str]
""" Loads first column of specified CSV file from package data.
"""
data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name))
if data_bytes is None:
raise ValueError("No data resource found with name {}".format(resource_name))
else:
data = data_bytes.decode('utf8')
reader = csv.reader(data.splitlines())
next(reader, None) # skip the headers
return [row[0] for row in reader] | python | def load_csv_data(resource_name):
# type: (str) -> List[str]
""" Loads first column of specified CSV file from package data.
"""
data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name))
if data_bytes is None:
raise ValueError("No data resource found with name {}".format(resource_name))
else:
data = data_bytes.decode('utf8')
reader = csv.reader(data.splitlines())
next(reader, None) # skip the headers
return [row[0] for row in reader] | [
"def",
"load_csv_data",
"(",
"resource_name",
")",
":",
"# type: (str) -> List[str]",
"data_bytes",
"=",
"pkgutil",
".",
"get_data",
"(",
"'clkhash'",
",",
"'data/{}'",
".",
"format",
"(",
"resource_name",
")",
")",
"if",
"data_bytes",
"is",
"None",
":",
"raise"... | Loads first column of specified CSV file from package data. | [
"Loads",
"first",
"column",
"of",
"specified",
"CSV",
"file",
"from",
"package",
"data",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L32-L43 | train | 42,301 |
data61/clkhash | clkhash/randomnames.py | save_csv | def save_csv(data, # type: Iterable[Tuple[Union[str, int], ...]]
headers, # type: Iterable[str]
file # type: TextIO
):
# type: (...) -> None
"""
Output generated data to file as CSV with header.
:param data: An iterable of tuples containing raw data.
:param headers: Iterable of feature names
:param file: A writeable stream in which to write the CSV
"""
print(','.join(headers), file=file)
writer = csv.writer(file)
writer.writerows(data) | python | def save_csv(data, # type: Iterable[Tuple[Union[str, int], ...]]
headers, # type: Iterable[str]
file # type: TextIO
):
# type: (...) -> None
"""
Output generated data to file as CSV with header.
:param data: An iterable of tuples containing raw data.
:param headers: Iterable of feature names
:param file: A writeable stream in which to write the CSV
"""
print(','.join(headers), file=file)
writer = csv.writer(file)
writer.writerows(data) | [
"def",
"save_csv",
"(",
"data",
",",
"# type: Iterable[Tuple[Union[str, int], ...]]",
"headers",
",",
"# type: Iterable[str]",
"file",
"# type: TextIO",
")",
":",
"# type: (...) -> None",
"print",
"(",
"','",
".",
"join",
"(",
"headers",
")",
",",
"file",
"=",
"file... | Output generated data to file as CSV with header.
:param data: An iterable of tuples containing raw data.
:param headers: Iterable of feature names
:param file: A writeable stream in which to write the CSV | [
"Output",
"generated",
"data",
"to",
"file",
"as",
"CSV",
"with",
"header",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L46-L61 | train | 42,302 |
data61/clkhash | clkhash/randomnames.py | random_date | def random_date(start, end):
# type: (datetime, datetime) -> datetime
""" Generate a random datetime between two datetime objects.
:param start: datetime of start
:param end: datetime of end
:return: random datetime between start and end
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
return start + timedelta(seconds=random_second) | python | def random_date(start, end):
# type: (datetime, datetime) -> datetime
""" Generate a random datetime between two datetime objects.
:param start: datetime of start
:param end: datetime of end
:return: random datetime between start and end
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
return start + timedelta(seconds=random_second) | [
"def",
"random_date",
"(",
"start",
",",
"end",
")",
":",
"# type: (datetime, datetime) -> datetime",
"delta",
"=",
"end",
"-",
"start",
"int_delta",
"=",
"(",
"delta",
".",
"days",
"*",
"24",
"*",
"60",
"*",
"60",
")",
"+",
"delta",
".",
"seconds",
"ran... | Generate a random datetime between two datetime objects.
:param start: datetime of start
:param end: datetime of end
:return: random datetime between start and end | [
"Generate",
"a",
"random",
"datetime",
"between",
"two",
"datetime",
"objects",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L64-L75 | train | 42,303 |
data61/clkhash | clkhash/randomnames.py | NameList.generate_random_person | def generate_random_person(self, n):
# type: (int) -> Iterable[Tuple[str, str, str, str]]
"""
Generator that yields details on a person with plausible name, sex and age.
:yields: Generated data for one person
tuple - (id: int, name: str('First Last'), birthdate: str('DD/MM/YYYY'), sex: str('M' | 'F') )
"""
assert self.all_male_first_names is not None
assert self.all_female_first_names is not None
assert self.all_last_names is not None
for i in range(n):
sex = 'M' if random.random() > 0.5 else 'F'
dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d")
first_name = random.choice(self.all_male_first_names) if sex == 'M' else random.choice(
self.all_female_first_names)
last_name = random.choice(self.all_last_names)
yield (
str(i),
first_name + ' ' + last_name,
dob,
sex
) | python | def generate_random_person(self, n):
# type: (int) -> Iterable[Tuple[str, str, str, str]]
"""
Generator that yields details on a person with plausible name, sex and age.
:yields: Generated data for one person
tuple - (id: int, name: str('First Last'), birthdate: str('DD/MM/YYYY'), sex: str('M' | 'F') )
"""
assert self.all_male_first_names is not None
assert self.all_female_first_names is not None
assert self.all_last_names is not None
for i in range(n):
sex = 'M' if random.random() > 0.5 else 'F'
dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d")
first_name = random.choice(self.all_male_first_names) if sex == 'M' else random.choice(
self.all_female_first_names)
last_name = random.choice(self.all_last_names)
yield (
str(i),
first_name + ' ' + last_name,
dob,
sex
) | [
"def",
"generate_random_person",
"(",
"self",
",",
"n",
")",
":",
"# type: (int) -> Iterable[Tuple[str, str, str, str]]",
"assert",
"self",
".",
"all_male_first_names",
"is",
"not",
"None",
"assert",
"self",
".",
"all_female_first_names",
"is",
"not",
"None",
"assert",
... | Generator that yields details on a person with plausible name, sex and age.
:yields: Generated data for one person
tuple - (id: int, name: str('First Last'), birthdate: str('DD/MM/YYYY'), sex: str('M' | 'F') ) | [
"Generator",
"that",
"yields",
"details",
"on",
"a",
"person",
"with",
"plausible",
"name",
"sex",
"and",
"age",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L106-L129 | train | 42,304 |
data61/clkhash | clkhash/randomnames.py | NameList.load_names | def load_names(self):
# type: () -> None
""" Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
"""
self.all_male_first_names = load_csv_data('male-first-names.csv')
self.all_female_first_names = load_csv_data('female-first-names.csv')
self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv') | python | def load_names(self):
# type: () -> None
""" Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
"""
self.all_male_first_names = load_csv_data('male-first-names.csv')
self.all_female_first_names = load_csv_data('female-first-names.csv')
self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv') | [
"def",
"load_names",
"(",
"self",
")",
":",
"# type: () -> None",
"self",
".",
"all_male_first_names",
"=",
"load_csv_data",
"(",
"'male-first-names.csv'",
")",
"self",
".",
"all_female_first_names",
"=",
"load_csv_data",
"(",
"'female-first-names.csv'",
")",
"self",
... | Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/ | [
"Loads",
"a",
"name",
"database",
"from",
"package",
"data"
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L131-L141 | train | 42,305 |
data61/clkhash | clkhash/randomnames.py | NameList.generate_subsets | def generate_subsets(self, sz, overlap=0.8, subsets=2):
# type: (int, float, int) -> Tuple[List, ...]
""" Return random subsets with nonempty intersection.
The random subsets are of specified size. If an element is
common to two subsets, then it is common to all subsets.
This overlap is controlled by a parameter.
:param sz: size of subsets to generate
:param overlap: size of the intersection, as fraction of the
subset length
:param subsets: number of subsets to generate
:raises ValueError: if there aren't sufficiently many names
in the list to satisfy the request; more precisely,
raises if (1 - subsets) * floor(overlap * sz)
+ subsets * sz > len(self.names).
:return: tuple of subsets
"""
overlap_sz = int(math.floor(overlap * sz))
unique_sz = sz - overlap_sz # Unique names per subset
total_unique_sz = unique_sz * subsets # Uniques in all subsets
total_sz = overlap_sz + total_unique_sz
if total_sz > len(self.names):
msg = 'insufficient names for requested size and overlap'
raise ValueError(msg)
sset = random.sample(self.names, total_sz)
# Overlapping subset, pool of unique names
sset_overlap, sset_unique = sset[:overlap_sz], sset[overlap_sz:]
assert len(sset_unique) == subsets * unique_sz
# Split pool of unique names into `subsets` chunks
uniques = (sset_unique[p * unique_sz: (p + 1) * unique_sz]
for p in range(subsets))
return tuple(sset_overlap + u for u in uniques) | python | def generate_subsets(self, sz, overlap=0.8, subsets=2):
# type: (int, float, int) -> Tuple[List, ...]
""" Return random subsets with nonempty intersection.
The random subsets are of specified size. If an element is
common to two subsets, then it is common to all subsets.
This overlap is controlled by a parameter.
:param sz: size of subsets to generate
:param overlap: size of the intersection, as fraction of the
subset length
:param subsets: number of subsets to generate
:raises ValueError: if there aren't sufficiently many names
in the list to satisfy the request; more precisely,
raises if (1 - subsets) * floor(overlap * sz)
+ subsets * sz > len(self.names).
:return: tuple of subsets
"""
overlap_sz = int(math.floor(overlap * sz))
unique_sz = sz - overlap_sz # Unique names per subset
total_unique_sz = unique_sz * subsets # Uniques in all subsets
total_sz = overlap_sz + total_unique_sz
if total_sz > len(self.names):
msg = 'insufficient names for requested size and overlap'
raise ValueError(msg)
sset = random.sample(self.names, total_sz)
# Overlapping subset, pool of unique names
sset_overlap, sset_unique = sset[:overlap_sz], sset[overlap_sz:]
assert len(sset_unique) == subsets * unique_sz
# Split pool of unique names into `subsets` chunks
uniques = (sset_unique[p * unique_sz: (p + 1) * unique_sz]
for p in range(subsets))
return tuple(sset_overlap + u for u in uniques) | [
"def",
"generate_subsets",
"(",
"self",
",",
"sz",
",",
"overlap",
"=",
"0.8",
",",
"subsets",
"=",
"2",
")",
":",
"# type: (int, float, int) -> Tuple[List, ...]",
"overlap_sz",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"overlap",
"*",
"sz",
")",
")",
"u... | Return random subsets with nonempty intersection.
The random subsets are of specified size. If an element is
common to two subsets, then it is common to all subsets.
This overlap is controlled by a parameter.
:param sz: size of subsets to generate
:param overlap: size of the intersection, as fraction of the
subset length
:param subsets: number of subsets to generate
:raises ValueError: if there aren't sufficiently many names
in the list to satisfy the request; more precisely,
raises if (1 - subsets) * floor(overlap * sz)
+ subsets * sz > len(self.names).
:return: tuple of subsets | [
"Return",
"random",
"subsets",
"with",
"nonempty",
"intersection",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/randomnames.py#L143-L182 | train | 42,306 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | _unpack_data | def _unpack_data(data):
"""
Break Xs, Ys, and metadata out into separate lists for data preprocessing.
Run basic data validation.
"""
xs = [None] * len(data)
ys = [None] * len(data)
metadata = [None] * len(data)
for idx, example in enumerate(data):
if isinstance(example, (list, tuple)):
xs[idx], ys[idx], metadata[idx] = _unpack_list(example)
if isinstance(example, dict):
xs[idx], ys[idx], metadata[idx] = _unpack_dict(example)
return xs, ys, metadata | python | def _unpack_data(data):
"""
Break Xs, Ys, and metadata out into separate lists for data preprocessing.
Run basic data validation.
"""
xs = [None] * len(data)
ys = [None] * len(data)
metadata = [None] * len(data)
for idx, example in enumerate(data):
if isinstance(example, (list, tuple)):
xs[idx], ys[idx], metadata[idx] = _unpack_list(example)
if isinstance(example, dict):
xs[idx], ys[idx], metadata[idx] = _unpack_dict(example)
return xs, ys, metadata | [
"def",
"_unpack_data",
"(",
"data",
")",
":",
"xs",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"data",
")",
"ys",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"data",
")",
"metadata",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"data",
")",
"for",
"idx",... | Break Xs, Ys, and metadata out into separate lists for data preprocessing.
Run basic data validation. | [
"Break",
"Xs",
"Ys",
"and",
"metadata",
"out",
"into",
"separate",
"lists",
"for",
"data",
"preprocessing",
".",
"Run",
"basic",
"data",
"validation",
"."
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L43-L57 | train | 42,307 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | collections | def collections(cloud=None, api_key=None, version=None, **kwargs):
"""
This is a status report endpoint. It is used to get the status on all of the collections currently trained, as
well as some basic statistics on their accuracies.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
Example usage:
.. code-block:: python
>>> collections = indicoio.collections()
{
"tag_predictor": {
"input_type": "text",
"model_type": "classification",
"number_of_samples": 224
'status': 'ready'
}, "popularity_predictor": {
"input_type": "text",
"model_type": "regression",
"number_of_samples": 231
'status': 'training'
}
}
}
"""
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "collections"}
return api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def collections(cloud=None, api_key=None, version=None, **kwargs):
"""
This is a status report endpoint. It is used to get the status on all of the collections currently trained, as
well as some basic statistics on their accuracies.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
Example usage:
.. code-block:: python
>>> collections = indicoio.collections()
{
"tag_predictor": {
"input_type": "text",
"model_type": "classification",
"number_of_samples": 224
'status': 'ready'
}, "popularity_predictor": {
"input_type": "text",
"model_type": "regression",
"number_of_samples": 231
'status': 'training'
}
}
}
"""
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "collections"}
return api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"collections",
"(",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url_params",
"=",
"{",
"\"batch\"",
":",
"False",
",",
"\"api_key\"",
":",
"api_key",
",",
"\"version\"",
":"... | This is a status report endpoint. It is used to get the status on all of the collections currently trained, as
well as some basic statistics on their accuracies.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
Example usage:
.. code-block:: python
>>> collections = indicoio.collections()
{
"tag_predictor": {
"input_type": "text",
"model_type": "classification",
"number_of_samples": 224
'status': 'ready'
}, "popularity_predictor": {
"input_type": "text",
"model_type": "regression",
"number_of_samples": 231
'status': 'training'
}
}
} | [
"This",
"is",
"a",
"status",
"report",
"endpoint",
".",
"It",
"is",
"used",
"to",
"get",
"the",
"status",
"on",
"all",
"of",
"the",
"collections",
"currently",
"trained",
"as",
"well",
"as",
"some",
"basic",
"statistics",
"on",
"their",
"accuracies",
"."
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L396-L430 | train | 42,308 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | vectorize | def vectorize(data, cloud=None, api_key=None, version=None, **kwargs):
"""
Support for raw features from the custom collections API
"""
batch = detect_batch(data)
data = data_preprocess(data, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version, "method": "vectorize"}
return api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def vectorize(data, cloud=None, api_key=None, version=None, **kwargs):
"""
Support for raw features from the custom collections API
"""
batch = detect_batch(data)
data = data_preprocess(data, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version, "method": "vectorize"}
return api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"vectorize",
"(",
"data",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"batch",
"=",
"detect_batch",
"(",
"data",
")",
"data",
"=",
"data_preprocess",
"(",
"data",
",",... | Support for raw features from the custom collections API | [
"Support",
"for",
"raw",
"features",
"from",
"the",
"custom",
"collections",
"API"
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L433-L440 | train | 42,309 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection._api_handler | def _api_handler(self, *args, **kwargs):
"""
Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body
"""
keyword_arguments = {}
keyword_arguments.update(self.keywords)
keyword_arguments.update(kwargs)
return api_handler(*args, **keyword_arguments) | python | def _api_handler(self, *args, **kwargs):
"""
Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body
"""
keyword_arguments = {}
keyword_arguments.update(self.keywords)
keyword_arguments.update(kwargs)
return api_handler(*args, **keyword_arguments) | [
"def",
"_api_handler",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"keyword_arguments",
"=",
"{",
"}",
"keyword_arguments",
".",
"update",
"(",
"self",
".",
"keywords",
")",
"keyword_arguments",
".",
"update",
"(",
"kwargs",
")",
"r... | Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body | [
"Thin",
"wrapper",
"around",
"api_handler",
"from",
"indicoio",
".",
"utils",
".",
"api",
"to",
"add",
"in",
"stored",
"keyword",
"argument",
"to",
"the",
"JSON",
"body"
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L129-L136 | train | 42,310 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.add_data | def add_data(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is the basic training endpoint. Given a piece of text and a score, either categorical
or numeric, this endpoint will train a new model given the additional piece of information.
Inputs
data - List: The text and collection/score associated with it. The length of the text (string) should ideally
be longer than 100 characters and contain at least 10 words. While the API will support
shorter text, you will find that the accuracy of results improves significantly with longer
examples. For an additional fee, this end point will support image input as well. The collection/score
can be a string or float. This is the variable associated with the text. This can either be categorical
(the tag associated with the post) or numeric (the number of Facebook shares the post
received). However it can only be one or another within a given label.
domain (optional) - String: This is an identifier that helps determine the appropriate techniques for indico
to use behind the scenes to train your model. One of {"standard", "topics"}.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
if not len(data):
raise IndicoError("No input data provided.")
batch = isinstance(data[0], (list, tuple, dict))
# standarize format for preprocessing batch of examples
if not batch:
data = [data]
X, Y, metadata = _unpack_data(data)
X = data_preprocess(X, batch=True)
data = _pack_data(X, Y, metadata)
# if a single example was passed in, unpack
if not batch:
data = data[0]
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "add_data"}
return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def add_data(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is the basic training endpoint. Given a piece of text and a score, either categorical
or numeric, this endpoint will train a new model given the additional piece of information.
Inputs
data - List: The text and collection/score associated with it. The length of the text (string) should ideally
be longer than 100 characters and contain at least 10 words. While the API will support
shorter text, you will find that the accuracy of results improves significantly with longer
examples. For an additional fee, this end point will support image input as well. The collection/score
can be a string or float. This is the variable associated with the text. This can either be categorical
(the tag associated with the post) or numeric (the number of Facebook shares the post
received). However it can only be one or another within a given label.
domain (optional) - String: This is an identifier that helps determine the appropriate techniques for indico
to use behind the scenes to train your model. One of {"standard", "topics"}.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
if not len(data):
raise IndicoError("No input data provided.")
batch = isinstance(data[0], (list, tuple, dict))
# standarize format for preprocessing batch of examples
if not batch:
data = [data]
X, Y, metadata = _unpack_data(data)
X = data_preprocess(X, batch=True)
data = _pack_data(X, Y, metadata)
# if a single example was passed in, unpack
if not batch:
data = data[0]
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "add_data"}
return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"add_data",
"(",
"self",
",",
"data",
",",
"cloud",
"=",
"None",
",",
"batch",
"=",
"False",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"len",
"(",
"data",
")",
":",
"raise",
... | This is the basic training endpoint. Given a piece of text and a score, either categorical
or numeric, this endpoint will train a new model given the additional piece of information.
Inputs
data - List: The text and collection/score associated with it. The length of the text (string) should ideally
be longer than 100 characters and contain at least 10 words. While the API will support
shorter text, you will find that the accuracy of results improves significantly with longer
examples. For an additional fee, this end point will support image input as well. The collection/score
can be a string or float. This is the variable associated with the text. This can either be categorical
(the tag associated with the post) or numeric (the number of Facebook shares the post
received). However it can only be one or another within a given label.
domain (optional) - String: This is an identifier that helps determine the appropriate techniques for indico
to use behind the scenes to train your model. One of {"standard", "topics"}.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"This",
"is",
"the",
"basic",
"training",
"endpoint",
".",
"Given",
"a",
"piece",
"of",
"text",
"and",
"a",
"score",
"either",
"categorical",
"or",
"numeric",
"this",
"endpoint",
"will",
"train",
"a",
"new",
"model",
"given",
"the",
"additional",
"piece",
... | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L138-L177 | train | 42,311 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.train | def train(self, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is the basic training endpoint. Given an existing dataset this endpoint will train a model.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "train"}
return self._api_handler(self.keywords['collection'], cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def train(self, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is the basic training endpoint. Given an existing dataset this endpoint will train a model.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "train"}
return self._api_handler(self.keywords['collection'], cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"train",
"(",
"self",
",",
"cloud",
"=",
"None",
",",
"batch",
"=",
"False",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url_params",
"=",
"{",
"\"batch\"",
":",
"batch",
",",
"\"api_key\"",
... | This is the basic training endpoint. Given an existing dataset this endpoint will train a model.
Inputs
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"This",
"is",
"the",
"basic",
"training",
"endpoint",
".",
"Given",
"an",
"existing",
"dataset",
"this",
"endpoint",
"will",
"train",
"a",
"model",
"."
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L179-L192 | train | 42,312 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.info | def info(self, cloud=None, api_key=None, version=None, **kwargs):
"""
Return the current state of the model associated with a given collection
"""
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "info"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def info(self, cloud=None, api_key=None, version=None, **kwargs):
"""
Return the current state of the model associated with a given collection
"""
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "info"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"info",
"(",
"self",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"url_params",
"=",
"{",
"\"batch\"",
":",
"False",
",",
"\"api_key\"",
":",
"api_key",
",",
"\"version... | Return the current state of the model associated with a given collection | [
"Return",
"the",
"current",
"state",
"of",
"the",
"model",
"associated",
"with",
"a",
"given",
"collection"
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L262-L267 | train | 42,313 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.remove_example | def remove_example(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is an API made to remove a single instance of training data. This is useful in cases where a
single instance of content has been modified, but the remaining examples remain valid. For
example, if a piece of content has been retagged.
Inputs
data - String: The exact text you wish to remove from the given collection. If the string
provided does not match a known piece of text then this will fail. Again, this is required if
an id is not provided, and vice-versa.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
batch = detect_batch(data)
data = data_preprocess(data, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': 'remove_example'}
return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def remove_example(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
This is an API made to remove a single instance of training data. This is useful in cases where a
single instance of content has been modified, but the remaining examples remain valid. For
example, if a piece of content has been retagged.
Inputs
data - String: The exact text you wish to remove from the given collection. If the string
provided does not match a known piece of text then this will fail. Again, this is required if
an id is not provided, and vice-versa.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
batch = detect_batch(data)
data = data_preprocess(data, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': 'remove_example'}
return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"remove_example",
"(",
"self",
",",
"data",
",",
"cloud",
"=",
"None",
",",
"batch",
"=",
"False",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"batch",
"=",
"detect_batch",
"(",
"data",
")",
"... | This is an API made to remove a single instance of training data. This is useful in cases where a
single instance of content has been modified, but the remaining examples remain valid. For
example, if a piece of content has been retagged.
Inputs
data - String: The exact text you wish to remove from the given collection. If the string
provided does not match a known piece of text then this will fail. Again, this is required if
an id is not provided, and vice-versa.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"This",
"is",
"an",
"API",
"made",
"to",
"remove",
"a",
"single",
"instance",
"of",
"training",
"data",
".",
"This",
"is",
"useful",
"in",
"cases",
"where",
"a",
"single",
"instance",
"of",
"content",
"has",
"been",
"modified",
"but",
"the",
"remaining",
... | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L269-L289 | train | 42,314 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.wait | def wait(self, interval=1, **kwargs):
"""
Block until the collection's model is completed training
"""
while True:
status = self.info(**kwargs).get('status')
if status == "ready":
break
if status != "training":
raise IndicoError("Collection status failed with: {0}".format(status))
time.sleep(interval) | python | def wait(self, interval=1, **kwargs):
"""
Block until the collection's model is completed training
"""
while True:
status = self.info(**kwargs).get('status')
if status == "ready":
break
if status != "training":
raise IndicoError("Collection status failed with: {0}".format(status))
time.sleep(interval) | [
"def",
"wait",
"(",
"self",
",",
"interval",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"while",
"True",
":",
"status",
"=",
"self",
".",
"info",
"(",
"*",
"*",
"kwargs",
")",
".",
"get",
"(",
"'status'",
")",
"if",
"status",
"==",
"\"ready\"",... | Block until the collection's model is completed training | [
"Block",
"until",
"the",
"collection",
"s",
"model",
"is",
"completed",
"training"
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L291-L301 | train | 42,315 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.register | def register(self, make_public=False, cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to register you collection in order to share read or write
access to the collection with another user.
Inputs:
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
make_public (optional) - Boolean: When True, this option gives all indico users read access to your model.
"""
kwargs['make_public'] = make_public
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "register"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def register(self, make_public=False, cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to register you collection in order to share read or write
access to the collection with another user.
Inputs:
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
make_public (optional) - Boolean: When True, this option gives all indico users read access to your model.
"""
kwargs['make_public'] = make_public
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "register"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"register",
"(",
"self",
",",
"make_public",
"=",
"False",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'make_public'",
"]",
"=",
"make_public",
"url_para... | This API endpoint allows you to register you collection in order to share read or write
access to the collection with another user.
Inputs:
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
make_public (optional) - Boolean: When True, this option gives all indico users read access to your model. | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"register",
"you",
"collection",
"in",
"order",
"to",
"share",
"read",
"or",
"write",
"access",
"to",
"the",
"collection",
"with",
"another",
"user",
"."
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L303-L319 | train | 42,316 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.authorize | def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to authorize another user to access your model in a read or write capacity.
Before calling authorize, you must first make sure your model has been registered.
Inputs:
email - String: The email of the user you would like to share access with.
permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`.
Users with `write` permissions can add new input examples and train models.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['permission_type'] = permission_type
kwargs['email'] = email
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to authorize another user to access your model in a read or write capacity.
Before calling authorize, you must first make sure your model has been registered.
Inputs:
email - String: The email of the user you would like to share access with.
permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`.
Users with `write` permissions can add new input examples and train models.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['permission_type'] = permission_type
kwargs['email'] = email
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"authorize",
"(",
"self",
",",
"email",
",",
"permission_type",
"=",
"'read'",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'permission_type'",
"]",
"=",
... | This API endpoint allows you to authorize another user to access your model in a read or write capacity.
Before calling authorize, you must first make sure your model has been registered.
Inputs:
email - String: The email of the user you would like to share access with.
permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`.
Users with `write` permissions can add new input examples and train models.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"authorize",
"another",
"user",
"to",
"access",
"your",
"model",
"in",
"a",
"read",
"or",
"write",
"capacity",
".",
"Before",
"calling",
"authorize",
"you",
"must",
"first",
"make",
"sure",
"your",
"model",
... | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L337-L356 | train | 42,317 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.deauthorize | def deauthorize(self, email, cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to remove another user's access to your collection.
Inputs:
email - String: The email of the user you would like to share access with.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['email'] = email
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "deauthorize"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | python | def deauthorize(self, email, cloud=None, api_key=None, version=None, **kwargs):
"""
This API endpoint allows you to remove another user's access to your collection.
Inputs:
email - String: The email of the user you would like to share access with.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['email'] = email
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "deauthorize"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | [
"def",
"deauthorize",
"(",
"self",
",",
"email",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'email'",
"]",
"=",
"email",
"url_params",
"=",
"{",
"\"batch\"",... | This API endpoint allows you to remove another user's access to your collection.
Inputs:
email - String: The email of the user you would like to share access with.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"This",
"API",
"endpoint",
"allows",
"you",
"to",
"remove",
"another",
"user",
"s",
"access",
"to",
"your",
"collection",
"."
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L358-L373 | train | 42,318 |
IndicoDataSolutions/IndicoIo-python | indicoio/custom/custom.py | Collection.rename | def rename(self, name, cloud=None, api_key=None, version=None, **kwargs):
"""
If you'd like to change the name you use to access a given collection, you can call the rename endpoint.
This is especially useful if the name you use for your model is not available for registration.
Inputs:
name - String: The new name used to access your model.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['name'] = name
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "rename"}
result = self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
self.keywords['collection'] = name
return result | python | def rename(self, name, cloud=None, api_key=None, version=None, **kwargs):
"""
If you'd like to change the name you use to access a given collection, you can call the rename endpoint.
This is especially useful if the name you use for your model is not available for registration.
Inputs:
name - String: The new name used to access your model.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
"""
kwargs['name'] = name
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "rename"}
result = self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
self.keywords['collection'] = name
return result | [
"def",
"rename",
"(",
"self",
",",
"name",
",",
"cloud",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'name'",
"]",
"=",
"name",
"url_params",
"=",
"{",
"\"batch\"",
":",
... | If you'd like to change the name you use to access a given collection, you can call the rename endpoint.
This is especially useful if the name you use for your model is not available for registration.
Inputs:
name - String: The new name used to access your model.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. | [
"If",
"you",
"d",
"like",
"to",
"change",
"the",
"name",
"you",
"use",
"to",
"access",
"a",
"given",
"collection",
"you",
"can",
"call",
"the",
"rename",
"endpoint",
".",
"This",
"is",
"especially",
"useful",
"if",
"the",
"name",
"you",
"use",
"for",
"... | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L375-L393 | train | 42,319 |
IndicoDataSolutions/IndicoIo-python | indicoio/utils/errors.py | convert_to_py_error | def convert_to_py_error(error_message):
"""
Raise specific exceptions for ease of error handling
"""
message = error_message.lower()
for err_msg, err_type in ERR_MSGS:
if err_msg in message:
return err_type(error_message)
else:
return IndicoError(error_message) | python | def convert_to_py_error(error_message):
"""
Raise specific exceptions for ease of error handling
"""
message = error_message.lower()
for err_msg, err_type in ERR_MSGS:
if err_msg in message:
return err_type(error_message)
else:
return IndicoError(error_message) | [
"def",
"convert_to_py_error",
"(",
"error_message",
")",
":",
"message",
"=",
"error_message",
".",
"lower",
"(",
")",
"for",
"err_msg",
",",
"err_type",
"in",
"ERR_MSGS",
":",
"if",
"err_msg",
"in",
"message",
":",
"return",
"err_type",
"(",
"error_message",
... | Raise specific exceptions for ease of error handling | [
"Raise",
"specific",
"exceptions",
"for",
"ease",
"of",
"error",
"handling"
] | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/utils/errors.py#L54-L63 | train | 42,320 |
IndicoDataSolutions/IndicoIo-python | indicoio/image/facial_localization.py | facial_localization | def facial_localization(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given an image, returns a list of faces found within the image.
For each face, we return a dictionary containing the upper left corner and lower right corner.
If crop is True, the cropped face is included in the dictionary.
Input should be in a numpy ndarray or a filename.
Example usage:
.. code-block:: python
>>> from indicoio import facial_localization
>>> import numpy as np
>>> img = np.zeros([image of a face])
>>> faces = facial_localization(img)
>>> len(faces)
1
:param image: The image to be analyzed.
:type image: filepath or ndarray
:rtype: List of faces (dict) found.
"""
image = data_preprocess(image, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(image, cloud=cloud, api="faciallocalization", url_params=url_params, **kwargs) | python | def facial_localization(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given an image, returns a list of faces found within the image.
For each face, we return a dictionary containing the upper left corner and lower right corner.
If crop is True, the cropped face is included in the dictionary.
Input should be in a numpy ndarray or a filename.
Example usage:
.. code-block:: python
>>> from indicoio import facial_localization
>>> import numpy as np
>>> img = np.zeros([image of a face])
>>> faces = facial_localization(img)
>>> len(faces)
1
:param image: The image to be analyzed.
:type image: filepath or ndarray
:rtype: List of faces (dict) found.
"""
image = data_preprocess(image, batch=batch)
url_params = {"batch": batch, "api_key": api_key, "version": version}
return api_handler(image, cloud=cloud, api="faciallocalization", url_params=url_params, **kwargs) | [
"def",
"facial_localization",
"(",
"image",
",",
"cloud",
"=",
"None",
",",
"batch",
"=",
"False",
",",
"api_key",
"=",
"None",
",",
"version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"image",
"=",
"data_preprocess",
"(",
"image",
",",
"batch",... | Given an image, returns a list of faces found within the image.
For each face, we return a dictionary containing the upper left corner and lower right corner.
If crop is True, the cropped face is included in the dictionary.
Input should be in a numpy ndarray or a filename.
Example usage:
.. code-block:: python
>>> from indicoio import facial_localization
>>> import numpy as np
>>> img = np.zeros([image of a face])
>>> faces = facial_localization(img)
>>> len(faces)
1
:param image: The image to be analyzed.
:type image: filepath or ndarray
:rtype: List of faces (dict) found. | [
"Given",
"an",
"image",
"returns",
"a",
"list",
"of",
"faces",
"found",
"within",
"the",
"image",
".",
"For",
"each",
"face",
"we",
"return",
"a",
"dictionary",
"containing",
"the",
"upper",
"left",
"corner",
"and",
"lower",
"right",
"corner",
".",
"If",
... | 6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa | https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/image/facial_localization.py#L7-L31 | train | 42,321 |
data61/clkhash | clkhash/key_derivation.py | hkdf | def hkdf(master_secret, # type: bytes
num_keys, # type: int
hash_algo='SHA256', # type: str
salt=None, # type: Optional[bytes]
info=None, # type: Optional[bytes]
key_size=DEFAULT_KEY_SIZE # type: int
):
# type: (...) -> Tuple[bytes, ...]
"""
Executes the HKDF key derivation function as described in rfc5869 to
derive `num_keys` keys of size `key_size` from the master_secret.
:param master_secret: input keying material
:param num_keys: the number of keys the kdf should produce
:param hash_algo: The hash function used by HKDF for the internal
HMAC calls. The choice of hash function defines the maximum
length of the output key material. Output bytes <= 255 * hash
digest size (in bytes).
:param salt: HKDF is defined to operate with and without random
salt. This is done to accommodate applications where a salt
value is not available. We stress, however, that the use of salt
adds significantly to themstrength of HKDF, ensuring
independence between different uses of the hash function,
supporting "source-independent" extraction, and strengthening
the analytical results that back the HKDF design.
Random salt differs fundamentally from the initial keying
material in two ways: it is non-secret and can be re-used.
Ideally, the salt value is a random (or pseudorandom) string
of the length HashLen. Yet, even a salt value of less quality
(shorter in size or with limited entropy) may still make a
significant contribution to the security of the output keying
material.
:param info: While the 'info' value is optional in the definition of
HKDF, it is often of great importance in applications. Its main
objective is to bind the derived key material to application-
and context-specific information. For example, 'info' may
contain a protocol number, algorithm identifiers, user
identities, etc. In particular, it may prevent the derivation
of the same keying material for different contexts (when the
same input key material (IKM) is used in such different
contexts). It may also accommodate additional inputs to the key
expansion part, if so desired (e.g., an application may want to
bind the key material to its length L, thus making L part of the
'info' field). There is one technical requirement from 'info':
it should be independent of the input key material value IKM.
:param key_size: the size of the produced keys
:return: Derived keys
"""
try:
hash_function = _HASH_FUNCTIONS[hash_algo]
except KeyError:
msg = "unsupported hash function '{}'".format(hash_algo)
raise_from(ValueError(msg), None)
hkdf = HKDF(algorithm=hash_function(),
length=num_keys * key_size,
salt=salt,
info=info,
backend=default_backend())
# hkdf.derive returns a block of num_keys * key_size bytes which we
# divide up into num_keys chunks, each of size key_size
keybytes = hkdf.derive(master_secret)
keys = tuple(keybytes[i * key_size:(i + 1) * key_size] for i in range(num_keys))
return keys | python | def hkdf(master_secret, # type: bytes
num_keys, # type: int
hash_algo='SHA256', # type: str
salt=None, # type: Optional[bytes]
info=None, # type: Optional[bytes]
key_size=DEFAULT_KEY_SIZE # type: int
):
# type: (...) -> Tuple[bytes, ...]
"""
Executes the HKDF key derivation function as described in rfc5869 to
derive `num_keys` keys of size `key_size` from the master_secret.
:param master_secret: input keying material
:param num_keys: the number of keys the kdf should produce
:param hash_algo: The hash function used by HKDF for the internal
HMAC calls. The choice of hash function defines the maximum
length of the output key material. Output bytes <= 255 * hash
digest size (in bytes).
:param salt: HKDF is defined to operate with and without random
salt. This is done to accommodate applications where a salt
value is not available. We stress, however, that the use of salt
adds significantly to themstrength of HKDF, ensuring
independence between different uses of the hash function,
supporting "source-independent" extraction, and strengthening
the analytical results that back the HKDF design.
Random salt differs fundamentally from the initial keying
material in two ways: it is non-secret and can be re-used.
Ideally, the salt value is a random (or pseudorandom) string
of the length HashLen. Yet, even a salt value of less quality
(shorter in size or with limited entropy) may still make a
significant contribution to the security of the output keying
material.
:param info: While the 'info' value is optional in the definition of
HKDF, it is often of great importance in applications. Its main
objective is to bind the derived key material to application-
and context-specific information. For example, 'info' may
contain a protocol number, algorithm identifiers, user
identities, etc. In particular, it may prevent the derivation
of the same keying material for different contexts (when the
same input key material (IKM) is used in such different
contexts). It may also accommodate additional inputs to the key
expansion part, if so desired (e.g., an application may want to
bind the key material to its length L, thus making L part of the
'info' field). There is one technical requirement from 'info':
it should be independent of the input key material value IKM.
:param key_size: the size of the produced keys
:return: Derived keys
"""
try:
hash_function = _HASH_FUNCTIONS[hash_algo]
except KeyError:
msg = "unsupported hash function '{}'".format(hash_algo)
raise_from(ValueError(msg), None)
hkdf = HKDF(algorithm=hash_function(),
length=num_keys * key_size,
salt=salt,
info=info,
backend=default_backend())
# hkdf.derive returns a block of num_keys * key_size bytes which we
# divide up into num_keys chunks, each of size key_size
keybytes = hkdf.derive(master_secret)
keys = tuple(keybytes[i * key_size:(i + 1) * key_size] for i in range(num_keys))
return keys | [
"def",
"hkdf",
"(",
"master_secret",
",",
"# type: bytes",
"num_keys",
",",
"# type: int",
"hash_algo",
"=",
"'SHA256'",
",",
"# type: str",
"salt",
"=",
"None",
",",
"# type: Optional[bytes]",
"info",
"=",
"None",
",",
"# type: Optional[bytes]",
"key_size",
"=",
... | Executes the HKDF key derivation function as described in rfc5869 to
derive `num_keys` keys of size `key_size` from the master_secret.
:param master_secret: input keying material
:param num_keys: the number of keys the kdf should produce
:param hash_algo: The hash function used by HKDF for the internal
HMAC calls. The choice of hash function defines the maximum
length of the output key material. Output bytes <= 255 * hash
digest size (in bytes).
:param salt: HKDF is defined to operate with and without random
salt. This is done to accommodate applications where a salt
value is not available. We stress, however, that the use of salt
adds significantly to themstrength of HKDF, ensuring
independence between different uses of the hash function,
supporting "source-independent" extraction, and strengthening
the analytical results that back the HKDF design.
Random salt differs fundamentally from the initial keying
material in two ways: it is non-secret and can be re-used.
Ideally, the salt value is a random (or pseudorandom) string
of the length HashLen. Yet, even a salt value of less quality
(shorter in size or with limited entropy) may still make a
significant contribution to the security of the output keying
material.
:param info: While the 'info' value is optional in the definition of
HKDF, it is often of great importance in applications. Its main
objective is to bind the derived key material to application-
and context-specific information. For example, 'info' may
contain a protocol number, algorithm identifiers, user
identities, etc. In particular, it may prevent the derivation
of the same keying material for different contexts (when the
same input key material (IKM) is used in such different
contexts). It may also accommodate additional inputs to the key
expansion part, if so desired (e.g., an application may want to
bind the key material to its length L, thus making L part of the
'info' field). There is one technical requirement from 'info':
it should be independent of the input key material value IKM.
:param key_size: the size of the produced keys
:return: Derived keys | [
"Executes",
"the",
"HKDF",
"key",
"derivation",
"function",
"as",
"described",
"in",
"rfc5869",
"to",
"derive",
"num_keys",
"keys",
"of",
"size",
"key_size",
"from",
"the",
"master_secret",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/key_derivation.py#L20-L83 | train | 42,322 |
data61/clkhash | clkhash/validate_data.py | validate_row_lengths | def validate_row_lengths(fields, # type: Sequence[FieldSpec]
data # type: Sequence[Sequence[str]]
):
# type: (...) -> None
""" Validate the `data` row lengths according to the specification
in `fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The rows to check.
:raises FormatError: When the number of entries in a row does
not match expectation.
"""
for i, row in enumerate(data):
if len(fields) != len(row):
msg = 'Row {} has {} entries when {} are expected.'.format(
i, len(row), len(fields))
raise FormatError(msg) | python | def validate_row_lengths(fields, # type: Sequence[FieldSpec]
data # type: Sequence[Sequence[str]]
):
# type: (...) -> None
""" Validate the `data` row lengths according to the specification
in `fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The rows to check.
:raises FormatError: When the number of entries in a row does
not match expectation.
"""
for i, row in enumerate(data):
if len(fields) != len(row):
msg = 'Row {} has {} entries when {} are expected.'.format(
i, len(row), len(fields))
raise FormatError(msg) | [
"def",
"validate_row_lengths",
"(",
"fields",
",",
"# type: Sequence[FieldSpec]",
"data",
"# type: Sequence[Sequence[str]]",
")",
":",
"# type: (...) -> None",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"data",
")",
":",
"if",
"len",
"(",
"fields",
")",
"!=",... | Validate the `data` row lengths according to the specification
in `fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The rows to check.
:raises FormatError: When the number of entries in a row does
not match expectation. | [
"Validate",
"the",
"data",
"row",
"lengths",
"according",
"to",
"the",
"specification",
"in",
"fields",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/validate_data.py#L30-L47 | train | 42,323 |
data61/clkhash | clkhash/validate_data.py | validate_entries | def validate_entries(fields, # type: Sequence[FieldSpec]
data # type: Sequence[Sequence[str]]
):
# type: (...) -> None
""" Validate the `data` entries according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The data to validate.
:raises EntryError: When an entry is not valid according to its
:class:`FieldSpec`.
"""
validators = [f.validate for f in fields]
for i, row in enumerate(data):
for entry, v in zip(row, validators):
try:
v(entry)
except InvalidEntryError as e:
msg = (
'Invalid entry in row {row_index}, column '
"'{column_name}'. {original_message}"
).format(
row_index=i,
column_name=cast(FieldSpec, e.field_spec).identifier,
original_message=e.args[0])
e_invalid_entry = EntryError(msg)
e_invalid_entry.field_spec = e.field_spec
e_invalid_entry.row_index = i
raise_from(e_invalid_entry, e) | python | def validate_entries(fields, # type: Sequence[FieldSpec]
data # type: Sequence[Sequence[str]]
):
# type: (...) -> None
""" Validate the `data` entries according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The data to validate.
:raises EntryError: When an entry is not valid according to its
:class:`FieldSpec`.
"""
validators = [f.validate for f in fields]
for i, row in enumerate(data):
for entry, v in zip(row, validators):
try:
v(entry)
except InvalidEntryError as e:
msg = (
'Invalid entry in row {row_index}, column '
"'{column_name}'. {original_message}"
).format(
row_index=i,
column_name=cast(FieldSpec, e.field_spec).identifier,
original_message=e.args[0])
e_invalid_entry = EntryError(msg)
e_invalid_entry.field_spec = e.field_spec
e_invalid_entry.row_index = i
raise_from(e_invalid_entry, e) | [
"def",
"validate_entries",
"(",
"fields",
",",
"# type: Sequence[FieldSpec]",
"data",
"# type: Sequence[Sequence[str]]",
")",
":",
"# type: (...) -> None",
"validators",
"=",
"[",
"f",
".",
"validate",
"for",
"f",
"in",
"fields",
"]",
"for",
"i",
",",
"row",
"in",... | Validate the `data` entries according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The data to validate.
:raises EntryError: When an entry is not valid according to its
:class:`FieldSpec`. | [
"Validate",
"the",
"data",
"entries",
"according",
"to",
"the",
"specification",
"in",
"fields",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/validate_data.py#L50-L80 | train | 42,324 |
data61/clkhash | clkhash/validate_data.py | validate_header | def validate_header(fields, # type: Sequence[FieldSpec]
column_names # type: Sequence[str]
):
# type: (...) -> None
""" Validate the `column_names` according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param column_names: A sequence of column identifier.
:raises FormatError: When the number of columns or the column
identifiers don't match the specification.
"""
if len(fields) != len(column_names):
msg = 'Header has {} columns when {} are expected.'.format(
len(column_names), len(fields))
raise FormatError(msg)
for f, column in zip(fields, column_names):
if f.identifier != column:
msg = "Column has identifier '{}' when '{}' is expected.".format(
column, f.identifier)
raise FormatError(msg) | python | def validate_header(fields, # type: Sequence[FieldSpec]
column_names # type: Sequence[str]
):
# type: (...) -> None
""" Validate the `column_names` according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param column_names: A sequence of column identifier.
:raises FormatError: When the number of columns or the column
identifiers don't match the specification.
"""
if len(fields) != len(column_names):
msg = 'Header has {} columns when {} are expected.'.format(
len(column_names), len(fields))
raise FormatError(msg)
for f, column in zip(fields, column_names):
if f.identifier != column:
msg = "Column has identifier '{}' when '{}' is expected.".format(
column, f.identifier)
raise FormatError(msg) | [
"def",
"validate_header",
"(",
"fields",
",",
"# type: Sequence[FieldSpec]",
"column_names",
"# type: Sequence[str]",
")",
":",
"# type: (...) -> None",
"if",
"len",
"(",
"fields",
")",
"!=",
"len",
"(",
"column_names",
")",
":",
"msg",
"=",
"'Header has {} columns wh... | Validate the `column_names` according to the specification in
`fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param column_names: A sequence of column identifier.
:raises FormatError: When the number of columns or the column
identifiers don't match the specification. | [
"Validate",
"the",
"column_names",
"according",
"to",
"the",
"specification",
"in",
"fields",
"."
] | ec6398d6708a063de83f7c3d6286587bff8e7121 | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/validate_data.py#L83-L105 | train | 42,325 |
tmoerman/arboreto | arboreto/core.py | infer_partial_network | def infer_partial_network(regressor_type,
regressor_kwargs,
tf_matrix,
tf_matrix_gene_names,
target_gene_name,
target_gene_expression,
include_meta=False,
early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
seed=DEMON_SEED):
"""
Ties together regressor model training with regulatory links and meta data extraction.
:param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param tf_matrix: numpy matrix. The feature matrix X to use for the regression.
:param tf_matrix_gene_names: list of transcription factor names corresponding to the columns of the tf_matrix used to
train the regression model.
:param target_gene_name: the name of the target gene to infer the regulatory links for.
:param target_gene_expression: the expression profile of the target gene. Numpy array.
:param include_meta: whether to also return the meta information DataFrame.
:param early_stop_window_length: window length of the early stopping monitor.
:param seed: (optional) random seed for the regressors.
:return: if include_meta == True, return links_df, meta_df
link_df: a Pandas DataFrame['TF', 'target', 'importance'] containing inferred regulatory links and their
connection strength.
meta_df: a Pandas DataFrame['target', 'meta', 'value'] containing meta information regarding the trained
regression model.
"""
def fn():
(clean_tf_matrix, clean_tf_matrix_gene_names) = clean(tf_matrix, tf_matrix_gene_names, target_gene_name)
try:
trained_regressor = fit_model(regressor_type, regressor_kwargs, clean_tf_matrix, target_gene_expression,
early_stop_window_length, seed)
except ValueError as e:
raise ValueError("Regression for target gene {0} failed. Cause {1}.".format(target_gene_name, repr(e)))
links_df = to_links_df(regressor_type, regressor_kwargs, trained_regressor, clean_tf_matrix_gene_names,
target_gene_name)
if include_meta:
meta_df = to_meta_df(trained_regressor, target_gene_name)
return links_df, meta_df
else:
return links_df
fallback_result = (None, None) if include_meta else None
return retry(fn,
fallback_result=fallback_result,
warning_msg='infer_data failed for target {0}'.format(target_gene_name)) | python | def infer_partial_network(regressor_type,
regressor_kwargs,
tf_matrix,
tf_matrix_gene_names,
target_gene_name,
target_gene_expression,
include_meta=False,
early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
seed=DEMON_SEED):
"""
Ties together regressor model training with regulatory links and meta data extraction.
:param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param tf_matrix: numpy matrix. The feature matrix X to use for the regression.
:param tf_matrix_gene_names: list of transcription factor names corresponding to the columns of the tf_matrix used to
train the regression model.
:param target_gene_name: the name of the target gene to infer the regulatory links for.
:param target_gene_expression: the expression profile of the target gene. Numpy array.
:param include_meta: whether to also return the meta information DataFrame.
:param early_stop_window_length: window length of the early stopping monitor.
:param seed: (optional) random seed for the regressors.
:return: if include_meta == True, return links_df, meta_df
link_df: a Pandas DataFrame['TF', 'target', 'importance'] containing inferred regulatory links and their
connection strength.
meta_df: a Pandas DataFrame['target', 'meta', 'value'] containing meta information regarding the trained
regression model.
"""
def fn():
(clean_tf_matrix, clean_tf_matrix_gene_names) = clean(tf_matrix, tf_matrix_gene_names, target_gene_name)
try:
trained_regressor = fit_model(regressor_type, regressor_kwargs, clean_tf_matrix, target_gene_expression,
early_stop_window_length, seed)
except ValueError as e:
raise ValueError("Regression for target gene {0} failed. Cause {1}.".format(target_gene_name, repr(e)))
links_df = to_links_df(regressor_type, regressor_kwargs, trained_regressor, clean_tf_matrix_gene_names,
target_gene_name)
if include_meta:
meta_df = to_meta_df(trained_regressor, target_gene_name)
return links_df, meta_df
else:
return links_df
fallback_result = (None, None) if include_meta else None
return retry(fn,
fallback_result=fallback_result,
warning_msg='infer_data failed for target {0}'.format(target_gene_name)) | [
"def",
"infer_partial_network",
"(",
"regressor_type",
",",
"regressor_kwargs",
",",
"tf_matrix",
",",
"tf_matrix_gene_names",
",",
"target_gene_name",
",",
"target_gene_expression",
",",
"include_meta",
"=",
"False",
",",
"early_stop_window_length",
"=",
"EARLY_STOP_WINDOW... | Ties together regressor model training with regulatory links and meta data extraction.
:param regressor_type: string. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param tf_matrix: numpy matrix. The feature matrix X to use for the regression.
:param tf_matrix_gene_names: list of transcription factor names corresponding to the columns of the tf_matrix used to
train the regression model.
:param target_gene_name: the name of the target gene to infer the regulatory links for.
:param target_gene_expression: the expression profile of the target gene. Numpy array.
:param include_meta: whether to also return the meta information DataFrame.
:param early_stop_window_length: window length of the early stopping monitor.
:param seed: (optional) random seed for the regressors.
:return: if include_meta == True, return links_df, meta_df
link_df: a Pandas DataFrame['TF', 'target', 'importance'] containing inferred regulatory links and their
connection strength.
meta_df: a Pandas DataFrame['target', 'meta', 'value'] containing meta information regarding the trained
regression model. | [
"Ties",
"together",
"regressor",
"model",
"training",
"with",
"regulatory",
"links",
"and",
"meta",
"data",
"extraction",
"."
] | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/core.py#L270-L323 | train | 42,326 |
tmoerman/arboreto | arboreto/core.py | create_graph | def create_graph(expression_matrix,
gene_names,
tf_names,
regressor_type,
regressor_kwargs,
client,
target_genes='all',
limit=None,
include_meta=False,
early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
repartition_multiplier=1,
seed=DEMON_SEED):
"""
Main API function. Create a Dask computation graph.
Note: fixing the GC problems was fixed by 2 changes: [1] and [2] !!!
:param expression_matrix: numpy matrix. Rows are observations and columns are genes.
:param gene_names: list of gene names. Each entry corresponds to the expression_matrix column with same index.
:param tf_names: list of transcription factor names. Should have a non-empty intersection with gene_names.
:param regressor_type: regressor type. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param client: a dask.distributed client instance.
* Used to scatter-broadcast the tf matrix to the workers instead of simply wrapping in a delayed().
:param target_genes: either int, 'all' or a collection that is a subset of gene_names.
:param limit: optional number of top regulatory links to return. Default None.
:param include_meta: Also return the meta DataFrame. Default False.
:param early_stop_window_length: window length of the early stopping monitor.
:param repartition_multiplier: multiplier
:param seed: (optional) random seed for the regressors. Default 666.
:return: if include_meta is False, returns a Dask graph that computes the links DataFrame.
If include_meta is True, returns a tuple: the links DataFrame and the meta DataFrame.
"""
assert expression_matrix.shape[1] == len(gene_names)
assert client, "client is required"
tf_matrix, tf_matrix_gene_names = to_tf_matrix(expression_matrix, gene_names, tf_names)
future_tf_matrix = client.scatter(tf_matrix, broadcast=True)
# [1] wrap in a list of 1 -> unsure why but Matt. Rocklin does this often...
[future_tf_matrix_gene_names] = client.scatter([tf_matrix_gene_names], broadcast=True)
delayed_link_dfs = [] # collection of delayed link DataFrames
delayed_meta_dfs = [] # collection of delayed meta DataFrame
for target_gene_index in target_gene_indices(gene_names, target_genes):
target_gene_name = delayed(gene_names[target_gene_index], pure=True)
target_gene_expression = delayed(expression_matrix[:, target_gene_index], pure=True)
if include_meta:
delayed_link_df, delayed_meta_df = delayed(infer_partial_network, pure=True, nout=2)(
regressor_type, regressor_kwargs,
future_tf_matrix, future_tf_matrix_gene_names,
target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
if delayed_link_df is not None:
delayed_link_dfs.append(delayed_link_df)
delayed_meta_dfs.append(delayed_meta_df)
else:
delayed_link_df = delayed(infer_partial_network, pure=True)(
regressor_type, regressor_kwargs,
future_tf_matrix, future_tf_matrix_gene_names,
target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
if delayed_link_df is not None:
delayed_link_dfs.append(delayed_link_df)
# gather the DataFrames into one distributed DataFrame
all_links_df = from_delayed(delayed_link_dfs, meta=_GRN_SCHEMA)
all_meta_df = from_delayed(delayed_meta_dfs, meta=_META_SCHEMA)
# optionally limit the number of resulting regulatory links, descending by top importance
if limit:
maybe_limited_links_df = all_links_df.nlargest(limit, columns=['importance'])
else:
maybe_limited_links_df = all_links_df
# [2] repartition to nr of workers -> important to avoid GC problems!
# see: http://dask.pydata.org/en/latest/dataframe-performance.html#repartition-to-reduce-overhead
n_parts = len(client.ncores()) * repartition_multiplier
if include_meta:
return maybe_limited_links_df.repartition(npartitions=n_parts), \
all_meta_df.repartition(npartitions=n_parts)
else:
return maybe_limited_links_df.repartition(npartitions=n_parts) | python | def create_graph(expression_matrix,
gene_names,
tf_names,
regressor_type,
regressor_kwargs,
client,
target_genes='all',
limit=None,
include_meta=False,
early_stop_window_length=EARLY_STOP_WINDOW_LENGTH,
repartition_multiplier=1,
seed=DEMON_SEED):
"""
Main API function. Create a Dask computation graph.
Note: fixing the GC problems was fixed by 2 changes: [1] and [2] !!!
:param expression_matrix: numpy matrix. Rows are observations and columns are genes.
:param gene_names: list of gene names. Each entry corresponds to the expression_matrix column with same index.
:param tf_names: list of transcription factor names. Should have a non-empty intersection with gene_names.
:param regressor_type: regressor type. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param client: a dask.distributed client instance.
* Used to scatter-broadcast the tf matrix to the workers instead of simply wrapping in a delayed().
:param target_genes: either int, 'all' or a collection that is a subset of gene_names.
:param limit: optional number of top regulatory links to return. Default None.
:param include_meta: Also return the meta DataFrame. Default False.
:param early_stop_window_length: window length of the early stopping monitor.
:param repartition_multiplier: multiplier
:param seed: (optional) random seed for the regressors. Default 666.
:return: if include_meta is False, returns a Dask graph that computes the links DataFrame.
If include_meta is True, returns a tuple: the links DataFrame and the meta DataFrame.
"""
assert expression_matrix.shape[1] == len(gene_names)
assert client, "client is required"
tf_matrix, tf_matrix_gene_names = to_tf_matrix(expression_matrix, gene_names, tf_names)
future_tf_matrix = client.scatter(tf_matrix, broadcast=True)
# [1] wrap in a list of 1 -> unsure why but Matt. Rocklin does this often...
[future_tf_matrix_gene_names] = client.scatter([tf_matrix_gene_names], broadcast=True)
delayed_link_dfs = [] # collection of delayed link DataFrames
delayed_meta_dfs = [] # collection of delayed meta DataFrame
for target_gene_index in target_gene_indices(gene_names, target_genes):
target_gene_name = delayed(gene_names[target_gene_index], pure=True)
target_gene_expression = delayed(expression_matrix[:, target_gene_index], pure=True)
if include_meta:
delayed_link_df, delayed_meta_df = delayed(infer_partial_network, pure=True, nout=2)(
regressor_type, regressor_kwargs,
future_tf_matrix, future_tf_matrix_gene_names,
target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
if delayed_link_df is not None:
delayed_link_dfs.append(delayed_link_df)
delayed_meta_dfs.append(delayed_meta_df)
else:
delayed_link_df = delayed(infer_partial_network, pure=True)(
regressor_type, regressor_kwargs,
future_tf_matrix, future_tf_matrix_gene_names,
target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
if delayed_link_df is not None:
delayed_link_dfs.append(delayed_link_df)
# gather the DataFrames into one distributed DataFrame
all_links_df = from_delayed(delayed_link_dfs, meta=_GRN_SCHEMA)
all_meta_df = from_delayed(delayed_meta_dfs, meta=_META_SCHEMA)
# optionally limit the number of resulting regulatory links, descending by top importance
if limit:
maybe_limited_links_df = all_links_df.nlargest(limit, columns=['importance'])
else:
maybe_limited_links_df = all_links_df
# [2] repartition to nr of workers -> important to avoid GC problems!
# see: http://dask.pydata.org/en/latest/dataframe-performance.html#repartition-to-reduce-overhead
n_parts = len(client.ncores()) * repartition_multiplier
if include_meta:
return maybe_limited_links_df.repartition(npartitions=n_parts), \
all_meta_df.repartition(npartitions=n_parts)
else:
return maybe_limited_links_df.repartition(npartitions=n_parts) | [
"def",
"create_graph",
"(",
"expression_matrix",
",",
"gene_names",
",",
"tf_names",
",",
"regressor_type",
",",
"regressor_kwargs",
",",
"client",
",",
"target_genes",
"=",
"'all'",
",",
"limit",
"=",
"None",
",",
"include_meta",
"=",
"False",
",",
"early_stop_... | Main API function. Create a Dask computation graph.
Note: fixing the GC problems was fixed by 2 changes: [1] and [2] !!!
:param expression_matrix: numpy matrix. Rows are observations and columns are genes.
:param gene_names: list of gene names. Each entry corresponds to the expression_matrix column with same index.
:param tf_names: list of transcription factor names. Should have a non-empty intersection with gene_names.
:param regressor_type: regressor type. Case insensitive.
:param regressor_kwargs: dict of key-value pairs that configures the regressor.
:param client: a dask.distributed client instance.
* Used to scatter-broadcast the tf matrix to the workers instead of simply wrapping in a delayed().
:param target_genes: either int, 'all' or a collection that is a subset of gene_names.
:param limit: optional number of top regulatory links to return. Default None.
:param include_meta: Also return the meta DataFrame. Default False.
:param early_stop_window_length: window length of the early stopping monitor.
:param repartition_multiplier: multiplier
:param seed: (optional) random seed for the regressors. Default 666.
:return: if include_meta is False, returns a Dask graph that computes the links DataFrame.
If include_meta is True, returns a tuple: the links DataFrame and the meta DataFrame. | [
"Main",
"API",
"function",
".",
"Create",
"a",
"Dask",
"computation",
"graph",
"."
] | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/core.py#L364-L450 | train | 42,327 |
thumbor/libthumbor | libthumbor/crypto.py | CryptoURL.generate | def generate(self, **options):
'''Generates an encrypted URL with the specified options'''
if options.get('unsafe', False):
return unsafe_url(**options)
else:
return self.generate_new(options) | python | def generate(self, **options):
'''Generates an encrypted URL with the specified options'''
if options.get('unsafe', False):
return unsafe_url(**options)
else:
return self.generate_new(options) | [
"def",
"generate",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"if",
"options",
".",
"get",
"(",
"'unsafe'",
",",
"False",
")",
":",
"return",
"unsafe_url",
"(",
"*",
"*",
"options",
")",
"else",
":",
"return",
"self",
".",
"generate_new",
"(",
... | Generates an encrypted URL with the specified options | [
"Generates",
"an",
"encrypted",
"URL",
"with",
"the",
"specified",
"options"
] | 8114928102ff07166ce32e6d894f30124b5e169a | https://github.com/thumbor/libthumbor/blob/8114928102ff07166ce32e6d894f30124b5e169a/libthumbor/crypto.py#L52-L58 | train | 42,328 |
maraujop/requests-oauth | oauth_hook/auth.py | to_utf8 | def to_utf8(x):
"""
Tries to utf-8 encode x when possible
If x is a string returns it encoded, otherwise tries to iter x and
encode utf-8 all strings it contains, returning a list.
"""
if isinstance(x, basestring):
return x.encode('utf-8') if isinstance(x, unicode) else x
try:
l = iter(x)
except TypeError:
return x
return [to_utf8(i) for i in l] | python | def to_utf8(x):
"""
Tries to utf-8 encode x when possible
If x is a string returns it encoded, otherwise tries to iter x and
encode utf-8 all strings it contains, returning a list.
"""
if isinstance(x, basestring):
return x.encode('utf-8') if isinstance(x, unicode) else x
try:
l = iter(x)
except TypeError:
return x
return [to_utf8(i) for i in l] | [
"def",
"to_utf8",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
":",
"return",
"x",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"x",
",",
"unicode",
")",
"else",
"x",
"try",
":",
"l",
"=",
"iter",
"(",... | Tries to utf-8 encode x when possible
If x is a string returns it encoded, otherwise tries to iter x and
encode utf-8 all strings it contains, returning a list. | [
"Tries",
"to",
"utf",
"-",
"8",
"encode",
"x",
"when",
"possible"
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/auth.py#L16-L29 | train | 42,329 |
maraujop/requests-oauth | oauth_hook/hook.py | CustomSignatureMethod_HMAC_SHA1.signing_base | def signing_base(self, request, consumer, token):
"""
This method generates the OAuth signature. It's defined here to avoid circular imports.
"""
sig = (
escape(request.method),
escape(OAuthHook.get_normalized_url(request.url)),
escape(OAuthHook.get_normalized_parameters(request)),
)
key = '%s&' % escape(consumer.secret)
if token is not None:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw | python | def signing_base(self, request, consumer, token):
"""
This method generates the OAuth signature. It's defined here to avoid circular imports.
"""
sig = (
escape(request.method),
escape(OAuthHook.get_normalized_url(request.url)),
escape(OAuthHook.get_normalized_parameters(request)),
)
key = '%s&' % escape(consumer.secret)
if token is not None:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw | [
"def",
"signing_base",
"(",
"self",
",",
"request",
",",
"consumer",
",",
"token",
")",
":",
"sig",
"=",
"(",
"escape",
"(",
"request",
".",
"method",
")",
",",
"escape",
"(",
"OAuthHook",
".",
"get_normalized_url",
"(",
"request",
".",
"url",
")",
")"... | This method generates the OAuth signature. It's defined here to avoid circular imports. | [
"This",
"method",
"generates",
"the",
"OAuth",
"signature",
".",
"It",
"s",
"defined",
"here",
"to",
"avoid",
"circular",
"imports",
"."
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/hook.py#L14-L28 | train | 42,330 |
maraujop/requests-oauth | oauth_hook/hook.py | OAuthHook._split_url_string | def _split_url_string(query_string):
"""
Turns a `query_string` into a Python dictionary with unquoted values
"""
parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters | python | def _split_url_string(query_string):
"""
Turns a `query_string` into a Python dictionary with unquoted values
"""
parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters | [
"def",
"_split_url_string",
"(",
"query_string",
")",
":",
"parameters",
"=",
"parse_qs",
"(",
"to_utf8",
"(",
"query_string",
")",
",",
"keep_blank_values",
"=",
"True",
")",
"for",
"k",
",",
"v",
"in",
"parameters",
".",
"iteritems",
"(",
")",
":",
"para... | Turns a `query_string` into a Python dictionary with unquoted values | [
"Turns",
"a",
"query_string",
"into",
"a",
"Python",
"dictionary",
"with",
"unquoted",
"values"
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/hook.py#L57-L64 | train | 42,331 |
maraujop/requests-oauth | oauth_hook/hook.py | OAuthHook.get_normalized_parameters | def get_normalized_parameters(request):
"""
Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
"""
# See issues #10 and #12
if ('Content-Type' not in request.headers or \
request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \
and not isinstance(request.data, basestring):
data_and_params = dict(request.data.items() + request.params.items())
for key,value in data_and_params.items():
request.data_and_params[to_utf8(key)] = to_utf8(value)
if request.data_and_params.has_key('oauth_signature'):
del request.data_and_params['oauth_signature']
items = []
for key, value in request.data_and_params.iteritems():
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((key, value))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((key, value))
else:
items.extend((key, item) for item in value)
# Include any query string parameters included in the url
query_string = urlparse(request.url)[4]
items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
items.sort()
return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~') | python | def get_normalized_parameters(request):
"""
Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
"""
# See issues #10 and #12
if ('Content-Type' not in request.headers or \
request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \
and not isinstance(request.data, basestring):
data_and_params = dict(request.data.items() + request.params.items())
for key,value in data_and_params.items():
request.data_and_params[to_utf8(key)] = to_utf8(value)
if request.data_and_params.has_key('oauth_signature'):
del request.data_and_params['oauth_signature']
items = []
for key, value in request.data_and_params.iteritems():
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((key, value))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((key, value))
else:
items.extend((key, item) for item in value)
# Include any query string parameters included in the url
query_string = urlparse(request.url)[4]
items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
items.sort()
return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~') | [
"def",
"get_normalized_parameters",
"(",
"request",
")",
":",
"# See issues #10 and #12",
"if",
"(",
"'Content-Type'",
"not",
"in",
"request",
".",
"headers",
"or",
"request",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")",
".",
"startswith",
"(",
"'appl... | Returns a string that contains the parameters that must be signed.
This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1 | [
"Returns",
"a",
"string",
"that",
"contains",
"the",
"parameters",
"that",
"must",
"be",
"signed",
".",
"This",
"function",
"is",
"called",
"by",
"SignatureMethod",
"subclass",
"CustomSignatureMethod_HMAC_SHA1"
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/hook.py#L67-L104 | train | 42,332 |
maraujop/requests-oauth | oauth_hook/hook.py | OAuthHook.get_normalized_url | def get_normalized_url(url):
"""
Returns a normalized url, without params
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (url, scheme))
# Normalized URL excludes params, query, and fragment.
return urlunparse((scheme, netloc, path, None, None, None)) | python | def get_normalized_url(url):
"""
Returns a normalized url, without params
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (url, scheme))
# Normalized URL excludes params, query, and fragment.
return urlunparse((scheme, netloc, path, None, None, None)) | [
"def",
"get_normalized_url",
"(",
"url",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"params",
",",
"query",
",",
"fragment",
"=",
"urlparse",
"(",
"url",
")",
"# Exclude default port numbers.",
"if",
"scheme",
"==",
"'http'",
"and",
"netloc",
"[",
... | Returns a normalized url, without params | [
"Returns",
"a",
"normalized",
"url",
"without",
"params"
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/hook.py#L107-L122 | train | 42,333 |
maraujop/requests-oauth | oauth_hook/hook.py | OAuthHook.authorization_header | def authorization_header(oauth_params):
"""Return Authorization header"""
authorization_headers = 'OAuth realm="",'
authorization_headers += ','.join(['{0}="{1}"'.format(k, urllib.quote(str(v)))
for k, v in oauth_params.items()])
return authorization_headers | python | def authorization_header(oauth_params):
"""Return Authorization header"""
authorization_headers = 'OAuth realm="",'
authorization_headers += ','.join(['{0}="{1}"'.format(k, urllib.quote(str(v)))
for k, v in oauth_params.items()])
return authorization_headers | [
"def",
"authorization_header",
"(",
"oauth_params",
")",
":",
"authorization_headers",
"=",
"'OAuth realm=\"\",'",
"authorization_headers",
"+=",
"','",
".",
"join",
"(",
"[",
"'{0}=\"{1}\"'",
".",
"format",
"(",
"k",
",",
"urllib",
".",
"quote",
"(",
"str",
"("... | Return Authorization header | [
"Return",
"Authorization",
"header"
] | 51bdf115a259ce326e7894d1a68470387ecd5f22 | https://github.com/maraujop/requests-oauth/blob/51bdf115a259ce326e7894d1a68470387ecd5f22/oauth_hook/hook.py#L143-L148 | train | 42,334 |
tmoerman/arboreto | arboreto/algo.py | _prepare_input | def _prepare_input(expression_data,
gene_names,
tf_names):
"""
Wrangle the inputs into the correct formats.
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings).
Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:return: a triple of:
1. a np.ndarray or scipy.sparse.csc_matrix
2. a list of gene name strings
3. a list of transcription factor name strings.
"""
if isinstance(expression_data, pd.DataFrame):
expression_matrix = expression_data.as_matrix()
gene_names = list(expression_data.columns)
else:
expression_matrix = expression_data
assert expression_matrix.shape[1] == len(gene_names)
if tf_names is None:
tf_names = gene_names
elif tf_names == 'all':
tf_names = gene_names
else:
if len(tf_names) == 0:
raise ValueError('Specified tf_names is empty')
if not set(gene_names).intersection(set(tf_names)):
raise ValueError('Intersection of gene_names and tf_names is empty.')
return expression_matrix, gene_names, tf_names | python | def _prepare_input(expression_data,
gene_names,
tf_names):
"""
Wrangle the inputs into the correct formats.
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings).
Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:return: a triple of:
1. a np.ndarray or scipy.sparse.csc_matrix
2. a list of gene name strings
3. a list of transcription factor name strings.
"""
if isinstance(expression_data, pd.DataFrame):
expression_matrix = expression_data.as_matrix()
gene_names = list(expression_data.columns)
else:
expression_matrix = expression_data
assert expression_matrix.shape[1] == len(gene_names)
if tf_names is None:
tf_names = gene_names
elif tf_names == 'all':
tf_names = gene_names
else:
if len(tf_names) == 0:
raise ValueError('Specified tf_names is empty')
if not set(gene_names).intersection(set(tf_names)):
raise ValueError('Intersection of gene_names and tf_names is empty.')
return expression_matrix, gene_names, tf_names | [
"def",
"_prepare_input",
"(",
"expression_data",
",",
"gene_names",
",",
"tf_names",
")",
":",
"if",
"isinstance",
"(",
"expression_data",
",",
"pd",
".",
"DataFrame",
")",
":",
"expression_matrix",
"=",
"expression_data",
".",
"as_matrix",
"(",
")",
"gene_names... | Wrangle the inputs into the correct formats.
:param expression_data: one of:
* a pandas DataFrame (rows=observations, columns=genes)
* a dense 2D numpy.ndarray
* a sparse scipy.sparse.csc_matrix
:param gene_names: optional list of gene names (strings).
Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame.
:param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
:return: a triple of:
1. a np.ndarray or scipy.sparse.csc_matrix
2. a list of gene name strings
3. a list of transcription factor name strings. | [
"Wrangle",
"the",
"inputs",
"into",
"the",
"correct",
"formats",
"."
] | 3ff7b6f987b32e5774771751dea646fa6feaaa52 | https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/algo.py#L194-L231 | train | 42,335 |
rongcloud/server-sdk-python | rongcloud/base.py | RongCloudBase._http_call | def _http_call(self, url, method, **kwargs):
"""Makes a http call. Logs response information."""
logging.debug("Request[{0}]: {1}".format(method, url))
start_time = datetime.datetime.now()
logging.debug("Header: {0}".format(kwargs['headers']))
logging.debug("Params: {0}".format(kwargs['data']))
response = requests.request(method, url, verify=False, **kwargs)
duration = datetime.datetime.now() - start_time
logging.debug("Response[{0:d}]: {1}, Duration: {2}.{3}s.".format(
response.status_code, response.reason, duration.seconds,
duration.microseconds))
return response | python | def _http_call(self, url, method, **kwargs):
"""Makes a http call. Logs response information."""
logging.debug("Request[{0}]: {1}".format(method, url))
start_time = datetime.datetime.now()
logging.debug("Header: {0}".format(kwargs['headers']))
logging.debug("Params: {0}".format(kwargs['data']))
response = requests.request(method, url, verify=False, **kwargs)
duration = datetime.datetime.now() - start_time
logging.debug("Response[{0:d}]: {1}, Duration: {2}.{3}s.".format(
response.status_code, response.reason, duration.seconds,
duration.microseconds))
return response | [
"def",
"_http_call",
"(",
"self",
",",
"url",
",",
"method",
",",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"debug",
"(",
"\"Request[{0}]: {1}\"",
".",
"format",
"(",
"method",
",",
"url",
")",
")",
"start_time",
"=",
"datetime",
".",
"datetime",
"... | Makes a http call. Logs response information. | [
"Makes",
"a",
"http",
"call",
".",
"Logs",
"response",
"information",
"."
] | 3daadd8b67c84cc5d2a9419e8d45fd69c9baf976 | https://github.com/rongcloud/server-sdk-python/blob/3daadd8b67c84cc5d2a9419e8d45fd69c9baf976/rongcloud/base.py#L50-L63 | train | 42,336 |
thumbor/libthumbor | libthumbor/url.py | calculate_width_and_height | def calculate_width_and_height(url_parts, options):
'''Appends width and height information to url'''
width = options.get('width', 0)
has_width = width
height = options.get('height', 0)
has_height = height
flip = options.get('flip', False)
flop = options.get('flop', False)
if flip:
width = width * -1
if flop:
height = height * -1
if not has_width and not has_height:
if flip:
width = "-0"
if flop:
height = "-0"
if width or height:
url_parts.append('%sx%s' % (width, height)) | python | def calculate_width_and_height(url_parts, options):
'''Appends width and height information to url'''
width = options.get('width', 0)
has_width = width
height = options.get('height', 0)
has_height = height
flip = options.get('flip', False)
flop = options.get('flop', False)
if flip:
width = width * -1
if flop:
height = height * -1
if not has_width and not has_height:
if flip:
width = "-0"
if flop:
height = "-0"
if width or height:
url_parts.append('%sx%s' % (width, height)) | [
"def",
"calculate_width_and_height",
"(",
"url_parts",
",",
"options",
")",
":",
"width",
"=",
"options",
".",
"get",
"(",
"'width'",
",",
"0",
")",
"has_width",
"=",
"width",
"height",
"=",
"options",
".",
"get",
"(",
"'height'",
",",
"0",
")",
"has_hei... | Appends width and height information to url | [
"Appends",
"width",
"and",
"height",
"information",
"to",
"url"
] | 8114928102ff07166ce32e6d894f30124b5e169a | https://github.com/thumbor/libthumbor/blob/8114928102ff07166ce32e6d894f30124b5e169a/libthumbor/url.py#L22-L44 | train | 42,337 |
thumbor/libthumbor | libthumbor/url.py | url_for | def url_for(**options):
'''Returns the url for the specified options'''
url_parts = get_url_parts(**options)
image_hash = hashlib.md5(b(options['image_url'])).hexdigest()
url_parts.append(image_hash)
return "/".join(url_parts) | python | def url_for(**options):
'''Returns the url for the specified options'''
url_parts = get_url_parts(**options)
image_hash = hashlib.md5(b(options['image_url'])).hexdigest()
url_parts.append(image_hash)
return "/".join(url_parts) | [
"def",
"url_for",
"(",
"*",
"*",
"options",
")",
":",
"url_parts",
"=",
"get_url_parts",
"(",
"*",
"*",
"options",
")",
"image_hash",
"=",
"hashlib",
".",
"md5",
"(",
"b",
"(",
"options",
"[",
"'image_url'",
"]",
")",
")",
".",
"hexdigest",
"(",
")",... | Returns the url for the specified options | [
"Returns",
"the",
"url",
"for",
"the",
"specified",
"options"
] | 8114928102ff07166ce32e6d894f30124b5e169a | https://github.com/thumbor/libthumbor/blob/8114928102ff07166ce32e6d894f30124b5e169a/libthumbor/url.py#L47-L54 | train | 42,338 |
kfdm/gntp | gntp/core.py | parse_gntp | def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
data = gntp.shim.u(data)
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise errors.ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
raise errors.ParseError('INVALID_GNTP_MESSAGE') | python | def parse_gntp(data, password=None):
"""Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message
"""
data = gntp.shim.u(data)
match = GNTP_INFO_LINE_SHORT.match(data)
if not match:
raise errors.ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data, password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data, password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data, password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
raise errors.ParseError('INVALID_GNTP_MESSAGE') | [
"def",
"parse_gntp",
"(",
"data",
",",
"password",
"=",
"None",
")",
":",
"data",
"=",
"gntp",
".",
"shim",
".",
"u",
"(",
"data",
")",
"match",
"=",
"GNTP_INFO_LINE_SHORT",
".",
"match",
"(",
"data",
")",
"if",
"not",
"match",
":",
"raise",
"errors"... | Attempt to parse a message as a GNTP message
:param string data: Message to be parsed
:param string password: Optional password to be used to verify the message | [
"Attempt",
"to",
"parse",
"a",
"message",
"as",
"a",
"GNTP",
"message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L497-L518 | train | 42,339 |
kfdm/gntp | gntp/core.py | _GNTPBase._parse_info | def _parse_info(self, data):
"""Parse the first line of a GNTP message to get security and other info values
:param string data: GNTP Message
:return dict: Parsed GNTP Info line
"""
match = GNTP_INFO_LINE.match(data)
if not match:
raise errors.ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info | python | def _parse_info(self, data):
"""Parse the first line of a GNTP message to get security and other info values
:param string data: GNTP Message
:return dict: Parsed GNTP Info line
"""
match = GNTP_INFO_LINE.match(data)
if not match:
raise errors.ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info | [
"def",
"_parse_info",
"(",
"self",
",",
"data",
")",
":",
"match",
"=",
"GNTP_INFO_LINE",
".",
"match",
"(",
"data",
")",
"if",
"not",
"match",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"'ERROR_PARSING_INFO_LINE'",
")",
"info",
"=",
"match",
".",
"... | Parse the first line of a GNTP message to get security and other info values
:param string data: GNTP Message
:return dict: Parsed GNTP Info line | [
"Parse",
"the",
"first",
"line",
"of",
"a",
"GNTP",
"message",
"to",
"get",
"security",
"and",
"other",
"info",
"values"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L87-L103 | train | 42,340 |
kfdm/gntp | gntp/core.py | _GNTPBase.set_password | def set_password(self, password, encryptAlgo='MD5'):
"""Set a password for a GNTP Message
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
"""
if not password:
self.info['encryptionAlgorithmID'] = None
self.info['keyHashAlgorithm'] = None
return
self.password = gntp.shim.b(password)
self.encryptAlgo = encryptAlgo.upper()
if not self.encryptAlgo in self.hash_algo:
raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
hashfunction = self.hash_algo.get(self.encryptAlgo)
password = password.encode('utf8')
seed = time.ctime().encode('utf8')
salt = hashfunction(seed).hexdigest()
saltHash = hashfunction(seed).digest()
keyBasis = password + saltHash
key = hashfunction(keyBasis).digest()
keyHash = hashfunction(key).hexdigest()
self.info['keyHashAlgorithmID'] = self.encryptAlgo
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper() | python | def set_password(self, password, encryptAlgo='MD5'):
"""Set a password for a GNTP Message
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
"""
if not password:
self.info['encryptionAlgorithmID'] = None
self.info['keyHashAlgorithm'] = None
return
self.password = gntp.shim.b(password)
self.encryptAlgo = encryptAlgo.upper()
if not self.encryptAlgo in self.hash_algo:
raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
hashfunction = self.hash_algo.get(self.encryptAlgo)
password = password.encode('utf8')
seed = time.ctime().encode('utf8')
salt = hashfunction(seed).hexdigest()
saltHash = hashfunction(seed).digest()
keyBasis = password + saltHash
key = hashfunction(keyBasis).digest()
keyHash = hashfunction(key).hexdigest()
self.info['keyHashAlgorithmID'] = self.encryptAlgo
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper() | [
"def",
"set_password",
"(",
"self",
",",
"password",
",",
"encryptAlgo",
"=",
"'MD5'",
")",
":",
"if",
"not",
"password",
":",
"self",
".",
"info",
"[",
"'encryptionAlgorithmID'",
"]",
"=",
"None",
"self",
".",
"info",
"[",
"'keyHashAlgorithm'",
"]",
"=",
... | Set a password for a GNTP Message
:param string password: Null to clear password
:param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512 | [
"Set",
"a",
"password",
"for",
"a",
"GNTP",
"Message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L105-L134 | train | 42,341 |
kfdm/gntp | gntp/core.py | _GNTPBase._decode_hex | def _decode_hex(self, value):
"""Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string
"""
result = ''
for i in range(0, len(value), 2):
tmp = int(value[i:i + 2], 16)
result += chr(tmp)
return result | python | def _decode_hex(self, value):
"""Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string
"""
result = ''
for i in range(0, len(value), 2):
tmp = int(value[i:i + 2], 16)
result += chr(tmp)
return result | [
"def",
"_decode_hex",
"(",
"self",
",",
"value",
")",
":",
"result",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"value",
")",
",",
"2",
")",
":",
"tmp",
"=",
"int",
"(",
"value",
"[",
"i",
":",
"i",
"+",
"2",
"]",
",",... | Helper function to decode hex string to `proper` hex string
:param string value: Human readable hex string
:return string: Hex string | [
"Helper",
"function",
"to",
"decode",
"hex",
"string",
"to",
"proper",
"hex",
"string"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L136-L146 | train | 42,342 |
kfdm/gntp | gntp/core.py | _GNTPBase._validate_password | def _validate_password(self, password):
"""Validate GNTP Message against stored password"""
self.password = password
if password is None:
raise errors.AuthError('Missing password')
keyHash = self.info.get('keyHash', None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise errors.AuthError('Invalid keyHash')
if self.password is None:
raise errors.AuthError('Missing password')
keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password + saltHash
self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise errors.AuthError('Invalid Hash')
return True | python | def _validate_password(self, password):
"""Validate GNTP Message against stored password"""
self.password = password
if password is None:
raise errors.AuthError('Missing password')
keyHash = self.info.get('keyHash', None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise errors.AuthError('Invalid keyHash')
if self.password is None:
raise errors.AuthError('Missing password')
keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password + saltHash
self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise errors.AuthError('Invalid Hash')
return True | [
"def",
"_validate_password",
"(",
"self",
",",
"password",
")",
":",
"self",
".",
"password",
"=",
"password",
"if",
"password",
"is",
"None",
":",
"raise",
"errors",
".",
"AuthError",
"(",
"'Missing password'",
")",
"keyHash",
"=",
"self",
".",
"info",
".... | Validate GNTP Message against stored password | [
"Validate",
"GNTP",
"Message",
"against",
"stored",
"password"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L158-L182 | train | 42,343 |
kfdm/gntp | gntp/core.py | _GNTPBase.validate | def validate(self):
"""Verify required headers"""
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise errors.ParseError('Missing Notification Header: ' + header) | python | def validate(self):
"""Verify required headers"""
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise errors.ParseError('Missing Notification Header: ' + header) | [
"def",
"validate",
"(",
"self",
")",
":",
"for",
"header",
"in",
"self",
".",
"_requiredHeaders",
":",
"if",
"not",
"self",
".",
"headers",
".",
"get",
"(",
"header",
",",
"False",
")",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"'Missing Notificati... | Verify required headers | [
"Verify",
"required",
"headers"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L184-L188 | train | 42,344 |
kfdm/gntp | gntp/core.py | _GNTPBase._format_info | def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = 'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info | python | def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = 'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info | [
"def",
"_format_info",
"(",
"self",
")",
":",
"info",
"=",
"'GNTP/%s %s'",
"%",
"(",
"self",
".",
"info",
".",
"get",
"(",
"'version'",
")",
",",
"self",
".",
"info",
".",
"get",
"(",
"'messagetype'",
")",
",",
")",
"if",
"self",
".",
"info",
".",
... | Generate info line for GNTP Message
:return string: | [
"Generate",
"info",
"line",
"for",
"GNTP",
"Message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L190-L214 | train | 42,345 |
kfdm/gntp | gntp/core.py | _GNTPBase._parse_dict | def _parse_dict(self, data):
"""Helper function to parse blocks of GNTP headers into a dictionary
:param string data:
:return dict: Dictionary of parsed GNTP Headers
"""
d = {}
for line in data.split('\r\n'):
match = GNTP_HEADER.match(line)
if not match:
continue
key = match.group(1).strip()
val = match.group(2).strip()
d[key] = val
return d | python | def _parse_dict(self, data):
"""Helper function to parse blocks of GNTP headers into a dictionary
:param string data:
:return dict: Dictionary of parsed GNTP Headers
"""
d = {}
for line in data.split('\r\n'):
match = GNTP_HEADER.match(line)
if not match:
continue
key = match.group(1).strip()
val = match.group(2).strip()
d[key] = val
return d | [
"def",
"_parse_dict",
"(",
"self",
",",
"data",
")",
":",
"d",
"=",
"{",
"}",
"for",
"line",
"in",
"data",
".",
"split",
"(",
"'\\r\\n'",
")",
":",
"match",
"=",
"GNTP_HEADER",
".",
"match",
"(",
"line",
")",
"if",
"not",
"match",
":",
"continue",
... | Helper function to parse blocks of GNTP headers into a dictionary
:param string data:
:return dict: Dictionary of parsed GNTP Headers | [
"Helper",
"function",
"to",
"parse",
"blocks",
"of",
"GNTP",
"headers",
"into",
"a",
"dictionary"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L216-L231 | train | 42,346 |
kfdm/gntp | gntp/core.py | _GNTPBase.add_resource | def add_resource(self, data):
"""Add binary resource
:param string data: Binary Data
"""
data = gntp.shim.b(data)
identifier = hashlib.md5(data).hexdigest()
self.resources[identifier] = data
return 'x-growl-resource://%s' % identifier | python | def add_resource(self, data):
"""Add binary resource
:param string data: Binary Data
"""
data = gntp.shim.b(data)
identifier = hashlib.md5(data).hexdigest()
self.resources[identifier] = data
return 'x-growl-resource://%s' % identifier | [
"def",
"add_resource",
"(",
"self",
",",
"data",
")",
":",
"data",
"=",
"gntp",
".",
"shim",
".",
"b",
"(",
"data",
")",
"identifier",
"=",
"hashlib",
".",
"md5",
"(",
"data",
")",
".",
"hexdigest",
"(",
")",
"self",
".",
"resources",
"[",
"identif... | Add binary resource
:param string data: Binary Data | [
"Add",
"binary",
"resource"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L236-L244 | train | 42,347 |
kfdm/gntp | gntp/core.py | _GNTPBase.decode | def decode(self, data, password=None):
"""Decode GNTP Message
:param string data:
"""
self.password = password
self.raw = gntp.shim.u(data)
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(self.raw)
self.headers = self._parse_dict(parts[0]) | python | def decode(self, data, password=None):
"""Decode GNTP Message
:param string data:
"""
self.password = password
self.raw = gntp.shim.u(data)
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(self.raw)
self.headers = self._parse_dict(parts[0]) | [
"def",
"decode",
"(",
"self",
",",
"data",
",",
"password",
"=",
"None",
")",
":",
"self",
".",
"password",
"=",
"password",
"self",
".",
"raw",
"=",
"gntp",
".",
"shim",
".",
"u",
"(",
"data",
")",
"parts",
"=",
"self",
".",
"raw",
".",
"split",... | Decode GNTP Message
:param string data: | [
"Decode",
"GNTP",
"Message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L246-L255 | train | 42,348 |
kfdm/gntp | gntp/core.py | GNTPRegister.validate | def validate(self):
'''Validate required headers and validate notification headers'''
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise errors.ParseError('Missing Registration Header: ' + header)
for notice in self.notifications:
for header in self._requiredNotificationHeaders:
if not notice.get(header, False):
raise errors.ParseError('Missing Notification Header: ' + header) | python | def validate(self):
'''Validate required headers and validate notification headers'''
for header in self._requiredHeaders:
if not self.headers.get(header, False):
raise errors.ParseError('Missing Registration Header: ' + header)
for notice in self.notifications:
for header in self._requiredNotificationHeaders:
if not notice.get(header, False):
raise errors.ParseError('Missing Notification Header: ' + header) | [
"def",
"validate",
"(",
"self",
")",
":",
"for",
"header",
"in",
"self",
".",
"_requiredHeaders",
":",
"if",
"not",
"self",
".",
"headers",
".",
"get",
"(",
"header",
",",
"False",
")",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"'Missing Registrati... | Validate required headers and validate notification headers | [
"Validate",
"required",
"headers",
"and",
"validate",
"notification",
"headers"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L307-L315 | train | 42,349 |
kfdm/gntp | gntp/core.py | GNTPRegister.decode | def decode(self, data, password):
"""Decode existing GNTP Registration message
:param string data: Message to decode
"""
self.raw = gntp.shim.u(data)
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(self.raw)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Notification-Name', False):
self.notifications.append(notice)
elif notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice | python | def decode(self, data, password):
"""Decode existing GNTP Registration message
:param string data: Message to decode
"""
self.raw = gntp.shim.u(data)
parts = self.raw.split('\r\n\r\n')
self.info = self._parse_info(self.raw)
self._validate_password(password)
self.headers = self._parse_dict(parts[0])
for i, part in enumerate(parts):
if i == 0:
continue # Skip Header
if part.strip() == '':
continue
notice = self._parse_dict(part)
if notice.get('Notification-Name', False):
self.notifications.append(notice)
elif notice.get('Identifier', False):
notice['Data'] = self._decode_binary(part, notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[notice.get('Identifier')] = notice | [
"def",
"decode",
"(",
"self",
",",
"data",
",",
"password",
")",
":",
"self",
".",
"raw",
"=",
"gntp",
".",
"shim",
".",
"u",
"(",
"data",
")",
"parts",
"=",
"self",
".",
"raw",
".",
"split",
"(",
"'\\r\\n\\r\\n'",
")",
"self",
".",
"info",
"=",
... | Decode existing GNTP Registration message
:param string data: Message to decode | [
"Decode",
"existing",
"GNTP",
"Registration",
"message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L317-L339 | train | 42,350 |
kfdm/gntp | gntp/core.py | GNTPRegister.add_notification | def add_notification(self, name, enabled=True):
"""Add new Notification to Registration message
:param string name: Notification Name
:param boolean enabled: Enable this notification by default
"""
notice = {}
notice['Notification-Name'] = name
notice['Notification-Enabled'] = enabled
self.notifications.append(notice)
self.add_header('Notifications-Count', len(self.notifications)) | python | def add_notification(self, name, enabled=True):
"""Add new Notification to Registration message
:param string name: Notification Name
:param boolean enabled: Enable this notification by default
"""
notice = {}
notice['Notification-Name'] = name
notice['Notification-Enabled'] = enabled
self.notifications.append(notice)
self.add_header('Notifications-Count', len(self.notifications)) | [
"def",
"add_notification",
"(",
"self",
",",
"name",
",",
"enabled",
"=",
"True",
")",
":",
"notice",
"=",
"{",
"}",
"notice",
"[",
"'Notification-Name'",
"]",
"=",
"name",
"notice",
"[",
"'Notification-Enabled'",
"]",
"=",
"enabled",
"self",
".",
"notific... | Add new Notification to Registration message
:param string name: Notification Name
:param boolean enabled: Enable this notification by default | [
"Add",
"new",
"Notification",
"to",
"Registration",
"message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L341-L352 | train | 42,351 |
kfdm/gntp | gntp/core.py | GNTPRegister.encode | def encode(self):
"""Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message. Returned as a byte string
"""
buff = _GNTPBuffer()
buff.writeln(self._format_info())
#Headers
for k, v in self.headers.items():
buff.writeheader(k, v)
buff.writeln()
#Notifications
if len(self.notifications) > 0:
for notice in self.notifications:
for k, v in notice.items():
buff.writeheader(k, v)
buff.writeln()
#Resources
for resource, data in self.resources.items():
buff.writeheader('Identifier', resource)
buff.writeheader('Length', len(data))
buff.writeln()
buff.write(data)
buff.writeln()
buff.writeln()
return buff.getvalue() | python | def encode(self):
"""Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message. Returned as a byte string
"""
buff = _GNTPBuffer()
buff.writeln(self._format_info())
#Headers
for k, v in self.headers.items():
buff.writeheader(k, v)
buff.writeln()
#Notifications
if len(self.notifications) > 0:
for notice in self.notifications:
for k, v in notice.items():
buff.writeheader(k, v)
buff.writeln()
#Resources
for resource, data in self.resources.items():
buff.writeheader('Identifier', resource)
buff.writeheader('Length', len(data))
buff.writeln()
buff.write(data)
buff.writeln()
buff.writeln()
return buff.getvalue() | [
"def",
"encode",
"(",
"self",
")",
":",
"buff",
"=",
"_GNTPBuffer",
"(",
")",
"buff",
".",
"writeln",
"(",
"self",
".",
"_format_info",
"(",
")",
")",
"#Headers",
"for",
"k",
",",
"v",
"in",
"self",
".",
"headers",
".",
"items",
"(",
")",
":",
"b... | Encode a GNTP Registration Message
:return string: Encoded GNTP Registration message. Returned as a byte string | [
"Encode",
"a",
"GNTP",
"Registration",
"Message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/core.py#L354-L385 | train | 42,352 |
kfdm/gntp | gntp/notifier.py | GrowlNotifier.register | def register(self):
"""Send GNTP Registration
.. warning::
Before sending notifications to Growl, you need to have
sent a registration message at least once
"""
logger.info('Sending registration to %s:%s', self.hostname, self.port)
register = gntp.core.GNTPRegister()
register.add_header('Application-Name', self.applicationName)
for notification in self.notifications:
enabled = notification in self.defaultNotifications
register.add_notification(notification, enabled)
if self.applicationIcon:
if self._checkIcon(self.applicationIcon):
register.add_header('Application-Icon', self.applicationIcon)
else:
resource = register.add_resource(self.applicationIcon)
register.add_header('Application-Icon', resource)
if self.password:
register.set_password(self.password, self.passwordHash)
self.add_origin_info(register)
self.register_hook(register)
return self._send('register', register) | python | def register(self):
"""Send GNTP Registration
.. warning::
Before sending notifications to Growl, you need to have
sent a registration message at least once
"""
logger.info('Sending registration to %s:%s', self.hostname, self.port)
register = gntp.core.GNTPRegister()
register.add_header('Application-Name', self.applicationName)
for notification in self.notifications:
enabled = notification in self.defaultNotifications
register.add_notification(notification, enabled)
if self.applicationIcon:
if self._checkIcon(self.applicationIcon):
register.add_header('Application-Icon', self.applicationIcon)
else:
resource = register.add_resource(self.applicationIcon)
register.add_header('Application-Icon', resource)
if self.password:
register.set_password(self.password, self.passwordHash)
self.add_origin_info(register)
self.register_hook(register)
return self._send('register', register) | [
"def",
"register",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Sending registration to %s:%s'",
",",
"self",
".",
"hostname",
",",
"self",
".",
"port",
")",
"register",
"=",
"gntp",
".",
"core",
".",
"GNTPRegister",
"(",
")",
"register",
".",
"a... | Send GNTP Registration
.. warning::
Before sending notifications to Growl, you need to have
sent a registration message at least once | [
"Send",
"GNTP",
"Registration"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/notifier.py#L75-L98 | train | 42,353 |
kfdm/gntp | gntp/notifier.py | GrowlNotifier.notify | def notify(self, noteType, title, description, icon=None, sticky=False,
priority=None, callback=None, identifier=None, custom={}):
"""Send a GNTP notifications
.. warning::
Must have registered with growl beforehand or messages will be ignored
:param string noteType: One of the notification names registered earlier
:param string title: Notification title (usually displayed on the notification)
:param string description: The main content of the notification
:param string icon: Icon URL path
:param boolean sticky: Sticky notification
:param integer priority: Message priority level from -2 to 2
:param string callback: URL callback
:param dict custom: Custom attributes. Key names should be prefixed with X-
according to the spec but this is not enforced by this class
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function
"""
logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
assert noteType in self.notifications
notice = gntp.core.GNTPNotice()
notice.add_header('Application-Name', self.applicationName)
notice.add_header('Notification-Name', noteType)
notice.add_header('Notification-Title', title)
if self.password:
notice.set_password(self.password, self.passwordHash)
if sticky:
notice.add_header('Notification-Sticky', sticky)
if priority:
notice.add_header('Notification-Priority', priority)
if icon:
if self._checkIcon(icon):
notice.add_header('Notification-Icon', icon)
else:
resource = notice.add_resource(icon)
notice.add_header('Notification-Icon', resource)
if description:
notice.add_header('Notification-Text', description)
if callback:
notice.add_header('Notification-Callback-Target', callback)
if identifier:
notice.add_header('Notification-Coalescing-ID', identifier)
for key in custom:
notice.add_header(key, custom[key])
self.add_origin_info(notice)
self.notify_hook(notice)
return self._send('notify', notice) | python | def notify(self, noteType, title, description, icon=None, sticky=False,
priority=None, callback=None, identifier=None, custom={}):
"""Send a GNTP notifications
.. warning::
Must have registered with growl beforehand or messages will be ignored
:param string noteType: One of the notification names registered earlier
:param string title: Notification title (usually displayed on the notification)
:param string description: The main content of the notification
:param string icon: Icon URL path
:param boolean sticky: Sticky notification
:param integer priority: Message priority level from -2 to 2
:param string callback: URL callback
:param dict custom: Custom attributes. Key names should be prefixed with X-
according to the spec but this is not enforced by this class
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function
"""
logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
assert noteType in self.notifications
notice = gntp.core.GNTPNotice()
notice.add_header('Application-Name', self.applicationName)
notice.add_header('Notification-Name', noteType)
notice.add_header('Notification-Title', title)
if self.password:
notice.set_password(self.password, self.passwordHash)
if sticky:
notice.add_header('Notification-Sticky', sticky)
if priority:
notice.add_header('Notification-Priority', priority)
if icon:
if self._checkIcon(icon):
notice.add_header('Notification-Icon', icon)
else:
resource = notice.add_resource(icon)
notice.add_header('Notification-Icon', resource)
if description:
notice.add_header('Notification-Text', description)
if callback:
notice.add_header('Notification-Callback-Target', callback)
if identifier:
notice.add_header('Notification-Coalescing-ID', identifier)
for key in custom:
notice.add_header(key, custom[key])
self.add_origin_info(notice)
self.notify_hook(notice)
return self._send('notify', notice) | [
"def",
"notify",
"(",
"self",
",",
"noteType",
",",
"title",
",",
"description",
",",
"icon",
"=",
"None",
",",
"sticky",
"=",
"False",
",",
"priority",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"custom",
"=",
"{... | Send a GNTP notifications
.. warning::
Must have registered with growl beforehand or messages will be ignored
:param string noteType: One of the notification names registered earlier
:param string title: Notification title (usually displayed on the notification)
:param string description: The main content of the notification
:param string icon: Icon URL path
:param boolean sticky: Sticky notification
:param integer priority: Message priority level from -2 to 2
:param string callback: URL callback
:param dict custom: Custom attributes. Key names should be prefixed with X-
according to the spec but this is not enforced by this class
.. warning::
For now, only URL callbacks are supported. In the future, the
callback argument will also support a function | [
"Send",
"a",
"GNTP",
"notifications"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/notifier.py#L100-L153 | train | 42,354 |
kfdm/gntp | gntp/notifier.py | GrowlNotifier.subscribe | def subscribe(self, id, name, port):
"""Send a Subscribe request to a remote machine"""
sub = gntp.core.GNTPSubscribe()
sub.add_header('Subscriber-ID', id)
sub.add_header('Subscriber-Name', name)
sub.add_header('Subscriber-Port', port)
if self.password:
sub.set_password(self.password, self.passwordHash)
self.add_origin_info(sub)
self.subscribe_hook(sub)
return self._send('subscribe', sub) | python | def subscribe(self, id, name, port):
"""Send a Subscribe request to a remote machine"""
sub = gntp.core.GNTPSubscribe()
sub.add_header('Subscriber-ID', id)
sub.add_header('Subscriber-Name', name)
sub.add_header('Subscriber-Port', port)
if self.password:
sub.set_password(self.password, self.passwordHash)
self.add_origin_info(sub)
self.subscribe_hook(sub)
return self._send('subscribe', sub) | [
"def",
"subscribe",
"(",
"self",
",",
"id",
",",
"name",
",",
"port",
")",
":",
"sub",
"=",
"gntp",
".",
"core",
".",
"GNTPSubscribe",
"(",
")",
"sub",
".",
"add_header",
"(",
"'Subscriber-ID'",
",",
"id",
")",
"sub",
".",
"add_header",
"(",
"'Subscr... | Send a Subscribe request to a remote machine | [
"Send",
"a",
"Subscribe",
"request",
"to",
"a",
"remote",
"machine"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/notifier.py#L155-L167 | train | 42,355 |
kfdm/gntp | gntp/notifier.py | GrowlNotifier.add_origin_info | def add_origin_info(self, packet):
"""Add optional Origin headers to message"""
packet.add_header('Origin-Machine-Name', platform.node())
packet.add_header('Origin-Software-Name', 'gntp.py')
packet.add_header('Origin-Software-Version', __version__)
packet.add_header('Origin-Platform-Name', platform.system())
packet.add_header('Origin-Platform-Version', platform.platform()) | python | def add_origin_info(self, packet):
"""Add optional Origin headers to message"""
packet.add_header('Origin-Machine-Name', platform.node())
packet.add_header('Origin-Software-Name', 'gntp.py')
packet.add_header('Origin-Software-Version', __version__)
packet.add_header('Origin-Platform-Name', platform.system())
packet.add_header('Origin-Platform-Version', platform.platform()) | [
"def",
"add_origin_info",
"(",
"self",
",",
"packet",
")",
":",
"packet",
".",
"add_header",
"(",
"'Origin-Machine-Name'",
",",
"platform",
".",
"node",
"(",
")",
")",
"packet",
".",
"add_header",
"(",
"'Origin-Software-Name'",
",",
"'gntp.py'",
")",
"packet",... | Add optional Origin headers to message | [
"Add",
"optional",
"Origin",
"headers",
"to",
"message"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/notifier.py#L169-L175 | train | 42,356 |
kfdm/gntp | gntp/notifier.py | GrowlNotifier._send | def _send(self, messagetype, packet):
"""Send the GNTP Packet"""
packet.validate()
data = packet.encode()
logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.socketTimeout)
try:
s.connect((self.hostname, self.port))
s.send(data)
recv_data = s.recv(1024)
while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
recv_data += s.recv(1024)
except socket.error:
# Python2.5 and Python3 compatibile exception
exc = sys.exc_info()[1]
raise errors.NetworkError(exc)
response = gntp.core.parse_gntp(recv_data)
s.close()
logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)
if type(response) == gntp.core.GNTPOK:
return True
logger.error('Invalid response: %s', response.error())
return response.error() | python | def _send(self, messagetype, packet):
"""Send the GNTP Packet"""
packet.validate()
data = packet.encode()
logger.debug('To : %s:%s <%s>\n%s', self.hostname, self.port, packet.__class__, data)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.socketTimeout)
try:
s.connect((self.hostname, self.port))
s.send(data)
recv_data = s.recv(1024)
while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
recv_data += s.recv(1024)
except socket.error:
# Python2.5 and Python3 compatibile exception
exc = sys.exc_info()[1]
raise errors.NetworkError(exc)
response = gntp.core.parse_gntp(recv_data)
s.close()
logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response)
if type(response) == gntp.core.GNTPOK:
return True
logger.error('Invalid response: %s', response.error())
return response.error() | [
"def",
"_send",
"(",
"self",
",",
"messagetype",
",",
"packet",
")",
":",
"packet",
".",
"validate",
"(",
")",
"data",
"=",
"packet",
".",
"encode",
"(",
")",
"logger",
".",
"debug",
"(",
"'To : %s:%s <%s>\\n%s'",
",",
"self",
".",
"hostname",
",",
"se... | Send the GNTP Packet | [
"Send",
"the",
"GNTP",
"Packet"
] | 772a5f4db3707ea0253691d930bf648d1344913a | https://github.com/kfdm/gntp/blob/772a5f4db3707ea0253691d930bf648d1344913a/gntp/notifier.py#L186-L215 | train | 42,357 |
openvax/pepdata | pepdata/iedb/alleles.py | local_path | def local_path(force_download=False):
"""Downloads allele database from IEDB, returns local path to XML file."""
return cache.fetch(
filename=ALLELE_XML_FILENAME,
url=ALLELE_XML_URL,
decompress=ALLELE_XML_DECOMPRESS,
force=force_download) | python | def local_path(force_download=False):
"""Downloads allele database from IEDB, returns local path to XML file."""
return cache.fetch(
filename=ALLELE_XML_FILENAME,
url=ALLELE_XML_URL,
decompress=ALLELE_XML_DECOMPRESS,
force=force_download) | [
"def",
"local_path",
"(",
"force_download",
"=",
"False",
")",
":",
"return",
"cache",
".",
"fetch",
"(",
"filename",
"=",
"ALLELE_XML_FILENAME",
",",
"url",
"=",
"ALLELE_XML_URL",
",",
"decompress",
"=",
"ALLELE_XML_DECOMPRESS",
",",
"force",
"=",
"force_downlo... | Downloads allele database from IEDB, returns local path to XML file. | [
"Downloads",
"allele",
"database",
"from",
"IEDB",
"returns",
"local",
"path",
"to",
"XML",
"file",
"."
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/alleles.py#L25-L31 | train | 42,358 |
openvax/pepdata | pepdata/iedb/alleles.py | delete | def delete():
"""Deletes local XML file"""
path = cache.local_path(
filename=ALLELE_XML_FILENAME,
url=ALLELE_XML_URL,
decompress=ALLELE_XML_DECOMPRESS)
os.remove(path) | python | def delete():
"""Deletes local XML file"""
path = cache.local_path(
filename=ALLELE_XML_FILENAME,
url=ALLELE_XML_URL,
decompress=ALLELE_XML_DECOMPRESS)
os.remove(path) | [
"def",
"delete",
"(",
")",
":",
"path",
"=",
"cache",
".",
"local_path",
"(",
"filename",
"=",
"ALLELE_XML_FILENAME",
",",
"url",
"=",
"ALLELE_XML_URL",
",",
"decompress",
"=",
"ALLELE_XML_DECOMPRESS",
")",
"os",
".",
"remove",
"(",
"path",
")"
] | Deletes local XML file | [
"Deletes",
"local",
"XML",
"file"
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/alleles.py#L33-L39 | train | 42,359 |
openvax/pepdata | pepdata/iedb/alleles.py | load_alleles | def load_alleles():
"""Parses the IEDB MhcAlleleName XML file and returns a list of Allele
namedtuple objects containing information about that each allele's HLA
class and source organism.
"""
result = []
path = local_path()
etree = xml.etree.ElementTree.parse(path)
for allele in etree.iterfind("MhcAlleleName"):
name_element = allele.find("DisplayedRestriction")
mhc_class_element = allele.find("Class")
# need at least a name and an HLA class
if name_element is None or mhc_class_element is None:
continue
name = name_element.text
synonyms = set([])
for synonym_element in allele.iterfind("Synonyms"):
for synonym in synonym_element.text.split(","):
synonyms.add(synonym.strip())
mhc_class = mhc_class_element.text
organism_element = allele.find("Organsim")
if organism_element is None:
organism = None
else:
organism = organism_element.text
locus_element = allele.find("Locus")
if locus_element is None:
locus = None
else:
locus = locus_element.text
allele_object = Allele(
name=name,
mhc_class=mhc_class,
locus=locus,
organism=organism,
synonyms=synonyms)
result.append(allele_object)
return result | python | def load_alleles():
"""Parses the IEDB MhcAlleleName XML file and returns a list of Allele
namedtuple objects containing information about that each allele's HLA
class and source organism.
"""
result = []
path = local_path()
etree = xml.etree.ElementTree.parse(path)
for allele in etree.iterfind("MhcAlleleName"):
name_element = allele.find("DisplayedRestriction")
mhc_class_element = allele.find("Class")
# need at least a name and an HLA class
if name_element is None or mhc_class_element is None:
continue
name = name_element.text
synonyms = set([])
for synonym_element in allele.iterfind("Synonyms"):
for synonym in synonym_element.text.split(","):
synonyms.add(synonym.strip())
mhc_class = mhc_class_element.text
organism_element = allele.find("Organsim")
if organism_element is None:
organism = None
else:
organism = organism_element.text
locus_element = allele.find("Locus")
if locus_element is None:
locus = None
else:
locus = locus_element.text
allele_object = Allele(
name=name,
mhc_class=mhc_class,
locus=locus,
organism=organism,
synonyms=synonyms)
result.append(allele_object)
return result | [
"def",
"load_alleles",
"(",
")",
":",
"result",
"=",
"[",
"]",
"path",
"=",
"local_path",
"(",
")",
"etree",
"=",
"xml",
".",
"etree",
".",
"ElementTree",
".",
"parse",
"(",
"path",
")",
"for",
"allele",
"in",
"etree",
".",
"iterfind",
"(",
"\"MhcAll... | Parses the IEDB MhcAlleleName XML file and returns a list of Allele
namedtuple objects containing information about that each allele's HLA
class and source organism. | [
"Parses",
"the",
"IEDB",
"MhcAlleleName",
"XML",
"file",
"and",
"returns",
"a",
"list",
"of",
"Allele",
"namedtuple",
"objects",
"containing",
"information",
"about",
"that",
"each",
"allele",
"s",
"HLA",
"class",
"and",
"source",
"organism",
"."
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/alleles.py#L50-L91 | train | 42,360 |
openvax/pepdata | pepdata/iedb/alleles.py | load_alleles_dict | def load_alleles_dict():
"""Create a dictionary mapping each unique allele name to a namedtuple
containing information about that alleles class, locus, species, &c.
"""
alleles = load_alleles()
result = {}
for allele in alleles:
for name in {allele.name}.union(allele.synonyms):
result[name] = allele
return result | python | def load_alleles_dict():
"""Create a dictionary mapping each unique allele name to a namedtuple
containing information about that alleles class, locus, species, &c.
"""
alleles = load_alleles()
result = {}
for allele in alleles:
for name in {allele.name}.union(allele.synonyms):
result[name] = allele
return result | [
"def",
"load_alleles_dict",
"(",
")",
":",
"alleles",
"=",
"load_alleles",
"(",
")",
"result",
"=",
"{",
"}",
"for",
"allele",
"in",
"alleles",
":",
"for",
"name",
"in",
"{",
"allele",
".",
"name",
"}",
".",
"union",
"(",
"allele",
".",
"synonyms",
"... | Create a dictionary mapping each unique allele name to a namedtuple
containing information about that alleles class, locus, species, &c. | [
"Create",
"a",
"dictionary",
"mapping",
"each",
"unique",
"allele",
"name",
"to",
"a",
"namedtuple",
"containing",
"information",
"about",
"that",
"alleles",
"class",
"locus",
"species",
"&c",
"."
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/alleles.py#L94-L103 | train | 42,361 |
kennedyshead/aioasuswrt | aioasuswrt/connection.py | SshConnection.async_run_command | async def async_run_command(self, command, retry=False):
"""Run commands through an SSH connection.
Connect to the SSH server if not currently connected, otherwise
use the existing connection.
"""
if not self.is_connected:
await self.async_connect()
try:
result = await asyncio.wait_for(self._client.run(
"%s && %s" % (_PATH_EXPORT_COMMAND, command)), 9)
except asyncssh.misc.ChannelOpenError:
if not retry:
await self.async_connect()
return self.async_run_command(command, retry=True)
else:
self._connected = False
_LOGGER.error("No connection to host")
return []
except TimeoutError:
del self._client
self._connected = False
_LOGGER.error("Host timeout.")
return []
self._connected = True
return result.stdout.split('\n') | python | async def async_run_command(self, command, retry=False):
"""Run commands through an SSH connection.
Connect to the SSH server if not currently connected, otherwise
use the existing connection.
"""
if not self.is_connected:
await self.async_connect()
try:
result = await asyncio.wait_for(self._client.run(
"%s && %s" % (_PATH_EXPORT_COMMAND, command)), 9)
except asyncssh.misc.ChannelOpenError:
if not retry:
await self.async_connect()
return self.async_run_command(command, retry=True)
else:
self._connected = False
_LOGGER.error("No connection to host")
return []
except TimeoutError:
del self._client
self._connected = False
_LOGGER.error("Host timeout.")
return []
self._connected = True
return result.stdout.split('\n') | [
"async",
"def",
"async_run_command",
"(",
"self",
",",
"command",
",",
"retry",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"is_connected",
":",
"await",
"self",
".",
"async_connect",
"(",
")",
"try",
":",
"result",
"=",
"await",
"asyncio",
".",
... | Run commands through an SSH connection.
Connect to the SSH server if not currently connected, otherwise
use the existing connection. | [
"Run",
"commands",
"through",
"an",
"SSH",
"connection",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L28-L54 | train | 42,362 |
kennedyshead/aioasuswrt | aioasuswrt/connection.py | SshConnection.async_connect | async def async_connect(self):
"""Fetches the client or creates a new one."""
kwargs = {
'username': self._username if self._username else None,
'client_keys': [self._ssh_key] if self._ssh_key else None,
'port': self._port,
'password': self._password if self._password else None,
'known_hosts': None
}
self._client = await asyncssh.connect(self._host, **kwargs)
self._connected = True | python | async def async_connect(self):
"""Fetches the client or creates a new one."""
kwargs = {
'username': self._username if self._username else None,
'client_keys': [self._ssh_key] if self._ssh_key else None,
'port': self._port,
'password': self._password if self._password else None,
'known_hosts': None
}
self._client = await asyncssh.connect(self._host, **kwargs)
self._connected = True | [
"async",
"def",
"async_connect",
"(",
"self",
")",
":",
"kwargs",
"=",
"{",
"'username'",
":",
"self",
".",
"_username",
"if",
"self",
".",
"_username",
"else",
"None",
",",
"'client_keys'",
":",
"[",
"self",
".",
"_ssh_key",
"]",
"if",
"self",
".",
"_... | Fetches the client or creates a new one. | [
"Fetches",
"the",
"client",
"or",
"creates",
"a",
"new",
"one",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L61-L73 | train | 42,363 |
kennedyshead/aioasuswrt | aioasuswrt/connection.py | TelnetConnection.async_run_command | async def async_run_command(self, command, first_try=True):
"""Run a command through a Telnet connection.
Connect to the Telnet server if not currently connected, otherwise
use the existing connection.
"""
await self.async_connect()
try:
with (await self._io_lock):
self._writer.write('{}\n'.format(
"%s && %s" % (
_PATH_EXPORT_COMMAND, command)).encode('ascii'))
data = ((await asyncio.wait_for(self._reader.readuntil(
self._prompt_string), 9)).split(b'\n')[1:-1])
except (BrokenPipeError, LimitOverrunError):
if first_try:
return await self.async_run_command(command, False)
else:
_LOGGER.warning("connection is lost to host.")
return[]
except TimeoutError:
_LOGGER.error("Host timeout.")
return []
finally:
self._writer.close()
return [line.decode('utf-8') for line in data] | python | async def async_run_command(self, command, first_try=True):
"""Run a command through a Telnet connection.
Connect to the Telnet server if not currently connected, otherwise
use the existing connection.
"""
await self.async_connect()
try:
with (await self._io_lock):
self._writer.write('{}\n'.format(
"%s && %s" % (
_PATH_EXPORT_COMMAND, command)).encode('ascii'))
data = ((await asyncio.wait_for(self._reader.readuntil(
self._prompt_string), 9)).split(b'\n')[1:-1])
except (BrokenPipeError, LimitOverrunError):
if first_try:
return await self.async_run_command(command, False)
else:
_LOGGER.warning("connection is lost to host.")
return[]
except TimeoutError:
_LOGGER.error("Host timeout.")
return []
finally:
self._writer.close()
return [line.decode('utf-8') for line in data] | [
"async",
"def",
"async_run_command",
"(",
"self",
",",
"command",
",",
"first_try",
"=",
"True",
")",
":",
"await",
"self",
".",
"async_connect",
"(",
")",
"try",
":",
"with",
"(",
"await",
"self",
".",
"_io_lock",
")",
":",
"self",
".",
"_writer",
"."... | Run a command through a Telnet connection.
Connect to the Telnet server if not currently connected, otherwise
use the existing connection. | [
"Run",
"a",
"command",
"through",
"a",
"Telnet",
"connection",
".",
"Connect",
"to",
"the",
"Telnet",
"server",
"if",
"not",
"currently",
"connected",
"otherwise",
"use",
"the",
"existing",
"connection",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L92-L118 | train | 42,364 |
kennedyshead/aioasuswrt | aioasuswrt/connection.py | TelnetConnection.async_connect | async def async_connect(self):
    """Connect to the ASUS-WRT Telnet server.

    On success, stores the detected shell prompt in ``self._prompt_string``
    and sets ``self._connected``.
    """
    self._reader, self._writer = await asyncio.open_connection(
        self._host, self._port)
    with (await self._io_lock):
        try:
            # Wait (max 9 s) for the login banner before sending credentials.
            await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
        except asyncio.streams.IncompleteReadError:
            _LOGGER.error(
                "Unable to read from router on %s:%s" % (
                    self._host, self._port))
            return
        except TimeoutError:
            # NOTE(review): only logged -- execution falls through and still
            # attempts the login handshake below; confirm this is intended.
            _LOGGER.error("Host timeout.")
        self._writer.write((self._username + '\n').encode('ascii'))
        await self._reader.readuntil(b'Password: ')
        self._writer.write((self._password + '\n').encode('ascii'))
        # Remember the prompt (last line of the read ending in '#') so
        # command output can later be read up to it.
        self._prompt_string = (await self._reader.readuntil(
            b'#')).split(b'\n')[-1]
        self._connected = True | python | async def async_connect(self):
"""Connect to the ASUS-WRT Telnet server."""
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port)
with (await self._io_lock):
try:
await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
except asyncio.streams.IncompleteReadError:
_LOGGER.error(
"Unable to read from router on %s:%s" % (
self._host, self._port))
return
except TimeoutError:
_LOGGER.error("Host timeout.")
self._writer.write((self._username + '\n').encode('ascii'))
await self._reader.readuntil(b'Password: ')
self._writer.write((self._password + '\n').encode('ascii'))
self._prompt_string = (await self._reader.readuntil(
b'#')).split(b'\n')[-1]
self._connected = True | [
"async",
"def",
"async_connect",
"(",
"self",
")",
":",
"self",
".",
"_reader",
",",
"self",
".",
"_writer",
"=",
"await",
"asyncio",
".",
"open_connection",
"(",
"self",
".",
"_host",
",",
"self",
".",
"_port",
")",
"with",
"(",
"await",
"self",
".",
... | Connect to the ASUS-WRT Telnet server. | [
"Connect",
"to",
"the",
"ASUS",
"-",
"WRT",
"Telnet",
"server",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L120-L142 | train | 42,365 |
kennedyshead/aioasuswrt | aioasuswrt/asuswrt.py | _parse_lines | async def _parse_lines(lines, regex):
    """Parse the lines using the given regular expression.
    If a line can't be parsed it is logged and skipped in the output.

    :param lines: iterable of strings (or a coroutine yielding them).
    :param regex: compiled pattern with named groups.
    :return: list of ``match.groupdict()`` dicts, one per parsed line.
    """
    results = []
    # NOTE(review): this awaits ``lines`` itself when it is a coroutine
    # *function*; one would expect ``await lines()`` -- verify with callers.
    if inspect.iscoroutinefunction(lines):
        lines = await lines
    for line in lines:
        if line:  # skip empty lines entirely
            match = regex.search(line)
            if not match:
                # Unparseable rows are logged at debug level and dropped.
                _LOGGER.debug("Could not parse row: %s", line)
                continue
            results.append(match.groupdict())
    return results | python | async def _parse_lines(lines, regex):
"""Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output.
"""
results = []
if inspect.iscoroutinefunction(lines):
lines = await lines
for line in lines:
if line:
match = regex.search(line)
if not match:
_LOGGER.debug("Could not parse row: %s", line)
continue
results.append(match.groupdict())
return results | [
"async",
"def",
"_parse_lines",
"(",
"lines",
",",
"regex",
")",
":",
"results",
"=",
"[",
"]",
"if",
"inspect",
".",
"iscoroutinefunction",
"(",
"lines",
")",
":",
"lines",
"=",
"await",
"lines",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
":",
... | Parse the lines using the given regular expression.
If a line can't be parsed it is logged and skipped in the output. | [
"Parse",
"the",
"lines",
"using",
"the",
"given",
"regular",
"expression",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/asuswrt.py#L59-L74 | train | 42,366 |
kennedyshead/aioasuswrt | aioasuswrt/asuswrt.py | AsusWrt.async_get_connected_devices | async def async_get_connected_devices(self):
    """Retrieve data from ASUSWRT.
    Calls various commands on the router and returns the superset of all
    responses. Some commands will not work on some routers.

    :return: dict of device entries, optionally restricted to devices
        that have an IP address (``self.require_ip``).
    """
    devices = {}
    # Merge results from each source; later sources overwrite earlier ones.
    dev = await self.async_get_wl()
    devices.update(dev)
    dev = await self.async_get_arp()
    devices.update(dev)
    dev = await self.async_get_neigh(devices)
    devices.update(dev)
    # Lease information is skipped when running in access-point mode.
    if not self.mode == 'ap':
        dev = await self.async_get_leases(devices)
        devices.update(dev)
    # Optionally drop entries that have no IP address.
    ret_devices = {}
    for key in devices:
        if not self.require_ip or devices[key].ip is not None:
            ret_devices[key] = devices[key]
    return ret_devices | python | async def async_get_connected_devices(self):
"""Retrieve data from ASUSWRT.
Calls various commands on the router and returns the superset of all
responses. Some commands will not work on some routers.
"""
devices = {}
dev = await self.async_get_wl()
devices.update(dev)
dev = await self.async_get_arp()
devices.update(dev)
dev = await self.async_get_neigh(devices)
devices.update(dev)
if not self.mode == 'ap':
dev = await self.async_get_leases(devices)
devices.update(dev)
ret_devices = {}
for key in devices:
if not self.require_ip or devices[key].ip is not None:
ret_devices[key] = devices[key]
return ret_devices | [
"async",
"def",
"async_get_connected_devices",
"(",
"self",
")",
":",
"devices",
"=",
"{",
"}",
"dev",
"=",
"await",
"self",
".",
"async_get_wl",
"(",
")",
"devices",
".",
"update",
"(",
"dev",
")",
"dev",
"=",
"await",
"self",
".",
"async_get_arp",
"(",... | Retrieve data from ASUSWRT.
Calls various commands on the router and returns the superset of all
responses. Some commands will not work on some routers. | [
"Retrieve",
"data",
"from",
"ASUSWRT",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/asuswrt.py#L159-L180 | train | 42,367 |
kennedyshead/aioasuswrt | aioasuswrt/asuswrt.py | AsusWrt.async_get_current_transfer_rates | async def async_get_current_transfer_rates(self, use_cache=True):
    """Get the current transfer rates, in bytes per second.

    :param use_cache: forwarded to ``async_get_bytes_total``.
    :return: tuple ``(rx_rate, tx_rate)`` in bytes/second.
    """
    now = datetime.utcnow()
    data = await self.async_get_bytes_total(use_cache)
    # First sample: seed the counters and return the previous (initial)
    # value of ``self._latest_transfer_data``.
    if self._rx_latest is None or self._tx_latest is None:
        self._latest_transfer_check = now
        self._rx_latest = data[0]
        self._tx_latest = data[1]
        return self._latest_transfer_data
    # Throttle: recompute at most once every 30 seconds.
    time_diff = now - self._latest_transfer_check
    if time_diff.total_seconds() < 30:
        return self._latest_transfer_data
    # If a counter went backwards it presumably wrapped/reset; fall back
    # to the raw counter value instead of a negative delta.
    if data[0] < self._rx_latest:
        rx = data[0]
    else:
        rx = data[0] - self._rx_latest
    if data[1] < self._tx_latest:
        tx = data[1]
    else:
        tx = data[1] - self._tx_latest
    self._latest_transfer_check = now
    self._rx_latest = data[0]
    self._tx_latest = data[1]
    # Round rates up to whole bytes/second; clamp negatives/zeros to 0.
    self._latest_transfer_data = (
        math.ceil(rx / time_diff.total_seconds()) if rx > 0 else 0,
        math.ceil(tx / time_diff.total_seconds()) if tx > 0 else 0)
    return self._latest_transfer_data | python | async def async_get_current_transfer_rates(self, use_cache=True):
"""Gets current transfer rates calculated in per second in bytes."""
now = datetime.utcnow()
data = await self.async_get_bytes_total(use_cache)
if self._rx_latest is None or self._tx_latest is None:
self._latest_transfer_check = now
self._rx_latest = data[0]
self._tx_latest = data[1]
return self._latest_transfer_data
time_diff = now - self._latest_transfer_check
if time_diff.total_seconds() < 30:
return self._latest_transfer_data
if data[0] < self._rx_latest:
rx = data[0]
else:
rx = data[0] - self._rx_latest
if data[1] < self._tx_latest:
tx = data[1]
else:
tx = data[1] - self._tx_latest
self._latest_transfer_check = now
self._rx_latest = data[0]
self._tx_latest = data[1]
self._latest_transfer_data = (
math.ceil(rx / time_diff.total_seconds()) if rx > 0 else 0,
math.ceil(tx / time_diff.total_seconds()) if tx > 0 else 0)
return self._latest_transfer_data | [
"async",
"def",
"async_get_current_transfer_rates",
"(",
"self",
",",
"use_cache",
"=",
"True",
")",
":",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"data",
"=",
"await",
"self",
".",
"async_get_bytes_total",
"(",
"use_cache",
")",
"if",
"self",
".",
... | Gets current transfer rates calculated in per second in bytes. | [
"Gets",
"current",
"transfer",
"rates",
"calculated",
"in",
"per",
"second",
"in",
"bytes",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/asuswrt.py#L203-L233 | train | 42,368 |
kennedyshead/aioasuswrt | aioasuswrt/asuswrt.py | AsusWrt.async_current_transfer_human_readable | async def async_current_transfer_human_readable(
        self, use_cache=True):
    """Gets current transfer rates in a human readable format.

    :param use_cache: forwarded to ``async_get_current_transfer_rates``.
    :return: tuple ``(rx, tx)`` of ``convert_size`` strings with "/s"
        appended.
    """
    rx, tx = await self.async_get_current_transfer_rates(use_cache)
    return "%s/s" % convert_size(rx), "%s/s" % convert_size(tx) | python | async def async_current_transfer_human_readable(
self, use_cache=True):
"""Gets current transfer rates in a human readable format."""
rx, tx = await self.async_get_current_transfer_rates(use_cache)
return "%s/s" % convert_size(rx), "%s/s" % convert_size(tx) | [
"async",
"def",
"async_current_transfer_human_readable",
"(",
"self",
",",
"use_cache",
"=",
"True",
")",
":",
"rx",
",",
"tx",
"=",
"await",
"self",
".",
"async_get_current_transfer_rates",
"(",
"use_cache",
")",
"return",
"\"%s/s\"",
"%",
"convert_size",
"(",
... | Gets current transfer rates in a human readable format. | [
"Gets",
"current",
"transfer",
"rates",
"in",
"a",
"human",
"readable",
"format",
"."
] | 0c4336433727abbb7b324ee29e4c5382be9aaa2b | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/asuswrt.py#L235-L240 | train | 42,369 |
openvax/pepdata | pepdata/iedb/tcell.py | load_dataframe | def load_dataframe(
        mhc_class=None,  # 1, 2, or None for neither
        hla=None,
        exclude_hla=None,
        human_only=False,
        peptide_length=None,
        assay_method=None,
        assay_group=None,
        only_standard_amino_acids=True,
        reduced_alphabet=None,  # 20 letter AA strings -> simpler alphabet
        nrows=None):
    """
    Load IEDB T-cell data without aggregating multiple entries for same epitope

    Parameters
    ----------
    mhc_class: {None, 1, 2}
        Restrict to MHC Class I or Class II (or None for neither)

    hla: regex pattern, optional
        Restrict results to specific HLA type used in assay

    exclude_hla: regex pattern, optional
        Exclude certain HLA types

    human_only: bool
        Restrict to human samples (default False)

    peptide_length: int, optional
        Restrict epitopes to amino acid strings of given length

    assay_method : string, optional
        Only collect results with assay methods containing the given string

    assay_group: string, optional
        Only collect results with assay groups containing the given string

    only_standard_amino_acids : bool, optional
        Drop sequences which use non-standard amino acids, anything outside
        the core 20, such as X or U (default = True)

    reduced_alphabet: dictionary, optional
        Remap amino acid letters to some other alphabet

    nrows: int, optional
        Don't load the full IEDB dataset but instead read only the first nrows

    Returns
    -------
    pandas.DataFrame
        Filtered IEDB T-cell entries (two-level column index).
    """
    # NOTE(review): ``reduced_alphabet`` is accepted but not used in this
    # body -- verify whether it is consumed elsewhere or dead.
    path = local_path()
    df = pd.read_csv(
        path,
        header=[0, 1],
        skipinitialspace=True,
        nrows=nrows,
        low_memory=False,
        error_bad_lines=False,
        encoding="latin-1")
    # NOTE(review): debug leftover -- consider removing or using logging.
    print(df.head())
    # Sometimes the IEDB seems to put in an extra comma in the
    # header line, which creates an unnamed column of NaNs.
    # To deal with this, drop any columns which are all NaN
    df = df.dropna(axis=1, how="all")
    n = len(df)
    # Column keys are (level-0, level-1) tuples because of the two-row header.
    epitope_column_key = ("Epitope", "Description")
    mhc_allele_column_key = ("MHC", "Allele Name")
    assay_group_column_key = ("Assay", "Assay Group")
    assay_method_column_key = ("Assay", "Method/Technique")
    epitopes = df[epitope_column_key].str.upper()
    # Start the row mask by dropping missing epitope sequences.
    null_epitope_seq = epitopes.isnull()
    n_null = null_epitope_seq.sum()
    if n_null > 0:
        logging.info("Dropping %d null sequences", n_null)
    mask = ~null_epitope_seq
    if only_standard_amino_acids:
        # if have rare or unknown amino acids, drop the sequence
        bad_epitope_seq = \
            epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
        n_bad = bad_epitope_seq.sum()
        if n_bad > 0:
            logging.info("Dropping %d bad sequences", n_bad)
        mask &= ~bad_epitope_seq
    if human_only:
        # NOTE(review): single-level key against a two-level header --
        # verify this column lookup works as intended.
        organism = df['Host Organism Name']
        mask &= organism.str.startswith('Homo sapiens', na=False).astype('bool')
    # Match known alleles such as "HLA-A*02:01",
    # broader groupings such as "HLA-A2"
    # and unknown alleles of the MHC-1 listed either as
    # "HLA-Class I,allele undetermined"
    # or
    # "Class I,allele undetermined"
    mhc = df[mhc_allele_column_key]
    if mhc_class is not None:
        # since MHC classes can be specified as either strings ("I") or
        # integers, standardize them to strings here
        if mhc_class == 1:
            mhc_class = "I"
        elif mhc_class == 2:
            mhc_class = "II"
        if mhc_class not in {"I", "II"}:
            raise ValueError("Invalid MHC class: %s" % mhc_class)
        # Keep only rows whose allele is known to belong to the wanted class.
        allele_dict = load_alleles_dict()
        mhc_class_mask = [False] * len(df)
        for i, allele_name in enumerate(mhc):
            allele_object = allele_dict.get(allele_name)
            if allele_object and allele_object.mhc_class == mhc_class:
                mhc_class_mask[i] = True
        mask &= np.array(mhc_class_mask)
    if hla:
        mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
    if exclude_hla:
        mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
    if assay_group:
        mask &= df[assay_group_column_key].str.contains(assay_group)
    if assay_method:
        mask &= df[assay_method_column_key].str.contains(assay_method)
    if peptide_length:
        assert peptide_length > 0
        mask &= df[epitope_column_key].str.len() == peptide_length
    df = df[mask]
    logging.info("Returning %d / %d entries after filtering", len(df), n)
    return df | python | def load_dataframe(
mhc_class=None, # 1, 2, or None for neither
hla=None,
exclude_hla=None,
human_only=False,
peptide_length=None,
assay_method=None,
assay_group=None,
only_standard_amino_acids=True,
reduced_alphabet=None, # 20 letter AA strings -> simpler alphabet
nrows=None):
"""
Load IEDB T-cell data without aggregating multiple entries for same epitope
Parameters
----------
mhc_class: {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla: regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla: regex pattern, optional
Exclude certain HLA types
human_only: bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method string, optional
Only collect results with assay methods containing the given string
assay_group: string, optional
Only collect results with assay groups containing the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet: dictionary, optional
Remap amino acid letters to some other alphabet
nrows: int, optional
Don't load the full IEDB dataset but instead read only the first nrows
"""
path = local_path()
df = pd.read_csv(
path,
header=[0, 1],
skipinitialspace=True,
nrows=nrows,
low_memory=False,
error_bad_lines=False,
encoding="latin-1")
print(df.head())
# Sometimes the IEDB seems to put in an extra comma in the
# header line, which creates an unnamed column of NaNs.
# To deal with this, drop any columns which are all NaN
df = df.dropna(axis=1, how="all")
n = len(df)
epitope_column_key = ("Epitope", "Description")
mhc_allele_column_key = ("MHC", "Allele Name")
assay_group_column_key = ("Assay", "Assay Group")
assay_method_column_key = ("Assay", "Method/Technique")
epitopes = df[epitope_column_key].str.upper()
null_epitope_seq = epitopes.isnull()
n_null = null_epitope_seq.sum()
if n_null > 0:
logging.info("Dropping %d null sequences", n_null)
mask = ~null_epitope_seq
if only_standard_amino_acids:
# if have rare or unknown amino acids, drop the sequence
bad_epitope_seq = \
epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
n_bad = bad_epitope_seq.sum()
if n_bad > 0:
logging.info("Dropping %d bad sequences", n_bad)
mask &= ~bad_epitope_seq
if human_only:
organism = df['Host Organism Name']
mask &= organism.str.startswith('Homo sapiens', na=False).astype('bool')
# Match known alleles such as "HLA-A*02:01",
# broader groupings such as "HLA-A2"
# and unknown alleles of the MHC-1 listed either as
# "HLA-Class I,allele undetermined"
# or
# "Class I,allele undetermined"
mhc = df[mhc_allele_column_key]
if mhc_class is not None:
# since MHC classes can be specified as either strings ("I") or integers
# standard them to be strings
if mhc_class == 1:
mhc_class = "I"
elif mhc_class == 2:
mhc_class = "II"
if mhc_class not in {"I", "II"}:
raise ValueError("Invalid MHC class: %s" % mhc_class)
allele_dict = load_alleles_dict()
mhc_class_mask = [False] * len(df)
for i, allele_name in enumerate(mhc):
allele_object = allele_dict.get(allele_name)
if allele_object and allele_object.mhc_class == mhc_class:
mhc_class_mask[i] = True
mask &= np.array(mhc_class_mask)
if hla:
mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
if exclude_hla:
mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
if assay_group:
mask &= df[assay_group_column_key].str.contains(assay_group)
if assay_method:
mask &= df[assay_method_column_key].str.contains(assay_method)
if peptide_length:
assert peptide_length > 0
mask &= df[epitope_column_key].str.len() == peptide_length
df = df[mask]
logging.info("Returning %d / %d entries after filtering", len(df), n)
return df | [
"def",
"load_dataframe",
"(",
"mhc_class",
"=",
"None",
",",
"# 1, 2, or None for neither",
"hla",
"=",
"None",
",",
"exclude_hla",
"=",
"None",
",",
"human_only",
"=",
"False",
",",
"peptide_length",
"=",
"None",
",",
"assay_method",
"=",
"None",
",",
"assay_... | Load IEDB T-cell data without aggregating multiple entries for same epitope
Parameters
----------
mhc_class: {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla: regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla: regex pattern, optional
Exclude certain HLA types
human_only: bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method string, optional
Only collect results with assay methods containing the given string
assay_group: string, optional
Only collect results with assay groups containing the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet: dictionary, optional
Remap amino acid letters to some other alphabet
nrows: int, optional
Don't load the full IEDB dataset but instead read only the first nrows | [
"Load",
"IEDB",
"T",
"-",
"cell",
"data",
"without",
"aggregating",
"multiple",
"entries",
"for",
"same",
"epitope"
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/tcell.py#L54-L192 | train | 42,370 |
openvax/pepdata | pepdata/iedb/mhc.py | load_dataframe | def load_dataframe(
        mhc_class=None,  # 1, 2, or None for neither
        hla=None,
        exclude_hla=None,
        human_only=False,
        peptide_length=None,
        assay_method=None,
        assay_group=None,
        only_standard_amino_acids=True,
        reduced_alphabet=None,  # 20 letter AA strings -> simpler alphabet
        warn_bad_lines=True,
        nrows=None):
    """
    Load IEDB MHC data without aggregating multiple entries for the same epitope

    Parameters
    ----------
    mhc_class : {None, 1, 2}
        Restrict to MHC Class I or Class II (or None for neither)

    hla : regex pattern, optional
        Restrict results to specific HLA type used in assay

    exclude_hla : regex pattern, optional
        Exclude certain HLA types

    human_only : bool
        Restrict to human samples (default False)

    peptide_length : int, optional
        Restrict epitopes to amino acid strings of given length

    assay_method : string, optional
        Limit to assay methods which contain the given string

    assay_group : string, optional
        Limit to assay groups which contain the given string

    only_standard_amino_acids : bool, optional
        Drop sequences which use non-standard amino acids, anything outside
        the core 20, such as X or U (default = True)

    reduced_alphabet : dictionary, optional
        Remap amino acid letters to some other alphabet

    warn_bad_lines : bool, optional
        The full MHC ligand dataset seems to contain several dozen lines with
        too many fields. This currently results in a lot of warning messages
        from Pandas, which you can turn off with this option (default = True)

    nrows : int, optional
        Don't load the full IEDB dataset but instead read only the first nrows

    Returns
    -------
    pandas.DataFrame
        Filtered IEDB MHC ligand entries (two-level column index).
    """
    # NOTE(review): ``reduced_alphabet`` is accepted but not used in this
    # body -- verify whether it is consumed elsewhere or dead.
    df = pd.read_csv(
        local_path(),
        header=[0, 1],
        skipinitialspace=True,
        nrows=nrows,
        low_memory=False,
        error_bad_lines=False,
        encoding="latin-1",
        warn_bad_lines=warn_bad_lines)
    # Sometimes the IEDB seems to put in an extra comma in the
    # header line, which creates an unnamed column of NaNs.
    # To deal with this, drop any columns which are all NaN
    df = df.dropna(axis=1, how="all")
    n = len(df)
    # Column keys are (level-0, level-1) tuples because of the two-row header.
    epitope_column_key = ("Epitope", "Description")
    mhc_allele_column_key = ("MHC", "Allele Name")
    # Uppercase epitope sequences in the frame itself and keep a local handle.
    epitopes = df[epitope_column_key] = df[epitope_column_key].str.upper()
    null_epitope_seq = epitopes.isnull()
    n_null = null_epitope_seq.sum()
    if n_null > 0:
        logging.info("Dropping %d null sequences", n_null)
    mask = ~null_epitope_seq
    if only_standard_amino_acids:
        # if have rare or unknown amino acids, drop the sequence
        bad_epitope_seq = \
            epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
        n_bad = bad_epitope_seq.sum()
        if n_bad > 0:
            logging.info("Dropping %d bad sequences", n_bad)
        mask &= ~bad_epitope_seq
    if human_only:
        # Human alleles are the ones prefixed with "HLA".
        mask &= df[mhc_allele_column_key].str.startswith("HLA").astype("bool")
    # NOTE(review): unlike the T-cell loader, string values "I"/"II" are
    # not accepted here -- only the integers 1 and 2 select a class.
    if mhc_class == 1:
        mask &= df["MHC"]["MHC allele class"] == "I"
    elif mhc_class == 2:
        mask &= df["MHC"]["MHC allele class"] == "II"
    if hla:
        mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
    if exclude_hla:
        mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
    if assay_group:
        mask &= df["Assay"]["Assay Group"].str.contains(assay_group)
    if assay_method:
        mask &= df["Assay"]["Method/Technique"].str.contains(assay_method)
    if peptide_length:
        assert peptide_length > 0
        mask &= df[epitope_column_key].str.len() == peptide_length
    # Copy so later mutation doesn't operate on a view of the full frame.
    df = df[mask].copy()
    logging.info("Returning %d / %d entries after filtering", len(df), n)
    return df | python | def load_dataframe(
mhc_class=None, # 1, 2, or None for neither
hla=None,
exclude_hla=None,
human_only=False,
peptide_length=None,
assay_method=None,
assay_group=None,
only_standard_amino_acids=True,
reduced_alphabet=None, # 20 letter AA strings -> simpler alphabet
warn_bad_lines=True,
nrows=None):
"""
Load IEDB MHC data without aggregating multiple entries for the same epitope
Parameters
----------
mhc_class : {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla : regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla : regex pattern, optional
Exclude certain HLA types
human_only : bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method : string, optional
Limit to assay methods which contain the given string
assay_group : string, optional
Limit to assay groups which contain the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet : dictionary, optional
Remap amino acid letters to some other alphabet
warn_bad_lines : bool, optional
The full MHC ligand dataset seems to contain several dozen lines with
too many fields. This currently results in a lot of warning messages
from Pandas, which you can turn off with this option (default = True)
nrows : int, optional
Don't load the full IEDB dataset but instead read only the first nrows
"""
df = pd.read_csv(
local_path(),
header=[0, 1],
skipinitialspace=True,
nrows=nrows,
low_memory=False,
error_bad_lines=False,
encoding="latin-1",
warn_bad_lines=warn_bad_lines)
# Sometimes the IEDB seems to put in an extra comma in the
# header line, which creates an unnamed column of NaNs.
# To deal with this, drop any columns which are all NaN
df = df.dropna(axis=1, how="all")
n = len(df)
epitope_column_key = ("Epitope", "Description")
mhc_allele_column_key = ("MHC", "Allele Name")
epitopes = df[epitope_column_key] = df[epitope_column_key].str.upper()
null_epitope_seq = epitopes.isnull()
n_null = null_epitope_seq.sum()
if n_null > 0:
logging.info("Dropping %d null sequences", n_null)
mask = ~null_epitope_seq
if only_standard_amino_acids:
# if have rare or unknown amino acids, drop the sequence
bad_epitope_seq = \
epitopes.str.contains(bad_amino_acids, na=False).astype("bool")
n_bad = bad_epitope_seq.sum()
if n_bad > 0:
logging.info("Dropping %d bad sequences", n_bad)
mask &= ~bad_epitope_seq
if human_only:
mask &= df[mhc_allele_column_key].str.startswith("HLA").astype("bool")
if mhc_class == 1:
mask &= df["MHC"]["MHC allele class"] == "I"
elif mhc_class == 2:
mask &= df["MHC"]["MHC allele class"] == "II"
if hla:
mask &= df[mhc_allele_column_key].str.contains(hla, na=False)
if exclude_hla:
mask &= ~(df[mhc_allele_column_key].str.contains(exclude_hla, na=False))
if assay_group:
mask &= df["Assay"]["Assay Group"].str.contains(assay_group)
if assay_method:
mask &= df["Assay"]["Method/Technique"].str.contains(assay_method)
if peptide_length:
assert peptide_length > 0
mask &= df[epitope_column_key].str.len() == peptide_length
df = df[mask].copy()
logging.info("Returning %d / %d entries after filtering", len(df), n)
return df | [
"def",
"load_dataframe",
"(",
"mhc_class",
"=",
"None",
",",
"# 1, 2, or None for neither",
"hla",
"=",
"None",
",",
"exclude_hla",
"=",
"None",
",",
"human_only",
"=",
"False",
",",
"peptide_length",
"=",
"None",
",",
"assay_method",
"=",
"None",
",",
"assay_... | Load IEDB MHC data without aggregating multiple entries for the same epitope
Parameters
----------
mhc_class : {None, 1, 2}
Restrict to MHC Class I or Class II (or None for neither)
hla : regex pattern, optional
Restrict results to specific HLA type used in assay
exclude_hla : regex pattern, optional
Exclude certain HLA types
human_only : bool
Restrict to human samples (default False)
peptide_length: int, optional
Restrict epitopes to amino acid strings of given length
assay_method : string, optional
Limit to assay methods which contain the given string
assay_group : string, optional
Limit to assay groups which contain the given string
only_standard_amino_acids : bool, optional
Drop sequences which use non-standard amino acids, anything outside
the core 20, such as X or U (default = True)
reduced_alphabet : dictionary, optional
Remap amino acid letters to some other alphabet
warn_bad_lines : bool, optional
The full MHC ligand dataset seems to contain several dozen lines with
too many fields. This currently results in a lot of warning messages
from Pandas, which you can turn off with this option (default = True)
nrows : int, optional
Don't load the full IEDB dataset but instead read only the first nrows | [
"Load",
"IEDB",
"MHC",
"data",
"without",
"aggregating",
"multiple",
"entries",
"for",
"the",
"same",
"epitope"
] | 2f1bad79f8084545227f4a7f895bbf08a6fb6fdc | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/mhc.py#L51-L171 | train | 42,371 |
101Loop/drf-addons | drfaddons/admin.py | CreateUpdateAdmin.get_fieldsets | def get_fieldsets(self, request, obj=None):
    """
    Add ownership info fields in fieldset with proper separation.

    Returns the parent fieldsets plus, when applicable, one extra
    fieldset labelled ``self.ownership_info['label']``.

    Author: Himanshu Shankar (https://himanshus.com)
    """
    fieldsets = list(super(CreateUpdateAdmin, self).get_fieldsets(
        request=request, obj=obj))
    # Create sets for future use
    fields = set()
    to_add = set()
    # Prepare a set of existing fields in fieldset
    for fs in fieldsets:
        fields = fields.union(fs[1]['fields'])
    # Loop over ownership info fields
    for k, v in self.ownership_info['fields'].items():
        # Check if current model has k attribute
        # and field k is not already in fieldset
        # and field k has not been excluded
        if (hasattr(self.model, k)
                and k not in fields
                and (not self.exclude
                     or (self.exclude and k not in self.exclude))):
            # Now, let's hide fields in add form, it will be empty
            # Check if readonly property is not True
            # or this is an edit form (obj is set when editing)
            if ('readonly' in v and not v['readonly']) or obj:
                to_add.add(k)
    # If to_add set is not empty, add ownership info to fieldset
    if len(to_add) > 0:
        fieldsets.append((self.ownership_info['label'],
                          {'fields': tuple(to_add)}))
    return tuple(fieldsets) | python | def get_fieldsets(self, request, obj=None):
"""
Add ownership info fields in fieldset with proper separation.
Author: Himanshu Shankar (https://himanshus.com)
"""
fieldsets = list(super(CreateUpdateAdmin, self).get_fieldsets(
request=request, obj=obj))
# Create sets for future use
fields = set()
to_add = set()
# Prepare a set of existing fields in fieldset
for fs in fieldsets:
fields = fields.union(fs[1]['fields'])
# Loop over ownership info fields
for k, v in self.ownership_info['fields'].items():
# Check if current model has k attribute
# and field k is not already in fieldset
# and field k has not been excluded
if (hasattr(self.model, k)
and k not in fields
and (not self.exclude
or (self.exclude and k not in self.exclude))):
# Now, let's hide fields in add form, it will be empty
# Check if readonly property is not True
# or this is an edit form
if ('readonly' in v and not v['readonly']) or obj:
to_add.add(k)
# If to_add set is not empty, add ownership info to fieldset
if len(to_add) > 0:
fieldsets.append((self.ownership_info['label'],
{'fields': tuple(to_add)}))
return tuple(fieldsets) | [
"def",
"get_fieldsets",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"fieldsets",
"=",
"list",
"(",
"super",
"(",
"CreateUpdateAdmin",
",",
"self",
")",
".",
"get_fieldsets",
"(",
"request",
"=",
"request",
",",
"obj",
"=",
"obj",
")... | Add ownership info fields in fieldset with proper separation.
Author: Himanshu Shankar (https://himanshus.com) | [
"Add",
"ownership",
"info",
"fields",
"in",
"fieldset",
"with",
"proper",
"separation",
"."
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/admin.py#L83-L121 | train | 42,372 |
101Loop/drf-addons | drfaddons/admin.py | CreateUpdateAdmin.get_readonly_fields | def get_readonly_fields(self, request, obj=None):
    """
    Makes `created_by`, `create_date` & `update_date` readonly when
    editing.

    Returns the parent read-only fields plus any ownership-info field
    marked ``readonly`` that the model actually has.

    Author: Himanshu Shankar (https://himanshus.com)
    """
    # Get read only fields from super
    fields = list(super(CreateUpdateAdmin, self).get_readonly_fields(
        request=request, obj=obj))
    # Loop over ownership info field
    for k, v in self.ownership_info['fields'].items():
        # Check if model has k attribute
        # and field k is readonly
        # and k is not already in fields
        # and k is not in excluded field
        # (if not checked, form.Meta.exclude has same field twice)
        if (hasattr(self.model, k)
                and ('readonly' in v and v['readonly'])
                and k not in fields
                and (not self.exclude
                     or (self.exclude and k not in self.exclude))):
            fields.append(k)
    return tuple(fields) | python | def get_readonly_fields(self, request, obj=None):
"""
Makes `created_by`, `create_date` & `update_date` readonly when
editing.
Author: Himanshu Shankar (https://himanshus.com)
"""
# Get read only fields from super
fields = list(super(CreateUpdateAdmin, self).get_readonly_fields(
request=request, obj=obj))
# Loop over ownership info field
for k, v in self.ownership_info['fields'].items():
# Check if model has k attribute
# and field k is readonly
# and k is not already in fields
# and k is not in excluded field
# (if not checked, form.Meta.exclude has same field twice)
if (hasattr(self.model, k)
and ('readonly' in v and v['readonly'])
and k not in fields
and (not self.exclude
or (self.exclude and k not in self.exclude))):
fields.append(k)
return tuple(fields) | [
"def",
"get_readonly_fields",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"# Get read only fields from super",
"fields",
"=",
"list",
"(",
"super",
"(",
"CreateUpdateAdmin",
",",
"self",
")",
".",
"get_readonly_fields",
"(",
"request",
"=",
... | Makes `created_by`, `create_date` & `update_date` readonly when
editing.
Author: Himanshu Shankar (https://himanshus.com) | [
"Makes",
"created_by",
"create_date",
"&",
"update_date",
"readonly",
"when",
"editing",
"."
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/admin.py#L123-L149 | train | 42,373 |
101Loop/drf-addons | drfaddons/auth.py | JSONWebTokenAuthenticationQS.get_authorization | def get_authorization(self, request):
    """
    This function extracts the authorization JWT string. It first
    looks for the specified key in the request body and then falls
    back to the request header.

    Parameters
    ----------
    request: HttpRequest
        This is the raw request that user has sent.

    Returns
    -------
    auth: bytes
        Return request's 'JWT_AUTH_KEY:' content from body or
        Header, as a bytestring.
        Hide some test client ickyness where the header can be unicode.
    """
    # Local imports -- presumably to avoid import-time dependency on
    # Django/DRF at module load; verify against project conventions.
    from django.utils.six import text_type
    from rest_framework import HTTP_HEADER_ENCODING
    # Body value (self.key) wins; header (self.header_key) is the fallback.
    auth = request.data.get(self.key, b'') or request.META.get(
        self.header_key, b'')
    if isinstance(auth, text_type):
        # Work around django test client oddness
        auth = auth.encode(HTTP_HEADER_ENCODING)
    return auth | python | def get_authorization(self, request):
"""
This function extracts the authorization JWT string. It first
looks for specified key in header and then looks
for the same in body part.
Parameters
----------
request: HttpRequest
This is the raw request that user has sent.
Returns
-------
auth: str
Return request's 'JWT_AUTH_KEY:' content from body or
Header, as a bytestring.
Hide some test client ickyness where the header can be unicode.
"""
from django.utils.six import text_type
from rest_framework import HTTP_HEADER_ENCODING
auth = request.data.get(self.key, b'') or request.META.get(
self.header_key, b'')
if isinstance(auth, text_type):
# Work around django test client oddness
auth = auth.encode(HTTP_HEADER_ENCODING)
return auth | [
"def",
"get_authorization",
"(",
"self",
",",
"request",
")",
":",
"from",
"django",
".",
"utils",
".",
"six",
"import",
"text_type",
"from",
"rest_framework",
"import",
"HTTP_HEADER_ENCODING",
"auth",
"=",
"request",
".",
"data",
".",
"get",
"(",
"self",
".... | This function extracts the authorization JWT string. It first
looks for specified key in header and then looks
for the same in body part.
Parameters
----------
request: HttpRequest
This is the raw request that user has sent.
Returns
-------
auth: str
Return request's 'JWT_AUTH_KEY:' content from body or
Header, as a bytestring.
Hide some test client ickyness where the header can be unicode. | [
"This",
"function",
"extracts",
"the",
"authorization",
"JWT",
"string",
".",
"It",
"first",
"looks",
"for",
"specified",
"key",
"in",
"header",
"and",
"then",
"looks",
"for",
"the",
"same",
"in",
"body",
"part",
"."
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/auth.py#L27-L55 | train | 42,374 |
frnmst/md-toc | md_toc/__main__.py | main | def main(args=None):
"""Call the CLI interface and wait for the result."""
retcode = 0
try:
ci = CliInterface()
args = ci.parser.parse_args()
result = args.func(args)
if result is not None:
print(result)
retcode = 0
except Exception:
retcode = 1
traceback.print_exc()
sys.exit(retcode) | python | def main(args=None):
"""Call the CLI interface and wait for the result."""
retcode = 0
try:
ci = CliInterface()
args = ci.parser.parse_args()
result = args.func(args)
if result is not None:
print(result)
retcode = 0
except Exception:
retcode = 1
traceback.print_exc()
sys.exit(retcode) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"retcode",
"=",
"0",
"try",
":",
"ci",
"=",
"CliInterface",
"(",
")",
"args",
"=",
"ci",
".",
"parser",
".",
"parse_args",
"(",
")",
"result",
"=",
"args",
".",
"func",
"(",
"args",
")",
"if",
... | Call the CLI interface and wait for the result. | [
"Call",
"the",
"CLI",
"interface",
"and",
"wait",
"for",
"the",
"result",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/__main__.py#L28-L41 | train | 42,375 |
frnmst/md-toc | md_toc/cli.py | CliToApi.write_toc | def write_toc(self, args):
"""Write the table of contents."""
# FIXME: Can this logic be moved into the create_parser function?
ordered = False
if args.ordered_list_marker is not None:
list_marker = args.ordered_list_marker
ordered = True
elif args.unordered_list_marker is not None:
list_marker = args.unordered_list_marker
else:
list_marker = md_parser[
args.parser]['list']['unordered']['default_marker']
toc_struct = build_multiple_tocs(
filenames=args.filename,
ordered=ordered,
no_links=args.no_links,
no_indentation=args.no_indentation,
no_list_coherence=args.no_list_coherence,
keep_header_levels=int(args.header_levels),
parser=args.parser,
list_marker=list_marker)
if args.in_place:
write_strings_on_files_between_markers(
filenames=args.filename,
strings=toc_struct,
marker=args.toc_marker)
else:
for toc in toc_struct:
print(toc, end='') | python | def write_toc(self, args):
"""Write the table of contents."""
# FIXME: Can this logic be moved into the create_parser function?
ordered = False
if args.ordered_list_marker is not None:
list_marker = args.ordered_list_marker
ordered = True
elif args.unordered_list_marker is not None:
list_marker = args.unordered_list_marker
else:
list_marker = md_parser[
args.parser]['list']['unordered']['default_marker']
toc_struct = build_multiple_tocs(
filenames=args.filename,
ordered=ordered,
no_links=args.no_links,
no_indentation=args.no_indentation,
no_list_coherence=args.no_list_coherence,
keep_header_levels=int(args.header_levels),
parser=args.parser,
list_marker=list_marker)
if args.in_place:
write_strings_on_files_between_markers(
filenames=args.filename,
strings=toc_struct,
marker=args.toc_marker)
else:
for toc in toc_struct:
print(toc, end='') | [
"def",
"write_toc",
"(",
"self",
",",
"args",
")",
":",
"# FIXME: Can this logic be moved into the create_parser function?",
"ordered",
"=",
"False",
"if",
"args",
".",
"ordered_list_marker",
"is",
"not",
"None",
":",
"list_marker",
"=",
"args",
".",
"ordered_list_mar... | Write the table of contents. | [
"Write",
"the",
"table",
"of",
"contents",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/cli.py#L46-L75 | train | 42,376 |
theatlantic/python-monkey-business | monkeybiz/__init__.py | patch | def patch(func=None, obj=None, name=None, avoid_doublewrap=True):
"""
Decorator for monkeypatching functions on modules and classes.
Example::
# This replaces FooClass.bar with our method
@monkeybiz.patch(FooClass)
def bar(original_bar, *args, **kwargs):
print "Patched!"
return original_bar(*args, **kwargs)
# This replaces FooClass.bar and foomodule.bar with our method
@monkeybiz.patch([FooClass, foomodule])
def bar(original_bar, *args, **kwargs):
#...
The first argument to ``monkeybiz.patch`` can be either a module, a class,
or a list of modules and/or classes. The decorator also takes optional
``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is
omitted, the name of the function being patched will be the name of the
function being decorated. If ``avoid_doublewrap`` is True (the default),
then functions and methods can only be patched once using this function.
Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its
original.
"""
if obj is None:
if isinstance(func, (type, ModuleType)):
obj = func
func = None
elif isinstance(func, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in func]):
obj = func
func = None
if func is None:
return functools.partial(patch, obj=obj, name=name, avoid_doublewrap=avoid_doublewrap)
if name is None:
name = func.__name__
if isinstance(obj, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in obj]):
return [patch(func=func, obj=o, name=name, avoid_doublewrap=avoid_doublewrap) for o in obj]
if not isinstance(obj, (ModuleType, type)):
raise ValueError(
"Argument passed to @patch decorator must be a "
"class or module, or a list of classes and modules")
try:
call = getattr(obj, name)
except AttributeError:
raise TypeError("%(func_repr)s does not exist" % {
'func_repr': '.'.join(
filter(None, [
getattr(obj, '__module__', None),
obj.__name__,
func.__name__],
)),
})
# optionally avoid multiple identical wrappings
if avoid_doublewrap and getattr(call, 'wrapper', None) is func:
return
# get underlying function (if it's an unbound method)
try:
original_callable = six.get_method_function(call)
except AttributeError:
original_callable = call
@six.wraps(func)
def wrapper(*args, **kwargs):
return func(original_callable, *args, **kwargs)
# set attributes, for future unwrapping and to avoid double-wrapping
wrapper.original = call
wrapper.wrapper = func
if six.PY2 and inspect.isclass(obj):
# rewrap staticmethod and classmethod specifically (iff obj is a class)
if hasattr(call, 'im_self'):
if call.im_self:
wrapper = classmethod(wrapper)
else:
wrapper = staticmethod(wrapper)
# finally, install the func closure as requested
setattr(obj, name, wrapper)
return getattr(obj, name) | python | def patch(func=None, obj=None, name=None, avoid_doublewrap=True):
"""
Decorator for monkeypatching functions on modules and classes.
Example::
# This replaces FooClass.bar with our method
@monkeybiz.patch(FooClass)
def bar(original_bar, *args, **kwargs):
print "Patched!"
return original_bar(*args, **kwargs)
# This replaces FooClass.bar and foomodule.bar with our method
@monkeybiz.patch([FooClass, foomodule])
def bar(original_bar, *args, **kwargs):
#...
The first argument to ``monkeybiz.patch`` can be either a module, a class,
or a list of modules and/or classes. The decorator also takes optional
``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is
omitted, the name of the function being patched will be the name of the
function being decorated. If ``avoid_doublewrap`` is True (the default),
then functions and methods can only be patched once using this function.
Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its
original.
"""
if obj is None:
if isinstance(func, (type, ModuleType)):
obj = func
func = None
elif isinstance(func, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in func]):
obj = func
func = None
if func is None:
return functools.partial(patch, obj=obj, name=name, avoid_doublewrap=avoid_doublewrap)
if name is None:
name = func.__name__
if isinstance(obj, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in obj]):
return [patch(func=func, obj=o, name=name, avoid_doublewrap=avoid_doublewrap) for o in obj]
if not isinstance(obj, (ModuleType, type)):
raise ValueError(
"Argument passed to @patch decorator must be a "
"class or module, or a list of classes and modules")
try:
call = getattr(obj, name)
except AttributeError:
raise TypeError("%(func_repr)s does not exist" % {
'func_repr': '.'.join(
filter(None, [
getattr(obj, '__module__', None),
obj.__name__,
func.__name__],
)),
})
# optionally avoid multiple identical wrappings
if avoid_doublewrap and getattr(call, 'wrapper', None) is func:
return
# get underlying function (if it's an unbound method)
try:
original_callable = six.get_method_function(call)
except AttributeError:
original_callable = call
@six.wraps(func)
def wrapper(*args, **kwargs):
return func(original_callable, *args, **kwargs)
# set attributes, for future unwrapping and to avoid double-wrapping
wrapper.original = call
wrapper.wrapper = func
if six.PY2 and inspect.isclass(obj):
# rewrap staticmethod and classmethod specifically (iff obj is a class)
if hasattr(call, 'im_self'):
if call.im_self:
wrapper = classmethod(wrapper)
else:
wrapper = staticmethod(wrapper)
# finally, install the func closure as requested
setattr(obj, name, wrapper)
return getattr(obj, name) | [
"def",
"patch",
"(",
"func",
"=",
"None",
",",
"obj",
"=",
"None",
",",
"name",
"=",
"None",
",",
"avoid_doublewrap",
"=",
"True",
")",
":",
"if",
"obj",
"is",
"None",
":",
"if",
"isinstance",
"(",
"func",
",",
"(",
"type",
",",
"ModuleType",
")",
... | Decorator for monkeypatching functions on modules and classes.
Example::
# This replaces FooClass.bar with our method
@monkeybiz.patch(FooClass)
def bar(original_bar, *args, **kwargs):
print "Patched!"
return original_bar(*args, **kwargs)
# This replaces FooClass.bar and foomodule.bar with our method
@monkeybiz.patch([FooClass, foomodule])
def bar(original_bar, *args, **kwargs):
#...
The first argument to ``monkeybiz.patch`` can be either a module, a class,
or a list of modules and/or classes. The decorator also takes optional
``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is
omitted, the name of the function being patched will be the name of the
function being decorated. If ``avoid_doublewrap`` is True (the default),
then functions and methods can only be patched once using this function.
Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its
original. | [
"Decorator",
"for",
"monkeypatching",
"functions",
"on",
"modules",
"and",
"classes",
"."
] | 07e3bfbf0f7cd6add59a2a395bf750c6a48d0c1e | https://github.com/theatlantic/python-monkey-business/blob/07e3bfbf0f7cd6add59a2a395bf750c6a48d0c1e/monkeybiz/__init__.py#L16-L105 | train | 42,377 |
101Loop/drf-addons | drfaddons/utils.py | get_mobile_number | def get_mobile_number(mobile):
"""
Returns a mobile number after removing blanks
Author: Himanshu Shankar (https://himanshus.com)
Parameters
----------
mobile: str
Returns
-------
str
"""
blanks = [' ', '.', ',', '(', ')', '-']
for b in blanks:
mobile = mobile.replace(b, '')
return mobile | python | def get_mobile_number(mobile):
"""
Returns a mobile number after removing blanks
Author: Himanshu Shankar (https://himanshus.com)
Parameters
----------
mobile: str
Returns
-------
str
"""
blanks = [' ', '.', ',', '(', ')', '-']
for b in blanks:
mobile = mobile.replace(b, '')
return mobile | [
"def",
"get_mobile_number",
"(",
"mobile",
")",
":",
"blanks",
"=",
"[",
"' '",
",",
"'.'",
",",
"','",
",",
"'('",
",",
"')'",
",",
"'-'",
"]",
"for",
"b",
"in",
"blanks",
":",
"mobile",
"=",
"mobile",
".",
"replace",
"(",
"b",
",",
"''",
")",
... | Returns a mobile number after removing blanks
Author: Himanshu Shankar (https://himanshus.com)
Parameters
----------
mobile: str
Returns
-------
str | [
"Returns",
"a",
"mobile",
"number",
"after",
"removing",
"blanks"
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/utils.py#L105-L123 | train | 42,378 |
101Loop/drf-addons | drfaddons/permissions.py | IAWPOrSuperuser.has_object_permission | def has_object_permission(self, request, view, obj):
"""
Checks if user is superuser or it has permission over object
Parameters
----------
request
view
obj
Returns
-------
"""
return (
request.user.is_superuser or
super(IAWPOrSuperuser, self).has_object_permission(
request=request, view=view, obj=obj
)
) | python | def has_object_permission(self, request, view, obj):
"""
Checks if user is superuser or it has permission over object
Parameters
----------
request
view
obj
Returns
-------
"""
return (
request.user.is_superuser or
super(IAWPOrSuperuser, self).has_object_permission(
request=request, view=view, obj=obj
)
) | [
"def",
"has_object_permission",
"(",
"self",
",",
"request",
",",
"view",
",",
"obj",
")",
":",
"return",
"(",
"request",
".",
"user",
".",
"is_superuser",
"or",
"super",
"(",
"IAWPOrSuperuser",
",",
"self",
")",
".",
"has_object_permission",
"(",
"request",... | Checks if user is superuser or it has permission over object
Parameters
----------
request
view
obj
Returns
------- | [
"Checks",
"if",
"user",
"is",
"superuser",
"or",
"it",
"has",
"permission",
"over",
"object"
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/permissions.py#L63-L82 | train | 42,379 |
frnmst/md-toc | md_toc/api.py | write_string_on_file_between_markers | def write_string_on_file_between_markers(filename: str, string: str,
marker: str):
r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception.
"""
if filename == '-':
raise StdinIsNotAFileToBeWritten
final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
marker_line_positions = fpyutils.get_line_matches(
filename, marker, 2, loose_matching=True)
if 1 in marker_line_positions:
if 2 in marker_line_positions:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[2], filename)
else:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[1], filename)
fpyutils.insert_string_at_line(
filename,
final_string,
marker_line_positions[1],
filename,
append=False) | python | def write_string_on_file_between_markers(filename: str, string: str,
marker: str):
r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception.
"""
if filename == '-':
raise StdinIsNotAFileToBeWritten
final_string = marker + '\n\n' + string.rstrip() + '\n\n' + marker + '\n'
marker_line_positions = fpyutils.get_line_matches(
filename, marker, 2, loose_matching=True)
if 1 in marker_line_positions:
if 2 in marker_line_positions:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[2], filename)
else:
fpyutils.remove_line_interval(filename, marker_line_positions[1],
marker_line_positions[1], filename)
fpyutils.insert_string_at_line(
filename,
final_string,
marker_line_positions[1],
filename,
append=False) | [
"def",
"write_string_on_file_between_markers",
"(",
"filename",
":",
"str",
",",
"string",
":",
"str",
",",
"marker",
":",
"str",
")",
":",
"if",
"filename",
"==",
"'-'",
":",
"raise",
"StdinIsNotAFileToBeWritten",
"final_string",
"=",
"marker",
"+",
"'\\n\\n'",... | r"""Write the table of contents on a single file.
:parameter filename: the file that needs to be read or modified.
:parameter string: the string that will be written on the file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: str
:type string: str
:type marker: str
:returns: None
:rtype: None
:raises: StdinIsNotAFileToBeWritten or an fpyutils exception
or a built-in exception. | [
"r",
"Write",
"the",
"table",
"of",
"contents",
"on",
"a",
"single",
"file",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L35-L70 | train | 42,380 |
frnmst/md-toc | md_toc/api.py | write_strings_on_files_between_markers | def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1 | python | def write_strings_on_files_between_markers(filenames: list, strings: list,
marker: str):
r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception.
"""
assert len(filenames) == len(strings)
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(strings) > 0:
for s in strings:
assert isinstance(s, str)
file_id = 0
for f in filenames:
write_string_on_file_between_markers(f, strings[file_id], marker)
file_id += 1 | [
"def",
"write_strings_on_files_between_markers",
"(",
"filenames",
":",
"list",
",",
"strings",
":",
"list",
",",
"marker",
":",
"str",
")",
":",
"assert",
"len",
"(",
"filenames",
")",
"==",
"len",
"(",
"strings",
")",
"if",
"len",
"(",
"filenames",
")",
... | r"""Write the table of contents on multiple files.
:parameter filenames: the files that needs to be read or modified.
:parameter strings: the strings that will be written on the file. Each
string is associated with one file.
:parameter marker: a marker that will identify the start
and the end of the string.
:type filenames: list
:type string: list
:type marker: str
:returns: None
:rtype: None
:raises: an fpyutils exception or a built-in exception. | [
"r",
"Write",
"the",
"table",
"of",
"contents",
"on",
"multiple",
"files",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L73-L100 | train | 42,381 |
frnmst/md-toc | md_toc/api.py | build_toc | def build_toc(filename: str,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception.
"""
toc = str()
header_type_counter = dict()
header_type_curr = 0
header_type_prev = 0
header_duplicate_counter = dict()
no_of_indentation_spaces_prev = 0
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
if filename == '-':
f = sys.stdin
else:
f = open(filename, 'r')
line = f.readline()
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
else:
list_marker_log = list()
is_within_code_fence = False
code_fence = None
is_document_end = False
if not no_indentation and not no_list_coherence:
# if indentation and list coherence.
indentation_list = build_indentation_list(parser)
while line:
# Document ending detection.
#
# This changes the state of is_within_code_fence if the
# file has no closing fence markers. This serves no practial
# purpose since the code would run correctly anyway. It is
# however more sematically correct.
#
# See the unit tests (examples 95 and 96 of the github parser)
# and the is_closing_code_fence function.
if filename != '-':
# stdin is not seekable.
file_pointer_pos = f.tell()
if f.readline() == str():
is_document_end = True
f.seek(file_pointer_pos)
# Code fence detection.
if is_within_code_fence:
is_within_code_fence = not is_closing_code_fence(
line, code_fence, is_document_end, parser)
line = f.readline()
else:
code_fence = is_opening_code_fence(line, parser)
if code_fence is not None:
# Update the status of the next line.
is_within_code_fence = True
line = f.readline()
if not is_within_code_fence or code_fence is None:
# Header detection and gathering.
header = get_md_header(line, header_duplicate_counter,
keep_header_levels, parser, no_links)
if header is not None:
header_type_curr = header['type']
# Take care of the ordered TOC.
if ordered:
increase_index_ordered_list(header_type_counter,
header_type_prev,
header_type_curr, parser)
index = header_type_counter[header_type_curr]
else:
index = 1
# Take care of list indentations.
if no_indentation:
no_of_indentation_spaces_curr = 0
# TOC list coherence checks are not necessary
# without indentation.
else:
if not no_list_coherence:
if not toc_renders_as_coherent_list(
header_type_curr, indentation_list, parser):
raise TocDoesNotRenderAsCoherentList
no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
header_type_curr, header_type_prev,
no_of_indentation_spaces_prev, parser, ordered,
list_marker, list_marker_log, index)
# Build a single TOC line.
toc_line_no_indent = build_toc_line_without_indentation(
header, ordered, no_links, index, parser, list_marker)
# Save the TOC line with the indentation.
toc += build_toc_line(toc_line_no_indent,
no_of_indentation_spaces_curr) + '\n'
header_type_prev = header_type_curr
no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
# endif
# endif
line = f.readline()
# endwhile
f.close()
return toc | python | def build_toc(filename: str,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception.
"""
toc = str()
header_type_counter = dict()
header_type_curr = 0
header_type_prev = 0
header_duplicate_counter = dict()
no_of_indentation_spaces_prev = 0
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
if filename == '-':
f = sys.stdin
else:
f = open(filename, 'r')
line = f.readline()
if ordered:
list_marker_log = build_list_marker_log(parser, list_marker)
else:
list_marker_log = list()
is_within_code_fence = False
code_fence = None
is_document_end = False
if not no_indentation and not no_list_coherence:
# if indentation and list coherence.
indentation_list = build_indentation_list(parser)
while line:
# Document ending detection.
#
# This changes the state of is_within_code_fence if the
# file has no closing fence markers. This serves no practial
# purpose since the code would run correctly anyway. It is
# however more sematically correct.
#
# See the unit tests (examples 95 and 96 of the github parser)
# and the is_closing_code_fence function.
if filename != '-':
# stdin is not seekable.
file_pointer_pos = f.tell()
if f.readline() == str():
is_document_end = True
f.seek(file_pointer_pos)
# Code fence detection.
if is_within_code_fence:
is_within_code_fence = not is_closing_code_fence(
line, code_fence, is_document_end, parser)
line = f.readline()
else:
code_fence = is_opening_code_fence(line, parser)
if code_fence is not None:
# Update the status of the next line.
is_within_code_fence = True
line = f.readline()
if not is_within_code_fence or code_fence is None:
# Header detection and gathering.
header = get_md_header(line, header_duplicate_counter,
keep_header_levels, parser, no_links)
if header is not None:
header_type_curr = header['type']
# Take care of the ordered TOC.
if ordered:
increase_index_ordered_list(header_type_counter,
header_type_prev,
header_type_curr, parser)
index = header_type_counter[header_type_curr]
else:
index = 1
# Take care of list indentations.
if no_indentation:
no_of_indentation_spaces_curr = 0
# TOC list coherence checks are not necessary
# without indentation.
else:
if not no_list_coherence:
if not toc_renders_as_coherent_list(
header_type_curr, indentation_list, parser):
raise TocDoesNotRenderAsCoherentList
no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces(
header_type_curr, header_type_prev,
no_of_indentation_spaces_prev, parser, ordered,
list_marker, list_marker_log, index)
# Build a single TOC line.
toc_line_no_indent = build_toc_line_without_indentation(
header, ordered, no_links, index, parser, list_marker)
# Save the TOC line with the indentation.
toc += build_toc_line(toc_line_no_indent,
no_of_indentation_spaces_curr) + '\n'
header_type_prev = header_type_curr
no_of_indentation_spaces_prev = no_of_indentation_spaces_curr
# endif
# endif
line = f.readline()
# endwhile
f.close()
return toc | [
"def",
"build_toc",
"(",
"filename",
":",
"str",
",",
"ordered",
":",
"bool",
"=",
"False",
",",
"no_links",
":",
"bool",
"=",
"False",
",",
"no_indentation",
":",
"bool",
"=",
"False",
",",
"no_list_coherence",
":",
"bool",
"=",
"False",
",",
"keep_head... | r"""Build the table of contents of a single file.
:parameter filename: the file that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filename: str
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc, the corresponding table of contents of the file.
:rtype: str
:raises: a built-in exception. | [
"r",
"Build",
"the",
"table",
"of",
"contents",
"of",
"a",
"single",
"file",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L103-L240 | train | 42,382 |
frnmst/md-toc | md_toc/api.py | build_multiple_tocs | def build_multiple_tocs(filenames: list,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> list:
r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception.
"""
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(filenames) == 0:
filenames.append('-')
file_id = 0
toc_struct = list()
while file_id < len(filenames):
toc_struct.append(
build_toc(filenames[file_id], ordered, no_links, no_indentation,
no_list_coherence, keep_header_levels, parser,
list_marker))
file_id += 1
return toc_struct | python | def build_multiple_tocs(filenames: list,
ordered: bool = False,
no_links: bool = False,
no_indentation: bool = False,
no_list_coherence: bool = False,
keep_header_levels: int = 3,
parser: str = 'github',
list_marker: str = '-') -> list:
r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception.
"""
if len(filenames) > 0:
for f in filenames:
assert isinstance(f, str)
if len(filenames) == 0:
filenames.append('-')
file_id = 0
toc_struct = list()
while file_id < len(filenames):
toc_struct.append(
build_toc(filenames[file_id], ordered, no_links, no_indentation,
no_list_coherence, keep_header_levels, parser,
list_marker))
file_id += 1
return toc_struct | [
"def",
"build_multiple_tocs",
"(",
"filenames",
":",
"list",
",",
"ordered",
":",
"bool",
"=",
"False",
",",
"no_links",
":",
"bool",
"=",
"False",
",",
"no_indentation",
":",
"bool",
"=",
"False",
",",
"no_list_coherence",
":",
"bool",
"=",
"False",
",",
... | r"""Parse files by line and build the table of contents of each file.
:parameter filenames: the files that needs to be read.
:parameter ordered: decides whether to build an ordered list or not.
Defaults to ``False``.
:parameter no_links: disables the use of links.
Defaults to ``False``.
:parameter no_indentation: disables indentation in the list.
Defaults to ``False``.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type filenames: list
:type ordered: bool
:type no_links: bool
:type no_indentation: bool
:type keep_header_levels: int
:type parser: str
:returns: toc_struct, the corresponding table of contents for each input
file.
:rtype: list
:raises: a built-in exception. | [
"r",
"Parse",
"files",
"by",
"line",
"and",
"build",
"the",
"table",
"of",
"contents",
"of",
"each",
"file",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L243-L291 | train | 42,383 |
frnmst/md-toc | md_toc/api.py | increase_index_ordered_list | def increase_index_ordered_list(header_type_count: dict,
header_type_prev: int,
header_type_curr: int,
parser: str = 'github'):
r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception.
"""
# header_type_prev might be 0 while header_type_curr can't.
assert header_type_prev >= 0
assert header_type_curr >= 1
# Base cases for a new table of contents or a new index type.
if header_type_prev == 0:
header_type_prev = header_type_curr
if (header_type_curr not in header_type_count
or header_type_prev < header_type_curr):
header_type_count[header_type_curr] = 0
header_type_count[header_type_curr] += 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_count[header_type_curr] > md_parser['github']['list'][
'ordered']['max_marker_number']:
raise GithubOverflowOrderedListMarker | python | def increase_index_ordered_list(header_type_count: dict,
header_type_prev: int,
header_type_curr: int,
parser: str = 'github'):
r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception.
"""
# header_type_prev might be 0 while header_type_curr can't.
assert header_type_prev >= 0
assert header_type_curr >= 1
# Base cases for a new table of contents or a new index type.
if header_type_prev == 0:
header_type_prev = header_type_curr
if (header_type_curr not in header_type_count
or header_type_prev < header_type_curr):
header_type_count[header_type_curr] = 0
header_type_count[header_type_curr] += 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_count[header_type_curr] > md_parser['github']['list'][
'ordered']['max_marker_number']:
raise GithubOverflowOrderedListMarker | [
"def",
"increase_index_ordered_list",
"(",
"header_type_count",
":",
"dict",
",",
"header_type_prev",
":",
"int",
",",
"header_type_curr",
":",
"int",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
":",
"# header_type_prev might be 0 while header_type_curr can't.",
"a... | r"""Compute the current index for ordered list table of contents.
:parameter header_type_count: the count of each header type.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers.
Defaults to ``github``.
:type header_type_count: dict
:type header_type_prev: int
:type header_type_curr: int
:type parser: str
:returns: None
:rtype: None
:raises: GithubOverflowOrderedListMarker or a built-in exception. | [
"r",
"Compute",
"the",
"current",
"index",
"for",
"ordered",
"list",
"table",
"of",
"contents",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L294-L330 | train | 42,384 |
frnmst/md-toc | md_toc/api.py | build_list_marker_log | def build_list_marker_log(parser: str = 'github',
list_marker: str = '.') -> list:
r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
list_marker_log = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
list_marker_log = [
str(md_parser['github']['list']['ordered']['min_marker_number']) +
list_marker
for i in range(0, md_parser['github']['header']['max_levels'])
]
elif parser == 'redcarpet':
pass
return list_marker_log | python | def build_list_marker_log(parser: str = 'github',
list_marker: str = '.') -> list:
r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
list_marker_log = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
list_marker_log = [
str(md_parser['github']['list']['ordered']['min_marker_number']) +
list_marker
for i in range(0, md_parser['github']['header']['max_levels'])
]
elif parser == 'redcarpet':
pass
return list_marker_log | [
"def",
"build_list_marker_log",
"(",
"parser",
":",
"str",
"=",
"'github'",
",",
"list_marker",
":",
"str",
"=",
"'.'",
")",
"->",
"list",
":",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"parser",
"==",
"'cmark'",
"or",
"parser",
"==",
"'gitlab'",
"or"... | r"""Create a data structure that holds list marker information.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type parser: str
:type list_marker: str
:returns: list_marker_log, the data structure.
:rtype: list
:raises: a built-in exception.
.. note::
This function makes sense for ordered lists only. | [
"r",
"Create",
"a",
"data",
"structure",
"that",
"holds",
"list",
"marker",
"information",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L333-L368 | train | 42,385 |
frnmst/md-toc | md_toc/api.py | compute_toc_line_indentation_spaces | def compute_toc_line_indentation_spaces(
header_type_curr: int = 1,
header_type_prev: int = 0,
no_of_indentation_spaces_prev: int = 0,
parser: str = 'github',
ordered: bool = False,
list_marker: str = '-',
list_marker_log: list = build_list_marker_log('github', '.'),
index: int = 1) -> int:
r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids or otherwise a dash character, otherwise.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces.
"""
assert header_type_curr >= 1
assert header_type_prev >= 0
assert no_of_indentation_spaces_prev >= 0
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if ordered:
assert len(
list_marker_log) == md_parser['github']['header']['max_levels']
for e in list_marker_log:
assert isinstance(e, str)
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_prev == 0:
# Base case for the first toc line.
no_of_indentation_spaces_curr = 0
elif header_type_curr == header_type_prev:
# Base case for same indentation.
no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
else:
if ordered:
list_marker_prev = str(list_marker_log[header_type_curr - 1])
else:
# list_marker for unordered lists will always be 1 character.
list_marker_prev = list_marker
# Generic cases.
if header_type_curr > header_type_prev:
# More indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev + len(list_marker_prev) +
len(' '))
elif header_type_curr < header_type_prev:
# Less indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev -
(len(list_marker_prev) + len(' ')))
# Reset older nested list indices. If this is not performed then
# future nested ordered lists will rely on incorrect data to
# compute indentations.
if ordered:
for i in range((header_type_curr - 1) + 1,
md_parser['github']['header']['max_levels']):
list_marker_log[i] = str(
md_parser['github']['list']['ordered']
['min_marker_number']) + list_marker
# Update the data structure.
if ordered:
list_marker_log[header_type_curr - 1] = str(index) + list_marker
elif parser == 'redcarpet':
no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
return no_of_indentation_spaces_curr | python | def compute_toc_line_indentation_spaces(
header_type_curr: int = 1,
header_type_prev: int = 0,
no_of_indentation_spaces_prev: int = 0,
parser: str = 'github',
ordered: bool = False,
list_marker: str = '-',
list_marker_log: list = build_list_marker_log('github', '.'),
index: int = 1) -> int:
r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids or otherwise a dash character, otherwise.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces.
"""
assert header_type_curr >= 1
assert header_type_prev >= 0
assert no_of_indentation_spaces_prev >= 0
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if ordered:
assert len(
list_marker_log) == md_parser['github']['header']['max_levels']
for e in list_marker_log:
assert isinstance(e, str)
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
if header_type_prev == 0:
# Base case for the first toc line.
no_of_indentation_spaces_curr = 0
elif header_type_curr == header_type_prev:
# Base case for same indentation.
no_of_indentation_spaces_curr = no_of_indentation_spaces_prev
else:
if ordered:
list_marker_prev = str(list_marker_log[header_type_curr - 1])
else:
# list_marker for unordered lists will always be 1 character.
list_marker_prev = list_marker
# Generic cases.
if header_type_curr > header_type_prev:
# More indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev + len(list_marker_prev) +
len(' '))
elif header_type_curr < header_type_prev:
# Less indentation.
no_of_indentation_spaces_curr = (
no_of_indentation_spaces_prev -
(len(list_marker_prev) + len(' ')))
# Reset older nested list indices. If this is not performed then
# future nested ordered lists will rely on incorrect data to
# compute indentations.
if ordered:
for i in range((header_type_curr - 1) + 1,
md_parser['github']['header']['max_levels']):
list_marker_log[i] = str(
md_parser['github']['list']['ordered']
['min_marker_number']) + list_marker
# Update the data structure.
if ordered:
list_marker_log[header_type_curr - 1] = str(index) + list_marker
elif parser == 'redcarpet':
no_of_indentation_spaces_curr = 4 * (header_type_curr - 1)
return no_of_indentation_spaces_curr | [
"def",
"compute_toc_line_indentation_spaces",
"(",
"header_type_curr",
":",
"int",
"=",
"1",
",",
"header_type_prev",
":",
"int",
"=",
"0",
",",
"no_of_indentation_spaces_prev",
":",
"int",
"=",
"0",
",",
"parser",
":",
"str",
"=",
"'github'",
",",
"ordered",
... | r"""Compute the number of indentation spaces for the TOC list element.
:parameter header_type_curr: the current type of header (h[1-Inf]).
Defaults to ``1``.
:parameter header_type_prev: the previous type of header (h[1-Inf]).
Defaults to ``0``.
:parameter no_of_indentation_spaces_prev: the number of previous indentation spaces.
Defaults to ``0``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter ordered: if set to ``True``, numbers will be used
as list ids or otherwise a dash character, otherwise.
Defaults to ``False``.
:parameter list_marker: a string that contains some of the first
characters of the list element.
Defaults to ``-``.
:parameter list_marker_log: a data structure that holds list marker
information for ordered lists.
Defaults to ``build_list_marker_log('github', '.')``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:type header_type_curr: int
:type header_type_prev: int
:type no_of_indentation_spaces_prev: int
:type parser: str
:type ordered: bool
:type list_marker: str
:type list_marker_log: list
:type index: int
:returns: no_of_indentation_spaces_curr, the number of indentation spaces
for the list element.
:rtype: int
:raises: a built-in exception.
.. note::
Please note that this function
assumes that no_of_indentation_spaces_prev contains the correct
number of spaces. | [
"r",
"Compute",
"the",
"number",
"of",
"indentation",
"spaces",
"for",
"the",
"TOC",
"list",
"element",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L371-L483 | train | 42,386 |
frnmst/md-toc | md_toc/api.py | build_toc_line_without_indentation | def build_toc_line_without_indentation(header: dict,
ordered: bool = False,
no_links: bool = False,
index: int = 1,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception.
"""
assert 'type' in header
assert 'text_original' in header
assert 'text_anchor_link' in header
assert isinstance(header['type'], int)
assert isinstance(header['text_original'], str)
assert isinstance(header['text_anchor_link'], str)
assert header['type'] >= 1
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
toc_line_no_indent = str()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
list_marker = str(index) + list_marker
# FIXME: is this always correct?
if no_links:
line = header['text_original']
else:
line = '[' + header['text_original'] + ']' + '(#' + header[
'text_anchor_link'] + ')'
toc_line_no_indent = list_marker + ' ' + line
return toc_line_no_indent | python | def build_toc_line_without_indentation(header: dict,
ordered: bool = False,
no_links: bool = False,
index: int = 1,
parser: str = 'github',
list_marker: str = '-') -> str:
r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception.
"""
assert 'type' in header
assert 'text_original' in header
assert 'text_anchor_link' in header
assert isinstance(header['type'], int)
assert isinstance(header['text_original'], str)
assert isinstance(header['text_anchor_link'], str)
assert header['type'] >= 1
assert index >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
assert list_marker in md_parser[parser]['list']['ordered'][
'closing_markers']
else:
assert list_marker in md_parser[parser]['list']['unordered'][
'bullet_markers']
toc_line_no_indent = str()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
if ordered:
list_marker = str(index) + list_marker
# FIXME: is this always correct?
if no_links:
line = header['text_original']
else:
line = '[' + header['text_original'] + ']' + '(#' + header[
'text_anchor_link'] + ')'
toc_line_no_indent = list_marker + ' ' + line
return toc_line_no_indent | [
"def",
"build_toc_line_without_indentation",
"(",
"header",
":",
"dict",
",",
"ordered",
":",
"bool",
"=",
"False",
",",
"no_links",
":",
"bool",
"=",
"False",
",",
"index",
":",
"int",
"=",
"1",
",",
"parser",
":",
"str",
"=",
"'github'",
",",
"list_mar... | r"""Return a list element of the table of contents.
:parameter header: a data structure that contains the original
text, the trimmed text and the type of header.
:parameter ordered: if set to ``True``, numbers will be used
as list ids, otherwise a dash character. Defaults
to ``False``.
:parameter no_links: disables the use of links. Defaults to ``False``.
:parameter index: a number that will be used as list id in case of an
ordered table of contents. Defaults to ``1``.
:parameter parser: decides rules on how compute indentations.
Defaults to ``github``.
:parameter list_marker: a string that contains some of the first
characters of the list element. Defaults to ``-``.
:type header: dict
:type ordered: bool
:type no_links: bool
:type index: int
:type parser: str
:type list_marker: str
:returns: toc_line_no_indent, a single line of the table of contents
without indentation.
:rtype: str
:raises: a built-in exception. | [
"r",
"Return",
"a",
"list",
"element",
"of",
"the",
"table",
"of",
"contents",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L486-L549 | train | 42,387 |
frnmst/md-toc | md_toc/api.py | build_toc_line | def build_toc_line(toc_line_no_indent: str,
no_of_indentation_spaces: int = 0) -> str:
r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception.
"""
assert no_of_indentation_spaces >= 0
indentation = no_of_indentation_spaces * ' '
toc_line = indentation + toc_line_no_indent
return toc_line | python | def build_toc_line(toc_line_no_indent: str,
no_of_indentation_spaces: int = 0) -> str:
r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception.
"""
assert no_of_indentation_spaces >= 0
indentation = no_of_indentation_spaces * ' '
toc_line = indentation + toc_line_no_indent
return toc_line | [
"def",
"build_toc_line",
"(",
"toc_line_no_indent",
":",
"str",
",",
"no_of_indentation_spaces",
":",
"int",
"=",
"0",
")",
"->",
"str",
":",
"assert",
"no_of_indentation_spaces",
">=",
"0",
"indentation",
"=",
"no_of_indentation_spaces",
"*",
"' '",
"toc_line",
"... | r"""Build the TOC line.
:parameter toc_line_no_indent: the TOC line without indentation.
:parameter no_of_indentation_spaces: the number of indentation spaces.
Defaults to ``0``.
:type toc_line_no_indent: str
:type no_of_indentation_spaces: int
:returns: toc_line, a single line of the table of contents.
:rtype: str
:raises: a built-in exception. | [
"r",
"Build",
"the",
"TOC",
"line",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L552-L570 | train | 42,388 |
frnmst/md-toc | md_toc/api.py | build_anchor_link | def build_anchor_link(header_text_trimmed: str,
header_duplicate_counter: str,
parser: str = 'github') -> str:
r"""Apply the specified slug rule to build the anchor link.
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
header_text_trimmed = header_text_trimmed.lower()
# Remove punctuation: Keep spaces, hypens and "word characters"
# only.
header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
header_text_trimmed = header_text_trimmed.replace(' ', '-')
# Check for duplicates.
ht = header_text_trimmed
# Set the initial value if we are examining the first occurrency.
# The state of header_duplicate_counter is available to the caller
# functions.
if header_text_trimmed not in header_duplicate_counter:
header_duplicate_counter[header_text_trimmed] = 0
if header_duplicate_counter[header_text_trimmed] > 0:
header_text_trimmed = header_text_trimmed + '-' + str(
header_duplicate_counter[header_text_trimmed])
header_duplicate_counter[ht] += 1
return header_text_trimmed
elif parser == 'redcarpet':
# To ensure full compatibility what follows is a direct translation
# of the rndr_header_anchor C function used in redcarpet.
STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
header_text_trimmed_len = len(header_text_trimmed)
inserted = 0
stripped = 0
header_text_trimmed_middle_stage = ''
for i in range(0, header_text_trimmed_len):
if header_text_trimmed[i] == '<':
while i < header_text_trimmed_len and header_text_trimmed[
i] != '>':
i += 1
elif header_text_trimmed[i] == '&':
while i < header_text_trimmed_len and header_text_trimmed[
i] != ';':
i += 1
# str.find() == -1 if character is not found in str.
# https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
elif not curses.ascii.isascii(
header_text_trimmed[i]) or STRIPPED.find(
header_text_trimmed[i]) != -1:
if inserted and not stripped:
header_text_trimmed_middle_stage += '-'
stripped = 1
else:
header_text_trimmed_middle_stage += header_text_trimmed[
i].lower()
stripped = 0
inserted += 1
if stripped > 0 and inserted > 0:
header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
0:-1]
if inserted == 0 and header_text_trimmed_len > 0:
hash = 5381
for i in range(0, header_text_trimmed_len):
# Get the unicode representation with ord.
# Unicode should be equal to ASCII in ASCII's range of
# characters.
hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
# This is equivalent to %x in C. In Python we don't have
# the length problem so %x is equal to %lx in this case.
# Apparently there is no %l in Python...
header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
return header_text_trimmed_middle_stage | python | def build_anchor_link(header_text_trimmed: str,
header_duplicate_counter: str,
parser: str = 'github') -> str:
r"""Apply the specified slug rule to build the anchor link.
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
header_text_trimmed = header_text_trimmed.lower()
# Remove punctuation: Keep spaces, hypens and "word characters"
# only.
header_text_trimmed = re.sub(r'[^\w\s\- ]', '', header_text_trimmed)
header_text_trimmed = header_text_trimmed.replace(' ', '-')
# Check for duplicates.
ht = header_text_trimmed
# Set the initial value if we are examining the first occurrency.
# The state of header_duplicate_counter is available to the caller
# functions.
if header_text_trimmed not in header_duplicate_counter:
header_duplicate_counter[header_text_trimmed] = 0
if header_duplicate_counter[header_text_trimmed] > 0:
header_text_trimmed = header_text_trimmed + '-' + str(
header_duplicate_counter[header_text_trimmed])
header_duplicate_counter[ht] += 1
return header_text_trimmed
elif parser == 'redcarpet':
# To ensure full compatibility what follows is a direct translation
# of the rndr_header_anchor C function used in redcarpet.
STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'"
header_text_trimmed_len = len(header_text_trimmed)
inserted = 0
stripped = 0
header_text_trimmed_middle_stage = ''
for i in range(0, header_text_trimmed_len):
if header_text_trimmed[i] == '<':
while i < header_text_trimmed_len and header_text_trimmed[
i] != '>':
i += 1
elif header_text_trimmed[i] == '&':
while i < header_text_trimmed_len and header_text_trimmed[
i] != ';':
i += 1
# str.find() == -1 if character is not found in str.
# https://docs.python.org/3.6/library/stdtypes.html?highlight=find#str.find
elif not curses.ascii.isascii(
header_text_trimmed[i]) or STRIPPED.find(
header_text_trimmed[i]) != -1:
if inserted and not stripped:
header_text_trimmed_middle_stage += '-'
stripped = 1
else:
header_text_trimmed_middle_stage += header_text_trimmed[
i].lower()
stripped = 0
inserted += 1
if stripped > 0 and inserted > 0:
header_text_trimmed_middle_stage = header_text_trimmed_middle_stage[
0:-1]
if inserted == 0 and header_text_trimmed_len > 0:
hash = 5381
for i in range(0, header_text_trimmed_len):
# Get the unicode representation with ord.
# Unicode should be equal to ASCII in ASCII's range of
# characters.
hash = ((hash << 5) + hash) + ord(header_text_trimmed[i])
# This is equivalent to %x in C. In Python we don't have
# the length problem so %x is equal to %lx in this case.
# Apparently there is no %l in Python...
header_text_trimmed_middle_stage = 'part-' + '{0:x}'.format(hash)
return header_text_trimmed_middle_stage | [
"def",
"build_anchor_link",
"(",
"header_text_trimmed",
":",
"str",
",",
"header_duplicate_counter",
":",
"str",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
"->",
"str",
":",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"parser",
"==",
"'cmark'",
"or",
... | r"""Apply the specified slug rule to build the anchor link.
:parameter header_text_trimmed: the text that needs to be transformed
in a link.
:parameter header_duplicate_counter: a data structure that keeps track of
possible duplicate header links in order to avoid them. This is
meaningful only for certain values of parser.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_trimmed: str
:type header_duplicate_counter: dict
:type parser: str
:returns: None if the specified parser is not recognized, or the anchor
link, otherwise.
:rtype: str
:raises: a built-in exception.
.. note::
The licenses of each markdown parser algorithm are reported on
the 'Markdown spec' documentation page. | [
"r",
"Apply",
"the",
"specified",
"slug",
"rule",
"to",
"build",
"the",
"anchor",
"link",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L573-L665 | train | 42,389 |
frnmst/md-toc | md_toc/api.py | get_md_header | def get_md_header(header_text_line: str,
header_duplicate_counter: dict,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False) -> dict:
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
"""
result = get_atx_heading(header_text_line, keep_header_levels, parser,
no_links)
if result is None:
return result
else:
header_type, header_text_trimmed = result
header = {
'type':
header_type,
'text_original':
header_text_trimmed,
'text_anchor_link':
build_anchor_link(header_text_trimmed, header_duplicate_counter,
parser)
}
return header | python | def get_md_header(header_text_line: str,
header_duplicate_counter: dict,
keep_header_levels: int = 3,
parser: str = 'github',
no_links: bool = False) -> dict:
r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions.
"""
result = get_atx_heading(header_text_line, keep_header_levels, parser,
no_links)
if result is None:
return result
else:
header_type, header_text_trimmed = result
header = {
'type':
header_type,
'text_original':
header_text_trimmed,
'text_anchor_link':
build_anchor_link(header_text_trimmed, header_duplicate_counter,
parser)
}
return header | [
"def",
"get_md_header",
"(",
"header_text_line",
":",
"str",
",",
"header_duplicate_counter",
":",
"dict",
",",
"keep_header_levels",
":",
"int",
"=",
"3",
",",
"parser",
":",
"str",
"=",
"'github'",
",",
"no_links",
":",
"bool",
"=",
"False",
")",
"->",
"... | r"""Build a data structure with the elements needed to create a TOC line.
:parameter header_text_line: a single markdown line that needs to be
transformed into a TOC line.
:parameter header_duplicate_counter: a data structure that contains the
number of occurrencies of each header anchor link. This is used to
avoid duplicate anchor links and it is meaningful only for certain
values of parser.
:parameter keep_header_levels: the maximum level of headers to be
considered as such when building the table of contents.
Defaults to ``3``.
:parameter parser: decides rules on how to generate anchor links.
Defaults to ``github``.
:type header_text_line: str
:type header_duplicate_counter: dict
:type keep_header_levels: int
:type parser: str
:returns: None if the input line does not correspond to one of the
designated cases or a data structure containing the necessary
components to create a table of contents line, otherwise.
:rtype: dict
:raises: a built-in exception.
.. note::
This works like a wrapper to other functions. | [
"r",
"Build",
"a",
"data",
"structure",
"with",
"the",
"elements",
"needed",
"to",
"create",
"a",
"TOC",
"line",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L843-L889 | train | 42,390 |
frnmst/md-toc | md_toc/api.py | is_valid_code_fence_indent | def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
return len(line) - len(line.lstrip(
' ')) <= md_parser['github']['code fence']['min_marker_characters']
elif parser == 'redcarpet':
# TODO.
return False | python | def is_valid_code_fence_indent(line: str, parser: str = 'github') -> bool:
r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
return len(line) - len(line.lstrip(
' ')) <= md_parser['github']['code fence']['min_marker_characters']
elif parser == 'redcarpet':
# TODO.
return False | [
"def",
"is_valid_code_fence_indent",
"(",
"line",
":",
"str",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
"->",
"bool",
":",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"parser",
"==",
"'cmark'",
"or",
"parser",
"==",
"'gitlab'",
"or",
"parser",
"=... | r"""Determine if the given line has valid indentation for a code block fence.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: True if the given line has valid indentation or False
otherwise.
:rtype: bool
:raises: a built-in exception. | [
"r",
"Determine",
"if",
"the",
"given",
"line",
"has",
"valid",
"indentation",
"for",
"a",
"code",
"block",
"fence",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L892-L911 | train | 42,391 |
frnmst/md-toc | md_toc/api.py | is_opening_code_fence | def is_opening_code_fence(line: str, parser: str = 'github'):
r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return None
line = line.lstrip(' ').rstrip('\n')
if not line.startswith(
(markers[0] * marker_min_length, markers[1] * marker_min_length)):
return None
if line == len(line) * line[0]:
info_string = str()
else:
info_string = line.lstrip(line[0])
# Backticks or tildes in info string are explicitly forbidden.
if markers[0] in info_string or markers[1] in info_string:
return None
# Solves example 107. See:
# https://github.github.com/gfm/#example-107
if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
return None
return line.rstrip(info_string)
elif parser == 'redcarpet':
# TODO.
return None | python | def is_opening_code_fence(line: str, parser: str = 'github'):
r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return None
line = line.lstrip(' ').rstrip('\n')
if not line.startswith(
(markers[0] * marker_min_length, markers[1] * marker_min_length)):
return None
if line == len(line) * line[0]:
info_string = str()
else:
info_string = line.lstrip(line[0])
# Backticks or tildes in info string are explicitly forbidden.
if markers[0] in info_string or markers[1] in info_string:
return None
# Solves example 107. See:
# https://github.github.com/gfm/#example-107
if line.rstrip(markers[0]) != line and line.rstrip(markers[1]) != line:
return None
return line.rstrip(info_string)
elif parser == 'redcarpet':
# TODO.
return None | [
"def",
"is_opening_code_fence",
"(",
"line",
":",
"str",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
":",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"parser",
"==",
"'cmark'",
"or",
"parser",
"==",
"'gitlab'",
"or",
"parser",
"==",
"'commonmarker'",... | r"""Determine if the given line is possibly the opening of a fenced code block.
:parameter line: a single markdown line to evaluate.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type parser: str
:returns: None if the input line is not an opening code fence. Otherwise,
returns the string which will identify the closing code fence
according to the input parsers' rules.
:rtype: typing.Optional[str]
:raises: a built-in exception. | [
"r",
"Determine",
"if",
"the",
"given",
"line",
"is",
"possibly",
"the",
"opening",
"of",
"a",
"fenced",
"code",
"block",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L914-L957 | train | 42,392 |
frnmst/md-toc | md_toc/api.py | is_closing_code_fence | def is_closing_code_fence(line: str,
fence: str,
is_document_end: bool = False,
parser: str = 'github') -> bool:
r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return False
# Remove opening fence indentation after it is known to be valid.
fence = fence.lstrip(' ')
# Check if fence uses valid characters.
if not fence.startswith((markers[0], markers[1])):
return False
if len(fence) < marker_min_length:
return False
# Additional security.
fence = fence.rstrip('\n').rstrip(' ')
# Check that all fence characters are equal.
if fence != len(fence) * fence[0]:
return False
# We might be inside a code block if this is not closed
# by the end of the document, according to example 95 and 96.
# This means that the end of the document corresponds to
# a closing code fence.
# Of course we first have to check that fence is a valid opening
# code fence marker.
# See:
# https://github.github.com/gfm/#example-95
# https://github.github.com/gfm/#example-96
if is_document_end:
return True
# Check if line uses the same character as fence.
line = line.lstrip(' ')
if not line.startswith(fence):
return False
line = line.rstrip('\n').rstrip(' ')
# Solves example 93 and 94. See:
# https://github.github.com/gfm/#example-93
# https://github.github.com/gfm/#example-94
if len(line) < len(fence):
return False
# Closing fence must not have alien characters.
if line != len(line) * line[0]:
return False
return True
elif parser == 'redcarpet':
# TODO.
return False | python | def is_closing_code_fence(line: str,
fence: str,
is_document_end: bool = False,
parser: str = 'github') -> bool:
r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception.
"""
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker'):
markers = md_parser['github']['code fence']['marker']
marker_min_length = md_parser['github']['code fence'][
'min_marker_characters']
if not is_valid_code_fence_indent(line):
return False
# Remove opening fence indentation after it is known to be valid.
fence = fence.lstrip(' ')
# Check if fence uses valid characters.
if not fence.startswith((markers[0], markers[1])):
return False
if len(fence) < marker_min_length:
return False
# Additional security.
fence = fence.rstrip('\n').rstrip(' ')
# Check that all fence characters are equal.
if fence != len(fence) * fence[0]:
return False
# We might be inside a code block if this is not closed
# by the end of the document, according to example 95 and 96.
# This means that the end of the document corresponds to
# a closing code fence.
# Of course we first have to check that fence is a valid opening
# code fence marker.
# See:
# https://github.github.com/gfm/#example-95
# https://github.github.com/gfm/#example-96
if is_document_end:
return True
# Check if line uses the same character as fence.
line = line.lstrip(' ')
if not line.startswith(fence):
return False
line = line.rstrip('\n').rstrip(' ')
# Solves example 93 and 94. See:
# https://github.github.com/gfm/#example-93
# https://github.github.com/gfm/#example-94
if len(line) < len(fence):
return False
# Closing fence must not have alien characters.
if line != len(line) * line[0]:
return False
return True
elif parser == 'redcarpet':
# TODO.
return False | [
"def",
"is_closing_code_fence",
"(",
"line",
":",
"str",
",",
"fence",
":",
"str",
",",
"is_document_end",
":",
"bool",
"=",
"False",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
"->",
"bool",
":",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"pars... | r"""Determine if the given line is the end of a fenced code block.
:parameter line: a single markdown line to evaluate.
:paramter fence: a sequence of backticks or tildes marking the start of
the current code block. This is usually the return value of the
is_opening_code_fence function.
:parameter is_document_end: This variable tells the function that the
end of the file is reached.
Defaults to ``False``.
:parameter parser: decides rules on how to generate the anchor text.
Defaults to ``github``.
:type line: str
:type fence: str
:type is_document_end: bool
:type parser: str
:returns: True if the line ends the current code block. False otherwise.
:rtype: bool
:raises: a built-in exception. | [
"r",
"Determine",
"if",
"the",
"given",
"line",
"is",
"the",
"end",
"of",
"a",
"fenced",
"code",
"block",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L960-L1038 | train | 42,393 |
frnmst/md-toc | md_toc/api.py | build_indentation_list | def build_indentation_list(parser: str = 'github'):
r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception.
"""
indentation_list = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
for i in range(0, md_parser[parser]['header']['max_levels']):
indentation_list.append(False)
return indentation_list | python | def build_indentation_list(parser: str = 'github'):
r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception.
"""
indentation_list = list()
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
for i in range(0, md_parser[parser]['header']['max_levels']):
indentation_list.append(False)
return indentation_list | [
"def",
"build_indentation_list",
"(",
"parser",
":",
"str",
"=",
"'github'",
")",
":",
"indentation_list",
"=",
"list",
"(",
")",
"if",
"(",
"parser",
"==",
"'github'",
"or",
"parser",
"==",
"'cmark'",
"or",
"parser",
"==",
"'gitlab'",
"or",
"parser",
"=="... | r"""Create a data structure that holds the state of indentations.
:parameter parser: decides the length of the list.
Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
indentations given a header type.
:rtype: list
:raises: a built-in exception. | [
"r",
"Create",
"a",
"data",
"structure",
"that",
"holds",
"the",
"state",
"of",
"indentations",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L1041-L1059 | train | 42,394 |
frnmst/md-toc | md_toc/api.py | toc_renders_as_coherent_list | def toc_renders_as_coherent_list(
header_type_curr: int = 1,
indentation_list: list = build_indentation_list('github'),
parser: str = 'github') -> bool:
r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception.
"""
assert header_type_curr >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert len(
indentation_list) == md_parser[parser]['header']['max_levels']
for e in indentation_list:
assert isinstance(e, bool)
renders_as_list = True
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
# Update with current information.
indentation_list[header_type_curr - 1] = True
# Reset next cells to False, as a detection mechanism.
for i in range(header_type_curr,
md_parser['github']['header']['max_levels']):
indentation_list[i] = False
# Check for previous False cells. If there is a "hole" in the list
# it means that the TOC will have "wrong" indentation spaces, thus
# either not rendering as an HTML list or not as the user intended.
i = header_type_curr - 1
while i >= 0 and indentation_list[i]:
i -= 1
if i >= 0:
renders_as_list = False
return renders_as_list | python | def toc_renders_as_coherent_list(
header_type_curr: int = 1,
indentation_list: list = build_indentation_list('github'),
parser: str = 'github') -> bool:
r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception.
"""
assert header_type_curr >= 1
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
assert len(
indentation_list) == md_parser[parser]['header']['max_levels']
for e in indentation_list:
assert isinstance(e, bool)
renders_as_list = True
if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
or parser == 'commonmarker' or parser == 'redcarpet'):
# Update with current information.
indentation_list[header_type_curr - 1] = True
# Reset next cells to False, as a detection mechanism.
for i in range(header_type_curr,
md_parser['github']['header']['max_levels']):
indentation_list[i] = False
# Check for previous False cells. If there is a "hole" in the list
# it means that the TOC will have "wrong" indentation spaces, thus
# either not rendering as an HTML list or not as the user intended.
i = header_type_curr - 1
while i >= 0 and indentation_list[i]:
i -= 1
if i >= 0:
renders_as_list = False
return renders_as_list | [
"def",
"toc_renders_as_coherent_list",
"(",
"header_type_curr",
":",
"int",
"=",
"1",
",",
"indentation_list",
":",
"list",
"=",
"build_indentation_list",
"(",
"'github'",
")",
",",
"parser",
":",
"str",
"=",
"'github'",
")",
"->",
"bool",
":",
"assert",
"head... | r"""Check if the TOC will render as a working list.
:parameter header_type_curr: the current type of header (h[1-Inf]).
:parameter parser: decides rules on how to generate ordered list markers
:type header_type_curr: int
:type indentation_list: list
:type parser: str
:returns: renders_as_list
:rtype: bool
:raises: a built-in exception. | [
"r",
"Check",
"if",
"the",
"TOC",
"will",
"render",
"as",
"a",
"working",
"list",
"."
] | 86d2002ecf52fa9e1e5316a31f7eb7d549cb0830 | https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/api.py#L1062-L1105 | train | 42,395 |
101Loop/drf-addons | drfaddons/models.py | CreateUpdateModel.is_owner | def is_owner(self, user):
"""
Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com)
"""
if user.is_authenticated:
return self.created_by.id == user.id
return False | python | def is_owner(self, user):
"""
Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com)
"""
if user.is_authenticated:
return self.created_by.id == user.id
return False | [
"def",
"is_owner",
"(",
"self",
",",
"user",
")",
":",
"if",
"user",
".",
"is_authenticated",
":",
"return",
"self",
".",
"created_by",
".",
"id",
"==",
"user",
".",
"id",
"return",
"False"
] | Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com) | [
"Checks",
"if",
"user",
"is",
"the",
"owner",
"of",
"object"
] | 62392c72e8bce237f4140a2b7171e89984cb15c5 | https://github.com/101Loop/drf-addons/blob/62392c72e8bce237f4140a2b7171e89984cb15c5/drfaddons/models.py#L30-L48 | train | 42,396 |
KarrLab/unitth | unitth/core.py | UnitTH.run | def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.',
generate_exec_time_graphs=True, html_report_dir='report.th',
initial_java_heap_size=None, maximum_java_heap_size=None):
""" Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size
"""
cmd = []
cmd.append('java')
if initial_java_heap_size:
cmd.append('-Xms{}'.format(initial_java_heap_size))
if maximum_java_heap_size:
cmd.append('-Xmx{}'.format(maximum_java_heap_size))
cmd.append('-Dunitth.xml.report.filter={}'.format(xml_report_filter))
cmd.append('-Dunitth.html.report.path={}'.format(html_report_path))
cmd.append('-Dunitth.generate.exectimegraphs={}'.format('{}'.format(generate_exec_time_graphs).lower()))
cmd.append('-Dunitth.report.dir={}'.format(html_report_dir))
cmd.append('-jar')
cmd.append('"{}"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')))
cmd.append(xml_report_dir)
subprocess.check_call(' '.join(cmd), shell=True) | python | def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.',
generate_exec_time_graphs=True, html_report_dir='report.th',
initial_java_heap_size=None, maximum_java_heap_size=None):
""" Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size
"""
cmd = []
cmd.append('java')
if initial_java_heap_size:
cmd.append('-Xms{}'.format(initial_java_heap_size))
if maximum_java_heap_size:
cmd.append('-Xmx{}'.format(maximum_java_heap_size))
cmd.append('-Dunitth.xml.report.filter={}'.format(xml_report_filter))
cmd.append('-Dunitth.html.report.path={}'.format(html_report_path))
cmd.append('-Dunitth.generate.exectimegraphs={}'.format('{}'.format(generate_exec_time_graphs).lower()))
cmd.append('-Dunitth.report.dir={}'.format(html_report_dir))
cmd.append('-jar')
cmd.append('"{}"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')))
cmd.append(xml_report_dir)
subprocess.check_call(' '.join(cmd), shell=True) | [
"def",
"run",
"(",
"xml_report_dir",
",",
"xml_report_filter",
"=",
"'TEST-'",
",",
"html_report_path",
"=",
"'.'",
",",
"generate_exec_time_graphs",
"=",
"True",
",",
"html_report_dir",
"=",
"'report.th'",
",",
"initial_java_heap_size",
"=",
"None",
",",
"maximum_j... | Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size | [
"Use",
"UnitTH",
"to",
"generate",
"a",
"test",
"history",
"report"
] | f260cbc6047bb39cf6c06aa65a6d65dce0e3b8da | https://github.com/KarrLab/unitth/blob/f260cbc6047bb39cf6c06aa65a6d65dce0e3b8da/unitth/core.py#L17-L51 | train | 42,397 |
jupyterhub/chartpress | chartpress.py | run_cmd | def run_cmd(call, cmd, *, echo=True, **kwargs):
"""Run a command and echo it first"""
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | python | def run_cmd(call, cmd, *, echo=True, **kwargs):
"""Run a command and echo it first"""
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | [
"def",
"run_cmd",
"(",
"call",
",",
"cmd",
",",
"*",
",",
"echo",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"echo",
":",
"print",
"(",
"'$> '",
"+",
"' '",
".",
"join",
"(",
"map",
"(",
"pipes",
".",
"quote",
",",
"cmd",
")",
")",... | Run a command and echo it first | [
"Run",
"a",
"command",
"and",
"echo",
"it",
"first"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L34-L38 | train | 42,398 |
jupyterhub/chartpress | chartpress.py | git_remote | def git_remote(git_repo):
"""Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
"""
github_token = os.getenv(GITHUB_TOKEN_KEY)
if github_token:
return 'https://{0}@github.com/{1}'.format(
github_token, git_repo)
return 'git@github.com:{0}'.format(git_repo) | python | def git_remote(git_repo):
"""Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote.
"""
github_token = os.getenv(GITHUB_TOKEN_KEY)
if github_token:
return 'https://{0}@github.com/{1}'.format(
github_token, git_repo)
return 'git@github.com:{0}'.format(git_repo) | [
"def",
"git_remote",
"(",
"git_repo",
")",
":",
"github_token",
"=",
"os",
".",
"getenv",
"(",
"GITHUB_TOKEN_KEY",
")",
"if",
"github_token",
":",
"return",
"'https://{0}@github.com/{1}'",
".",
"format",
"(",
"github_token",
",",
"git_repo",
")",
"return",
"'git... | Return the URL for remote git repository.
Depending on the system setup it returns ssh or https remote. | [
"Return",
"the",
"URL",
"for",
"remote",
"git",
"repository",
"."
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L45-L54 | train | 42,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.