repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.group_statistics | def group_statistics(self, group, selected_meta, stat_code='mean'):
"""
Provides statistics of a group based on the meta data selected.
:param group:The result of a classification or clustering.rst or biclustering algorithm
:param selected_meta: The metadata that we are interested in
:param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation
:return: returns the statistics properties of the selected metadata
"""
values = self.get_values(group, selected_meta)
if stat_code == 'mean':
res = statistics.mean(values)
elif stat_code == 'variance':
res = statistics.variance(values)
elif stat_code == 'std':
res = statistics.stdev(values)
return res | python | def group_statistics(self, group, selected_meta, stat_code='mean'):
"""
Provides statistics of a group based on the meta data selected.
:param group:The result of a classification or clustering.rst or biclustering algorithm
:param selected_meta: The metadata that we are interested in
:param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation
:return: returns the statistics properties of the selected metadata
"""
values = self.get_values(group, selected_meta)
if stat_code == 'mean':
res = statistics.mean(values)
elif stat_code == 'variance':
res = statistics.variance(values)
elif stat_code == 'std':
res = statistics.stdev(values)
return res | [
"def",
"group_statistics",
"(",
"self",
",",
"group",
",",
"selected_meta",
",",
"stat_code",
"=",
"'mean'",
")",
":",
"values",
"=",
"self",
".",
"get_values",
"(",
"group",
",",
"selected_meta",
")",
"if",
"stat_code",
"==",
"'mean'",
":",
"res",
"=",
... | Provides statistics of a group based on the meta data selected.
:param group:The result of a classification or clustering.rst or biclustering algorithm
:param selected_meta: The metadata that we are interested in
:param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation
:return: returns the statistics properties of the selected metadata | [
"Provides",
"statistics",
"of",
"a",
"group",
"based",
"on",
"the",
"meta",
"data",
"selected",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L130-L146 | train | 47,400 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.tf | def tf(cluster):
"""
Computes the term frequency and stores it as a dictionary
:param cluster: the cluster that contains the metadata
:return: tf dictionary
"""
counts = dict()
words = cluster.split(' ')
for word in words:
counts[word] = counts.get(word, 0) + 1
return counts | python | def tf(cluster):
"""
Computes the term frequency and stores it as a dictionary
:param cluster: the cluster that contains the metadata
:return: tf dictionary
"""
counts = dict()
words = cluster.split(' ')
for word in words:
counts[word] = counts.get(word, 0) + 1
return counts | [
"def",
"tf",
"(",
"cluster",
")",
":",
"counts",
"=",
"dict",
"(",
")",
"words",
"=",
"cluster",
".",
"split",
"(",
"' '",
")",
"for",
"word",
"in",
"words",
":",
"counts",
"[",
"word",
"]",
"=",
"counts",
".",
"get",
"(",
"word",
",",
"0",
")"... | Computes the term frequency and stores it as a dictionary
:param cluster: the cluster that contains the metadata
:return: tf dictionary | [
"Computes",
"the",
"term",
"frequency",
"and",
"stores",
"it",
"as",
"a",
"dictionary"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L222-L233 | train | 47,401 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.visualize_cloud_of_words | def visualize_cloud_of_words(dictionary, image_path=None):
"""
Renders the cloud of words representation for a given dictionary of frequencies
:param dictionary: the dictionary object that contains key-frequency pairs
:param image_path: the path to the image mask, None if no masking is needed
"""
from PIL import Image
if image_path is not None:
mask = np.array(Image.open(image_path))
wc = WordCloud(mask=mask, background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
else:
# Generate a word cloud image
wc = WordCloud(background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15, 15)
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show() | python | def visualize_cloud_of_words(dictionary, image_path=None):
"""
Renders the cloud of words representation for a given dictionary of frequencies
:param dictionary: the dictionary object that contains key-frequency pairs
:param image_path: the path to the image mask, None if no masking is needed
"""
from PIL import Image
if image_path is not None:
mask = np.array(Image.open(image_path))
wc = WordCloud(mask=mask, background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
else:
# Generate a word cloud image
wc = WordCloud(background_color='white', width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15, 15)
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show() | [
"def",
"visualize_cloud_of_words",
"(",
"dictionary",
",",
"image_path",
"=",
"None",
")",
":",
"from",
"PIL",
"import",
"Image",
"if",
"image_path",
"is",
"not",
"None",
":",
"mask",
"=",
"np",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"image_path",
... | Renders the cloud of words representation for a given dictionary of frequencies
:param dictionary: the dictionary object that contains key-frequency pairs
:param image_path: the path to the image mask, None if no masking is needed | [
"Renders",
"the",
"cloud",
"of",
"words",
"representation",
"for",
"a",
"given",
"dictionary",
"of",
"frequencies"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L304-L329 | train | 47,402 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.cloud_of_words | def cloud_of_words(path_to_bog, cluster_no, image_path=None):
"""
Draws the cloud of words representation
:param path_to_bog: path to bag of words
:param cluster_no: the number of document to be visualized
:param image_path: path to the image file for the masking, None if no masking is needed
"""
dictionary = GenometricSpace.best_descriptive_meta_dict(path_to_bog, cluster_no)
GenometricSpace.visualize_cloud_of_words(dictionary, image_path) | python | def cloud_of_words(path_to_bog, cluster_no, image_path=None):
"""
Draws the cloud of words representation
:param path_to_bog: path to bag of words
:param cluster_no: the number of document to be visualized
:param image_path: path to the image file for the masking, None if no masking is needed
"""
dictionary = GenometricSpace.best_descriptive_meta_dict(path_to_bog, cluster_no)
GenometricSpace.visualize_cloud_of_words(dictionary, image_path) | [
"def",
"cloud_of_words",
"(",
"path_to_bog",
",",
"cluster_no",
",",
"image_path",
"=",
"None",
")",
":",
"dictionary",
"=",
"GenometricSpace",
".",
"best_descriptive_meta_dict",
"(",
"path_to_bog",
",",
"cluster_no",
")",
"GenometricSpace",
".",
"visualize_cloud_of_w... | Draws the cloud of words representation
:param path_to_bog: path to bag of words
:param cluster_no: the number of document to be visualized
:param image_path: path to the image file for the masking, None if no masking is needed | [
"Draws",
"the",
"cloud",
"of",
"words",
"representation"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L332-L342 | train | 47,403 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | from_pandas | def from_pandas(regs, meta=None, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Creates a GDataframe from a pandas dataframe of region and a pandas dataframe of metadata
:param regs: a pandas Dataframe of regions that is coherent with the GMQL data model
:param meta: (optional) a pandas Dataframe of metadata that is coherent with the regions
:param chr_name: (optional) which column of :attr:`~.regs` is the chromosome
:param start_name: (optional) which column of :attr:`~.regs` is the start
:param stop_name: (optional) which column of :attr:`~.regs` is the stop
:param strand_name: (optional) which column of :attr:`~.regs` is the strand
:param sample_name: (optional) which column of :attr:`~.regs` represents the sample name
of that region. If nothing is provided, all the region will be put in a single sample.
:return: a GDataframe
"""
regs = check_regs(regs, chr_name, start_name, stop_name, strand_name, sample_name)
regs = to_gmql_regions(regs)
if meta is not None:
if not check_meta(meta, regs):
raise ValueError("Error. Meta dataframe is not GMQL standard")
else:
meta = empty_meta(regs)
return GDataframe(regs, meta) | python | def from_pandas(regs, meta=None, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Creates a GDataframe from a pandas dataframe of region and a pandas dataframe of metadata
:param regs: a pandas Dataframe of regions that is coherent with the GMQL data model
:param meta: (optional) a pandas Dataframe of metadata that is coherent with the regions
:param chr_name: (optional) which column of :attr:`~.regs` is the chromosome
:param start_name: (optional) which column of :attr:`~.regs` is the start
:param stop_name: (optional) which column of :attr:`~.regs` is the stop
:param strand_name: (optional) which column of :attr:`~.regs` is the strand
:param sample_name: (optional) which column of :attr:`~.regs` represents the sample name
of that region. If nothing is provided, all the region will be put in a single sample.
:return: a GDataframe
"""
regs = check_regs(regs, chr_name, start_name, stop_name, strand_name, sample_name)
regs = to_gmql_regions(regs)
if meta is not None:
if not check_meta(meta, regs):
raise ValueError("Error. Meta dataframe is not GMQL standard")
else:
meta = empty_meta(regs)
return GDataframe(regs, meta) | [
"def",
"from_pandas",
"(",
"regs",
",",
"meta",
"=",
"None",
",",
"chr_name",
"=",
"None",
",",
"start_name",
"=",
"None",
",",
"stop_name",
"=",
"None",
",",
"strand_name",
"=",
"None",
",",
"sample_name",
"=",
"None",
")",
":",
"regs",
"=",
"check_re... | Creates a GDataframe from a pandas dataframe of region and a pandas dataframe of metadata
:param regs: a pandas Dataframe of regions that is coherent with the GMQL data model
:param meta: (optional) a pandas Dataframe of metadata that is coherent with the regions
:param chr_name: (optional) which column of :attr:`~.regs` is the chromosome
:param start_name: (optional) which column of :attr:`~.regs` is the start
:param stop_name: (optional) which column of :attr:`~.regs` is the stop
:param strand_name: (optional) which column of :attr:`~.regs` is the strand
:param sample_name: (optional) which column of :attr:`~.regs` represents the sample name
of that region. If nothing is provided, all the region will be put in a single sample.
:return: a GDataframe | [
"Creates",
"a",
"GDataframe",
"from",
"a",
"pandas",
"dataframe",
"of",
"region",
"and",
"a",
"pandas",
"dataframe",
"of",
"metadata"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L146-L167 | train | 47,404 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | check_regs | def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
:param strand_name: (optional) which column of :attr:`~.region_df` is the strand
:return: a modified pandas Dataframe
"""
if sample_name is None:
region_df.index = np.repeat(default_id_sample, len(region_df))
else:
region_df = search_column(region_df, id_sample_aliases,
id_sample_types, 'id_sample', sample_name)
region_df = region_df.set_index("id_sample", drop=True)
region_df = region_df.sort_index()
region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)
region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)
region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)
region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)
return region_df | python | def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,
strand_name=None, sample_name=None):
""" Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
:param strand_name: (optional) which column of :attr:`~.region_df` is the strand
:return: a modified pandas Dataframe
"""
if sample_name is None:
region_df.index = np.repeat(default_id_sample, len(region_df))
else:
region_df = search_column(region_df, id_sample_aliases,
id_sample_types, 'id_sample', sample_name)
region_df = region_df.set_index("id_sample", drop=True)
region_df = region_df.sort_index()
region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)
region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)
region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)
region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)
return region_df | [
"def",
"check_regs",
"(",
"region_df",
",",
"chr_name",
"=",
"None",
",",
"start_name",
"=",
"None",
",",
"stop_name",
"=",
"None",
",",
"strand_name",
"=",
"None",
",",
"sample_name",
"=",
"None",
")",
":",
"if",
"sample_name",
"is",
"None",
":",
"regio... | Modifies a region dataframe to be coherent with the GMQL data model
:param region_df: a pandas Dataframe of regions that is coherent with the GMQL data model
:param chr_name: (optional) which column of :attr:`~.region_df` is the chromosome
:param start_name: (optional) which column of :attr:`~.region_df` is the start
:param stop_name: (optional) which column of :attr:`~.region_df` is the stop
:param strand_name: (optional) which column of :attr:`~.region_df` is the strand
:return: a modified pandas Dataframe | [
"Modifies",
"a",
"region",
"dataframe",
"to",
"be",
"coherent",
"with",
"the",
"GMQL",
"data",
"model"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L170-L194 | train | 47,405 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | GDataframe.to_dataset_files | def to_dataset_files(self, local_path=None, remote_path=None):
""" Save the GDataframe to a local or remote location
:param local_path: a local path to the folder in which the data must be saved
:param remote_path: a remote dataset name that wants to be used for these data
:return: None
"""
return FrameToGMQL.to_dataset_files(self, path_local=local_path, path_remote=remote_path) | python | def to_dataset_files(self, local_path=None, remote_path=None):
""" Save the GDataframe to a local or remote location
:param local_path: a local path to the folder in which the data must be saved
:param remote_path: a remote dataset name that wants to be used for these data
:return: None
"""
return FrameToGMQL.to_dataset_files(self, path_local=local_path, path_remote=remote_path) | [
"def",
"to_dataset_files",
"(",
"self",
",",
"local_path",
"=",
"None",
",",
"remote_path",
"=",
"None",
")",
":",
"return",
"FrameToGMQL",
".",
"to_dataset_files",
"(",
"self",
",",
"path_local",
"=",
"local_path",
",",
"path_remote",
"=",
"remote_path",
")"
... | Save the GDataframe to a local or remote location
:param local_path: a local path to the folder in which the data must be saved
:param remote_path: a remote dataset name that wants to be used for these data
:return: None | [
"Save",
"the",
"GDataframe",
"to",
"a",
"local",
"or",
"remote",
"location"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L50-L57 | train | 47,406 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | GDataframe.to_GMQLDataset | def to_GMQLDataset(self, local_path=None, remote_path=None):
""" Converts the GDataframe in a GMQLDataset for later local or remote computation
:return: a GMQLDataset
"""
local = None
remote = None
if (local_path is None) and (remote_path is None):
# get a temporary path
local = TempFileManager.get_new_dataset_tmp_folder()
if local_path is not None:
local = local_path
if remote_path is not None:
remote = remote_path
self.to_dataset_files(local, remote)
if local is not None:
return Loader.load_from_path(local_path=local)
elif remote is not None:
raise NotImplementedError("The remote loading is not implemented yet!") | python | def to_GMQLDataset(self, local_path=None, remote_path=None):
""" Converts the GDataframe in a GMQLDataset for later local or remote computation
:return: a GMQLDataset
"""
local = None
remote = None
if (local_path is None) and (remote_path is None):
# get a temporary path
local = TempFileManager.get_new_dataset_tmp_folder()
if local_path is not None:
local = local_path
if remote_path is not None:
remote = remote_path
self.to_dataset_files(local, remote)
if local is not None:
return Loader.load_from_path(local_path=local)
elif remote is not None:
raise NotImplementedError("The remote loading is not implemented yet!") | [
"def",
"to_GMQLDataset",
"(",
"self",
",",
"local_path",
"=",
"None",
",",
"remote_path",
"=",
"None",
")",
":",
"local",
"=",
"None",
"remote",
"=",
"None",
"if",
"(",
"local_path",
"is",
"None",
")",
"and",
"(",
"remote_path",
"is",
"None",
")",
":",... | Converts the GDataframe in a GMQLDataset for later local or remote computation
:return: a GMQLDataset | [
"Converts",
"the",
"GDataframe",
"in",
"a",
"GMQLDataset",
"for",
"later",
"local",
"or",
"remote",
"computation"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L59-L78 | train | 47,407 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | GDataframe.project_meta | def project_meta(self, attributes):
""" Projects the specified metadata attributes to new region fields
:param attributes: a list of metadata attributes
:return: a new GDataframe with additional region fields
"""
if not isinstance(attributes, list):
raise TypeError('attributes must be a list')
meta_to_project = self.meta[attributes].applymap(lambda l: ", ".join(l))
new_regs = self.regs.merge(meta_to_project, left_index=True, right_index=True)
return GDataframe(regs=new_regs, meta=self.meta) | python | def project_meta(self, attributes):
""" Projects the specified metadata attributes to new region fields
:param attributes: a list of metadata attributes
:return: a new GDataframe with additional region fields
"""
if not isinstance(attributes, list):
raise TypeError('attributes must be a list')
meta_to_project = self.meta[attributes].applymap(lambda l: ", ".join(l))
new_regs = self.regs.merge(meta_to_project, left_index=True, right_index=True)
return GDataframe(regs=new_regs, meta=self.meta) | [
"def",
"project_meta",
"(",
"self",
",",
"attributes",
")",
":",
"if",
"not",
"isinstance",
"(",
"attributes",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'attributes must be a list'",
")",
"meta_to_project",
"=",
"self",
".",
"meta",
"[",
"attributes",... | Projects the specified metadata attributes to new region fields
:param attributes: a list of metadata attributes
:return: a new GDataframe with additional region fields | [
"Projects",
"the",
"specified",
"metadata",
"attributes",
"to",
"new",
"region",
"fields"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L85-L95 | train | 47,408 |
DEIB-GECO/PyGMQL | gmql/dataset/GDataframe.py | GDataframe.to_matrix | def to_matrix(self, index_regs=None, index_meta=None,
columns_regs=None, columns_meta=None,
values_regs=None, values_meta=None, **kwargs):
""" Transforms the GDataframe to a pivot matrix having as index and columns the
ones specified. This function is a wrapper around the pivot_table function of Pandas.
:param index_regs: list of region fields to use as index
:param index_meta: list of metadata attributes to use as index
:param columns_regs: list of region fields to use as columns
:param columns_meta: list of metadata attributes to use as columns
:param values_regs: list of region fields to use as values
:param values_meta: list of metadata attributes to use as values
:param kwargs: other parameters to pass to the pivot_table function
:return: a Pandas dataframe having as index the union of index_regs and index_meta, as
columns the union of columns_regs and columns_meta and as values ths union
of values_regs and values_meta
"""
index_regs = index_regs if index_regs is not None else []
index_meta = index_meta if index_meta is not None else []
columns_regs = columns_regs if columns_regs is not None else []
columns_meta = columns_meta if columns_meta is not None else []
values_regs = values_regs if values_regs is not None else []
values_meta = values_meta if values_meta is not None else []
index_meta_s = set(index_meta)
columns_meta_s = set(columns_meta)
values_meta_s = set(values_meta)
meta_to_project = list(index_meta_s.union(columns_meta_s)\
.union(values_meta_s)\
.difference(set(self.regs.columns)))
res = self.project_meta(meta_to_project)
pivot_columns = columns_meta + columns_regs
pivot_index = index_meta + index_regs
pivot_values = values_regs + values_meta
return res.regs.pivot_table(index=pivot_index, columns=pivot_columns, values=pivot_values, **kwargs) | python | def to_matrix(self, index_regs=None, index_meta=None,
columns_regs=None, columns_meta=None,
values_regs=None, values_meta=None, **kwargs):
""" Transforms the GDataframe to a pivot matrix having as index and columns the
ones specified. This function is a wrapper around the pivot_table function of Pandas.
:param index_regs: list of region fields to use as index
:param index_meta: list of metadata attributes to use as index
:param columns_regs: list of region fields to use as columns
:param columns_meta: list of metadata attributes to use as columns
:param values_regs: list of region fields to use as values
:param values_meta: list of metadata attributes to use as values
:param kwargs: other parameters to pass to the pivot_table function
:return: a Pandas dataframe having as index the union of index_regs and index_meta, as
columns the union of columns_regs and columns_meta and as values ths union
of values_regs and values_meta
"""
index_regs = index_regs if index_regs is not None else []
index_meta = index_meta if index_meta is not None else []
columns_regs = columns_regs if columns_regs is not None else []
columns_meta = columns_meta if columns_meta is not None else []
values_regs = values_regs if values_regs is not None else []
values_meta = values_meta if values_meta is not None else []
index_meta_s = set(index_meta)
columns_meta_s = set(columns_meta)
values_meta_s = set(values_meta)
meta_to_project = list(index_meta_s.union(columns_meta_s)\
.union(values_meta_s)\
.difference(set(self.regs.columns)))
res = self.project_meta(meta_to_project)
pivot_columns = columns_meta + columns_regs
pivot_index = index_meta + index_regs
pivot_values = values_regs + values_meta
return res.regs.pivot_table(index=pivot_index, columns=pivot_columns, values=pivot_values, **kwargs) | [
"def",
"to_matrix",
"(",
"self",
",",
"index_regs",
"=",
"None",
",",
"index_meta",
"=",
"None",
",",
"columns_regs",
"=",
"None",
",",
"columns_meta",
"=",
"None",
",",
"values_regs",
"=",
"None",
",",
"values_meta",
"=",
"None",
",",
"*",
"*",
"kwargs"... | Transforms the GDataframe to a pivot matrix having as index and columns the
ones specified. This function is a wrapper around the pivot_table function of Pandas.
:param index_regs: list of region fields to use as index
:param index_meta: list of metadata attributes to use as index
:param columns_regs: list of region fields to use as columns
:param columns_meta: list of metadata attributes to use as columns
:param values_regs: list of region fields to use as values
:param values_meta: list of metadata attributes to use as values
:param kwargs: other parameters to pass to the pivot_table function
:return: a Pandas dataframe having as index the union of index_regs and index_meta, as
columns the union of columns_regs and columns_meta and as values ths union
of values_regs and values_meta | [
"Transforms",
"the",
"GDataframe",
"to",
"a",
"pivot",
"matrix",
"having",
"as",
"index",
"and",
"columns",
"the",
"ones",
"specified",
".",
"This",
"function",
"is",
"a",
"wrapper",
"around",
"the",
"pivot_table",
"function",
"of",
"Pandas",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GDataframe.py#L97-L134 | train | 47,409 |
huffpostdata/python-pollster | pollster/api.py | Api.charts_slug_get | def charts_slug_get(self, slug, **kwargs):
"""
Chart
A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.charts_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique identifier for a Chart (required)
:return: Chart
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.charts_slug_get_with_http_info(slug, **kwargs)
else:
(data) = self.charts_slug_get_with_http_info(slug, **kwargs)
return data | python | def charts_slug_get(self, slug, **kwargs):
"""
Chart
A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.charts_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique identifier for a Chart (required)
:return: Chart
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.charts_slug_get_with_http_info(slug, **kwargs)
else:
(data) = self.charts_slug_get_with_http_info(slug, **kwargs)
return data | [
"def",
"charts_slug_get",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"charts_slug_get_with_http_inf... | Chart
A Chart is chosen by Pollster editors. One example is \"Obama job approval - Democrats\". It is always based upon a single Question. Users should strongly consider basing their analysis on Questions instead. Charts are derived data; Pollster editors publish them and change them as editorial priorities change.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.charts_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique identifier for a Chart (required)
:return: Chart
If the method is called asynchronously,
returns the request thread. | [
"Chart",
"A",
"Chart",
"is",
"chosen",
"by",
"Pollster",
"editors",
".",
"One",
"example",
"is",
"\\",
"Obama",
"job",
"approval",
"-",
"Democrats",
"\\",
".",
"It",
"is",
"always",
"based",
"upon",
"a",
"single",
"Question",
".",
"Users",
"should",
"str... | 276de8d66a92577b1143fd92a70cff9c35a1dfcf | https://github.com/huffpostdata/python-pollster/blob/276de8d66a92577b1143fd92a70cff9c35a1dfcf/pollster/api.py#L140-L165 | train | 47,410 |
huffpostdata/python-pollster | pollster/api.py | Api.polls_get | def polls_get(self, **kwargs):
"""
Polls
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database. The response will contain a maximum of 25 Poll objects, even if the database contains more than 25 polls. Use the `next_cursor` parameter to fetch the rest, 25 Polls at a time.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Special string to index into the Array
:param str tags: Comma-separated list of Question tag names; only Polls containing Questions with any of the given tags will be returned.
:param str question: Question slug; only Polls that ask that Question will be returned.
:param str sort: If `updated_at`, sort the most recently updated Poll first. (This can cause race conditions when used with `cursor`.) Otherwise, sort by most recently _entered_ Poll first.
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.polls_get_with_http_info(**kwargs)
else:
(data) = self.polls_get_with_http_info(**kwargs)
return data | python | def polls_get(self, **kwargs):
"""
Polls
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database. The response will contain a maximum of 25 Poll objects, even if the database contains more than 25 polls. Use the `next_cursor` parameter to fetch the rest, 25 Polls at a time.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Special string to index into the Array
:param str tags: Comma-separated list of Question tag names; only Polls containing Questions with any of the given tags will be returned.
:param str question: Question slug; only Polls that ask that Question will be returned.
:param str sort: If `updated_at`, sort the most recently updated Poll first. (This can cause race conditions when used with `cursor`.) Otherwise, sort by most recently _entered_ Poll first.
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.polls_get_with_http_info(**kwargs)
else:
(data) = self.polls_get_with_http_info(**kwargs)
return data | [
"def",
"polls_get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"polls_get_with_http_info",
"(",
"*",
"*",
"k... | Polls
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database. The response will contain a maximum of 25 Poll objects, even if the database contains more than 25 polls. Use the `next_cursor` parameter to fetch the rest, 25 Polls at a time.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str cursor: Special string to index into the Array
:param str tags: Comma-separated list of Question tag names; only Polls containing Questions with any of the given tags will be returned.
:param str question: Question slug; only Polls that ask that Question will be returned.
:param str sort: If `updated_at`, sort the most recently updated Poll first. (This can cause race conditions when used with `cursor`.) Otherwise, sort by most recently _entered_ Poll first.
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread. | [
"Polls",
"A",
"Poll",
"on",
"Pollster",
"is",
"a",
"collection",
"of",
"questions",
"and",
"responses",
"published",
"by",
"a",
"reputable",
"survey",
"house",
".",
"This",
"endpoint",
"provides",
"raw",
"data",
"from",
"the",
"survey",
"house",
"plus",
"Pol... | 276de8d66a92577b1143fd92a70cff9c35a1dfcf | https://github.com/huffpostdata/python-pollster/blob/276de8d66a92577b1143fd92a70cff9c35a1dfcf/pollster/api.py#L500-L528 | train | 47,411 |
huffpostdata/python-pollster | pollster/api.py | Api.polls_slug_get | def polls_slug_get(self, slug, **kwargs):
"""
Poll
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique Poll identifier. For example: `gallup-26892`. (required)
:return: Poll
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.polls_slug_get_with_http_info(slug, **kwargs)
else:
(data) = self.polls_slug_get_with_http_info(slug, **kwargs)
return data | python | def polls_slug_get(self, slug, **kwargs):
"""
Poll
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique Poll identifier. For example: `gallup-26892`. (required)
:return: Poll
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.polls_slug_get_with_http_info(slug, **kwargs)
else:
(data) = self.polls_slug_get_with_http_info(slug, **kwargs)
return data | [
"def",
"polls_slug_get",
"(",
"self",
",",
"slug",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"polls_slug_get_with_http_info"... | Poll
A Poll on Pollster is a collection of questions and responses published by a reputable survey house. This endpoint provides raw data from the survey house, plus Pollster-provided metadata about each question. Pollster editors don't include every question when they enter Polls, and they don't necessarily enter every subpopulation for the responses they _do_ enter. They make editorial decisions about which questions belong in the database.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.polls_slug_get(slug, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str slug: Unique Poll identifier. For example: `gallup-26892`. (required)
:return: Poll
If the method is called asynchronously,
returns the request thread. | [
"Poll",
"A",
"Poll",
"on",
"Pollster",
"is",
"a",
"collection",
"of",
"questions",
"and",
"responses",
"published",
"by",
"a",
"reputable",
"survey",
"house",
".",
"This",
"endpoint",
"provides",
"raw",
"data",
"from",
"the",
"survey",
"house",
"plus",
"Poll... | 276de8d66a92577b1143fd92a70cff9c35a1dfcf | https://github.com/huffpostdata/python-pollster/blob/276de8d66a92577b1143fd92a70cff9c35a1dfcf/pollster/api.py#L621-L646 | train | 47,412 |
PragmaticMates/django-flatpages-i18n | flatpages_i18n/views.py | flatpage | def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or `flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
language = request.LANGUAGE_CODE
language_prefix = '/%s' % language
language_db_field = language.replace('-', '_')
if url.startswith(language_prefix):
url = url[len(language_prefix):]
kwargs = {
'{0}__{1}'.format('url_%s' % language_db_field, 'exact'): url,
'{0}__{1}'.format('sites__id', 'exact'): settings.SITE_ID
}
try:
f = get_object_or_404(FlatPage_i18n, **kwargs)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage_i18n, **kwargs)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f) | python | def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or `flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
language = request.LANGUAGE_CODE
language_prefix = '/%s' % language
language_db_field = language.replace('-', '_')
if url.startswith(language_prefix):
url = url[len(language_prefix):]
kwargs = {
'{0}__{1}'.format('url_%s' % language_db_field, 'exact'): url,
'{0}__{1}'.format('sites__id', 'exact'): settings.SITE_ID
}
try:
f = get_object_or_404(FlatPage_i18n, **kwargs)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage_i18n, **kwargs)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f) | [
"def",
"flatpage",
"(",
"request",
",",
"url",
")",
":",
"if",
"not",
"url",
".",
"startswith",
"(",
"'/'",
")",
":",
"url",
"=",
"'/'",
"+",
"url",
"language",
"=",
"request",
".",
"LANGUAGE_CODE",
"language_prefix",
"=",
"'/%s'",
"%",
"language",
"la... | Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or `flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object | [
"Public",
"interface",
"to",
"the",
"flat",
"page",
"view",
"."
] | 2d3ed45c14fb0c7fd6ff5263c84f501c6a0c3e9a | https://github.com/PragmaticMates/django-flatpages-i18n/blob/2d3ed45c14fb0c7fd6ff5263c84f501c6a0c3e9a/flatpages_i18n/views.py#L28-L65 | train | 47,413 |
PragmaticMates/django-flatpages-i18n | flatpages_i18n/views.py | render_flatpage | def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(t.render({
'flatpage': f
}, request))
try:
from django.core.xheaders import populate_xheaders
populate_xheaders(request, response, FlatPage_i18n, f.id)
except ImportError:
pass
return response | python | def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(t.render({
'flatpage': f
}, request))
try:
from django.core.xheaders import populate_xheaders
populate_xheaders(request, response, FlatPage_i18n, f.id)
except ImportError:
pass
return response | [
"def",
"render_flatpage",
"(",
"request",
",",
"f",
")",
":",
"# If registration is required for accessing this page, and the user isn't",
"# logged in, redirect to the login page.",
"if",
"f",
".",
"registration_required",
"and",
"not",
"request",
".",
"user",
".",
"is_authe... | Internal interface to the flat page view. | [
"Internal",
"interface",
"to",
"the",
"flat",
"page",
"view",
"."
] | 2d3ed45c14fb0c7fd6ff5263c84f501c6a0c3e9a | https://github.com/PragmaticMates/django-flatpages-i18n/blob/2d3ed45c14fb0c7fd6ff5263c84f501c6a0c3e9a/flatpages_i18n/views.py#L69-L98 | train | 47,414 |
kevinconway/daemons | daemons/interfaces/pid.py | PidManager.pidfile | def pidfile(self):
"""Get the absolute path of the pidfile."""
return os.path.abspath(
os.path.expandvars(
os.path.expanduser(
self._pidfile,
),
),
) | python | def pidfile(self):
"""Get the absolute path of the pidfile."""
return os.path.abspath(
os.path.expandvars(
os.path.expanduser(
self._pidfile,
),
),
) | [
"def",
"pidfile",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"self",
".",
"_pidfile",
",",
")",
",",
")",
",",
")"
] | Get the absolute path of the pidfile. | [
"Get",
"the",
"absolute",
"path",
"of",
"the",
"pidfile",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/interfaces/pid.py#L25-L33 | train | 47,415 |
DEIB-GECO/PyGMQL | gmql/dataset/DataStructures/ExpressionNodes.py | SQRT | def SQRT(argument):
""" Computes the square matrix of the argument
:param argument: a dataset region field (dataset.field) or metadata (dataset['field'])
"""
if isinstance(argument, MetaField):
return argument._unary_expression("SQRT")
elif isinstance(argument, RegField):
return argument._unary_expression("SQRT")
else:
raise TypeError("You have to give as input a RegField (dataset.field)"
"or a MetaField (dataset['field']") | python | def SQRT(argument):
""" Computes the square matrix of the argument
:param argument: a dataset region field (dataset.field) or metadata (dataset['field'])
"""
if isinstance(argument, MetaField):
return argument._unary_expression("SQRT")
elif isinstance(argument, RegField):
return argument._unary_expression("SQRT")
else:
raise TypeError("You have to give as input a RegField (dataset.field)"
"or a MetaField (dataset['field']") | [
"def",
"SQRT",
"(",
"argument",
")",
":",
"if",
"isinstance",
"(",
"argument",
",",
"MetaField",
")",
":",
"return",
"argument",
".",
"_unary_expression",
"(",
"\"SQRT\"",
")",
"elif",
"isinstance",
"(",
"argument",
",",
"RegField",
")",
":",
"return",
"ar... | Computes the square matrix of the argument
:param argument: a dataset region field (dataset.field) or metadata (dataset['field']) | [
"Computes",
"the",
"square",
"matrix",
"of",
"the",
"argument"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/DataStructures/ExpressionNodes.py#L5-L16 | train | 47,416 |
DEIB-GECO/PyGMQL | gmql/managers.py | login | def login():
""" Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest.
"""
from .RemoteConnection.RemoteManager import RemoteManager
global __remote_manager, __session_manager
logger = logging.getLogger()
remote_address = get_remote_address()
res = __session_manager.get_session(remote_address)
if res is None:
# there is no session for this address, let's login as guest
warnings.warn("There is no active session for address {}. Logging as Guest user".format(remote_address))
rm = RemoteManager(address=remote_address)
rm.login()
session_type = "guest"
else:
# there is a previous session for this address, let's do an auto login
# using that access token
logger.info("Logging using stored authentication token")
rm = RemoteManager(address=remote_address, auth_token=res[1])
# if the access token is not valid anymore (therefore we are in guest mode)
# the auto_login function will perform a guest login from scratch
session_type = rm.auto_login(how=res[2])
# store the new session
__remote_manager = rm
access_time = int(time.time())
auth_token = rm.auth_token
__session_manager.add_session(remote_address, auth_token, access_time, session_type) | python | def login():
""" Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest.
"""
from .RemoteConnection.RemoteManager import RemoteManager
global __remote_manager, __session_manager
logger = logging.getLogger()
remote_address = get_remote_address()
res = __session_manager.get_session(remote_address)
if res is None:
# there is no session for this address, let's login as guest
warnings.warn("There is no active session for address {}. Logging as Guest user".format(remote_address))
rm = RemoteManager(address=remote_address)
rm.login()
session_type = "guest"
else:
# there is a previous session for this address, let's do an auto login
# using that access token
logger.info("Logging using stored authentication token")
rm = RemoteManager(address=remote_address, auth_token=res[1])
# if the access token is not valid anymore (therefore we are in guest mode)
# the auto_login function will perform a guest login from scratch
session_type = rm.auto_login(how=res[2])
# store the new session
__remote_manager = rm
access_time = int(time.time())
auth_token = rm.auth_token
__session_manager.add_session(remote_address, auth_token, access_time, session_type) | [
"def",
"login",
"(",
")",
":",
"from",
".",
"RemoteConnection",
".",
"RemoteManager",
"import",
"RemoteManager",
"global",
"__remote_manager",
",",
"__session_manager",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"remote_address",
"=",
"get_remote_address"... | Enables the user to login to the remote GMQL service.
If both username and password are None, the user will be connected as guest. | [
"Enables",
"the",
"user",
"to",
"login",
"to",
"the",
"remote",
"GMQL",
"service",
".",
"If",
"both",
"username",
"and",
"password",
"are",
"None",
"the",
"user",
"will",
"be",
"connected",
"as",
"guest",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/managers.py#L205-L233 | train | 47,417 |
kevinconway/daemons | samples/wrapper.py | main | def main(idle):
"""Any normal python logic which runs a loop. Can take arguments."""
while True:
LOG.debug("Sleeping for {0} seconds.".format(idle))
time.sleep(idle) | python | def main(idle):
"""Any normal python logic which runs a loop. Can take arguments."""
while True:
LOG.debug("Sleeping for {0} seconds.".format(idle))
time.sleep(idle) | [
"def",
"main",
"(",
"idle",
")",
":",
"while",
"True",
":",
"LOG",
".",
"debug",
"(",
"\"Sleeping for {0} seconds.\"",
".",
"format",
"(",
"idle",
")",
")",
"time",
".",
"sleep",
"(",
"idle",
")"
] | Any normal python logic which runs a loop. Can take arguments. | [
"Any",
"normal",
"python",
"logic",
"which",
"runs",
"a",
"loop",
".",
"Can",
"take",
"arguments",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/samples/wrapper.py#L28-L33 | train | 47,418 |
kevinconway/daemons | daemons/interfaces/message.py | MessageManager.step | def step(self):
"""Grab a new message and dispatch it to the handler.
This method should not be extended or overwritten. Instead,
implementations of this daemon should implement the 'get_message()'
and 'handle_message()' methods.
"""
message = self.get_message()
if message is None:
self.sleep(self.idle_time)
return None
self.dispatch(message)
# In non-greenthread environments this does nothing. In green-thread
# environments this yields the context so messages can be acted upon
# before exhausting the threadpool.
self.sleep(0) | python | def step(self):
"""Grab a new message and dispatch it to the handler.
This method should not be extended or overwritten. Instead,
implementations of this daemon should implement the 'get_message()'
and 'handle_message()' methods.
"""
message = self.get_message()
if message is None:
self.sleep(self.idle_time)
return None
self.dispatch(message)
# In non-greenthread environments this does nothing. In green-thread
# environments this yields the context so messages can be acted upon
# before exhausting the threadpool.
self.sleep(0) | [
"def",
"step",
"(",
"self",
")",
":",
"message",
"=",
"self",
".",
"get_message",
"(",
")",
"if",
"message",
"is",
"None",
":",
"self",
".",
"sleep",
"(",
"self",
".",
"idle_time",
")",
"return",
"None",
"self",
".",
"dispatch",
"(",
"message",
")",
... | Grab a new message and dispatch it to the handler.
This method should not be extended or overwritten. Instead,
implementations of this daemon should implement the 'get_message()'
and 'handle_message()' methods. | [
"Grab",
"a",
"new",
"message",
"and",
"dispatch",
"it",
"to",
"the",
"handler",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/interfaces/message.py#L35-L52 | train | 47,419 |
VeryCB/flask-slack | flask_slack/slack.py | Slack.init_app | def init_app(self, app=None):
"""Initialize application configuration"""
config = getattr(app, 'config', app)
self.team_id = config.get('TEAM_ID') | python | def init_app(self, app=None):
"""Initialize application configuration"""
config = getattr(app, 'config', app)
self.team_id = config.get('TEAM_ID') | [
"def",
"init_app",
"(",
"self",
",",
"app",
"=",
"None",
")",
":",
"config",
"=",
"getattr",
"(",
"app",
",",
"'config'",
",",
"app",
")",
"self",
".",
"team_id",
"=",
"config",
".",
"get",
"(",
"'TEAM_ID'",
")"
] | Initialize application configuration | [
"Initialize",
"application",
"configuration"
] | ec7e08e6603f0d2d06cfbaff6699df02ee507077 | https://github.com/VeryCB/flask-slack/blob/ec7e08e6603f0d2d06cfbaff6699df02ee507077/flask_slack/slack.py#L15-L19 | train | 47,420 |
VeryCB/flask-slack | flask_slack/slack.py | Slack.validate | def validate(self, command, token, team_id, method):
"""Validate request queries with registerd commands
:param command: command parameter from request
:param token: token parameter from request
:param team_id: team_id parameter from request
:param method: the request method
"""
if (team_id, command) not in self._commands:
raise SlackError('Command {0} is not found in team {1}'.format(
command, team_id))
func, _token, methods, kwargs = self._commands[(team_id, command)]
if method not in methods:
raise SlackError('{} request is not allowed'.format(method))
if token != _token:
raise SlackError('Your token {} is invalid'.format(token)) | python | def validate(self, command, token, team_id, method):
"""Validate request queries with registerd commands
:param command: command parameter from request
:param token: token parameter from request
:param team_id: team_id parameter from request
:param method: the request method
"""
if (team_id, command) not in self._commands:
raise SlackError('Command {0} is not found in team {1}'.format(
command, team_id))
func, _token, methods, kwargs = self._commands[(team_id, command)]
if method not in methods:
raise SlackError('{} request is not allowed'.format(method))
if token != _token:
raise SlackError('Your token {} is invalid'.format(token)) | [
"def",
"validate",
"(",
"self",
",",
"command",
",",
"token",
",",
"team_id",
",",
"method",
")",
":",
"if",
"(",
"team_id",
",",
"command",
")",
"not",
"in",
"self",
".",
"_commands",
":",
"raise",
"SlackError",
"(",
"'Command {0} is not found in team {1}'"... | Validate request queries with registerd commands
:param command: command parameter from request
:param token: token parameter from request
:param team_id: team_id parameter from request
:param method: the request method | [
"Validate",
"request",
"queries",
"with",
"registerd",
"commands"
] | ec7e08e6603f0d2d06cfbaff6699df02ee507077 | https://github.com/VeryCB/flask-slack/blob/ec7e08e6603f0d2d06cfbaff6699df02ee507077/flask_slack/slack.py#L85-L103 | train | 47,421 |
VeryCB/flask-slack | flask_slack/slack.py | Slack.response | def response(self, text, response_type='ephemeral', attachments=None):
"""Return a response with json format
:param text: the text returned to the client
:param response_type: optional. When `in_channel` is assigned,
both the response message and the initial
message typed by the user will be shared
in the channel.
When `ephemeral` is assigned, the response
message will be visible only to the user
that issued the command.
:param attachments: optional. A list of additional messages
for rich response.
"""
from flask import jsonify
if attachments is None:
attachments = []
data = {
'response_type': response_type,
'text': text,
'attachments': attachments,
}
return jsonify(**data) | python | def response(self, text, response_type='ephemeral', attachments=None):
"""Return a response with json format
:param text: the text returned to the client
:param response_type: optional. When `in_channel` is assigned,
both the response message and the initial
message typed by the user will be shared
in the channel.
When `ephemeral` is assigned, the response
message will be visible only to the user
that issued the command.
:param attachments: optional. A list of additional messages
for rich response.
"""
from flask import jsonify
if attachments is None:
attachments = []
data = {
'response_type': response_type,
'text': text,
'attachments': attachments,
}
return jsonify(**data) | [
"def",
"response",
"(",
"self",
",",
"text",
",",
"response_type",
"=",
"'ephemeral'",
",",
"attachments",
"=",
"None",
")",
":",
"from",
"flask",
"import",
"jsonify",
"if",
"attachments",
"is",
"None",
":",
"attachments",
"=",
"[",
"]",
"data",
"=",
"{"... | Return a response with json format
:param text: the text returned to the client
:param response_type: optional. When `in_channel` is assigned,
both the response message and the initial
message typed by the user will be shared
in the channel.
When `ephemeral` is assigned, the response
message will be visible only to the user
that issued the command.
:param attachments: optional. A list of additional messages
for rich response. | [
"Return",
"a",
"response",
"with",
"json",
"format"
] | ec7e08e6603f0d2d06cfbaff6699df02ee507077 | https://github.com/VeryCB/flask-slack/blob/ec7e08e6603f0d2d06cfbaff6699df02ee507077/flask_slack/slack.py#L105-L128 | train | 47,422 |
kevinconway/daemons | daemons/pid/simple.py | SimplePidManager.pid | def pid(self):
"""Get the pid which represents a daemonized process.
The result should be None if the process is not running.
"""
try:
with open(self.pidfile, 'r') as pidfile:
try:
pid = int(pidfile.read().strip())
except ValueError:
return None
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.EPERM:
return pid
elif e.errno == errno.ESRCH:
return None
LOG.exception(
"os.kill returned unhandled error "
"{0}".format(e.strerror)
)
sys.exit(exit.PIDFILE_ERROR)
return pid
except IOError:
if not os.path.isfile(self.pidfile):
return None
LOG.exception("Failed to read pidfile {0}.".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | python | def pid(self):
"""Get the pid which represents a daemonized process.
The result should be None if the process is not running.
"""
try:
with open(self.pidfile, 'r') as pidfile:
try:
pid = int(pidfile.read().strip())
except ValueError:
return None
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.EPERM:
return pid
elif e.errno == errno.ESRCH:
return None
LOG.exception(
"os.kill returned unhandled error "
"{0}".format(e.strerror)
)
sys.exit(exit.PIDFILE_ERROR)
return pid
except IOError:
if not os.path.isfile(self.pidfile):
return None
LOG.exception("Failed to read pidfile {0}.".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | [
"def",
"pid",
"(",
"self",
")",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"pidfile",
",",
"'r'",
")",
"as",
"pidfile",
":",
"try",
":",
"pid",
"=",
"int",
"(",
"pidfile",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
"except",
... | Get the pid which represents a daemonized process.
The result should be None if the process is not running. | [
"Get",
"the",
"pid",
"which",
"represents",
"a",
"daemonized",
"process",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/pid/simple.py#L25-L71 | train | 47,423 |
kevinconway/daemons | daemons/pid/simple.py | SimplePidManager.pid | def pid(self, pidnum):
"""Set the pid for a running process."""
try:
with open(self.pidfile, "w+") as pidfile:
pidfile.write("{0}\n".format(pidnum))
except IOError:
LOG.exception("Failed to write pidfile {0}).".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | python | def pid(self, pidnum):
"""Set the pid for a running process."""
try:
with open(self.pidfile, "w+") as pidfile:
pidfile.write("{0}\n".format(pidnum))
except IOError:
LOG.exception("Failed to write pidfile {0}).".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | [
"def",
"pid",
"(",
"self",
",",
"pidnum",
")",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"pidfile",
",",
"\"w+\"",
")",
"as",
"pidfile",
":",
"pidfile",
".",
"write",
"(",
"\"{0}\\n\"",
".",
"format",
"(",
"pidnum",
")",
")",
"except",
"IO... | Set the pid for a running process. | [
"Set",
"the",
"pid",
"for",
"a",
"running",
"process",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/pid/simple.py#L74-L85 | train | 47,424 |
kevinconway/daemons | daemons/pid/simple.py | SimplePidManager.pid | def pid(self):
"""Stop managing the current pid."""
try:
os.remove(self.pidfile)
except IOError:
if not os.path.isfile(self.pidfile):
return None
LOG.exception("Failed to clear pidfile {0}).".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | python | def pid(self):
"""Stop managing the current pid."""
try:
os.remove(self.pidfile)
except IOError:
if not os.path.isfile(self.pidfile):
return None
LOG.exception("Failed to clear pidfile {0}).".format(self.pidfile))
sys.exit(exit.PIDFILE_INACCESSIBLE) | [
"def",
"pid",
"(",
"self",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"self",
".",
"pidfile",
")",
"except",
"IOError",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"pidfile",
")",
":",
"return",
"None",
"LOG",
".",
... | Stop managing the current pid. | [
"Stop",
"managing",
"the",
"current",
"pid",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/pid/simple.py#L88-L101 | train | 47,425 |
kevinconway/daemons | daemons/message/gevent.py | GeventMessageManager.pool | def pool(self):
"""Get an gevent pool used to dispatch requests."""
self._pool = self._pool or gevent.pool.Pool(size=self.pool_size)
return self._pool | python | def pool(self):
"""Get an gevent pool used to dispatch requests."""
self._pool = self._pool or gevent.pool.Pool(size=self.pool_size)
return self._pool | [
"def",
"pool",
"(",
"self",
")",
":",
"self",
".",
"_pool",
"=",
"self",
".",
"_pool",
"or",
"gevent",
".",
"pool",
".",
"Pool",
"(",
"size",
"=",
"self",
".",
"pool_size",
")",
"return",
"self",
".",
"_pool"
] | Get an gevent pool used to dispatch requests. | [
"Get",
"an",
"gevent",
"pool",
"used",
"to",
"dispatch",
"requests",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/message/gevent.py#L18-L21 | train | 47,426 |
nathan-hoad/python-iwlib | iwlib/iwconfig.py | set_essid | def set_essid(interface, essid):
"""
Set the ESSID of a given interface
Arguments:
interface - device to work on (e.g. eth1, wlan0).
essid - ESSID to set. Must be no longer than IW_ESSID_MAX_SIZE (typically 32 characters).
"""
interface = _get_bytes(interface)
essid = _get_bytes(essid)
wrq = ffi.new('struct iwreq*')
with iwlib_socket() as sock:
if essid.lower() in (b'off', b'any'):
wrq.u.essid.flags = 0
essid = b''
elif essid.lower() == b'on':
buf = ffi.new('char []', iwlib.IW_ESSID_MAX_SIZE+1)
wrq.u.essid.pointer = buf
wrq.u.essid.length = iwlib.IW_ESSID_MAX_SIZE + 1
wrq.u.essid.flags = 0
if iwlib.iw_get_ext(sock, interface, iwlib.SIOCGIWESSID, wrq) < 0:
raise ValueError("Error retrieving previous ESSID: %s" % (os.strerror(ffi.errno)))
wrq.u.essid.flags = 1
elif len(essid) > iwlib.IW_ESSID_MAX_SIZE:
raise ValueError("ESSID '%s' is longer than the maximum %d" % (essid, iwlib.IW_ESSID_MAX_SIZE))
else:
wrq.u.essid.pointer = ffi.new_handle(essid)
wrq.u.essid.length = len(essid)
wrq.u.essid.flags = 1
if iwlib.iw_get_kernel_we_version() < 21:
wrq.u.essid.length += 1
if iwlib.iw_set_ext(sock, interface, iwlib.SIOCSIWESSID, wrq) < 0:
errno = ffi.errno
strerror = "Couldn't set essid on device '%s': %s" % (interface.decode('utf8'), os.strerror(errno))
raise OSError(errno, strerror) | python | def set_essid(interface, essid):
"""
Set the ESSID of a given interface
Arguments:
interface - device to work on (e.g. eth1, wlan0).
essid - ESSID to set. Must be no longer than IW_ESSID_MAX_SIZE (typically 32 characters).
"""
interface = _get_bytes(interface)
essid = _get_bytes(essid)
wrq = ffi.new('struct iwreq*')
with iwlib_socket() as sock:
if essid.lower() in (b'off', b'any'):
wrq.u.essid.flags = 0
essid = b''
elif essid.lower() == b'on':
buf = ffi.new('char []', iwlib.IW_ESSID_MAX_SIZE+1)
wrq.u.essid.pointer = buf
wrq.u.essid.length = iwlib.IW_ESSID_MAX_SIZE + 1
wrq.u.essid.flags = 0
if iwlib.iw_get_ext(sock, interface, iwlib.SIOCGIWESSID, wrq) < 0:
raise ValueError("Error retrieving previous ESSID: %s" % (os.strerror(ffi.errno)))
wrq.u.essid.flags = 1
elif len(essid) > iwlib.IW_ESSID_MAX_SIZE:
raise ValueError("ESSID '%s' is longer than the maximum %d" % (essid, iwlib.IW_ESSID_MAX_SIZE))
else:
wrq.u.essid.pointer = ffi.new_handle(essid)
wrq.u.essid.length = len(essid)
wrq.u.essid.flags = 1
if iwlib.iw_get_kernel_we_version() < 21:
wrq.u.essid.length += 1
if iwlib.iw_set_ext(sock, interface, iwlib.SIOCSIWESSID, wrq) < 0:
errno = ffi.errno
strerror = "Couldn't set essid on device '%s': %s" % (interface.decode('utf8'), os.strerror(errno))
raise OSError(errno, strerror) | [
"def",
"set_essid",
"(",
"interface",
",",
"essid",
")",
":",
"interface",
"=",
"_get_bytes",
"(",
"interface",
")",
"essid",
"=",
"_get_bytes",
"(",
"essid",
")",
"wrq",
"=",
"ffi",
".",
"new",
"(",
"'struct iwreq*'",
")",
"with",
"iwlib_socket",
"(",
"... | Set the ESSID of a given interface
Arguments:
interface - device to work on (e.g. eth1, wlan0).
essid - ESSID to set. Must be no longer than IW_ESSID_MAX_SIZE (typically 32 characters). | [
"Set",
"the",
"ESSID",
"of",
"a",
"given",
"interface"
] | f7604de0a27709fca139c4bada58263bdce4f08e | https://github.com/nathan-hoad/python-iwlib/blob/f7604de0a27709fca139c4bada58263bdce4f08e/iwlib/iwconfig.py#L126-L165 | train | 47,427 |
dhilipsiva/garuda | garuda/management/commands/garuda.py | ensure_data | def ensure_data():
'''
Ensure that the Garuda directory and files
'''
if not os.path.exists(GARUDA_DIR):
os.makedirs(GARUDA_DIR)
Path(f'{GARUDA_DIR}/__init__.py').touch() | python | def ensure_data():
'''
Ensure that the Garuda directory and files
'''
if not os.path.exists(GARUDA_DIR):
os.makedirs(GARUDA_DIR)
Path(f'{GARUDA_DIR}/__init__.py').touch() | [
"def",
"ensure_data",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"GARUDA_DIR",
")",
":",
"os",
".",
"makedirs",
"(",
"GARUDA_DIR",
")",
"Path",
"(",
"f'{GARUDA_DIR}/__init__.py'",
")",
".",
"touch",
"(",
")"
] | Ensure that the Garuda directory and files | [
"Ensure",
"that",
"the",
"Garuda",
"directory",
"and",
"files"
] | b8188f5d6141be9f3f9e3fddd0494143c2066184 | https://github.com/dhilipsiva/garuda/blob/b8188f5d6141be9f3f9e3fddd0494143c2066184/garuda/management/commands/garuda.py#L27-L33 | train | 47,428 |
dhilipsiva/garuda | garuda/management/commands/garuda.py | protoc_arguments | def protoc_arguments():
'''
Construct protobuf compiler arguments
'''
proto_include = resource_filename('grpc_tools', '_proto')
return [
protoc.__file__, '-I', GARUDA_DIR, f'--python_out={GARUDA_DIR}',
f'--grpc_python_out={GARUDA_DIR}', GARUDA_PROTO_PATH,
f'-I{proto_include}'] | python | def protoc_arguments():
'''
Construct protobuf compiler arguments
'''
proto_include = resource_filename('grpc_tools', '_proto')
return [
protoc.__file__, '-I', GARUDA_DIR, f'--python_out={GARUDA_DIR}',
f'--grpc_python_out={GARUDA_DIR}', GARUDA_PROTO_PATH,
f'-I{proto_include}'] | [
"def",
"protoc_arguments",
"(",
")",
":",
"proto_include",
"=",
"resource_filename",
"(",
"'grpc_tools'",
",",
"'_proto'",
")",
"return",
"[",
"protoc",
".",
"__file__",
",",
"'-I'",
",",
"GARUDA_DIR",
",",
"f'--python_out={GARUDA_DIR}'",
",",
"f'--grpc_python_out={... | Construct protobuf compiler arguments | [
"Construct",
"protobuf",
"compiler",
"arguments"
] | b8188f5d6141be9f3f9e3fddd0494143c2066184 | https://github.com/dhilipsiva/garuda/blob/b8188f5d6141be9f3f9e3fddd0494143c2066184/garuda/management/commands/garuda.py#L270-L278 | train | 47,429 |
dhilipsiva/garuda | garuda/management/commands/garuda.py | fix_grpc_import | def fix_grpc_import():
'''
Snippet to fix the gRPC import path
'''
with open(GARUDA_GRPC_PATH, 'r') as f:
filedata = f.read()
filedata = filedata.replace(
'import garuda_pb2 as garuda__pb2',
f'import {GARUDA_DIR}.garuda_pb2 as garuda__pb2')
with open(GARUDA_GRPC_PATH, 'w') as f:
f.write(filedata) | python | def fix_grpc_import():
'''
Snippet to fix the gRPC import path
'''
with open(GARUDA_GRPC_PATH, 'r') as f:
filedata = f.read()
filedata = filedata.replace(
'import garuda_pb2 as garuda__pb2',
f'import {GARUDA_DIR}.garuda_pb2 as garuda__pb2')
with open(GARUDA_GRPC_PATH, 'w') as f:
f.write(filedata) | [
"def",
"fix_grpc_import",
"(",
")",
":",
"with",
"open",
"(",
"GARUDA_GRPC_PATH",
",",
"'r'",
")",
"as",
"f",
":",
"filedata",
"=",
"f",
".",
"read",
"(",
")",
"filedata",
"=",
"filedata",
".",
"replace",
"(",
"'import garuda_pb2 as garuda__pb2'",
",",
"f'... | Snippet to fix the gRPC import path | [
"Snippet",
"to",
"fix",
"the",
"gRPC",
"import",
"path"
] | b8188f5d6141be9f3f9e3fddd0494143c2066184 | https://github.com/dhilipsiva/garuda/blob/b8188f5d6141be9f3f9e3fddd0494143c2066184/garuda/management/commands/garuda.py#L281-L291 | train | 47,430 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/datautil/score_rw_util.py | write_average_score_row | def write_average_score_row(fp, score_name, scores):
"""
Simple utility function that writes an average score row in a file designated by a file pointer.
Inputs: - fp: A file pointer.
- score_name: What it says on the tin.
- scores: An array of average score values corresponding to each of the training set percentages.
"""
row = "--" + score_name + "--"
fp.write(row)
for vector in scores:
row = list(vector)
row = [str(score) for score in row]
row = "\n" + "\t".join(row)
fp.write(row) | python | def write_average_score_row(fp, score_name, scores):
"""
Simple utility function that writes an average score row in a file designated by a file pointer.
Inputs: - fp: A file pointer.
- score_name: What it says on the tin.
- scores: An array of average score values corresponding to each of the training set percentages.
"""
row = "--" + score_name + "--"
fp.write(row)
for vector in scores:
row = list(vector)
row = [str(score) for score in row]
row = "\n" + "\t".join(row)
fp.write(row) | [
"def",
"write_average_score_row",
"(",
"fp",
",",
"score_name",
",",
"scores",
")",
":",
"row",
"=",
"\"--\"",
"+",
"score_name",
"+",
"\"--\"",
"fp",
".",
"write",
"(",
"row",
")",
"for",
"vector",
"in",
"scores",
":",
"row",
"=",
"list",
"(",
"vector... | Simple utility function that writes an average score row in a file designated by a file pointer.
Inputs: - fp: A file pointer.
- score_name: What it says on the tin.
- scores: An array of average score values corresponding to each of the training set percentages. | [
"Simple",
"utility",
"function",
"that",
"writes",
"an",
"average",
"score",
"row",
"in",
"a",
"file",
"designated",
"by",
"a",
"file",
"pointer",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/score_rw_util.py#L76-L90 | train | 47,431 |
WhyNotHugo/django-afip | django_afip/admin.py | catch_errors | def catch_errors(f):
"""
Catches specific errors in admin actions and shows a friendly error.
"""
@functools.wraps(f)
def wrapper(self, request, *args, **kwargs):
try:
return f(self, request, *args, **kwargs)
except exceptions.CertificateExpired:
self.message_user(
request,
_('The AFIP Taxpayer certificate has expired.'),
messages.ERROR,
)
except exceptions.UntrustedCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is untrusted.'),
messages.ERROR,
)
except exceptions.CorruptCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is corrupt.'),
messages.ERROR,
)
except exceptions.AuthenticationError as e:
logger.exception('AFIP auth failed')
self.message_user(
request,
_('An unknown authentication error has ocurred: %s') % e,
messages.ERROR,
)
return wrapper | python | def catch_errors(f):
"""
Catches specific errors in admin actions and shows a friendly error.
"""
@functools.wraps(f)
def wrapper(self, request, *args, **kwargs):
try:
return f(self, request, *args, **kwargs)
except exceptions.CertificateExpired:
self.message_user(
request,
_('The AFIP Taxpayer certificate has expired.'),
messages.ERROR,
)
except exceptions.UntrustedCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is untrusted.'),
messages.ERROR,
)
except exceptions.CorruptCertificate:
self.message_user(
request,
_('The AFIP Taxpayer certificate is corrupt.'),
messages.ERROR,
)
except exceptions.AuthenticationError as e:
logger.exception('AFIP auth failed')
self.message_user(
request,
_('An unknown authentication error has ocurred: %s') % e,
messages.ERROR,
)
return wrapper | [
"def",
"catch_errors",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"self",
",",
"request",
... | Catches specific errors in admin actions and shows a friendly error. | [
"Catches",
"specific",
"errors",
"in",
"admin",
"actions",
"and",
"shows",
"a",
"friendly",
"error",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/admin.py#L24-L59 | train | 47,432 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/text_graph.py | augmented_tf_idf | def augmented_tf_idf(attribute_matrix):
"""
Performs augmented TF-IDF normalization on a bag-of-words vector representation of data.
Augmented TF-IDF introduced in: Manning, C. D., Raghavan, P., & Schütze, H. (2008).
Introduction to information retrieval (Vol. 1, p. 6).
Cambridge: Cambridge university press.
Input: - attribute_matrix: A bag-of-words vector representation in SciPy sparse matrix format.
Output: - attribute_matrix: The same matrix after augmented tf-idf normalization.
"""
number_of_documents = attribute_matrix.shape[0]
max_term_frequencies = np.ones(number_of_documents, dtype=np.float64)
idf_array = np.ones(attribute_matrix.shape[1], dtype=np.float64)
# Calculate inverse document frequency
attribute_matrix = attribute_matrix.tocsc()
for j in range(attribute_matrix.shape[1]):
document_frequency = attribute_matrix.getcol(j).data.size
if document_frequency > 1:
idf_array[j] = np.log(number_of_documents/document_frequency)
# Calculate maximum term frequencies for a user
attribute_matrix = attribute_matrix.tocsr()
for i in range(attribute_matrix.shape[0]):
max_term_frequency = attribute_matrix.getrow(i).data
if max_term_frequency.size > 0:
max_term_frequency = max_term_frequency.max()
if max_term_frequency > 0.0:
max_term_frequencies[i] = max_term_frequency
# Do augmented tf-idf normalization
attribute_matrix = attribute_matrix.tocoo()
attribute_matrix.data = 0.5 + np.divide(0.5*attribute_matrix.data, np.multiply((max_term_frequencies[attribute_matrix.row]), (idf_array[attribute_matrix.col])))
attribute_matrix = attribute_matrix.tocsr()
return attribute_matrix | python | def augmented_tf_idf(attribute_matrix):
"""
Performs augmented TF-IDF normalization on a bag-of-words vector representation of data.
Augmented TF-IDF introduced in: Manning, C. D., Raghavan, P., & Schütze, H. (2008).
Introduction to information retrieval (Vol. 1, p. 6).
Cambridge: Cambridge university press.
Input: - attribute_matrix: A bag-of-words vector representation in SciPy sparse matrix format.
Output: - attribute_matrix: The same matrix after augmented tf-idf normalization.
"""
number_of_documents = attribute_matrix.shape[0]
max_term_frequencies = np.ones(number_of_documents, dtype=np.float64)
idf_array = np.ones(attribute_matrix.shape[1], dtype=np.float64)
# Calculate inverse document frequency
attribute_matrix = attribute_matrix.tocsc()
for j in range(attribute_matrix.shape[1]):
document_frequency = attribute_matrix.getcol(j).data.size
if document_frequency > 1:
idf_array[j] = np.log(number_of_documents/document_frequency)
# Calculate maximum term frequencies for a user
attribute_matrix = attribute_matrix.tocsr()
for i in range(attribute_matrix.shape[0]):
max_term_frequency = attribute_matrix.getrow(i).data
if max_term_frequency.size > 0:
max_term_frequency = max_term_frequency.max()
if max_term_frequency > 0.0:
max_term_frequencies[i] = max_term_frequency
# Do augmented tf-idf normalization
attribute_matrix = attribute_matrix.tocoo()
attribute_matrix.data = 0.5 + np.divide(0.5*attribute_matrix.data, np.multiply((max_term_frequencies[attribute_matrix.row]), (idf_array[attribute_matrix.col])))
attribute_matrix = attribute_matrix.tocsr()
return attribute_matrix | [
"def",
"augmented_tf_idf",
"(",
"attribute_matrix",
")",
":",
"number_of_documents",
"=",
"attribute_matrix",
".",
"shape",
"[",
"0",
"]",
"max_term_frequencies",
"=",
"np",
".",
"ones",
"(",
"number_of_documents",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
... | Performs augmented TF-IDF normalization on a bag-of-words vector representation of data.
Augmented TF-IDF introduced in: Manning, C. D., Raghavan, P., & Schütze, H. (2008).
Introduction to information retrieval (Vol. 1, p. 6).
Cambridge: Cambridge university press.
Input: - attribute_matrix: A bag-of-words vector representation in SciPy sparse matrix format.
Output: - attribute_matrix: The same matrix after augmented tf-idf normalization. | [
"Performs",
"augmented",
"TF",
"-",
"IDF",
"normalization",
"on",
"a",
"bag",
"-",
"of",
"-",
"words",
"vector",
"representation",
"of",
"data",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/text_graph.py#L48-L86 | train | 47,433 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/common.py | get_file_row_generator | def get_file_row_generator(file_path, separator, encoding=None):
"""
Reads an separated value file row by row.
Inputs: - file_path: The path of the separated value format file.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- encoding: The encoding used in the stored text.
Yields: - words: A list of strings corresponding to each of the file's rows.
"""
with open(file_path, encoding=encoding) as file_object:
for line in file_object:
words = line.strip().split(separator)
yield words | python | def get_file_row_generator(file_path, separator, encoding=None):
"""
Reads an separated value file row by row.
Inputs: - file_path: The path of the separated value format file.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- encoding: The encoding used in the stored text.
Yields: - words: A list of strings corresponding to each of the file's rows.
"""
with open(file_path, encoding=encoding) as file_object:
for line in file_object:
words = line.strip().split(separator)
yield words | [
"def",
"get_file_row_generator",
"(",
"file_path",
",",
"separator",
",",
"encoding",
"=",
"None",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"encoding",
"=",
"encoding",
")",
"as",
"file_object",
":",
"for",
"line",
"in",
"file_object",
":",
"words",
... | Reads an separated value file row by row.
Inputs: - file_path: The path of the separated value format file.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- encoding: The encoding used in the stored text.
Yields: - words: A list of strings corresponding to each of the file's rows. | [
"Reads",
"an",
"separated",
"value",
"file",
"row",
"by",
"row",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/common.py#L36-L49 | train | 47,434 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/common.py | store_pickle | def store_pickle(file_path, data):
"""
Pickle some data to a given path.
Inputs: - file_path: Target file path.
- data: The python object to be serialized via pickle.
"""
pkl_file = open(file_path, 'wb')
pickle.dump(data, pkl_file)
pkl_file.close() | python | def store_pickle(file_path, data):
"""
Pickle some data to a given path.
Inputs: - file_path: Target file path.
- data: The python object to be serialized via pickle.
"""
pkl_file = open(file_path, 'wb')
pickle.dump(data, pkl_file)
pkl_file.close() | [
"def",
"store_pickle",
"(",
"file_path",
",",
"data",
")",
":",
"pkl_file",
"=",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"pickle",
".",
"dump",
"(",
"data",
",",
"pkl_file",
")",
"pkl_file",
".",
"close",
"(",
")"
] | Pickle some data to a given path.
Inputs: - file_path: Target file path.
- data: The python object to be serialized via pickle. | [
"Pickle",
"some",
"data",
"to",
"a",
"given",
"path",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/common.py#L52-L61 | train | 47,435 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/common.py | load_pickle | def load_pickle(file_path):
"""
Unpickle some data from a given path.
Input: - file_path: Target file path.
Output: - data: The python object that was serialized and stored in disk.
"""
pkl_file = open(file_path, 'rb')
data = pickle.load(pkl_file)
pkl_file.close()
return data | python | def load_pickle(file_path):
"""
Unpickle some data from a given path.
Input: - file_path: Target file path.
Output: - data: The python object that was serialized and stored in disk.
"""
pkl_file = open(file_path, 'rb')
data = pickle.load(pkl_file)
pkl_file.close()
return data | [
"def",
"load_pickle",
"(",
"file_path",
")",
":",
"pkl_file",
"=",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"data",
"=",
"pickle",
".",
"load",
"(",
"pkl_file",
")",
"pkl_file",
".",
"close",
"(",
")",
"return",
"data"
] | Unpickle some data from a given path.
Input: - file_path: Target file path.
Output: - data: The python object that was serialized and stored in disk. | [
"Unpickle",
"some",
"data",
"from",
"a",
"given",
"path",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/common.py#L64-L75 | train | 47,436 |
MultipedRobotics/pyxl320 | bin/set_id.py | makeServoIDPacket | def makeServoIDPacket(curr_id, new_id):
"""
Given the current ID, returns a packet to set the servo to a new ID
"""
pkt = Packet.makeWritePacket(curr_id, xl320.XL320_ID, [new_id])
return pkt | python | def makeServoIDPacket(curr_id, new_id):
"""
Given the current ID, returns a packet to set the servo to a new ID
"""
pkt = Packet.makeWritePacket(curr_id, xl320.XL320_ID, [new_id])
return pkt | [
"def",
"makeServoIDPacket",
"(",
"curr_id",
",",
"new_id",
")",
":",
"pkt",
"=",
"Packet",
".",
"makeWritePacket",
"(",
"curr_id",
",",
"xl320",
".",
"XL320_ID",
",",
"[",
"new_id",
"]",
")",
"return",
"pkt"
] | Given the current ID, returns a packet to set the servo to a new ID | [
"Given",
"the",
"current",
"ID",
"returns",
"a",
"packet",
"to",
"set",
"the",
"servo",
"to",
"a",
"new",
"ID"
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/bin/set_id.py#L28-L33 | train | 47,437 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/common.py | normalize_rows | def normalize_rows(features):
"""
This performs row normalization to 1 of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The row normalized community indicator matrix.
"""
# Normalize each row of term frequencies to 1
features = features.tocsr()
features = normalize(features, norm="l2")
# for i in range(features.shape[0]):
# term_frequency = features.getrow(i).data
# if term_frequency.size > 0:
# features.data[features.indptr[i]: features.indptr[i + 1]] =\
# features.data[features.indptr[i]: features.indptr[i + 1]]/np.sqrt(np.sum(np.power(term_frequency, 2)))
return features | python | def normalize_rows(features):
"""
This performs row normalization to 1 of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The row normalized community indicator matrix.
"""
# Normalize each row of term frequencies to 1
features = features.tocsr()
features = normalize(features, norm="l2")
# for i in range(features.shape[0]):
# term_frequency = features.getrow(i).data
# if term_frequency.size > 0:
# features.data[features.indptr[i]: features.indptr[i + 1]] =\
# features.data[features.indptr[i]: features.indptr[i + 1]]/np.sqrt(np.sum(np.power(term_frequency, 2)))
return features | [
"def",
"normalize_rows",
"(",
"features",
")",
":",
"# Normalize each row of term frequencies to 1",
"features",
"=",
"features",
".",
"tocsr",
"(",
")",
"features",
"=",
"normalize",
"(",
"features",
",",
"norm",
"=",
"\"l2\"",
")",
"# for i in range(features.shape[0... | This performs row normalization to 1 of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The row normalized community indicator matrix. | [
"This",
"performs",
"row",
"normalization",
"to",
"1",
"of",
"community",
"embedding",
"features",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/common.py#L29-L46 | train | 47,438 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/common.py | normalize_columns | def normalize_columns(features):
"""
This performs column normalization of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix.
"""
# Calculate inverse document frequency.
features = features.tocsc()
for j in range(features.shape[1]):
document_frequency = features.getcol(j).data.size
if document_frequency > 1:
features.data[features.indptr[j]: features.indptr[j + 1]] =\
features.data[features.indptr[j]: features.indptr[j + 1]]/np.sqrt(np.log(document_frequency))
features = features.tocsr()
return features | python | def normalize_columns(features):
"""
This performs column normalization of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix.
"""
# Calculate inverse document frequency.
features = features.tocsc()
for j in range(features.shape[1]):
document_frequency = features.getcol(j).data.size
if document_frequency > 1:
features.data[features.indptr[j]: features.indptr[j + 1]] =\
features.data[features.indptr[j]: features.indptr[j + 1]]/np.sqrt(np.log(document_frequency))
features = features.tocsr()
return features | [
"def",
"normalize_columns",
"(",
"features",
")",
":",
"# Calculate inverse document frequency.",
"features",
"=",
"features",
".",
"tocsc",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"features",
".",
"shape",
"[",
"1",
"]",
")",
":",
"document_frequency",
"=",... | This performs column normalization of community embedding features.
Input: - X in R^(nxC_n): The community indicator matrix.
Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix. | [
"This",
"performs",
"column",
"normalization",
"of",
"community",
"embedding",
"features",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/common.py#L49-L67 | train | 47,439 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/eps_randomwalk/push.py | pagerank_lazy_push | def pagerank_lazy_push(s, r, w_i, a_i, push_node, rho, lazy):
"""
Performs a random step with a self-loop.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
"""
# Calculate the A, B and C quantities
A = rho*r[push_node]
B = (1-rho)*(1 - lazy)*r[push_node]
C = (1-rho)*lazy*(r[push_node])
# Update approximate Pagerank and residual vectors
s[push_node] += A
r[push_node] = C
# Update residual vector at push node's adjacent nodes
r[a_i] += B * w_i | python | def pagerank_lazy_push(s, r, w_i, a_i, push_node, rho, lazy):
"""
Performs a random step with a self-loop.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
"""
# Calculate the A, B and C quantities
A = rho*r[push_node]
B = (1-rho)*(1 - lazy)*r[push_node]
C = (1-rho)*lazy*(r[push_node])
# Update approximate Pagerank and residual vectors
s[push_node] += A
r[push_node] = C
# Update residual vector at push node's adjacent nodes
r[a_i] += B * w_i | [
"def",
"pagerank_lazy_push",
"(",
"s",
",",
"r",
",",
"w_i",
",",
"a_i",
",",
"push_node",
",",
"rho",
",",
"lazy",
")",
":",
"# Calculate the A, B and C quantities",
"A",
"=",
"rho",
"*",
"r",
"[",
"push_node",
"]",
"B",
"=",
"(",
"1",
"-",
"rho",
"... | Performs a random step with a self-loop.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE. | [
"Performs",
"a",
"random",
"step",
"with",
"a",
"self",
"-",
"loop",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/eps_randomwalk/push.py#L20-L38 | train | 47,440 |
mozilla-services/pyramid_multiauth | pyramid_multiauth/__init__.py | includeme | def includeme(config):
"""Include pyramid_multiauth into a pyramid configurator.
This function provides a hook for pyramid to include the default settings
for auth via pyramid_multiauth. Activate it like so:
config.include("pyramid_multiauth")
This will pull the list of registered authn policies from the deployment
settings, and configure and install each policy in order. The policies to
use can be specified in one of two ways:
* as the name of a module to be included.
* as the name of a callable along with a set of parameters.
Here's an example suite of settings:
multiauth.policies = ipauth1 ipauth2 pyramid_browserid
multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16
multiauth.policy.ipauth1.userid = local1
multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16
multiauth.policy.ipauth2.userid = local2
This will configure a MultiAuthenticationPolicy with three policy objects.
The first two will be IPAuthenticationPolicy objects created by passing
in the specified keyword arguments. The third will be a BrowserID
authentication policy just like you would get from executing:
config.include("pyramid_browserid")
As a side-effect, the configuration will also get the additional views
that pyramid_browserid sets up by default.
The *group finder function* and the *authorization policy* are also read
from configuration if specified:
multiauth.authorization_policy = mypyramidapp.acl.Custom
multiauth.groupfinder = mypyramidapp.acl.groupfinder
"""
# Grab the pyramid-wide settings, to look for any auth config.
settings = config.get_settings()
# Hook up a default AuthorizationPolicy.
# Get the authorization policy from config if present.
# Default ACLAuthorizationPolicy is usually what you want.
authz_class = settings.get("multiauth.authorization_policy",
"pyramid.authorization.ACLAuthorizationPolicy")
authz_policy = config.maybe_dotted(authz_class)()
# If the app configures one explicitly then this will get overridden.
# In autocommit mode this needs to be done before setting the authn policy.
config.set_authorization_policy(authz_policy)
# Get the groupfinder from config if present.
groupfinder = settings.get("multiauth.groupfinder", None)
groupfinder = config.maybe_dotted(groupfinder)
# Look for callable policy definitions.
# Suck them all out at once and store them in a dict for later use.
policy_definitions = get_policy_definitions(settings)
# Read and process the list of policies to load.
# We build up a list of callables which can be executed at config commit
# time to obtain the final list of policies.
# Yeah, it's complicated. But we want to be able to inherit any default
# views or other config added by the sub-policies when they're included.
# Process policies in reverse order so that things at the front of the
# list can override things at the back of the list.
policy_factories = []
policy_names = settings.get("multiauth.policies", "").split()
for policy_name in reversed(policy_names):
if policy_name in policy_definitions:
# It's a policy defined using a callable.
# Just append it straight to the list.
definition = policy_definitions[policy_name]
factory = config.maybe_dotted(definition.pop("use"))
policy_factories.append((factory, policy_name, definition))
else:
# It's a module to be directly included.
try:
factory = policy_factory_from_module(config, policy_name)
except ImportError:
err = "pyramid_multiauth: policy %r has no settings "\
"and is not importable" % (policy_name,)
raise ValueError(err)
policy_factories.append((factory, policy_name, {}))
# OK. We now have a list of callbacks which need to be called at
# commit time, and will return the policies in reverse order.
# Register a special action to pull them into our list of policies.
policies = []
def grab_policies():
for factory, name, kwds in policy_factories:
policy = factory(**kwds)
if policy:
policy._pyramid_multiauth_name = name
if not policies or policy is not policies[0]:
# Remember, they're being processed in reverse order.
# So each new policy needs to go at the front.
policies.insert(0, policy)
config.action(None, grab_policies, order=PHASE2_CONFIG)
authn_policy = MultiAuthenticationPolicy(policies, groupfinder)
config.set_authentication_policy(authn_policy) | python | def includeme(config):
"""Include pyramid_multiauth into a pyramid configurator.
This function provides a hook for pyramid to include the default settings
for auth via pyramid_multiauth. Activate it like so:
config.include("pyramid_multiauth")
This will pull the list of registered authn policies from the deployment
settings, and configure and install each policy in order. The policies to
use can be specified in one of two ways:
* as the name of a module to be included.
* as the name of a callable along with a set of parameters.
Here's an example suite of settings:
multiauth.policies = ipauth1 ipauth2 pyramid_browserid
multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16
multiauth.policy.ipauth1.userid = local1
multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16
multiauth.policy.ipauth2.userid = local2
This will configure a MultiAuthenticationPolicy with three policy objects.
The first two will be IPAuthenticationPolicy objects created by passing
in the specified keyword arguments. The third will be a BrowserID
authentication policy just like you would get from executing:
config.include("pyramid_browserid")
As a side-effect, the configuration will also get the additional views
that pyramid_browserid sets up by default.
The *group finder function* and the *authorization policy* are also read
from configuration if specified:
multiauth.authorization_policy = mypyramidapp.acl.Custom
multiauth.groupfinder = mypyramidapp.acl.groupfinder
"""
# Grab the pyramid-wide settings, to look for any auth config.
settings = config.get_settings()
# Hook up a default AuthorizationPolicy.
# Get the authorization policy from config if present.
# Default ACLAuthorizationPolicy is usually what you want.
authz_class = settings.get("multiauth.authorization_policy",
"pyramid.authorization.ACLAuthorizationPolicy")
authz_policy = config.maybe_dotted(authz_class)()
# If the app configures one explicitly then this will get overridden.
# In autocommit mode this needs to be done before setting the authn policy.
config.set_authorization_policy(authz_policy)
# Get the groupfinder from config if present.
groupfinder = settings.get("multiauth.groupfinder", None)
groupfinder = config.maybe_dotted(groupfinder)
# Look for callable policy definitions.
# Suck them all out at once and store them in a dict for later use.
policy_definitions = get_policy_definitions(settings)
# Read and process the list of policies to load.
# We build up a list of callables which can be executed at config commit
# time to obtain the final list of policies.
# Yeah, it's complicated. But we want to be able to inherit any default
# views or other config added by the sub-policies when they're included.
# Process policies in reverse order so that things at the front of the
# list can override things at the back of the list.
policy_factories = []
policy_names = settings.get("multiauth.policies", "").split()
for policy_name in reversed(policy_names):
if policy_name in policy_definitions:
# It's a policy defined using a callable.
# Just append it straight to the list.
definition = policy_definitions[policy_name]
factory = config.maybe_dotted(definition.pop("use"))
policy_factories.append((factory, policy_name, definition))
else:
# It's a module to be directly included.
try:
factory = policy_factory_from_module(config, policy_name)
except ImportError:
err = "pyramid_multiauth: policy %r has no settings "\
"and is not importable" % (policy_name,)
raise ValueError(err)
policy_factories.append((factory, policy_name, {}))
# OK. We now have a list of callbacks which need to be called at
# commit time, and will return the policies in reverse order.
# Register a special action to pull them into our list of policies.
policies = []
def grab_policies():
for factory, name, kwds in policy_factories:
policy = factory(**kwds)
if policy:
policy._pyramid_multiauth_name = name
if not policies or policy is not policies[0]:
# Remember, they're being processed in reverse order.
# So each new policy needs to go at the front.
policies.insert(0, policy)
config.action(None, grab_policies, order=PHASE2_CONFIG)
authn_policy = MultiAuthenticationPolicy(policies, groupfinder)
config.set_authentication_policy(authn_policy) | [
"def",
"includeme",
"(",
"config",
")",
":",
"# Grab the pyramid-wide settings, to look for any auth config.",
"settings",
"=",
"config",
".",
"get_settings",
"(",
")",
"# Hook up a default AuthorizationPolicy.",
"# Get the authorization policy from config if present.",
"# Default AC... | Include pyramid_multiauth into a pyramid configurator.
This function provides a hook for pyramid to include the default settings
for auth via pyramid_multiauth. Activate it like so:
config.include("pyramid_multiauth")
This will pull the list of registered authn policies from the deployment
settings, and configure and install each policy in order. The policies to
use can be specified in one of two ways:
* as the name of a module to be included.
* as the name of a callable along with a set of parameters.
Here's an example suite of settings:
multiauth.policies = ipauth1 ipauth2 pyramid_browserid
multiauth.policy.ipauth1.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth1.ipaddrs = 123.123.0.0/16
multiauth.policy.ipauth1.userid = local1
multiauth.policy.ipauth2.use = pyramid_ipauth.IPAuthentictionPolicy
multiauth.policy.ipauth2.ipaddrs = 124.124.0.0/16
multiauth.policy.ipauth2.userid = local2
This will configure a MultiAuthenticationPolicy with three policy objects.
The first two will be IPAuthenticationPolicy objects created by passing
in the specified keyword arguments. The third will be a BrowserID
authentication policy just like you would get from executing:
config.include("pyramid_browserid")
As a side-effect, the configuration will also get the additional views
that pyramid_browserid sets up by default.
The *group finder function* and the *authorization policy* are also read
from configuration if specified:
multiauth.authorization_policy = mypyramidapp.acl.Custom
multiauth.groupfinder = mypyramidapp.acl.groupfinder | [
"Include",
"pyramid_multiauth",
"into",
"a",
"pyramid",
"configurator",
"."
] | 9548aa55f726920a666791d7c89ac2b9779d2bc1 | https://github.com/mozilla-services/pyramid_multiauth/blob/9548aa55f726920a666791d7c89ac2b9779d2bc1/pyramid_multiauth/__init__.py#L188-L290 | train | 47,441 |
mozilla-services/pyramid_multiauth | pyramid_multiauth/__init__.py | get_policy_definitions | def get_policy_definitions(settings):
"""Find all multiauth policy definitions from the settings dict.
This function processes the paster deployment settings looking for items
that start with "multiauth.policy.<policyname>.". It pulls them all out
into a dict indexed by the policy name.
"""
policy_definitions = {}
for name in settings:
if not name.startswith("multiauth.policy."):
continue
value = settings[name]
name = name[len("multiauth.policy."):]
policy_name, setting_name = name.split(".", 1)
if policy_name not in policy_definitions:
policy_definitions[policy_name] = {}
policy_definitions[policy_name][setting_name] = value
return policy_definitions | python | def get_policy_definitions(settings):
"""Find all multiauth policy definitions from the settings dict.
This function processes the paster deployment settings looking for items
that start with "multiauth.policy.<policyname>.". It pulls them all out
into a dict indexed by the policy name.
"""
policy_definitions = {}
for name in settings:
if not name.startswith("multiauth.policy."):
continue
value = settings[name]
name = name[len("multiauth.policy."):]
policy_name, setting_name = name.split(".", 1)
if policy_name not in policy_definitions:
policy_definitions[policy_name] = {}
policy_definitions[policy_name][setting_name] = value
return policy_definitions | [
"def",
"get_policy_definitions",
"(",
"settings",
")",
":",
"policy_definitions",
"=",
"{",
"}",
"for",
"name",
"in",
"settings",
":",
"if",
"not",
"name",
".",
"startswith",
"(",
"\"multiauth.policy.\"",
")",
":",
"continue",
"value",
"=",
"settings",
"[",
... | Find all multiauth policy definitions from the settings dict.
This function processes the paster deployment settings looking for items
that start with "multiauth.policy.<policyname>.". It pulls them all out
into a dict indexed by the policy name. | [
"Find",
"all",
"multiauth",
"policy",
"definitions",
"from",
"the",
"settings",
"dict",
"."
] | 9548aa55f726920a666791d7c89ac2b9779d2bc1 | https://github.com/mozilla-services/pyramid_multiauth/blob/9548aa55f726920a666791d7c89ac2b9779d2bc1/pyramid_multiauth/__init__.py#L339-L356 | train | 47,442 |
Robpol86/flake8-pydocstyle | flake8_pydocstyle.py | load_file | def load_file(filename):
"""Read file to memory.
For stdin sourced files, this function does something super duper incredibly hacky and shameful. So so shameful. I'm
obtaining the original source code of the target module from the only instance of pycodestyle.Checker through the
Python garbage collector. Flake8's API doesn't give me the original source code of the module we are checking.
Instead it has pycodestyle give me an AST object of the module (already parsed). This unfortunately loses valuable
information like the kind of quotes used for strings (no way to know if a docstring was surrounded by triple double
quotes or just one single quote, thereby rendering pydocstyle's D300 error as unusable).
This will break one day. I'm sure of it. For now it fixes https://github.com/Robpol86/flake8-pydocstyle/issues/2
:param str filename: File path or 'stdin'. From Main().filename.
:return: First item is the filename or 'stdin', second are the contents of the file.
:rtype: tuple
"""
if filename in ('stdin', '-', None):
instances = [i for i in gc.get_objects() if isinstance(i, pycodestyle.Checker) or isinstance(i, pep8.Checker)]
if len(instances) != 1:
raise ValueError('Expected only 1 instance of pycodestyle.Checker, got {0} instead.'.format(len(instances)))
return 'stdin', ''.join(instances[0].lines)
with codecs.open(filename, encoding='utf-8') as handle:
return filename, handle.read() | python | def load_file(filename):
"""Read file to memory.
For stdin sourced files, this function does something super duper incredibly hacky and shameful. So so shameful. I'm
obtaining the original source code of the target module from the only instance of pycodestyle.Checker through the
Python garbage collector. Flake8's API doesn't give me the original source code of the module we are checking.
Instead it has pycodestyle give me an AST object of the module (already parsed). This unfortunately loses valuable
information like the kind of quotes used for strings (no way to know if a docstring was surrounded by triple double
quotes or just one single quote, thereby rendering pydocstyle's D300 error as unusable).
This will break one day. I'm sure of it. For now it fixes https://github.com/Robpol86/flake8-pydocstyle/issues/2
:param str filename: File path or 'stdin'. From Main().filename.
:return: First item is the filename or 'stdin', second are the contents of the file.
:rtype: tuple
"""
if filename in ('stdin', '-', None):
instances = [i for i in gc.get_objects() if isinstance(i, pycodestyle.Checker) or isinstance(i, pep8.Checker)]
if len(instances) != 1:
raise ValueError('Expected only 1 instance of pycodestyle.Checker, got {0} instead.'.format(len(instances)))
return 'stdin', ''.join(instances[0].lines)
with codecs.open(filename, encoding='utf-8') as handle:
return filename, handle.read() | [
"def",
"load_file",
"(",
"filename",
")",
":",
"if",
"filename",
"in",
"(",
"'stdin'",
",",
"'-'",
",",
"None",
")",
":",
"instances",
"=",
"[",
"i",
"for",
"i",
"in",
"gc",
".",
"get_objects",
"(",
")",
"if",
"isinstance",
"(",
"i",
",",
"pycodest... | Read file to memory.
For stdin sourced files, this function does something super duper incredibly hacky and shameful. So so shameful. I'm
obtaining the original source code of the target module from the only instance of pycodestyle.Checker through the
Python garbage collector. Flake8's API doesn't give me the original source code of the module we are checking.
Instead it has pycodestyle give me an AST object of the module (already parsed). This unfortunately loses valuable
information like the kind of quotes used for strings (no way to know if a docstring was surrounded by triple double
quotes or just one single quote, thereby rendering pydocstyle's D300 error as unusable).
This will break one day. I'm sure of it. For now it fixes https://github.com/Robpol86/flake8-pydocstyle/issues/2
:param str filename: File path or 'stdin'. From Main().filename.
:return: First item is the filename or 'stdin', second are the contents of the file.
:rtype: tuple | [
"Read",
"file",
"to",
"memory",
"."
] | 657425541e1d868a6a5241a83c3a16a9a715d6b5 | https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L20-L43 | train | 47,443 |
Robpol86/flake8-pydocstyle | flake8_pydocstyle.py | ignore | def ignore(code):
"""Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool
"""
if code in Main.options['ignore']:
return True
if any(c in code for c in Main.options['ignore']):
return True
return False | python | def ignore(code):
"""Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool
"""
if code in Main.options['ignore']:
return True
if any(c in code for c in Main.options['ignore']):
return True
return False | [
"def",
"ignore",
"(",
"code",
")",
":",
"if",
"code",
"in",
"Main",
".",
"options",
"[",
"'ignore'",
"]",
":",
"return",
"True",
"if",
"any",
"(",
"c",
"in",
"code",
"for",
"c",
"in",
"Main",
".",
"options",
"[",
"'ignore'",
"]",
")",
":",
"retur... | Should this code be ignored.
:param str code: Error code (e.g. D201).
:return: True if code should be ignored, False otherwise.
:rtype: bool | [
"Should",
"this",
"code",
"be",
"ignored",
"."
] | 657425541e1d868a6a5241a83c3a16a9a715d6b5 | https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L46-L58 | train | 47,444 |
Robpol86/flake8-pydocstyle | flake8_pydocstyle.py | Main.add_options | def add_options(cls, parser):
"""Add options to flake8.
:param parser: optparse.OptionParser from pycodestyle.
"""
parser.add_option('--show-pydocstyle', action='store_true', help='show explanation of each PEP 257 error')
parser.config_options.append('show-pydocstyle') | python | def add_options(cls, parser):
"""Add options to flake8.
:param parser: optparse.OptionParser from pycodestyle.
"""
parser.add_option('--show-pydocstyle', action='store_true', help='show explanation of each PEP 257 error')
parser.config_options.append('show-pydocstyle') | [
"def",
"add_options",
"(",
"cls",
",",
"parser",
")",
":",
"parser",
".",
"add_option",
"(",
"'--show-pydocstyle'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'show explanation of each PEP 257 error'",
")",
"parser",
".",
"config_options",
".",
"append... | Add options to flake8.
:param parser: optparse.OptionParser from pycodestyle. | [
"Add",
"options",
"to",
"flake8",
"."
] | 657425541e1d868a6a5241a83c3a16a9a715d6b5 | https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L78-L84 | train | 47,445 |
Robpol86/flake8-pydocstyle | flake8_pydocstyle.py | Main.parse_options | def parse_options(cls, options):
"""Read parsed options from flake8.
:param options: Options to add to flake8's command line options.
"""
# Handle flake8 options.
cls.options['explain'] = bool(options.show_pydocstyle)
cls.options['ignore'] = options.ignore
# Handle pydocstyle options.
config = pydocstyle.RawConfigParser()
for file_name in pydocstyle.ConfigurationParser.PROJECT_CONFIG_FILES:
if config.read(os.path.join(os.path.abspath('.'), file_name)):
break
if not config.has_section('pydocstyle'):
return
native_options = dict()
for option in config.options('pydocstyle'):
if option == 'ignore':
native_options['ignore'] = config.get('pydocstyle', option)
if option in ('explain', 'source'):
native_options[option] = config.getboolean('pydocstyle', option)
native_options['show-source'] = native_options.pop('source', None)
if native_options.get('ignore'):
native_options['ignore'] = native_options['ignore'].split(',')
cls.options.update(dict((k, v) for k, v in native_options.items() if v)) | python | def parse_options(cls, options):
"""Read parsed options from flake8.
:param options: Options to add to flake8's command line options.
"""
# Handle flake8 options.
cls.options['explain'] = bool(options.show_pydocstyle)
cls.options['ignore'] = options.ignore
# Handle pydocstyle options.
config = pydocstyle.RawConfigParser()
for file_name in pydocstyle.ConfigurationParser.PROJECT_CONFIG_FILES:
if config.read(os.path.join(os.path.abspath('.'), file_name)):
break
if not config.has_section('pydocstyle'):
return
native_options = dict()
for option in config.options('pydocstyle'):
if option == 'ignore':
native_options['ignore'] = config.get('pydocstyle', option)
if option in ('explain', 'source'):
native_options[option] = config.getboolean('pydocstyle', option)
native_options['show-source'] = native_options.pop('source', None)
if native_options.get('ignore'):
native_options['ignore'] = native_options['ignore'].split(',')
cls.options.update(dict((k, v) for k, v in native_options.items() if v)) | [
"def",
"parse_options",
"(",
"cls",
",",
"options",
")",
":",
"# Handle flake8 options.",
"cls",
".",
"options",
"[",
"'explain'",
"]",
"=",
"bool",
"(",
"options",
".",
"show_pydocstyle",
")",
"cls",
".",
"options",
"[",
"'ignore'",
"]",
"=",
"options",
"... | Read parsed options from flake8.
:param options: Options to add to flake8's command line options. | [
"Read",
"parsed",
"options",
"from",
"flake8",
"."
] | 657425541e1d868a6a5241a83c3a16a9a715d6b5 | https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L87-L112 | train | 47,446 |
Robpol86/flake8-pydocstyle | flake8_pydocstyle.py | Main.run | def run(self):
"""Run analysis on a single file."""
pydocstyle.Error.explain = self.options['explain']
filename, source = load_file(self.filename)
for error in pydocstyle.PEP257Checker().check_source(source, filename):
if not hasattr(error, 'code') or ignore(error.code):
continue
lineno = error.line
offset = 0 # Column number starting from 0.
explanation = error.explanation if pydocstyle.Error.explain else ''
text = '{0} {1}{2}'.format(error.code, error.message.split(': ', 1)[1], explanation)
yield lineno, offset, text, Main | python | def run(self):
"""Run analysis on a single file."""
pydocstyle.Error.explain = self.options['explain']
filename, source = load_file(self.filename)
for error in pydocstyle.PEP257Checker().check_source(source, filename):
if not hasattr(error, 'code') or ignore(error.code):
continue
lineno = error.line
offset = 0 # Column number starting from 0.
explanation = error.explanation if pydocstyle.Error.explain else ''
text = '{0} {1}{2}'.format(error.code, error.message.split(': ', 1)[1], explanation)
yield lineno, offset, text, Main | [
"def",
"run",
"(",
"self",
")",
":",
"pydocstyle",
".",
"Error",
".",
"explain",
"=",
"self",
".",
"options",
"[",
"'explain'",
"]",
"filename",
",",
"source",
"=",
"load_file",
"(",
"self",
".",
"filename",
")",
"for",
"error",
"in",
"pydocstyle",
"."... | Run analysis on a single file. | [
"Run",
"analysis",
"on",
"a",
"single",
"file",
"."
] | 657425541e1d868a6a5241a83c3a16a9a715d6b5 | https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L114-L125 | train | 47,447 |
WhyNotHugo/django-afip | django_afip/pdf.py | ReceiptBarcodeGenerator.numbers | def numbers(self):
""""
Returns the barcode's number without the verification digit.
:return: list(int)
"""
numstring = '{:011d}{:02d}{:04d}{}{}'.format(
self._receipt.point_of_sales.owner.cuit, # 11 digits
int(self._receipt.receipt_type.code), # 2 digits
self._receipt.point_of_sales.number, # point of sales
self._receipt.validation.cae, # 14 digits
self._receipt.validation.cae_expiration.strftime('%Y%m%d'), # 8
)
return [int(num) for num in numstring] | python | def numbers(self):
""""
Returns the barcode's number without the verification digit.
:return: list(int)
"""
numstring = '{:011d}{:02d}{:04d}{}{}'.format(
self._receipt.point_of_sales.owner.cuit, # 11 digits
int(self._receipt.receipt_type.code), # 2 digits
self._receipt.point_of_sales.number, # point of sales
self._receipt.validation.cae, # 14 digits
self._receipt.validation.cae_expiration.strftime('%Y%m%d'), # 8
)
return [int(num) for num in numstring] | [
"def",
"numbers",
"(",
"self",
")",
":",
"numstring",
"=",
"'{:011d}{:02d}{:04d}{}{}'",
".",
"format",
"(",
"self",
".",
"_receipt",
".",
"point_of_sales",
".",
"owner",
".",
"cuit",
",",
"# 11 digits",
"int",
"(",
"self",
".",
"_receipt",
".",
"receipt_type... | Returns the barcode's number without the verification digit.
:return: list(int) | [
"Returns",
"the",
"barcode",
"s",
"number",
"without",
"the",
"verification",
"digit",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/pdf.py#L22-L35 | train | 47,448 |
WhyNotHugo/django-afip | django_afip/pdf.py | ReceiptBarcodeGenerator.verification_digit | def verification_digit(numbers):
"""
Returns the verification digit for a given numbre.
The verification digit is calculated as follows:
* A = sum of all even-positioned numbers
* B = A * 3
* C = sum of all odd-positioned numbers
* D = B + C
* The results is the smallset number N, such that (D + N) % 10 == 0
NOTE: Afip's documentation seems to have odd an even mixed up in the
explanation, but all examples follow the above algorithm.
:param list(int) numbers): The numbers for which the digits is to be
calculated.
:return: int
"""
a = sum(numbers[::2])
b = a * 3
c = sum(numbers[1::2])
d = b + c
e = d % 10
if e == 0:
return e
return 10 - e | python | def verification_digit(numbers):
"""
Returns the verification digit for a given numbre.
The verification digit is calculated as follows:
* A = sum of all even-positioned numbers
* B = A * 3
* C = sum of all odd-positioned numbers
* D = B + C
* The results is the smallset number N, such that (D + N) % 10 == 0
NOTE: Afip's documentation seems to have odd an even mixed up in the
explanation, but all examples follow the above algorithm.
:param list(int) numbers): The numbers for which the digits is to be
calculated.
:return: int
"""
a = sum(numbers[::2])
b = a * 3
c = sum(numbers[1::2])
d = b + c
e = d % 10
if e == 0:
return e
return 10 - e | [
"def",
"verification_digit",
"(",
"numbers",
")",
":",
"a",
"=",
"sum",
"(",
"numbers",
"[",
":",
":",
"2",
"]",
")",
"b",
"=",
"a",
"*",
"3",
"c",
"=",
"sum",
"(",
"numbers",
"[",
"1",
":",
":",
"2",
"]",
")",
"d",
"=",
"b",
"+",
"c",
"e... | Returns the verification digit for a given numbre.
The verification digit is calculated as follows:
* A = sum of all even-positioned numbers
* B = A * 3
* C = sum of all odd-positioned numbers
* D = B + C
* The results is the smallset number N, such that (D + N) % 10 == 0
NOTE: Afip's documentation seems to have odd an even mixed up in the
explanation, but all examples follow the above algorithm.
:param list(int) numbers): The numbers for which the digits is to be
calculated.
:return: int | [
"Returns",
"the",
"verification",
"digit",
"for",
"a",
"given",
"numbre",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/pdf.py#L38-L64 | train | 47,449 |
WhyNotHugo/django-afip | django_afip/pdf.py | ReceiptBarcodeGenerator.full_number | def full_number(self):
"""
Returns the full number including the verification digit.
:return: str
"""
return '{}{}'.format(
''.join(str(n) for n in self.numbers),
ReceiptBarcodeGenerator.verification_digit(self.numbers),
) | python | def full_number(self):
"""
Returns the full number including the verification digit.
:return: str
"""
return '{}{}'.format(
''.join(str(n) for n in self.numbers),
ReceiptBarcodeGenerator.verification_digit(self.numbers),
) | [
"def",
"full_number",
"(",
"self",
")",
":",
"return",
"'{}{}'",
".",
"format",
"(",
"''",
".",
"join",
"(",
"str",
"(",
"n",
")",
"for",
"n",
"in",
"self",
".",
"numbers",
")",
",",
"ReceiptBarcodeGenerator",
".",
"verification_digit",
"(",
"self",
".... | Returns the full number including the verification digit.
:return: str | [
"Returns",
"the",
"full",
"number",
"including",
"the",
"verification",
"digit",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/pdf.py#L67-L76 | train | 47,450 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/datautil/snow_datautil/snow_read_data.py | write_screen_name_to_topics | def write_screen_name_to_topics(filepath, user_label_matrix, node_to_id, id_to_name, label_to_lemma, lemma_to_keyword, separator=","):
"""
Writes a user name and associated topic names per row.
"""
user_label_matrix = spsp.coo_matrix(user_label_matrix)
shape = user_label_matrix.shape
nnz = user_label_matrix.getnnz()
row = user_label_matrix.row
col = user_label_matrix.col
data = user_label_matrix.data
name_to_topic_set = defaultdict(set)
for edge in range(row.size):
node = row[edge]
user_twitter_id = node_to_id[node]
name = id_to_name[user_twitter_id]
label = col[edge]
lemma = label_to_lemma[label]
# topic = lemma_to_keyword[lemma]
name_to_topic_set[name].add(lemma)
with open(filepath, "w") as f:
# Write metadata.
file_row = "n_rows:" + separator + str(shape[0]) + separator +\
"nnz:" + separator + str(nnz) + separator +\
"\n"
f.write(file_row)
for name, topic_set in name_to_topic_set.items():
file_row = list()
file_row.append(name)
file_row.extend(topic_set)
file_row = separator.join(file_row) + "\n"
f.write(file_row) | python | def write_screen_name_to_topics(filepath, user_label_matrix, node_to_id, id_to_name, label_to_lemma, lemma_to_keyword, separator=","):
"""
Writes a user name and associated topic names per row.
"""
user_label_matrix = spsp.coo_matrix(user_label_matrix)
shape = user_label_matrix.shape
nnz = user_label_matrix.getnnz()
row = user_label_matrix.row
col = user_label_matrix.col
data = user_label_matrix.data
name_to_topic_set = defaultdict(set)
for edge in range(row.size):
node = row[edge]
user_twitter_id = node_to_id[node]
name = id_to_name[user_twitter_id]
label = col[edge]
lemma = label_to_lemma[label]
# topic = lemma_to_keyword[lemma]
name_to_topic_set[name].add(lemma)
with open(filepath, "w") as f:
# Write metadata.
file_row = "n_rows:" + separator + str(shape[0]) + separator +\
"nnz:" + separator + str(nnz) + separator +\
"\n"
f.write(file_row)
for name, topic_set in name_to_topic_set.items():
file_row = list()
file_row.append(name)
file_row.extend(topic_set)
file_row = separator.join(file_row) + "\n"
f.write(file_row) | [
"def",
"write_screen_name_to_topics",
"(",
"filepath",
",",
"user_label_matrix",
",",
"node_to_id",
",",
"id_to_name",
",",
"label_to_lemma",
",",
"lemma_to_keyword",
",",
"separator",
"=",
"\",\"",
")",
":",
"user_label_matrix",
"=",
"spsp",
".",
"coo_matrix",
"(",... | Writes a user name and associated topic names per row. | [
"Writes",
"a",
"user",
"name",
"and",
"associated",
"topic",
"names",
"per",
"row",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/snow_datautil/snow_read_data.py#L178-L216 | train | 47,451 |
MultipedRobotics/pyxl320 | bin/servo_ping.py | sweep | def sweep(port, rate, ID, retry=3):
"""
Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ...
"""
if port == 'dummy':
s = ServoSerial(port, rate, fake=True)
else:
s = ServoSerial(port, rate)
if ID < 0:
ID = xl320.XL320_BROADCAST_ADDR
try:
s.open()
except SerialException as e:
# print('Error opening serial port:')
print('-'*40)
print(sys.argv[0], ':')
print(e)
exit(1)
pkt = makePingPacket(ID)
# print('ping', pkt)
s.write(pkt)
# as more servos add up, I might need to increase the cnt number???
for cnt in range(retry):
ans = s.read()
if ans:
for pkt in ans:
servo = packetToDict(pkt)
utils.prettyPrintPacket(servo)
print('raw pkt: {}'.format(pkt))
else:
print('Try {}: no servos found'.format(cnt))
time.sleep(0.1)
s.close() | python | def sweep(port, rate, ID, retry=3):
"""
Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ...
"""
if port == 'dummy':
s = ServoSerial(port, rate, fake=True)
else:
s = ServoSerial(port, rate)
if ID < 0:
ID = xl320.XL320_BROADCAST_ADDR
try:
s.open()
except SerialException as e:
# print('Error opening serial port:')
print('-'*40)
print(sys.argv[0], ':')
print(e)
exit(1)
pkt = makePingPacket(ID)
# print('ping', pkt)
s.write(pkt)
# as more servos add up, I might need to increase the cnt number???
for cnt in range(retry):
ans = s.read()
if ans:
for pkt in ans:
servo = packetToDict(pkt)
utils.prettyPrintPacket(servo)
print('raw pkt: {}'.format(pkt))
else:
print('Try {}: no servos found'.format(cnt))
time.sleep(0.1)
s.close() | [
"def",
"sweep",
"(",
"port",
",",
"rate",
",",
"ID",
",",
"retry",
"=",
"3",
")",
":",
"if",
"port",
"==",
"'dummy'",
":",
"s",
"=",
"ServoSerial",
"(",
"port",
",",
"rate",
",",
"fake",
"=",
"True",
")",
"else",
":",
"s",
"=",
"ServoSerial",
"... | Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ... | [
"Sends",
"a",
"ping",
"packet",
"to",
"ID",
"s",
"from",
"0",
"to",
"maximum",
"and",
"prints",
"out",
"any",
"returned",
"messages",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/bin/servo_ping.py#L45-L87 | train | 47,452 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/arcte/arcte.py | calculate_epsilon_effective | def calculate_epsilon_effective(rho, epsilon, seed_degree, neighbor_degrees, mean_degree):
"""
Semi-automatic effective epsilon threshold calculation.
"""
# Calculate a weighted neighborhood degree average.
# neighborhood_degree = rho*seed_degree + (1-rho)*neighbor_degrees.mean()
neighborhood_degree = neighbor_degrees.mean()
# Calculate the seed neighborhood normalized effective epsilon.
epsilon_effective = (epsilon*np.log(1 + seed_degree))/np.log(1 + neighborhood_degree)
# Calculate the maximum epsilon for at least one push on a neighboring node.
# Also the minimum epsilon for a push on all the neighboring nodes.
epsilon_effective_maximum = np.max(1/(seed_degree*neighbor_degrees))
epsilon_effective_minimum = np.min(1/(seed_degree*neighbor_degrees))
# print(epsilon_effective, epsilon_effective_maximum, epsilon_effective_minimum)
# The maximum epsilon is absolute, whereas we regularize for the minimum.
if epsilon_effective > epsilon_effective_maximum:
epsilon_effective = epsilon_effective_maximum
elif epsilon_effective < epsilon_effective_minimum:
epsilon_effective = (epsilon_effective_minimum + epsilon_effective)/2
return epsilon_effective | python | def calculate_epsilon_effective(rho, epsilon, seed_degree, neighbor_degrees, mean_degree):
"""
Semi-automatic effective epsilon threshold calculation.
"""
# Calculate a weighted neighborhood degree average.
# neighborhood_degree = rho*seed_degree + (1-rho)*neighbor_degrees.mean()
neighborhood_degree = neighbor_degrees.mean()
# Calculate the seed neighborhood normalized effective epsilon.
epsilon_effective = (epsilon*np.log(1 + seed_degree))/np.log(1 + neighborhood_degree)
# Calculate the maximum epsilon for at least one push on a neighboring node.
# Also the minimum epsilon for a push on all the neighboring nodes.
epsilon_effective_maximum = np.max(1/(seed_degree*neighbor_degrees))
epsilon_effective_minimum = np.min(1/(seed_degree*neighbor_degrees))
# print(epsilon_effective, epsilon_effective_maximum, epsilon_effective_minimum)
# The maximum epsilon is absolute, whereas we regularize for the minimum.
if epsilon_effective > epsilon_effective_maximum:
epsilon_effective = epsilon_effective_maximum
elif epsilon_effective < epsilon_effective_minimum:
epsilon_effective = (epsilon_effective_minimum + epsilon_effective)/2
return epsilon_effective | [
"def",
"calculate_epsilon_effective",
"(",
"rho",
",",
"epsilon",
",",
"seed_degree",
",",
"neighbor_degrees",
",",
"mean_degree",
")",
":",
"# Calculate a weighted neighborhood degree average.",
"# neighborhood_degree = rho*seed_degree + (1-rho)*neighbor_degrees.mean()",
"neighborho... | Semi-automatic effective epsilon threshold calculation. | [
"Semi",
"-",
"automatic",
"effective",
"epsilon",
"threshold",
"calculation",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/arcte/arcte.py#L26-L50 | train | 47,453 |
def arcte_with_lazy_pagerank(adjacency_matrix, rho, epsilon, number_of_threads=None):
    """
    Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors.

    Inputs:  - adjacency_matrix in R^(nxn): Adjacency matrix of an undirected network
               represented as a SciPy sparse matrix (converted to CSR internally).
             - rho: Restart probability.
             - epsilon: Approximation threshold.
             - number_of_threads: Worker-process count; defaults to get_threads_number().
    Outputs: - features in R^(nxC_n): The latent space embedding as a SciPy sparse CSR
               matrix (base ego-network features horizontally stacked with the locally
               extracted community features).

    Fixes vs. previous version: the input matrix is no longer mutated in place
    (sparse.csr_matrix shares data with a CSR input, so overwriting .data used to
    clobber the caller's matrix), the duplicated node-ordering code is hoisted out
    of the two branches, and the private pool._pool attribute is no longer accessed.
    """
    adjacency_matrix = sparse.csr_matrix(adjacency_matrix)
    number_of_nodes = adjacency_matrix.shape[0]
    if number_of_threads is None:
        number_of_threads = get_threads_number()

    # Binarized copy of the adjacency matrix; a copy so the caller's data survives.
    binary_adjacency = adjacency_matrix.copy()
    binary_adjacency.data = np.ones_like(binary_adjacency.data)

    # Rank nodes by incident-edge count (descending) and keep those with more
    # than one edge; this ordering is identical for both execution paths.
    edge_count_vector = np.squeeze(np.asarray(binary_adjacency.sum(axis=0), dtype=np.int64))
    iterate_nodes = np.where(edge_count_vector != 0)[0]
    argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
    iterate_nodes = iterate_nodes[argsort_indices][::-1]
    iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]

    if number_of_threads == 1:
        # Calculate natural random walk transition probability matrix.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=False)
        local_features = arcte_with_lazy_pagerank_worker(iterate_nodes,
                                                         rw_transition.indices,
                                                         rw_transition.indptr,
                                                         rw_transition.data,
                                                         out_degree,
                                                         in_degree,
                                                         rho,
                                                         epsilon)
    else:
        # Transition matrix backed by shared memory so workers read without copying.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=True)
        pool = mp.Pool(number_of_threads)
        node_chunks = list(parallel_chunks(iterate_nodes, number_of_threads))
        results = list()
        for chunk_no in range(number_of_threads):
            pool.apply_async(arcte_with_lazy_pagerank_worker,
                             args=(node_chunks[chunk_no],
                                   rw_transition.indices,
                                   rw_transition.indptr,
                                   rw_transition.data,
                                   out_degree,
                                   in_degree,
                                   rho,
                                   epsilon),
                             callback=results.append)
        pool.close()
        pool.join()
        # Sum the partial feature matrices produced by the workers.
        local_features = results[0]
        for additive_features in results[1:]:
            local_features += additive_features
    local_features = sparse.csr_matrix(local_features)

    # Form base community feature matrix: each node plus its direct neighbours.
    identity_matrix = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64))
    base_community_features = identity_matrix + binary_adjacency

    # Stack horizontally matrices to form feature matrix.
    try:
        features = sparse.hstack([base_community_features, local_features]).tocsr()
    except ValueError as e:
        print("Failure with horizontal feature stacking: {}".format(e))
        features = base_community_features
    return features
"""
Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
- rho: Restart probability
- epsilon: Approximation threshold
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
adjacency_matrix = sparse.csr_matrix(adjacency_matrix)
number_of_nodes = adjacency_matrix.shape[0]
if number_of_threads is None:
number_of_threads = get_threads_number()
if number_of_threads == 1:
# Calculate natural random walk transition probability matrix.
rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=False)
a = adjacency_matrix.copy()
a.data = np.ones_like(a.data)
edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
iterate_nodes = np.where(edge_count_vector != 0)[0]
argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
iterate_nodes = iterate_nodes[argsort_indices][::-1]
iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
# iterate_nodes = np.where(out_degree != 0)[0]
# argsort_indices = np.argsort(out_degree[iterate_nodes])
# iterate_nodes = iterate_nodes[argsort_indices][::-1]
# iterate_nodes = iterate_nodes[np.where(out_degree[iterate_nodes] > 1.0)[0]]
local_features = arcte_with_lazy_pagerank_worker(iterate_nodes,
rw_transition.indices,
rw_transition.indptr,
rw_transition.data,
out_degree,
in_degree,
rho,
epsilon)
else:
# Calculate natural random walk transition probability matrix.
rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=True)
a = adjacency_matrix.copy()
a.data = np.ones_like(a.data)
edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
iterate_nodes = np.where(edge_count_vector != 0)[0]
argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
iterate_nodes = iterate_nodes[argsort_indices][::-1]
iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
# iterate_nodes = np.where(out_degree != 0)[0]
# argsort_indices = np.argsort(out_degree[iterate_nodes])
# iterate_nodes = iterate_nodes[argsort_indices][::-1]
# iterate_nodes = iterate_nodes[np.where(out_degree[iterate_nodes] > 1.0)[0]]
pool = mp.Pool(number_of_threads)
node_chunks = list(parallel_chunks(iterate_nodes, number_of_threads))
node_count = 0
for chunk in node_chunks:
node_count += len(list(chunk))
results = list()
for chunk_no in range(len(pool._pool)):
pool.apply_async(arcte_with_lazy_pagerank_worker,
args=(node_chunks[chunk_no],
rw_transition.indices,
rw_transition.indptr,
rw_transition.data,
out_degree,
in_degree,
rho,
epsilon),
callback=results.append)
pool.close()
pool.join()
# local_features = sparse.hstack(results)
local_features = results[0]
for additive_features in results[1:]:
local_features += additive_features
local_features = sparse.csr_matrix(local_features)
# Form base community feature matrix.
identity_matrix = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64))
adjacency_matrix_ones = adjacency_matrix
adjacency_matrix_ones.data = np.ones_like(adjacency_matrix.data)
base_community_features = identity_matrix + adjacency_matrix_ones
# Stack horizontally matrices to form feature matrix.
try:
features = sparse.hstack([base_community_features, local_features]).tocsr()
except ValueError as e:
print("Failure with horizontal feature stacking.")
features = base_community_features
return features | [
"def",
"arcte_with_lazy_pagerank",
"(",
"adjacency_matrix",
",",
"rho",
",",
"epsilon",
",",
"number_of_threads",
"=",
"None",
")",
":",
"adjacency_matrix",
"=",
"sparse",
".",
"csr_matrix",
"(",
"adjacency_matrix",
")",
"number_of_nodes",
"=",
"adjacency_matrix",
"... | Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
- rho: Restart probability
- epsilon: Approximation threshold
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix. | [
"Extracts",
"local",
"community",
"features",
"for",
"all",
"graph",
"nodes",
"based",
"on",
"the",
"partitioning",
"of",
"node",
"-",
"centric",
"similarity",
"vectors",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/arcte/arcte.py#L391-L488 | train | 47,454 |
def get_folds_generator(node_label_matrix,
                        labelled_node_indices,
                        number_of_categories,
                        dataset_memory_folder,
                        percentage,
                        number_of_folds=10):
    """
    Read or form and store the seed nodes for training and testing.

    Folds are cached on disk under ``<dataset_memory_folder>/folds``; when the
    cache file for this percentage exists the folds are read back, otherwise
    they are generated via valid_train_test and persisted (two tab-separated
    rows per fold: the test nodes first, then the train nodes).

    Inputs:  - node_label_matrix: node-label ground truth (SciPy sparse matrix).
             - labelled_node_indices: NumPy array of labelled node indices.
             - number_of_categories: number of categories/classes.
             - dataset_memory_folder: folder where the fold cache lives.
             - percentage: percentage of labelled samples used for training.
             - number_of_folds: how many train/test folds to produce.
    Output:  - a generator of (train, test) index-array pairs.
    """
    training_set_size = int(np.ceil(percentage * labelled_node_indices.size / 100))
    fold_file_path = dataset_memory_folder + "/folds/" + str(percentage) + "_folds.txt"
    train_list = list()
    test_list = list()
    if os.path.exists(fold_file_path):
        # Cached folds: consume two rows per fold (test row, then train row).
        file_row_gen = get_file_row_generator(fold_file_path, "\t")
        for _ in np.arange(number_of_folds):
            test = np.array([int(node) for node in next(file_row_gen)])
            train = np.array([int(node) for node in next(file_row_gen)])
            train_list.append(train)
            test_list.append(test)
    else:
        with open(fold_file_path, "w") as fp:
            for trial in np.arange(number_of_folds):
                train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
                                               training_set_size,
                                               number_of_categories,
                                               trial)
                train = labelled_node_indices[train]
                test = labelled_node_indices[test]
                # Persist the fold: test row first, then train row.
                fp.write("\t".join(str(node) for node in test) + "\n")
                fp.write("\t".join(str(node) for node in train) + "\n")
                train_list.append(train)
                test_list.append(test)
    return ((train, test) for train, test in zip(train_list, test_list))
labelled_node_indices,
number_of_categories,
dataset_memory_folder,
percentage,
number_of_folds=10):
"""
Read or form and store the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- memory_path: The folder where the results are stored.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
"""
number_of_labeled_nodes = labelled_node_indices.size
training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))
####################################################################################################################
# Read or generate folds
####################################################################################################################
fold_file_path = dataset_memory_folder + "/folds/" + str(percentage) + "_folds.txt"
train_list = list()
test_list = list()
if not os.path.exists(fold_file_path):
with open(fold_file_path, "w") as fp:
for trial in np.arange(number_of_folds):
train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
training_set_size,
number_of_categories,
trial)
train = labelled_node_indices[train]
test = labelled_node_indices[test]
# Write test nodes
row = [str(node) for node in test]
row = "\t".join(row) + "\n"
fp.write(row)
# Write train nodes
row = [str(node) for node in train]
row = "\t".join(row) + "\n"
fp.write(row)
train_list.append(train)
test_list.append(test)
else:
file_row_gen = get_file_row_generator(fold_file_path, "\t")
for trial in np.arange(number_of_folds):
# Read test nodes
test = next(file_row_gen)
test = [int(node) for node in test]
test = np.array(test)
# Read train nodes
train = next(file_row_gen)
train = [int(node) for node in train]
train = np.array(train)
train_list.append(train)
test_list.append(test)
folds = ((train, test) for train, test in zip(train_list, test_list))
return folds | [
"def",
"get_folds_generator",
"(",
"node_label_matrix",
",",
"labelled_node_indices",
",",
"number_of_categories",
",",
"dataset_memory_folder",
",",
"percentage",
",",
"number_of_folds",
"=",
"10",
")",
":",
"number_of_labeled_nodes",
"=",
"labelled_node_indices",
".",
"... | Read or form and store the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- memory_path: The folder where the results are stored.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds. | [
"Read",
"or",
"form",
"and",
"store",
"the",
"seed",
"nodes",
"for",
"training",
"and",
"testing",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/holdout.py#L11-L77 | train | 47,455 |
def generate_folds(node_label_matrix, labelled_node_indices, number_of_categories, percentage, number_of_folds=10):
    """
    Form the seed nodes for training and testing.

    Inputs:  - node_label_matrix: node-label ground truth (SciPy sparse matrix).
             - labelled_node_indices: NumPy array of labelled node indices.
             - number_of_categories: number of categories/classes.
             - percentage: percentage of labelled samples used for training.
             - number_of_folds: how many train/test folds to produce.
    Output:  - a generator of (train, test) index-array pairs.
    """
    training_set_size = int(np.ceil(percentage * labelled_node_indices.size / 100))
    fold_pairs = list()
    for trial in np.arange(number_of_folds):
        # valid_train_test returns positions into the labelled subset; map them
        # back to global node indices before storing the fold.
        train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
                                       training_set_size,
                                       number_of_categories,
                                       trial)
        fold_pairs.append((labelled_node_indices[train], labelled_node_indices[test]))
    return (pair for pair in fold_pairs)
"""
Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
"""
number_of_labeled_nodes = labelled_node_indices.size
training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))
####################################################################################################################
# Generate folds
####################################################################################################################
train_list = list()
test_list = list()
for trial in np.arange(number_of_folds):
train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
training_set_size,
number_of_categories,
trial)
train = labelled_node_indices[train]
test = labelled_node_indices[test]
train_list.append(train)
test_list.append(test)
folds = ((train, test) for train, test in zip(train_list, test_list))
return folds | [
"def",
"generate_folds",
"(",
"node_label_matrix",
",",
"labelled_node_indices",
",",
"number_of_categories",
",",
"percentage",
",",
"number_of_folds",
"=",
"10",
")",
":",
"number_of_labeled_nodes",
"=",
"labelled_node_indices",
".",
"size",
"training_set_size",
"=",
... | Form the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds. | [
"Form",
"the",
"seed",
"nodes",
"for",
"training",
"and",
"testing",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/holdout.py#L80-L111 | train | 47,456 |
def form_node_label_prediction_matrix(y_pred, y_test):
    """
    Given the discriminator distances, this function forms the node-label prediction matrix.

    The number of true labels per node is assumed known (taken from y_test):
    for each test node, the top-k highest-scoring labels are marked, where k is
    that node's true label count.

    Inputs:  - y_pred: NumPy array of per-label discriminator distances per node.
             - y_test: node-label ground truth for the test set (SciPy sparse CSR).
    Outputs: - the node-label prediction as a SciPy sparse COOrdinate matrix.
    """
    # Per-node true label count (1 x n matrix after squeezing the column sum).
    label_counts = np.squeeze(y_test.sum(axis=1))
    # Columns sorted by ascending score for every node.
    sorted_columns = np.argsort(y_pred, axis=1)
    rows = []
    cols = []
    for node in np.arange(y_pred.shape[0]):
        count = label_counts[0, node]
        rows.extend([node] * count)
        # The `count` highest-scoring labels for this node, best first.
        cols.extend(sorted_columns[node, ::-1][:count])
    rows = np.asarray(rows, dtype=np.int64)
    cols = np.asarray(cols, dtype=np.int64)
    ones = np.ones_like(rows, dtype=np.int8)
    return sparse.coo_matrix((ones, (rows, cols)), shape=y_test.shape)
"""
Given the discriminator distances, this function forms the node-label prediction matrix.
It is assumed that the number of true labels is known.
Inputs: - y_pred: A NumPy array that contains the distance from the discriminator for each label for each user.
- y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
Outputs: - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format.
"""
number_of_test_nodes = y_pred.shape[0]
# We calculate the number of true labels for each node.
true_number_of_labels = np.squeeze(y_test.sum(axis=1))
# We sort the prediction array for each node.
index = np.argsort(y_pred, axis=1)
row = np.empty(y_test.getnnz(), dtype=np.int64)
col = np.empty(y_test.getnnz(), dtype=np.int64)
start = 0
for n in np.arange(number_of_test_nodes):
end = start + true_number_of_labels[0, n]
row[start:end] = n
col[start:end] = index[n, -1:-true_number_of_labels[0, n]-1:-1]
start = end
data = np.ones_like(row, dtype=np.int8)
y_pred = sparse.coo_matrix((data, (row, col)), shape=y_test.shape)
return y_pred | [
"def",
"form_node_label_prediction_matrix",
"(",
"y_pred",
",",
"y_test",
")",
":",
"number_of_test_nodes",
"=",
"y_pred",
".",
"shape",
"[",
"0",
"]",
"# We calculate the number of true labels for each node.",
"true_number_of_labels",
"=",
"np",
".",
"squeeze",
"(",
"y... | Given the discriminator distances, this function forms the node-label prediction matrix.
It is assumed that the number of true labels is known.
Inputs: - y_pred: A NumPy array that contains the distance from the discriminator for each label for each user.
- y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
Outputs: - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format. | [
"Given",
"the",
"discriminator",
"distances",
"this",
"function",
"forms",
"the",
"node",
"-",
"label",
"prediction",
"matrix",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/evaluation.py#L9-L40 | train | 47,457 |
def calculate_measures(y_pred, y_test):
    """
    Calculates the F-scores and F-score averages given a classification result and a ground truth.

    Inputs:  - y_pred: node-label prediction for the test set (SciPy sparse CSR).
             - y_test: node-label ground truth for the test set (SciPy sparse CSR).
    Outputs: - a list of NumPy values/arrays in the order:
               [macro_recall, micro_recall, macro_precision, micro_precision,
                macro_F1, micro_F1, per-class F1 array].
    """
    dense_pred = y_pred.toarray()
    dense_true = y_test.toarray()
    # Macro averages weight every class equally; micro averages weight every sample.
    macro_precision, macro_recall, macro_F1, _ = precision_recall_fscore_support(
        dense_true, dense_pred, beta=1.0, average="macro")
    micro_precision, micro_recall, micro_F1, _ = precision_recall_fscore_support(
        dense_true, dense_pred, beta=1.0, average="micro")
    per_class_F1 = f1_score(dense_true, dense_pred, average=None)
    return [macro_recall, micro_recall,
            macro_precision, micro_precision,
            macro_F1, micro_F1,
            per_class_F1]
"""
Calculates the F-scores and F-score averages given a classification result and a ground truth.
Inputs: - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format.
- y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
Outputs: - measures: A number of NumPy arrays containing evaluation scores for the experiment.
"""
y_pred = y_pred.toarray()
y_test = y_test.toarray()
macro_precision, macro_recall, macro_F1, macro_support = precision_recall_fscore_support(y_test,
y_pred,
beta=1.0,
average="macro")
micro_precision, micro_recall, micro_F1, micro_support = precision_recall_fscore_support(y_test,
y_pred,
beta=1.0,
average="micro")
F1 = f1_score(y_test,
y_pred,
average=None)
measures = [macro_recall, micro_recall, macro_precision, micro_precision, macro_F1, micro_F1, F1]
return measures | [
"def",
"calculate_measures",
"(",
"y_pred",
",",
"y_test",
")",
":",
"y_pred",
"=",
"y_pred",
".",
"toarray",
"(",
")",
"y_test",
"=",
"y_test",
".",
"toarray",
"(",
")",
"macro_precision",
",",
"macro_recall",
",",
"macro_F1",
",",
"macro_support",
"=",
"... | Calculates the F-scores and F-score averages given a classification result and a ground truth.
Inputs: - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format.
- y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
Outputs: - measures: A number of NumPy arrays containing evaluation scores for the experiment. | [
"Calculates",
"the",
"F",
"-",
"scores",
"and",
"F",
"-",
"score",
"averages",
"given",
"a",
"classification",
"result",
"and",
"a",
"ground",
"truth",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/evaluation.py#L43-L70 | train | 47,458 |
def decode(buff):
    """
    Transforms the raw buffer data read in into a list of bytes (ints).

    Each character of ``buff`` is converted with ``ord()``; an empty buffer
    yields an empty list.
    """
    # NOTE(review): the previous guard `if 0 == len(pp) == 1: pp = []` was a
    # chained comparison that is always False (a length cannot be both 0 and 1)
    # and has been removed as dead code. If the intent was to discard
    # single-byte garbage reads, the condition should have been
    # `len(pp) <= 1` -- confirm against the serial protocol before changing.
    return list(map(ord, buff))
"""
Transforms the raw buffer data read in into a list of bytes
"""
pp = list(map(ord, buff))
if 0 == len(pp) == 1:
pp = []
return pp | [
"def",
"decode",
"(",
"buff",
")",
":",
"pp",
"=",
"list",
"(",
"map",
"(",
"ord",
",",
"buff",
")",
")",
"if",
"0",
"==",
"len",
"(",
"pp",
")",
"==",
"1",
":",
"pp",
"=",
"[",
"]",
"return",
"pp"
] | Transforms the raw buffer data read in into a list of bytes | [
"Transforms",
"the",
"raw",
"buffer",
"data",
"read",
"in",
"into",
"a",
"list",
"of",
"bytes"
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/ServoSerial.py#L147-L154 | train | 47,459 |
def write(self, pkt):
    """
    Write a packet out over the serial line.

    Switches the RTS pin to write mode, clears any pending input, converts
    the packet (an iterable of ints) into an immutable bytes object and sends
    it through the underlying serial port.

    Returns the number of bytes reported written by the serial driver.
    """
    # Half-duplex direction control: drive RTS for transmit before sending.
    self.setRTS(self.DD_WRITE)
    self.flushInput()
    # Serialize the integer sequence into raw bytes for transmission.
    payload = bytes(bytearray(pkt))
    return self.serial.write(payload)
"""
This is a simple serial write command. It toggles the RTS pin and formats
all of the data into bytes before it writes.
"""
self.setRTS(self.DD_WRITE)
self.flushInput()
# prep data array for transmition
pkt = bytearray(pkt)
pkt = bytes(pkt)
num = self.serial.write(pkt)
# self.serial.flush()
# print('wrote {} of len(pkt) = {}'.format(num, len(pkt)))
return num | [
"def",
"write",
"(",
"self",
",",
"pkt",
")",
":",
"self",
".",
"setRTS",
"(",
"self",
".",
"DD_WRITE",
")",
"self",
".",
"flushInput",
"(",
")",
"# prep data array for transmition",
"pkt",
"=",
"bytearray",
"(",
"pkt",
")",
"pkt",
"=",
"bytes",
"(",
"... | This is a simple serial write command. It toggles the RTS pin and formats
all of the data into bytes before it writes. | [
"This",
"is",
"a",
"simple",
"serial",
"write",
"command",
".",
"It",
"toggles",
"the",
"RTS",
"pin",
"and",
"formats",
"all",
"of",
"the",
"data",
"into",
"bytes",
"before",
"it",
"writes",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/ServoSerial.py#L252-L266 | train | 47,460 |
def sendPkt(self, pkt, retry=5, sleep_time=0.01):
    """
    Send a command packet and wait for the servo's status packet.

    The packet is (re-)sent up to ``retry`` times, pausing ``sleep_time``
    seconds after each failed attempt, until a non-empty response arrives.

    in:
        pkt - command packet to send to the servo
        retry - maximum number of attempts (default 5)
        sleep_time - delay between attempts in seconds
    out:
        the response packet, or None when every attempt timed out
    """
    attempts_left = retry
    while attempts_left > 0:
        attempts_left -= 1
        self.serial.flushInput()
        self.write(pkt)          # push the command out to the servo
        response = self.read()   # read back the status packet
        if response:
            return response
        time.sleep(sleep_time)   # brief back-off before the next attempt
    return None
"""
Sends a packet and waits for a return. If no return is given, then it
resends the packet. If an error occurs, it also resends the packet.
in:
pkt - command packet to send to servo
cnt - how many retries should this do? default = 5
out:
array of packets
"""
for cnt in range(retry):
self.serial.flushInput()
self.write(pkt) # send packet to servo
ans = self.read() # get return status packet
if ans:
# check for error and resend
return ans
else:
# print('>> retry {} <<'.format(cnt))
time.sleep(sleep_time)
return None | [
"def",
"sendPkt",
"(",
"self",
",",
"pkt",
",",
"retry",
"=",
"5",
",",
"sleep_time",
"=",
"0.01",
")",
":",
"for",
"cnt",
"in",
"range",
"(",
"retry",
")",
":",
"self",
".",
"serial",
".",
"flushInput",
"(",
")",
"self",
".",
"write",
"(",
"pkt"... | Sends a packet and waits for a return. If no return is given, then it
resends the packet. If an error occurs, it also resends the packet.
in:
pkt - command packet to send to servo
cnt - how many retries should this do? default = 5
out:
array of packets | [
"Sends",
"a",
"packet",
"and",
"waits",
"for",
"a",
"return",
".",
"If",
"no",
"return",
"is",
"given",
"then",
"it",
"resends",
"the",
"packet",
".",
"If",
"an",
"error",
"occurs",
"it",
"also",
"resends",
"the",
"packet",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/ServoSerial.py#L268-L292 | train | 47,461 |
def scipy_sparse_to_csv(filepath, matrix, separator=",", directed=False, numbering="matlab"):
    """
    Writes a sparse matrix in separated value format.

    The first row is a metadata header (shape, nnz, directedness); every
    following row is ``row<sep>col<sep>value``. With ``numbering="matlab"``
    indices are written 1-based, with ``numbering="c"`` they stay 0-based.
    For undirected matrices only entries with col >= row are written, so each
    edge appears once.

    Raises RuntimeError for an unknown ``numbering`` style.
    """
    matrix = spsp.coo_matrix(matrix)
    shape = matrix.shape
    nnz = matrix.getnnz()
    # Index base: MATLAB counts from 1, C from 0. (Collapses the previously
    # duplicated per-style branches.)
    if numbering == "matlab":
        offset = 1
    elif numbering == "c":
        offset = 0
    else:
        # Carry the reason in the exception instead of printing and raising bare.
        raise RuntimeError("Invalid numbering style: " + repr(numbering))
    row = matrix.row + offset
    col = matrix.col + offset
    data = matrix.data
    with open(filepath, "w") as f:
        # Write metadata.
        file_row = "n_rows:" + separator + str(shape[0]) + separator +\
                   "n_cols:" + separator + str(shape[1]) + separator +\
                   "nnz:" + separator + str(nnz) + separator +\
                   "directed:" + separator + str(directed) +\
                   "\n"
        f.write(file_row)
        for edge in range(row.size):
            # Skip the lower triangle for undirected matrices.
            if directed is False and col[edge] < row[edge]:
                continue
            f.write(str(row[edge]) + separator + str(col[edge]) + separator + str(data[edge]) + "\n")
"""
Writes sparse matrix in separated value format.
"""
matrix = spsp.coo_matrix(matrix)
shape = matrix.shape
nnz = matrix.getnnz()
if numbering == "matlab":
row = matrix.row + 1
col = matrix.col + 1
data = matrix.data
elif numbering == "c":
row = matrix.row
col = matrix.col
data = matrix.data
else:
print("Invalid numbering style.")
raise RuntimeError
with open(filepath, "w") as f:
# Write metadata.
file_row = "n_rows:" + separator + str(shape[0]) + separator +\
"n_cols:" + separator + str(shape[1]) + separator +\
"nnz:" + separator + str(nnz) + separator +\
"directed:" + separator + str(directed) +\
"\n"
f.write(file_row)
for edge in range(row.size):
if directed is False:
if col[edge] < row[edge]:
continue
file_row = str(row[edge]) + separator + str(col[edge]) + separator + str(data[edge]) + "\n"
f.write(file_row) | [
"def",
"scipy_sparse_to_csv",
"(",
"filepath",
",",
"matrix",
",",
"separator",
"=",
"\",\"",
",",
"directed",
"=",
"False",
",",
"numbering",
"=",
"\"matlab\"",
")",
":",
"matrix",
"=",
"spsp",
".",
"coo_matrix",
"(",
"matrix",
")",
"shape",
"=",
"matrix"... | Writes sparse matrix in separated value format. | [
"Writes",
"sparse",
"matrix",
"in",
"separated",
"value",
"format",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/insight_datautil/insight_read_data.py#L139-L174 | train | 47,462 |
toumorokoshi/jsonschema-extractor | ubuild.py | publish | def publish(build):
""" publish the package itself """
build.packages.install("wheel")
build.packages.install("twine")
build.executables.run([
"python", "setup.py",
"sdist", "bdist_wheel", "--universal", "--release"
])
build.executables.run([
"twine", "upload", "dist/*"
]) | python | def publish(build):
""" publish the package itself """
build.packages.install("wheel")
build.packages.install("twine")
build.executables.run([
"python", "setup.py",
"sdist", "bdist_wheel", "--universal", "--release"
])
build.executables.run([
"twine", "upload", "dist/*"
]) | [
"def",
"publish",
"(",
"build",
")",
":",
"build",
".",
"packages",
".",
"install",
"(",
"\"wheel\"",
")",
"build",
".",
"packages",
".",
"install",
"(",
"\"twine\"",
")",
"build",
".",
"executables",
".",
"run",
"(",
"[",
"\"python\"",
",",
"\"setup.py\... | publish the package itself | [
"publish",
"the",
"package",
"itself"
] | e72c608f6ea72cb8bb43912e1177829cff32c13b | https://github.com/toumorokoshi/jsonschema-extractor/blob/e72c608f6ea72cb8bb43912e1177829cff32c13b/ubuild.py#L24-L34 | train | 47,463 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/eps_randomwalk/similarity.py | fast_approximate_personalized_pagerank | def fast_approximate_personalized_pagerank(s,
r,
w_i,
a_i,
out_degree,
in_degree,
seed_node,
rho=0.2,
epsilon=0.00001):
"""
Calculates the approximate personalized PageRank starting from a seed node without self-loops.
"""
# Initialize approximate PageRank and residual distributions
# s = np.zeros(number_of_nodes, dtype=np.float64)
# r = np.zeros(number_of_nodes, dtype=np.float64)
r[seed_node] = 1.0
# Initialize queue of nodes to be pushed
pushable = deque()
pushable.append(seed_node)
# Do one push anyway
push_node = pushable.popleft()
pagerank_limit_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho)
number_of_push_operations = 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while len(pushable) > 0:
# While there are nodes with large residual probabilities, push
push_node = pushable.popleft()
if r[push_node]/in_degree[push_node] >= epsilon:
pagerank_limit_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho)
number_of_push_operations += 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
return number_of_push_operations | python | def fast_approximate_personalized_pagerank(s,
r,
w_i,
a_i,
out_degree,
in_degree,
seed_node,
rho=0.2,
epsilon=0.00001):
"""
Calculates the approximate personalized PageRank starting from a seed node without self-loops.
"""
# Initialize approximate PageRank and residual distributions
# s = np.zeros(number_of_nodes, dtype=np.float64)
# r = np.zeros(number_of_nodes, dtype=np.float64)
r[seed_node] = 1.0
# Initialize queue of nodes to be pushed
pushable = deque()
pushable.append(seed_node)
# Do one push anyway
push_node = pushable.popleft()
pagerank_limit_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho)
number_of_push_operations = 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while len(pushable) > 0:
# While there are nodes with large residual probabilities, push
push_node = pushable.popleft()
if r[push_node]/in_degree[push_node] >= epsilon:
pagerank_limit_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho)
number_of_push_operations += 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
return number_of_push_operations | [
"def",
"fast_approximate_personalized_pagerank",
"(",
"s",
",",
"r",
",",
"w_i",
",",
"a_i",
",",
"out_degree",
",",
"in_degree",
",",
"seed_node",
",",
"rho",
"=",
"0.2",
",",
"epsilon",
"=",
"0.00001",
")",
":",
"# Initialize approximate PageRank and residual di... | Calculates the approximate personalized PageRank starting from a seed node without self-loops. | [
"Calculates",
"the",
"approximate",
"personalized",
"PageRank",
"starting",
"from",
"a",
"seed",
"node",
"without",
"self",
"-",
"loops",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/eps_randomwalk/similarity.py#L11-L63 | train | 47,464 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/eps_randomwalk/similarity.py | lazy_approximate_personalized_pagerank | def lazy_approximate_personalized_pagerank(s,
r,
w_i,
a_i,
out_degree,
in_degree,
seed_node,
rho=0.2,
epsilon=0.00001,
laziness_factor=0.5):
"""
Calculates the approximate personalized PageRank starting from a seed node with self-loops.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
"""
# Initialize approximate PageRank and residual distributions
# s = np.zeros(number_of_nodes, dtype=np.float64)
# r = np.zeros(number_of_nodes, dtype=np.float64)
r[seed_node] = 1.0
# Initialize queue of nodes to be pushed
pushable = deque()
pushable.append(seed_node)
# Do one push anyway
push_node = pushable.popleft()
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations = 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
# While there are nodes with large residual probabilities, push
while len(pushable) > 0:
push_node = pushable.popleft()
if r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
return number_of_push_operations | python | def lazy_approximate_personalized_pagerank(s,
r,
w_i,
a_i,
out_degree,
in_degree,
seed_node,
rho=0.2,
epsilon=0.00001,
laziness_factor=0.5):
"""
Calculates the approximate personalized PageRank starting from a seed node with self-loops.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
"""
# Initialize approximate PageRank and residual distributions
# s = np.zeros(number_of_nodes, dtype=np.float64)
# r = np.zeros(number_of_nodes, dtype=np.float64)
r[seed_node] = 1.0
# Initialize queue of nodes to be pushed
pushable = deque()
pushable.append(seed_node)
# Do one push anyway
push_node = pushable.popleft()
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations = 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
# While there are nodes with large residual probabilities, push
while len(pushable) > 0:
push_node = pushable.popleft()
if r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
if i.size > 0:
pushable.extend(a_i[push_node][i])
while r[push_node]/in_degree[push_node] >= epsilon:
pagerank_lazy_push(s,
r,
w_i[push_node],
a_i[push_node],
push_node,
rho,
laziness_factor)
number_of_push_operations += 1
return number_of_push_operations | [
"def",
"lazy_approximate_personalized_pagerank",
"(",
"s",
",",
"r",
",",
"w_i",
",",
"a_i",
",",
"out_degree",
",",
"in_degree",
",",
"seed_node",
",",
"rho",
"=",
"0.2",
",",
"epsilon",
"=",
"0.00001",
",",
"laziness_factor",
"=",
"0.5",
")",
":",
"# Ini... | Calculates the approximate personalized PageRank starting from a seed node with self-loops.
Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
Local graph partitioning using pagerank vectors.
In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE. | [
"Calculates",
"the",
"approximate",
"personalized",
"PageRank",
"starting",
"from",
"a",
"seed",
"node",
"with",
"self",
"-",
"loops",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/eps_randomwalk/similarity.py#L66-L146 | train | 47,465 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/datautil/read_exotic_features.py | read_matlab_features | def read_matlab_features(array_paths, number_of_nodes, dimensionality):
"""
Returns a sparse feature matrix as calculated by a Matlab routine.
"""
# Read the data array
file_row_gen = get_file_row_generator(array_paths[0], "\t")
data = list()
append_data = data.append
for file_row in file_row_gen:
append_data(float(file_row[0]))
# Read the row array
file_row_gen = get_file_row_generator(array_paths[1], "\t")
row = list()
append_row = row.append
for file_row in file_row_gen:
append_row(int(float(file_row[0])))
# Read the data array
file_row_gen = get_file_row_generator(array_paths[2], "\t")
col = list()
append_col = col.append
for file_row in file_row_gen:
append_col(int(float(file_row[0])))
data = np.array(data).astype(np.float64)
row = np.array(row).astype(np.int64) - 1 # Due to Matlab numbering
col = np.array(col).astype(np.int64) - 1 # Due to Matlab numbering
print(np.max(row), np.min(row))
print(np.max(col), np.min(col))
# centroids_new = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes + 1, k))
features = spsp.coo_matrix((data, (row, col)), shape=(number_of_nodes, dimensionality))
return features | python | def read_matlab_features(array_paths, number_of_nodes, dimensionality):
"""
Returns a sparse feature matrix as calculated by a Matlab routine.
"""
# Read the data array
file_row_gen = get_file_row_generator(array_paths[0], "\t")
data = list()
append_data = data.append
for file_row in file_row_gen:
append_data(float(file_row[0]))
# Read the row array
file_row_gen = get_file_row_generator(array_paths[1], "\t")
row = list()
append_row = row.append
for file_row in file_row_gen:
append_row(int(float(file_row[0])))
# Read the data array
file_row_gen = get_file_row_generator(array_paths[2], "\t")
col = list()
append_col = col.append
for file_row in file_row_gen:
append_col(int(float(file_row[0])))
data = np.array(data).astype(np.float64)
row = np.array(row).astype(np.int64) - 1 # Due to Matlab numbering
col = np.array(col).astype(np.int64) - 1 # Due to Matlab numbering
print(np.max(row), np.min(row))
print(np.max(col), np.min(col))
# centroids_new = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes + 1, k))
features = spsp.coo_matrix((data, (row, col)), shape=(number_of_nodes, dimensionality))
return features | [
"def",
"read_matlab_features",
"(",
"array_paths",
",",
"number_of_nodes",
",",
"dimensionality",
")",
":",
"# Read the data array",
"file_row_gen",
"=",
"get_file_row_generator",
"(",
"array_paths",
"[",
"0",
"]",
",",
"\"\\t\"",
")",
"data",
"=",
"list",
"(",
")... | Returns a sparse feature matrix as calculated by a Matlab routine. | [
"Returns",
"a",
"sparse",
"feature",
"matrix",
"as",
"calculated",
"by",
"a",
"Matlab",
"routine",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/datautil/read_exotic_features.py#L108-L143 | train | 47,466 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | community_neighbors | def community_neighbors(c_j, reverse_index_rows, unavailable_communities, unavailable_communities_counter):
"""
Finds communities with shared nodes to a seed community. Called by mroc.
Inputs: - c_j: The seed community for which we want to find which communities overlap.
- reverse_index_rows: A node to community indicator matrix.
- unavailable_communities: A set of communities that have already either been merged or failed to merge.
- unavailable_communities_counter: The number of such communities.
Outputs: - indices: An array containing the communities that exhibit overlap with the seed community.
"""
indices = list()
extend = indices.extend
for node in c_j:
extend(reverse_index_rows[node])
indices = np.array(indices)
indices = np.setdiff1d(indices, unavailable_communities[:unavailable_communities_counter+1])
return indices | python | def community_neighbors(c_j, reverse_index_rows, unavailable_communities, unavailable_communities_counter):
"""
Finds communities with shared nodes to a seed community. Called by mroc.
Inputs: - c_j: The seed community for which we want to find which communities overlap.
- reverse_index_rows: A node to community indicator matrix.
- unavailable_communities: A set of communities that have already either been merged or failed to merge.
- unavailable_communities_counter: The number of such communities.
Outputs: - indices: An array containing the communities that exhibit overlap with the seed community.
"""
indices = list()
extend = indices.extend
for node in c_j:
extend(reverse_index_rows[node])
indices = np.array(indices)
indices = np.setdiff1d(indices, unavailable_communities[:unavailable_communities_counter+1])
return indices | [
"def",
"community_neighbors",
"(",
"c_j",
",",
"reverse_index_rows",
",",
"unavailable_communities",
",",
"unavailable_communities_counter",
")",
":",
"indices",
"=",
"list",
"(",
")",
"extend",
"=",
"indices",
".",
"extend",
"for",
"node",
"in",
"c_j",
":",
"ex... | Finds communities with shared nodes to a seed community. Called by mroc.
Inputs: - c_j: The seed community for which we want to find which communities overlap.
- reverse_index_rows: A node to community indicator matrix.
- unavailable_communities: A set of communities that have already either been merged or failed to merge.
- unavailable_communities_counter: The number of such communities.
Outputs: - indices: An array containing the communities that exhibit overlap with the seed community. | [
"Finds",
"communities",
"with",
"shared",
"nodes",
"to",
"a",
"seed",
"community",
".",
"Called",
"by",
"mroc",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L185-L204 | train | 47,467 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | jaccard | def jaccard(c_1, c_2):
"""
Calculates the Jaccard similarity between two sets of nodes. Called by mroc.
Inputs: - c_1: Community (set of nodes) 1.
- c_2: Community (set of nodes) 2.
Outputs: - jaccard_similarity: The Jaccard similarity of these two communities.
"""
nom = np.intersect1d(c_1, c_2).size
denom = np.union1d(c_1, c_2).size
return nom/denom | python | def jaccard(c_1, c_2):
"""
Calculates the Jaccard similarity between two sets of nodes. Called by mroc.
Inputs: - c_1: Community (set of nodes) 1.
- c_2: Community (set of nodes) 2.
Outputs: - jaccard_similarity: The Jaccard similarity of these two communities.
"""
nom = np.intersect1d(c_1, c_2).size
denom = np.union1d(c_1, c_2).size
return nom/denom | [
"def",
"jaccard",
"(",
"c_1",
",",
"c_2",
")",
":",
"nom",
"=",
"np",
".",
"intersect1d",
"(",
"c_1",
",",
"c_2",
")",
".",
"size",
"denom",
"=",
"np",
".",
"union1d",
"(",
"c_1",
",",
"c_2",
")",
".",
"size",
"return",
"nom",
"/",
"denom"
] | Calculates the Jaccard similarity between two sets of nodes. Called by mroc.
Inputs: - c_1: Community (set of nodes) 1.
- c_2: Community (set of nodes) 2.
Outputs: - jaccard_similarity: The Jaccard similarity of these two communities. | [
"Calculates",
"the",
"Jaccard",
"similarity",
"between",
"two",
"sets",
"of",
"nodes",
".",
"Called",
"by",
"mroc",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L207-L218 | train | 47,468 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | louvain | def louvain(adjacency_matrix):
"""
Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
# Convert to networkx undirected graph.
adjacency_matrix = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.Graph())
# Call LOUVAIN algorithm to calculate a hierarchy of communities.
tree = community.generate_dendogram(adjacency_matrix, part_init=None)
# Embed communities
row = list()
col = list()
append_row = row.append
append_col = col.append
community_counter = 0
for i in range(len(tree)):
partition = community.partition_at_level(tree, i)
for n, c in partition.items():
append_row(n)
append_col(community_counter + c)
community_counter += max(partition.values()) + 1
row = np.array(row)
col = np.array(col)
data = np.ones(row.size, dtype=np.float64)
louvain_features = sparse.coo_matrix((data, (row, col)), shape=(len(partition.keys()), community_counter),
dtype=np.float64)
return louvain_features | python | def louvain(adjacency_matrix):
"""
Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
# Convert to networkx undirected graph.
adjacency_matrix = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.Graph())
# Call LOUVAIN algorithm to calculate a hierarchy of communities.
tree = community.generate_dendogram(adjacency_matrix, part_init=None)
# Embed communities
row = list()
col = list()
append_row = row.append
append_col = col.append
community_counter = 0
for i in range(len(tree)):
partition = community.partition_at_level(tree, i)
for n, c in partition.items():
append_row(n)
append_col(community_counter + c)
community_counter += max(partition.values()) + 1
row = np.array(row)
col = np.array(col)
data = np.ones(row.size, dtype=np.float64)
louvain_features = sparse.coo_matrix((data, (row, col)), shape=(len(partition.keys()), community_counter),
dtype=np.float64)
return louvain_features | [
"def",
"louvain",
"(",
"adjacency_matrix",
")",
":",
"# Convert to networkx undirected graph.",
"adjacency_matrix",
"=",
"nx",
".",
"from_scipy_sparse_matrix",
"(",
"adjacency_matrix",
",",
"create_using",
"=",
"nx",
".",
"Graph",
"(",
")",
")",
"# Call LOUVAIN algorith... | Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix. | [
"Performs",
"community",
"embedding",
"using",
"the",
"LOUVAIN",
"method",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L221-L261 | train | 47,469 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | laplacian_eigenmaps | def laplacian_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
"""
# Calculate sparse graph Laplacian.
laplacian = get_normalized_laplacian(adjacency_matrix)
# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.
try:
eigenvalues, eigenvectors = spla.eigsh(laplacian,
k=k,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
# Discard the eigenvector corresponding to the zero-valued eigenvalue.
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | python | def laplacian_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
"""
# Calculate sparse graph Laplacian.
laplacian = get_normalized_laplacian(adjacency_matrix)
# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.
try:
eigenvalues, eigenvectors = spla.eigsh(laplacian,
k=k,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
# Discard the eigenvector corresponding to the zero-valued eigenvalue.
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | [
"def",
"laplacian_eigenmaps",
"(",
"adjacency_matrix",
",",
"k",
")",
":",
"# Calculate sparse graph Laplacian.",
"laplacian",
"=",
"get_normalized_laplacian",
"(",
"adjacency_matrix",
")",
"# Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian.",
"try",
":",
... | Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix.
Introduced in: Belkin, M., & Niyogi, P. (2003).
Laplacian eigenmaps for dimensionality reduction and data representation.
Neural computation, 15(6), 1373-1396.
Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix.
- k: The number of eigenvectors to extract.
Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector. | [
"Performs",
"spectral",
"graph",
"embedding",
"using",
"the",
"graph",
"symmetric",
"normalized",
"Laplacian",
"matrix",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L264-L294 | train | 47,470 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | replicator_eigenmaps | def replicator_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding on the centrality reweighted adjacency matrix
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a scipy.sparse.coo_matrix
- k: The number of social dimensions/eigenvectors to extract
- max_iter: The maximum number of iterations for the iterative eigensolution method
Outputs: - S in R^(nxk): The social dimensions represented as a numpy.array matrix
"""
number_of_nodes = adjacency_matrix.shape[0]
max_eigenvalue = spla.eigsh(adjacency_matrix,
k=1,
which='LM',
return_eigenvectors=False)
# Calculate Replicator matrix
eye_matrix = sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64)
eye_matrix = eye_matrix.tocsr()
eye_matrix.data = eye_matrix.data*max_eigenvalue
replicator = eye_matrix - adjacency_matrix
# Calculate bottom k+1 eigenvalues and eigenvectors of normalised Laplacian
try:
eigenvalues, eigenvectors = spla.eigsh(replicator,
k=k+1,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | python | def replicator_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding on the centrality reweighted adjacency matrix
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a scipy.sparse.coo_matrix
- k: The number of social dimensions/eigenvectors to extract
- max_iter: The maximum number of iterations for the iterative eigensolution method
Outputs: - S in R^(nxk): The social dimensions represented as a numpy.array matrix
"""
number_of_nodes = adjacency_matrix.shape[0]
max_eigenvalue = spla.eigsh(adjacency_matrix,
k=1,
which='LM',
return_eigenvectors=False)
# Calculate Replicator matrix
eye_matrix = sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64)
eye_matrix = eye_matrix.tocsr()
eye_matrix.data = eye_matrix.data*max_eigenvalue
replicator = eye_matrix - adjacency_matrix
# Calculate bottom k+1 eigenvalues and eigenvectors of normalised Laplacian
try:
eigenvalues, eigenvectors = spla.eigsh(replicator,
k=k+1,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | [
"def",
"replicator_eigenmaps",
"(",
"adjacency_matrix",
",",
"k",
")",
":",
"number_of_nodes",
"=",
"adjacency_matrix",
".",
"shape",
"[",
"0",
"]",
"max_eigenvalue",
"=",
"spla",
".",
"eigsh",
"(",
"adjacency_matrix",
",",
"k",
"=",
"1",
",",
"which",
"=",
... | Performs spectral graph embedding on the centrality reweighted adjacency matrix
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a scipy.sparse.coo_matrix
- k: The number of social dimensions/eigenvectors to extract
- max_iter: The maximum number of iterations for the iterative eigensolution method
Outputs: - S in R^(nxk): The social dimensions represented as a numpy.array matrix | [
"Performs",
"spectral",
"graph",
"embedding",
"on",
"the",
"centrality",
"reweighted",
"adjacency",
"matrix"
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L297-L333 | train | 47,471 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/embedding/competing_methods.py | base_communities | def base_communities(adjacency_matrix):
"""
Forms the community indicator normalized feature matrix for any graph.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
number_of_nodes = adjacency_matrix.shape[0]
# X = A + I
adjacency_matrix = adjacency_matrix.tocsr()
adjacency_matrix = adjacency_matrix.transpose()
features = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes)) + adjacency_matrix.tocsr()
features = features.tocsr()
features.data = np.ones_like(features.data)
return features | python | def base_communities(adjacency_matrix):
"""
Forms the community indicator normalized feature matrix for any graph.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
number_of_nodes = adjacency_matrix.shape[0]
# X = A + I
adjacency_matrix = adjacency_matrix.tocsr()
adjacency_matrix = adjacency_matrix.transpose()
features = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes)) + adjacency_matrix.tocsr()
features = features.tocsr()
features.data = np.ones_like(features.data)
return features | [
"def",
"base_communities",
"(",
"adjacency_matrix",
")",
":",
"number_of_nodes",
"=",
"adjacency_matrix",
".",
"shape",
"[",
"0",
"]",
"# X = A + I",
"adjacency_matrix",
"=",
"adjacency_matrix",
".",
"tocsr",
"(",
")",
"adjacency_matrix",
"=",
"adjacency_matrix",
".... | Forms the community indicator normalized feature matrix for any graph.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix. | [
"Forms",
"the",
"community",
"indicator",
"normalized",
"feature",
"matrix",
"for",
"any",
"graph",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/embedding/competing_methods.py#L336-L353 | train | 47,472 |
WhyNotHugo/django-afip | django_afip/crypto.py | create_key | def create_key(file_):
"""
Create a key and save it into ``file_``.
Note that ``file`` must be opened in binary mode.
"""
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
file_.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
file_.flush() | python | def create_key(file_):
"""
Create a key and save it into ``file_``.
Note that ``file`` must be opened in binary mode.
"""
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
file_.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
file_.flush() | [
"def",
"create_key",
"(",
"file_",
")",
":",
"pkey",
"=",
"crypto",
".",
"PKey",
"(",
")",
"pkey",
".",
"generate_key",
"(",
"crypto",
".",
"TYPE_RSA",
",",
"2048",
")",
"file_",
".",
"write",
"(",
"crypto",
".",
"dump_privatekey",
"(",
"crypto",
".",
... | Create a key and save it into ``file_``.
Note that ``file`` must be opened in binary mode. | [
"Create",
"a",
"key",
"and",
"save",
"it",
"into",
"file_",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/crypto.py#L41-L51 | train | 47,473 |
WhyNotHugo/django-afip | django_afip/crypto.py | create_csr | def create_csr(key_file, organization_name, common_name, serial_number, file_):
"""Create a CSR for a key, and save it into ``file``."""
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_file.read())
req = crypto.X509Req()
subj = req.get_subject()
subj.O = organization_name # noqa: E741 (we can't do anything about this)
subj.CN = common_name
subj.serialNumber = serial_number
req.set_pubkey(key)
req.sign(key, 'md5')
file_.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)) | python | def create_csr(key_file, organization_name, common_name, serial_number, file_):
"""Create a CSR for a key, and save it into ``file``."""
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_file.read())
req = crypto.X509Req()
subj = req.get_subject()
subj.O = organization_name # noqa: E741 (we can't do anything about this)
subj.CN = common_name
subj.serialNumber = serial_number
req.set_pubkey(key)
req.sign(key, 'md5')
file_.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)) | [
"def",
"create_csr",
"(",
"key_file",
",",
"organization_name",
",",
"common_name",
",",
"serial_number",
",",
"file_",
")",
":",
"key",
"=",
"crypto",
".",
"load_privatekey",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"key_file",
".",
"read",
"(",
")",
")",
... | Create a CSR for a key, and save it into ``file``. | [
"Create",
"a",
"CSR",
"for",
"a",
"key",
"and",
"save",
"it",
"into",
"file",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/crypto.py#L54-L68 | train | 47,474 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/learning/classification.py | model_fit | def model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, classifier_type="LinearSVC"):
"""
Fits a Linear Support Vector Classifier to the labelled graph-based features using the LIBLINEAR library.
One-vs-All: http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
LinearSVC: http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- svm_hardness: Penalty of the error term.
- fit_intercept: Data centering as per scikit-learn.
- number_of_threads: The number of threads to use for training the multi-label scheme.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
"""
if classifier_type == "LinearSVC":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = OneVsRestClassifier(LinearSVC(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept),
n_jobs=number_of_threads)
model.fit(X_train, y_train)
elif classifier_type == "LogisticRegression":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = OneVsRestClassifier(LogisticRegression(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept),
n_jobs=number_of_threads)
model.fit(X_train, y_train)
elif classifier_type == "RandomForest":
model = OneVsRestClassifier(RandomForestClassifier(n_estimators=1000, criterion="gini",
n_jobs=number_of_threads, random_state=0))
if issparse(X_train):
model.fit(X_train.tocsc(), y_train.toarray())
else:
model.fit(X_train, y_train.toarray())
else:
print("Invalid classifier type.")
raise RuntimeError
return model | python | def model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, classifier_type="LinearSVC"):
"""
Fits a Linear Support Vector Classifier to the labelled graph-based features using the LIBLINEAR library.
One-vs-All: http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
LinearSVC: http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- svm_hardness: Penalty of the error term.
- fit_intercept: Data centering as per scikit-learn.
- number_of_threads: The number of threads to use for training the multi-label scheme.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
"""
if classifier_type == "LinearSVC":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = OneVsRestClassifier(LinearSVC(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept),
n_jobs=number_of_threads)
model.fit(X_train, y_train)
elif classifier_type == "LogisticRegression":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = OneVsRestClassifier(LogisticRegression(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept),
n_jobs=number_of_threads)
model.fit(X_train, y_train)
elif classifier_type == "RandomForest":
model = OneVsRestClassifier(RandomForestClassifier(n_estimators=1000, criterion="gini",
n_jobs=number_of_threads, random_state=0))
if issparse(X_train):
model.fit(X_train.tocsc(), y_train.toarray())
else:
model.fit(X_train, y_train.toarray())
else:
print("Invalid classifier type.")
raise RuntimeError
return model | [
"def",
"model_fit",
"(",
"X_train",
",",
"y_train",
",",
"svm_hardness",
",",
"fit_intercept",
",",
"number_of_threads",
",",
"classifier_type",
"=",
"\"LinearSVC\"",
")",
":",
"if",
"classifier_type",
"==",
"\"LinearSVC\"",
":",
"if",
"X_train",
".",
"shape",
"... | Fits a Linear Support Vector Classifier to the labelled graph-based features using the LIBLINEAR library.
One-vs-All: http://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html
LinearSVC: http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- svm_hardness: Penalty of the error term.
- fit_intercept: Data centering as per scikit-learn.
- number_of_threads: The number of threads to use for training the multi-label scheme.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models. | [
"Fits",
"a",
"Linear",
"Support",
"Vector",
"Classifier",
"to",
"the",
"labelled",
"graph",
"-",
"based",
"features",
"using",
"the",
"LIBLINEAR",
"library",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/classification.py#L13-L62 | train | 47,475 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/learning/classification.py | meta_model_fit | def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"):
"""
Trains meta-labeler for predicting number of labels for each user.
Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
Large scale multi-label classification via metalabeler.
In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM.
"""
if regressor_type == "LinearSVR":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = LinearSVR(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept)
y_train_meta = y_train.sum(axis=1)
model.fit(X_train, y_train_meta)
else:
print("Invalid regressor type.")
raise RuntimeError
return model | python | def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"):
"""
Trains meta-labeler for predicting number of labels for each user.
Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
Large scale multi-label classification via metalabeler.
In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM.
"""
if regressor_type == "LinearSVR":
if X_train.shape[0] > X_train.shape[1]:
dual = False
else:
dual = True
model = LinearSVR(C=svm_hardness, random_state=0, dual=dual,
fit_intercept=fit_intercept)
y_train_meta = y_train.sum(axis=1)
model.fit(X_train, y_train_meta)
else:
print("Invalid regressor type.")
raise RuntimeError
return model | [
"def",
"meta_model_fit",
"(",
"X_train",
",",
"y_train",
",",
"svm_hardness",
",",
"fit_intercept",
",",
"number_of_threads",
",",
"regressor_type",
"=",
"\"LinearSVR\"",
")",
":",
"if",
"regressor_type",
"==",
"\"LinearSVR\"",
":",
"if",
"X_train",
".",
"shape",
... | Trains meta-labeler for predicting number of labels for each user.
Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
Large scale multi-label classification via metalabeler.
In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM. | [
"Trains",
"meta",
"-",
"labeler",
"for",
"predicting",
"number",
"of",
"labels",
"for",
"each",
"user",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/classification.py#L65-L87 | train | 47,476 |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/learning/classification.py | weigh_users | def weigh_users(X_test, model, classifier_type="LinearSVC"):
"""
Uses a trained model and the unlabelled features to produce a user-to-label distance matrix.
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator.
"""
if classifier_type == "LinearSVC":
decision_weights = model.decision_function(X_test)
elif classifier_type == "LogisticRegression":
decision_weights = model.predict_proba(X_test)
elif classifier_type == "RandomForest":
# if issparse(X_test):
# decision_weights = np.hstack(a[:, 1].reshape(X_test.shape[0], 1) for a in model.predict_proba(X_test.tocsr()))
# else:
# decision_weights = np.hstack(a[:, 1].reshape(X_test.shape[0], 1) for a in model.predict_proba(X_test))
if issparse(X_test):
decision_weights = model.predict_proba(X_test.tocsr())
else:
decision_weights = model.predict_proba(X_test)
else:
print("Invalid classifier type.")
raise RuntimeError
return decision_weights | python | def weigh_users(X_test, model, classifier_type="LinearSVC"):
"""
Uses a trained model and the unlabelled features to produce a user-to-label distance matrix.
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator.
"""
if classifier_type == "LinearSVC":
decision_weights = model.decision_function(X_test)
elif classifier_type == "LogisticRegression":
decision_weights = model.predict_proba(X_test)
elif classifier_type == "RandomForest":
# if issparse(X_test):
# decision_weights = np.hstack(a[:, 1].reshape(X_test.shape[0], 1) for a in model.predict_proba(X_test.tocsr()))
# else:
# decision_weights = np.hstack(a[:, 1].reshape(X_test.shape[0], 1) for a in model.predict_proba(X_test))
if issparse(X_test):
decision_weights = model.predict_proba(X_test.tocsr())
else:
decision_weights = model.predict_proba(X_test)
else:
print("Invalid classifier type.")
raise RuntimeError
return decision_weights | [
"def",
"weigh_users",
"(",
"X_test",
",",
"model",
",",
"classifier_type",
"=",
"\"LinearSVC\"",
")",
":",
"if",
"classifier_type",
"==",
"\"LinearSVC\"",
":",
"decision_weights",
"=",
"model",
".",
"decision_function",
"(",
"X_test",
")",
"elif",
"classifier_type... | Uses a trained model and the unlabelled features to produce a user-to-label distance matrix.
Inputs: - feature_matrix: The graph based-features in either NumPy or SciPy sparse array format.
- model: A trained scikit-learn One-vs-All multi-label scheme of linear SVC models.
- classifier_type: A string to be chosen among: * LinearSVC (LibLinear)
* LogisticRegression (LibLinear)
* RandomForest
Output: - decision_weights: A NumPy array containing the distance of each user from each label discriminator. | [
"Uses",
"a",
"trained",
"model",
"and",
"the",
"unlabelled",
"features",
"to",
"produce",
"a",
"user",
"-",
"to",
"-",
"label",
"distance",
"matrix",
"."
] | eda862687aa5a64b79c6b12de1b4dca6ce986dc8 | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/classification.py#L90-L119 | train | 47,477 |
WhyNotHugo/django-afip | django_afip/clients.py | get_client | def get_client(service_name, sandbox=False):
"""
Returns a client for a given service.
The `sandbox` argument should only be necessary if a the client will be
used to make a request. If it will only be used to serialize objects, it is
irrelevant. Avoid the overhead of determining the sandbox mode in the
calling context if only serialization operations will take place.
:param string service_name: The name of the web services.
:param bool sandbox: Whether the sandbox (or production) environment should
be used by the returned client.
:returns: A zeep client to communicate with an AFIP webservice.
:rtype: zeep.Client
"""
key = (service_name.lower(), sandbox,)
try:
if key not in cached_clients:
cached_clients[key] = Client(wsdls[key], transport=transport)
return cached_clients[key]
except KeyError:
raise ValueError('Unknown service name, {}'.format(service_name)) | python | def get_client(service_name, sandbox=False):
"""
Returns a client for a given service.
The `sandbox` argument should only be necessary if a the client will be
used to make a request. If it will only be used to serialize objects, it is
irrelevant. Avoid the overhead of determining the sandbox mode in the
calling context if only serialization operations will take place.
:param string service_name: The name of the web services.
:param bool sandbox: Whether the sandbox (or production) environment should
be used by the returned client.
:returns: A zeep client to communicate with an AFIP webservice.
:rtype: zeep.Client
"""
key = (service_name.lower(), sandbox,)
try:
if key not in cached_clients:
cached_clients[key] = Client(wsdls[key], transport=transport)
return cached_clients[key]
except KeyError:
raise ValueError('Unknown service name, {}'.format(service_name)) | [
"def",
"get_client",
"(",
"service_name",
",",
"sandbox",
"=",
"False",
")",
":",
"key",
"=",
"(",
"service_name",
".",
"lower",
"(",
")",
",",
"sandbox",
",",
")",
"try",
":",
"if",
"key",
"not",
"in",
"cached_clients",
":",
"cached_clients",
"[",
"ke... | Returns a client for a given service.
The `sandbox` argument should only be necessary if a the client will be
used to make a request. If it will only be used to serialize objects, it is
irrelevant. Avoid the overhead of determining the sandbox mode in the
calling context if only serialization operations will take place.
:param string service_name: The name of the web services.
:param bool sandbox: Whether the sandbox (or production) environment should
be used by the returned client.
:returns: A zeep client to communicate with an AFIP webservice.
:rtype: zeep.Client | [
"Returns",
"a",
"client",
"for",
"a",
"given",
"service",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/clients.py#L24-L47 | train | 47,478 |
toumorokoshi/jsonschema-extractor | jsonschema_extractor/attrs_extractor.py | AttrsExtractor.extract | def extract(cls, extractor, typ):
"""
take an attrs based class, and convert it
to jsonschema.
"""
schema = {
"title": typ.__name__,
"type": "object",
"properties": {},
"required": []
}
for attribute in attr.fields(typ):
details = cls._extract_attribute(extractor, attribute)
if details.is_required:
schema["required"].append(details.name)
schema["properties"][details.name] = details.schema
return schema | python | def extract(cls, extractor, typ):
"""
take an attrs based class, and convert it
to jsonschema.
"""
schema = {
"title": typ.__name__,
"type": "object",
"properties": {},
"required": []
}
for attribute in attr.fields(typ):
details = cls._extract_attribute(extractor, attribute)
if details.is_required:
schema["required"].append(details.name)
schema["properties"][details.name] = details.schema
return schema | [
"def",
"extract",
"(",
"cls",
",",
"extractor",
",",
"typ",
")",
":",
"schema",
"=",
"{",
"\"title\"",
":",
"typ",
".",
"__name__",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"}",
",",
"\"required\"",
":",
"[",
"]",
"}",
"fo... | take an attrs based class, and convert it
to jsonschema. | [
"take",
"an",
"attrs",
"based",
"class",
"and",
"convert",
"it",
"to",
"jsonschema",
"."
] | e72c608f6ea72cb8bb43912e1177829cff32c13b | https://github.com/toumorokoshi/jsonschema-extractor/blob/e72c608f6ea72cb8bb43912e1177829cff32c13b/jsonschema_extractor/attrs_extractor.py#L18-L34 | train | 47,479 |
MultipedRobotics/pyxl320 | pyxl320/utils.py | prettyPrintPacket | def prettyPrintPacket(ctrl_table):
"""
This will pretty print out a packet's fields.
in: dictionary of a packet
out: nothing ... everything is printed to screen
"""
print('---------------------------------------')
print("{:.<29} {}".format('id', ctrl_table['id']))
ctrl_table.pop('id')
for key, value in ctrl_table.items():
print("{:.<29} {}".format(key, value)) | python | def prettyPrintPacket(ctrl_table):
"""
This will pretty print out a packet's fields.
in: dictionary of a packet
out: nothing ... everything is printed to screen
"""
print('---------------------------------------')
print("{:.<29} {}".format('id', ctrl_table['id']))
ctrl_table.pop('id')
for key, value in ctrl_table.items():
print("{:.<29} {}".format(key, value)) | [
"def",
"prettyPrintPacket",
"(",
"ctrl_table",
")",
":",
"print",
"(",
"'---------------------------------------'",
")",
"print",
"(",
"\"{:.<29} {}\"",
".",
"format",
"(",
"'id'",
",",
"ctrl_table",
"[",
"'id'",
"]",
")",
")",
"ctrl_table",
".",
"pop",
"(",
"... | This will pretty print out a packet's fields.
in: dictionary of a packet
out: nothing ... everything is printed to screen | [
"This",
"will",
"pretty",
"print",
"out",
"a",
"packet",
"s",
"fields",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/utils.py#L62-L73 | train | 47,480 |
MultipedRobotics/pyxl320 | pyxl320/utils.py | JsonFile.write | def write(fname, data):
"""
Writes a Json file
in: fname - file name
data - dictionary of data to put into the file
out: nothing, everything is written to a file
"""
try:
with open(fname, 'w') as f:
json.dump(data, f)
except IOError:
raise Exception('Could not open {0!s} for writing'.format((fname))) | python | def write(fname, data):
"""
Writes a Json file
in: fname - file name
data - dictionary of data to put into the file
out: nothing, everything is written to a file
"""
try:
with open(fname, 'w') as f:
json.dump(data, f)
except IOError:
raise Exception('Could not open {0!s} for writing'.format((fname))) | [
"def",
"write",
"(",
"fname",
",",
"data",
")",
":",
"try",
":",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"data",
",",
"f",
")",
"except",
"IOError",
":",
"raise",
"Exception",
"(",
"'Could not open {... | Writes a Json file
in: fname - file name
data - dictionary of data to put into the file
out: nothing, everything is written to a file | [
"Writes",
"a",
"Json",
"file"
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/utils.py#L96-L110 | train | 47,481 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | makePacket | def makePacket(ID, instr, reg=None, params=None):
"""
This makes a generic packet.
TODO: look a struct ... does that add value using it?
0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INST, PARAM 1, PARAM 2, ..., PARAM N, CRC_L, CRC_H]
in:
ID - servo id
instr - instruction
reg - register
params - instruction parameter values
out: packet
"""
pkt = []
pkt += [0xFF, 0xFF, 0xFD] # header
pkt += [0x00] # reserved byte
pkt += [ID]
pkt += [0x00, 0x00] # length placeholder
pkt += [instr] # instruction
if reg:
pkt += le(reg) # not everything has a register
if params:
pkt += params # not everything has parameters
length = le(len(pkt) - 5) # length = len(packet) - (header(3), reserve(1), id(1))
pkt[5] = length[0] # L
pkt[6] = length[1] # H
crc = crc16(pkt)
pkt += le(crc)
return pkt | python | def makePacket(ID, instr, reg=None, params=None):
"""
This makes a generic packet.
TODO: look a struct ... does that add value using it?
0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INST, PARAM 1, PARAM 2, ..., PARAM N, CRC_L, CRC_H]
in:
ID - servo id
instr - instruction
reg - register
params - instruction parameter values
out: packet
"""
pkt = []
pkt += [0xFF, 0xFF, 0xFD] # header
pkt += [0x00] # reserved byte
pkt += [ID]
pkt += [0x00, 0x00] # length placeholder
pkt += [instr] # instruction
if reg:
pkt += le(reg) # not everything has a register
if params:
pkt += params # not everything has parameters
length = le(len(pkt) - 5) # length = len(packet) - (header(3), reserve(1), id(1))
pkt[5] = length[0] # L
pkt[6] = length[1] # H
crc = crc16(pkt)
pkt += le(crc)
return pkt | [
"def",
"makePacket",
"(",
"ID",
",",
"instr",
",",
"reg",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"pkt",
"=",
"[",
"]",
"pkt",
"+=",
"[",
"0xFF",
",",
"0xFF",
",",
"0xFD",
"]",
"# header",
"pkt",
"+=",
"[",
"0x00",
"]",
"# reserved byt... | This makes a generic packet.
TODO: look a struct ... does that add value using it?
0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INST, PARAM 1, PARAM 2, ..., PARAM N, CRC_L, CRC_H]
in:
ID - servo id
instr - instruction
reg - register
params - instruction parameter values
out: packet | [
"This",
"makes",
"a",
"generic",
"packet",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L111-L143 | train | 47,482 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | makeControlModePacket | def makeControlModePacket(ID, mode):
"""
Sets the xl-320 to either servo or wheel mode
"""
pkt = makeWritePacket(ID, xl320.XL320_CONTROL_MODE, le(mode))
return pkt | python | def makeControlModePacket(ID, mode):
"""
Sets the xl-320 to either servo or wheel mode
"""
pkt = makeWritePacket(ID, xl320.XL320_CONTROL_MODE, le(mode))
return pkt | [
"def",
"makeControlModePacket",
"(",
"ID",
",",
"mode",
")",
":",
"pkt",
"=",
"makeWritePacket",
"(",
"ID",
",",
"xl320",
".",
"XL320_CONTROL_MODE",
",",
"le",
"(",
"mode",
")",
")",
"return",
"pkt"
] | Sets the xl-320 to either servo or wheel mode | [
"Sets",
"the",
"xl",
"-",
"320",
"to",
"either",
"servo",
"or",
"wheel",
"mode"
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L271-L276 | train | 47,483 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | makeBaudRatePacket | def makeBaudRatePacket(ID, rate):
"""
Set baud rate of servo.
in: rate - 0: 9600, 1:57600, 2:115200, 3:1Mbps
out: write packet
"""
if rate not in [0, 1, 2, 3]:
raise Exception('Packet.makeBaudRatePacket: wrong rate {}'.format(rate))
pkt = makeWritePacket(ID, xl320.XL320_BAUD_RATE, [rate])
return pkt | python | def makeBaudRatePacket(ID, rate):
"""
Set baud rate of servo.
in: rate - 0: 9600, 1:57600, 2:115200, 3:1Mbps
out: write packet
"""
if rate not in [0, 1, 2, 3]:
raise Exception('Packet.makeBaudRatePacket: wrong rate {}'.format(rate))
pkt = makeWritePacket(ID, xl320.XL320_BAUD_RATE, [rate])
return pkt | [
"def",
"makeBaudRatePacket",
"(",
"ID",
",",
"rate",
")",
":",
"if",
"rate",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
"]",
":",
"raise",
"Exception",
"(",
"'Packet.makeBaudRatePacket: wrong rate {}'",
".",
"format",
"(",
"rate",
")",
")",
"... | Set baud rate of servo.
in: rate - 0: 9600, 1:57600, 2:115200, 3:1Mbps
out: write packet | [
"Set",
"baud",
"rate",
"of",
"servo",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L279-L289 | train | 47,484 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | makeSyncAnglePacket | def makeSyncAnglePacket(info):
"""
Write sync angle information to servos.
info = [[ID, angle], [ID, angle], ...]
"""
addr = le(xl320.XL320_GOAL_POSITION)
data = []
# since all servo angles have the same register addr (XL320_GOAL_POSITION)
# and data size (2), a sinc packet is smart choice
# compare bulk vs sync for the same commands:
# bulk = 94 bytes
# sync = 50 bytes
data.append(addr[0]) # LSB
data.append(addr[1]) # MSB
data.append(2) # data size LSM
data.append(0) # data size MSB
for pkt in info:
data.append(pkt[0]) # ID
angle = le(int(pkt[1]/300*1023))
data.append(angle[0]) # LSB
data.append(angle[1]) # MSB
ID = xl320.XL320_BROADCAST_ADDR
instr = xl320.XL320_SYNC_WRITE
pkt = makePacket(ID, instr, None, data) # create packet
# print(pkt)
return pkt | python | def makeSyncAnglePacket(info):
"""
Write sync angle information to servos.
info = [[ID, angle], [ID, angle], ...]
"""
addr = le(xl320.XL320_GOAL_POSITION)
data = []
# since all servo angles have the same register addr (XL320_GOAL_POSITION)
# and data size (2), a sinc packet is smart choice
# compare bulk vs sync for the same commands:
# bulk = 94 bytes
# sync = 50 bytes
data.append(addr[0]) # LSB
data.append(addr[1]) # MSB
data.append(2) # data size LSM
data.append(0) # data size MSB
for pkt in info:
data.append(pkt[0]) # ID
angle = le(int(pkt[1]/300*1023))
data.append(angle[0]) # LSB
data.append(angle[1]) # MSB
ID = xl320.XL320_BROADCAST_ADDR
instr = xl320.XL320_SYNC_WRITE
pkt = makePacket(ID, instr, None, data) # create packet
# print(pkt)
return pkt | [
"def",
"makeSyncAnglePacket",
"(",
"info",
")",
":",
"addr",
"=",
"le",
"(",
"xl320",
".",
"XL320_GOAL_POSITION",
")",
"data",
"=",
"[",
"]",
"# since all servo angles have the same register addr (XL320_GOAL_POSITION)",
"# and data size (2), a sinc packet is smart choice",
"#... | Write sync angle information to servos.
info = [[ID, angle], [ID, angle], ...] | [
"Write",
"sync",
"angle",
"information",
"to",
"servos",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L292-L322 | train | 47,485 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | makeBulkAnglePacket | def makeBulkAnglePacket(info):
"""
Write bulk angle information to servos.
info = [[ID, angle], [ID, angle], ...]
"""
addr = le(xl320.XL320_GOAL_POSITION)
data = []
for pkt in info:
data.append(pkt[0]) # ID
data.append(addr[0]) # LSB
data.append(addr[1]) # MSB
data.append(2) # 2 bytes
data.append(0)
angle = le(int(pkt[1]/300*1023))
data.append(angle[0]) # LSB
data.append(angle[1]) # MSB
ID = xl320.XL320_BROADCAST_ADDR
instr = xl320.XL320_BULK_WRITE
pkt = makePacket(ID, instr, None, data) # create packet
return pkt | python | def makeBulkAnglePacket(info):
"""
Write bulk angle information to servos.
info = [[ID, angle], [ID, angle], ...]
"""
addr = le(xl320.XL320_GOAL_POSITION)
data = []
for pkt in info:
data.append(pkt[0]) # ID
data.append(addr[0]) # LSB
data.append(addr[1]) # MSB
data.append(2) # 2 bytes
data.append(0)
angle = le(int(pkt[1]/300*1023))
data.append(angle[0]) # LSB
data.append(angle[1]) # MSB
ID = xl320.XL320_BROADCAST_ADDR
instr = xl320.XL320_BULK_WRITE
pkt = makePacket(ID, instr, None, data) # create packet
return pkt | [
"def",
"makeBulkAnglePacket",
"(",
"info",
")",
":",
"addr",
"=",
"le",
"(",
"xl320",
".",
"XL320_GOAL_POSITION",
")",
"data",
"=",
"[",
"]",
"for",
"pkt",
"in",
"info",
":",
"data",
".",
"append",
"(",
"pkt",
"[",
"0",
"]",
")",
"# ID",
"data",
".... | Write bulk angle information to servos.
info = [[ID, angle], [ID, angle], ...] | [
"Write",
"bulk",
"angle",
"information",
"to",
"servos",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L325-L347 | train | 47,486 |
MultipedRobotics/pyxl320 | pyxl320/Packet.py | findPkt | def findPkt(pkt):
"""
Search through a string of binary for a valid xl320 package.
in: buffer to search through
out: a list of valid data packet
"""
# print('findpkt', pkt)
# print('-----------------------')
ret = []
while len(pkt)-10 >= 0:
if pkt[0:4] != [0xFF, 0xFF, 0xFD, 0x00]:
pkt.pop(0) # get rid of the first index
# print(' - pop:', pkt)
continue
# print(' > good packet')
length = (pkt[6] << 8) + pkt[5]
# print(' > length', length)
crc_pos = 5 + length
pkt_crc = pkt[crc_pos:crc_pos + 2]
crc = le(crc16(pkt[:crc_pos]))
# if len(pkt) < (crc_pos + 1):
# print('<<< need more data for findPkt >>>')
# print(' > calc crc', crc)
# print(' > pkt crc', pkt_crc)
if pkt_crc == crc:
pkt_end = crc_pos+2
ret.append(pkt[:pkt_end])
# print(' > found:', pkt[:pkt_end])
# print(' > pkt size', pkt_end)
del pkt[:pkt_end]
# print(' > remaining:', pkt)
else:
pkt_end = crc_pos+2
# print(' - crap:', pkt[:pkt_end])
del pkt[:pkt_end]
# print('findpkt ret:', ret)
return ret | python | def findPkt(pkt):
"""
Search through a string of binary for a valid xl320 package.
in: buffer to search through
out: a list of valid data packet
"""
# print('findpkt', pkt)
# print('-----------------------')
ret = []
while len(pkt)-10 >= 0:
if pkt[0:4] != [0xFF, 0xFF, 0xFD, 0x00]:
pkt.pop(0) # get rid of the first index
# print(' - pop:', pkt)
continue
# print(' > good packet')
length = (pkt[6] << 8) + pkt[5]
# print(' > length', length)
crc_pos = 5 + length
pkt_crc = pkt[crc_pos:crc_pos + 2]
crc = le(crc16(pkt[:crc_pos]))
# if len(pkt) < (crc_pos + 1):
# print('<<< need more data for findPkt >>>')
# print(' > calc crc', crc)
# print(' > pkt crc', pkt_crc)
if pkt_crc == crc:
pkt_end = crc_pos+2
ret.append(pkt[:pkt_end])
# print(' > found:', pkt[:pkt_end])
# print(' > pkt size', pkt_end)
del pkt[:pkt_end]
# print(' > remaining:', pkt)
else:
pkt_end = crc_pos+2
# print(' - crap:', pkt[:pkt_end])
del pkt[:pkt_end]
# print('findpkt ret:', ret)
return ret | [
"def",
"findPkt",
"(",
"pkt",
")",
":",
"# print('findpkt', pkt)",
"# print('-----------------------')",
"ret",
"=",
"[",
"]",
"while",
"len",
"(",
"pkt",
")",
"-",
"10",
">=",
"0",
":",
"if",
"pkt",
"[",
"0",
":",
"4",
"]",
"!=",
"[",
"0xFF",
",",
"... | Search through a string of binary for a valid xl320 package.
in: buffer to search through
out: a list of valid data packet | [
"Search",
"through",
"a",
"string",
"of",
"binary",
"for",
"a",
"valid",
"xl320",
"package",
"."
] | 1a56540e208b028ee47d5fa0a7c7babcee0d9214 | https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L433-L471 | train | 47,487 |
WhyNotHugo/django-afip | django_afip/models.py | populate_all | def populate_all():
"""Fetch and store all metadata from the AFIP."""
ReceiptType.objects.populate()
ConceptType.objects.populate()
DocumentType.objects.populate()
VatType.objects.populate()
TaxType.objects.populate()
CurrencyType.objects.populate() | python | def populate_all():
"""Fetch and store all metadata from the AFIP."""
ReceiptType.objects.populate()
ConceptType.objects.populate()
DocumentType.objects.populate()
VatType.objects.populate()
TaxType.objects.populate()
CurrencyType.objects.populate() | [
"def",
"populate_all",
"(",
")",
":",
"ReceiptType",
".",
"objects",
".",
"populate",
"(",
")",
"ConceptType",
".",
"objects",
".",
"populate",
"(",
")",
"DocumentType",
".",
"objects",
".",
"populate",
"(",
")",
"VatType",
".",
"objects",
".",
"populate",... | Fetch and store all metadata from the AFIP. | [
"Fetch",
"and",
"store",
"all",
"metadata",
"from",
"the",
"AFIP",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L52-L59 | train | 47,488 |
WhyNotHugo/django-afip | django_afip/models.py | first_currency | def first_currency():
"""
Returns the id for the first currency
The `default` parameter of a foreign key *MUST* be a primary key (and not
an instance), else migrations break. This helper method exists solely for
that purpose.
"""
ct = CurrencyType.objects.filter(code='PES').first()
if ct:
return ct.pk | python | def first_currency():
"""
Returns the id for the first currency
The `default` parameter of a foreign key *MUST* be a primary key (and not
an instance), else migrations break. This helper method exists solely for
that purpose.
"""
ct = CurrencyType.objects.filter(code='PES').first()
if ct:
return ct.pk | [
"def",
"first_currency",
"(",
")",
":",
"ct",
"=",
"CurrencyType",
".",
"objects",
".",
"filter",
"(",
"code",
"=",
"'PES'",
")",
".",
"first",
"(",
")",
"if",
"ct",
":",
"return",
"ct",
".",
"pk"
] | Returns the id for the first currency
The `default` parameter of a foreign key *MUST* be a primary key (and not
an instance), else migrations break. This helper method exists solely for
that purpose. | [
"Returns",
"the",
"id",
"for",
"the",
"first",
"currency"
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L79-L89 | train | 47,489 |
WhyNotHugo/django-afip | django_afip/models.py | GenericAfipTypeManager.populate | def populate(self, ticket=None):
"""
Populate the database with types retrieved from the AFIP.
If no ticket is provided, the most recent available one will be used.
"""
ticket = ticket or AuthTicket.objects.get_any_active('wsfe')
client = clients.get_client('wsfe', ticket.owner.is_sandboxed)
service = getattr(client.service, self.__service_name)
response_xml = service(serializers.serialize_ticket(ticket))
check_response(response_xml)
for result in getattr(response_xml.ResultGet, self.__type_name):
self.get_or_create(
code=result.Id,
description=result.Desc,
valid_from=parsers.parse_date(result.FchDesde),
valid_to=parsers.parse_date(result.FchHasta),
) | python | def populate(self, ticket=None):
"""
Populate the database with types retrieved from the AFIP.
If no ticket is provided, the most recent available one will be used.
"""
ticket = ticket or AuthTicket.objects.get_any_active('wsfe')
client = clients.get_client('wsfe', ticket.owner.is_sandboxed)
service = getattr(client.service, self.__service_name)
response_xml = service(serializers.serialize_ticket(ticket))
check_response(response_xml)
for result in getattr(response_xml.ResultGet, self.__type_name):
self.get_or_create(
code=result.Id,
description=result.Desc,
valid_from=parsers.parse_date(result.FchDesde),
valid_to=parsers.parse_date(result.FchHasta),
) | [
"def",
"populate",
"(",
"self",
",",
"ticket",
"=",
"None",
")",
":",
"ticket",
"=",
"ticket",
"or",
"AuthTicket",
".",
"objects",
".",
"get_any_active",
"(",
"'wsfe'",
")",
"client",
"=",
"clients",
".",
"get_client",
"(",
"'wsfe'",
",",
"ticket",
".",
... | Populate the database with types retrieved from the AFIP.
If no ticket is provided, the most recent available one will be used. | [
"Populate",
"the",
"database",
"with",
"types",
"retrieved",
"from",
"the",
"AFIP",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L106-L125 | train | 47,490 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.certificate_object | def certificate_object(self):
"""
Returns the certificate as an OpenSSL object
Returns the certificate as an OpenSSL object (rather than as a file
object).
"""
if not self.certificate:
return None
self.certificate.seek(0)
return crypto.parse_certificate(self.certificate.read()) | python | def certificate_object(self):
"""
Returns the certificate as an OpenSSL object
Returns the certificate as an OpenSSL object (rather than as a file
object).
"""
if not self.certificate:
return None
self.certificate.seek(0)
return crypto.parse_certificate(self.certificate.read()) | [
"def",
"certificate_object",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"certificate",
":",
"return",
"None",
"self",
".",
"certificate",
".",
"seek",
"(",
"0",
")",
"return",
"crypto",
".",
"parse_certificate",
"(",
"self",
".",
"certificate",
".",
... | Returns the certificate as an OpenSSL object
Returns the certificate as an OpenSSL object (rather than as a file
object). | [
"Returns",
"the",
"certificate",
"as",
"an",
"OpenSSL",
"object"
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L295-L305 | train | 47,491 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.get_certificate_expiration | def get_certificate_expiration(self):
"""
Gets the certificate expiration from the certificate
Gets the certificate expiration from the certificate file. Note that
this value is stored into ``certificate_expiration`` when an instance
is saved, so you should generally prefer that method (since this one
requires reading and parsing the entire certificate).
"""
datestring = self.certificate_object.get_notAfter().decode()
dt = datetime.strptime(datestring, '%Y%m%d%H%M%SZ')
return dt.replace(tzinfo=timezone.utc) | python | def get_certificate_expiration(self):
"""
Gets the certificate expiration from the certificate
Gets the certificate expiration from the certificate file. Note that
this value is stored into ``certificate_expiration`` when an instance
is saved, so you should generally prefer that method (since this one
requires reading and parsing the entire certificate).
"""
datestring = self.certificate_object.get_notAfter().decode()
dt = datetime.strptime(datestring, '%Y%m%d%H%M%SZ')
return dt.replace(tzinfo=timezone.utc) | [
"def",
"get_certificate_expiration",
"(",
"self",
")",
":",
"datestring",
"=",
"self",
".",
"certificate_object",
".",
"get_notAfter",
"(",
")",
".",
"decode",
"(",
")",
"dt",
"=",
"datetime",
".",
"strptime",
"(",
"datestring",
",",
"'%Y%m%d%H%M%SZ'",
")",
... | Gets the certificate expiration from the certificate
Gets the certificate expiration from the certificate file. Note that
this value is stored into ``certificate_expiration`` when an instance
is saved, so you should generally prefer that method (since this one
requires reading and parsing the entire certificate). | [
"Gets",
"the",
"certificate",
"expiration",
"from",
"the",
"certificate"
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L307-L319 | train | 47,492 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.generate_key | def generate_key(self, force=False):
"""
Creates a key file for this TaxPayer
Creates a key file for this TaxPayer if it does not have one, and
immediately saves it.
Returns True if and only if a key was created.
"""
if self.key and not force:
logger.warning(
'Tried to generate key for a taxpayer that already had one'
)
return False
with NamedTemporaryFile(suffix='.key') as file_:
crypto.create_key(file_)
self.key = File(file_, name='{}.key'.format(uuid.uuid4().hex))
self.save()
return True | python | def generate_key(self, force=False):
"""
Creates a key file for this TaxPayer
Creates a key file for this TaxPayer if it does not have one, and
immediately saves it.
Returns True if and only if a key was created.
"""
if self.key and not force:
logger.warning(
'Tried to generate key for a taxpayer that already had one'
)
return False
with NamedTemporaryFile(suffix='.key') as file_:
crypto.create_key(file_)
self.key = File(file_, name='{}.key'.format(uuid.uuid4().hex))
self.save()
return True | [
"def",
"generate_key",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"self",
".",
"key",
"and",
"not",
"force",
":",
"logger",
".",
"warning",
"(",
"'Tried to generate key for a taxpayer that already had one'",
")",
"return",
"False",
"with",
"NamedTe... | Creates a key file for this TaxPayer
Creates a key file for this TaxPayer if it does not have one, and
immediately saves it.
Returns True if and only if a key was created. | [
"Creates",
"a",
"key",
"file",
"for",
"this",
"TaxPayer"
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L321-L341 | train | 47,493 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.generate_csr | def generate_csr(self, basename='djangoafip'):
"""
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
"""
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr | python | def generate_csr(self, basename='djangoafip'):
"""
Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
"""
csr = BytesIO()
crypto.create_csr(
self.key.file,
self.name,
'{}{}'.format(basename, int(datetime.now().timestamp())),
'CUIT {}'.format(self.cuit),
csr,
)
csr.seek(0)
return csr | [
"def",
"generate_csr",
"(",
"self",
",",
"basename",
"=",
"'djangoafip'",
")",
":",
"csr",
"=",
"BytesIO",
"(",
")",
"crypto",
".",
"create_csr",
"(",
"self",
".",
"key",
".",
"file",
",",
"self",
".",
"name",
",",
"'{}{}'",
".",
"format",
"(",
"base... | Creates a CSR for this TaxPayer's key
Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP. | [
"Creates",
"a",
"CSR",
"for",
"this",
"TaxPayer",
"s",
"key"
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L343-L359 | train | 47,494 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.create_ticket | def create_ticket(self, service):
"""Create an AuthTicket for a given service."""
ticket = AuthTicket(owner=self, service=service)
ticket.authorize()
return ticket | python | def create_ticket(self, service):
"""Create an AuthTicket for a given service."""
ticket = AuthTicket(owner=self, service=service)
ticket.authorize()
return ticket | [
"def",
"create_ticket",
"(",
"self",
",",
"service",
")",
":",
"ticket",
"=",
"AuthTicket",
"(",
"owner",
"=",
"self",
",",
"service",
"=",
"service",
")",
"ticket",
".",
"authorize",
"(",
")",
"return",
"ticket"
] | Create an AuthTicket for a given service. | [
"Create",
"an",
"AuthTicket",
"for",
"a",
"given",
"service",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L361-L365 | train | 47,495 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.get_ticket | def get_ticket(self, service):
"""Return an existing AuthTicket for a given service."""
return self.auth_tickets \
.filter(expires__gt=datetime.now(timezone.utc), service=service) \
.last() | python | def get_ticket(self, service):
"""Return an existing AuthTicket for a given service."""
return self.auth_tickets \
.filter(expires__gt=datetime.now(timezone.utc), service=service) \
.last() | [
"def",
"get_ticket",
"(",
"self",
",",
"service",
")",
":",
"return",
"self",
".",
"auth_tickets",
".",
"filter",
"(",
"expires__gt",
"=",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
",",
"service",
"=",
"service",
")",
".",
"last",
"(",
... | Return an existing AuthTicket for a given service. | [
"Return",
"an",
"existing",
"AuthTicket",
"for",
"a",
"given",
"service",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L367-L371 | train | 47,496 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayer.fetch_points_of_sales | def fetch_points_of_sales(self, ticket=None):
"""
Fetch all point of sales objects.
Fetch all point of sales from the WS and store (or update) them
locally.
Returns a list of tuples with the format (pos, created,).
"""
ticket = ticket or self.get_or_create_ticket('wsfe')
client = clients.get_client('wsfe', self.is_sandboxed)
response = client.service.FEParamGetPtosVenta(
serializers.serialize_ticket(ticket),
)
check_response(response)
results = []
for pos_data in response.ResultGet.PtoVenta:
results.append(PointOfSales.objects.update_or_create(
number=pos_data.Nro,
issuance_type=pos_data.EmisionTipo,
owner=self,
defaults={
'blocked': pos_data.Bloqueado == 'N',
'drop_date': parsers.parse_date(pos_data.FchBaja),
}
))
return results | python | def fetch_points_of_sales(self, ticket=None):
"""
Fetch all point of sales objects.
Fetch all point of sales from the WS and store (or update) them
locally.
Returns a list of tuples with the format (pos, created,).
"""
ticket = ticket or self.get_or_create_ticket('wsfe')
client = clients.get_client('wsfe', self.is_sandboxed)
response = client.service.FEParamGetPtosVenta(
serializers.serialize_ticket(ticket),
)
check_response(response)
results = []
for pos_data in response.ResultGet.PtoVenta:
results.append(PointOfSales.objects.update_or_create(
number=pos_data.Nro,
issuance_type=pos_data.EmisionTipo,
owner=self,
defaults={
'blocked': pos_data.Bloqueado == 'N',
'drop_date': parsers.parse_date(pos_data.FchBaja),
}
))
return results | [
"def",
"fetch_points_of_sales",
"(",
"self",
",",
"ticket",
"=",
"None",
")",
":",
"ticket",
"=",
"ticket",
"or",
"self",
".",
"get_or_create_ticket",
"(",
"'wsfe'",
")",
"client",
"=",
"clients",
".",
"get_client",
"(",
"'wsfe'",
",",
"self",
".",
"is_san... | Fetch all point of sales objects.
Fetch all point of sales from the WS and store (or update) them
locally.
Returns a list of tuples with the format (pos, created,). | [
"Fetch",
"all",
"point",
"of",
"sales",
"objects",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L385-L414 | train | 47,497 |
WhyNotHugo/django-afip | django_afip/models.py | TaxPayerExtras.logo_as_data_uri | def logo_as_data_uri(self):
"""This TaxPayer's logo as a data uri."""
_, ext = os.path.splitext(self.logo.file.name)
with open(self.logo.file.name, 'rb') as f:
data = base64.b64encode(f.read())
return 'data:image/{};base64,{}'.format(
ext[1:], # Remove the leading dot.
data.decode()
) | python | def logo_as_data_uri(self):
"""This TaxPayer's logo as a data uri."""
_, ext = os.path.splitext(self.logo.file.name)
with open(self.logo.file.name, 'rb') as f:
data = base64.b64encode(f.read())
return 'data:image/{};base64,{}'.format(
ext[1:], # Remove the leading dot.
data.decode()
) | [
"def",
"logo_as_data_uri",
"(",
"self",
")",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"logo",
".",
"file",
".",
"name",
")",
"with",
"open",
"(",
"self",
".",
"logo",
".",
"file",
".",
"name",
",",
"'rb'",
... | This TaxPayer's logo as a data uri. | [
"This",
"TaxPayer",
"s",
"logo",
"as",
"a",
"data",
"uri",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L501-L510 | train | 47,498 |
WhyNotHugo/django-afip | django_afip/models.py | AuthTicket.authorize | def authorize(self):
"""Send this ticket to AFIP for authorization."""
request = self.__create_request_xml()
request = self.__sign_request(request)
request = b64encode(request).decode()
client = clients.get_client('wsaa', self.owner.is_sandboxed)
try:
raw_response = client.service.loginCms(request)
except Fault as e:
if str(e) == 'Certificado expirado':
raise exceptions.CertificateExpired(str(e)) from e
if str(e) == 'Certificado no emitido por AC de confianza':
raise exceptions.UntrustedCertificate(str(e)) from e
raise exceptions.AuthenticationError(str(e)) from e
response = etree.fromstring(raw_response.encode('utf-8'))
self.token = response.xpath(self.TOKEN_XPATH)[0].text
self.signature = response.xpath(self.SIGN_XPATH)[0].text
self.save() | python | def authorize(self):
"""Send this ticket to AFIP for authorization."""
request = self.__create_request_xml()
request = self.__sign_request(request)
request = b64encode(request).decode()
client = clients.get_client('wsaa', self.owner.is_sandboxed)
try:
raw_response = client.service.loginCms(request)
except Fault as e:
if str(e) == 'Certificado expirado':
raise exceptions.CertificateExpired(str(e)) from e
if str(e) == 'Certificado no emitido por AC de confianza':
raise exceptions.UntrustedCertificate(str(e)) from e
raise exceptions.AuthenticationError(str(e)) from e
response = etree.fromstring(raw_response.encode('utf-8'))
self.token = response.xpath(self.TOKEN_XPATH)[0].text
self.signature = response.xpath(self.SIGN_XPATH)[0].text
self.save() | [
"def",
"authorize",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"__create_request_xml",
"(",
")",
"request",
"=",
"self",
".",
"__sign_request",
"(",
"request",
")",
"request",
"=",
"b64encode",
"(",
"request",
")",
".",
"decode",
"(",
")",
"clie... | Send this ticket to AFIP for authorization. | [
"Send",
"this",
"ticket",
"to",
"AFIP",
"for",
"authorization",
"."
] | 5fb73213f1fe86ca52b501ffd0737911ef26ddb3 | https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L669-L689 | train | 47,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.