repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
yjzhang/uncurl_python | uncurl/dimensionality_reduction.py | dim_reduce_data | def dim_reduce_data(data, d):
"""
Does a MDS on the data directly, not on the means.
Args:
data (array): genes x cells
d (int): desired dimensionality
Returns:
X, a cells x d matrix
"""
genes, cells = data.shape
distances = np.zeros((cells, cells))
for i in range(cells):
for j in range(cells):
distances[i,j] = poisson_dist(data[:,i], data[:,j])
# do MDS on the distance matrix (procedure from Wikipedia)
proximity = distances**2
J = np.eye(cells) - 1./cells
B = -0.5*np.dot(J, np.dot(proximity, J))
# B should be symmetric, so we can use eigh
e_val, e_vec = np.linalg.eigh(B)
# Note: lam should be ordered to be the largest eigenvalues
lam = np.diag(e_val[-d:])[::-1]
#lam = max_or_zero(lam)
E = e_vec[:,-d:][::-1]
X = np.dot(E, lam**0.5)
return X | python | def dim_reduce_data(data, d):
"""
Does a MDS on the data directly, not on the means.
Args:
data (array): genes x cells
d (int): desired dimensionality
Returns:
X, a cells x d matrix
"""
genes, cells = data.shape
distances = np.zeros((cells, cells))
for i in range(cells):
for j in range(cells):
distances[i,j] = poisson_dist(data[:,i], data[:,j])
# do MDS on the distance matrix (procedure from Wikipedia)
proximity = distances**2
J = np.eye(cells) - 1./cells
B = -0.5*np.dot(J, np.dot(proximity, J))
# B should be symmetric, so we can use eigh
e_val, e_vec = np.linalg.eigh(B)
# Note: lam should be ordered to be the largest eigenvalues
lam = np.diag(e_val[-d:])[::-1]
#lam = max_or_zero(lam)
E = e_vec[:,-d:][::-1]
X = np.dot(E, lam**0.5)
return X | [
"def",
"dim_reduce_data",
"(",
"data",
",",
"d",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"distances",
"=",
"np",
".",
"zeros",
"(",
"(",
"cells",
",",
"cells",
")",
")",
"for",
"i",
"in",
"range",
"(",
"cells",
")",
":",
"for"... | Does a MDS on the data directly, not on the means.
Args:
data (array): genes x cells
d (int): desired dimensionality
Returns:
X, a cells x d matrix | [
"Does",
"a",
"MDS",
"on",
"the",
"data",
"directly",
"not",
"on",
"the",
"means",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L64-L91 | train | 47,300 |
moonso/loqusdb | loqusdb/plugins/mongo/case.py | CaseMixin.case | def case(self, case):
"""Get a case from the database
Search the cases with the case id
Args:
case (dict): A case dictionary
Returns:
mongo_case (dict): A mongo case dictionary
"""
LOG.debug("Getting case {0} from database".format(case.get('case_id')))
case_id = case['case_id']
return self.db.case.find_one({'case_id': case_id}) | python | def case(self, case):
"""Get a case from the database
Search the cases with the case id
Args:
case (dict): A case dictionary
Returns:
mongo_case (dict): A mongo case dictionary
"""
LOG.debug("Getting case {0} from database".format(case.get('case_id')))
case_id = case['case_id']
return self.db.case.find_one({'case_id': case_id}) | [
"def",
"case",
"(",
"self",
",",
"case",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Getting case {0} from database\"",
".",
"format",
"(",
"case",
".",
"get",
"(",
"'case_id'",
")",
")",
")",
"case_id",
"=",
"case",
"[",
"'case_id'",
"]",
"return",
"self",
... | Get a case from the database
Search the cases with the case id
Args:
case (dict): A case dictionary
Returns:
mongo_case (dict): A mongo case dictionary | [
"Get",
"a",
"case",
"from",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L11-L24 | train | 47,301 |
moonso/loqusdb | loqusdb/plugins/mongo/case.py | CaseMixin.nr_cases | def nr_cases(self, snv_cases=None, sv_cases=None):
"""Return the number of cases in the database
Args:
snv_cases(bool): If only snv cases should be searched
sv_cases(bool): If only snv cases should be searched
Returns:
cases (Iterable(Case)): A iterable with mongo cases
"""
query = {}
if snv_cases:
query = {'vcf_path': {'$exists':True}}
if sv_cases:
query = {'vcf_sv_path': {'$exists':True}}
if snv_cases and sv_cases:
query = None
return self.db.case.count_documents(query) | python | def nr_cases(self, snv_cases=None, sv_cases=None):
"""Return the number of cases in the database
Args:
snv_cases(bool): If only snv cases should be searched
sv_cases(bool): If only snv cases should be searched
Returns:
cases (Iterable(Case)): A iterable with mongo cases
"""
query = {}
if snv_cases:
query = {'vcf_path': {'$exists':True}}
if sv_cases:
query = {'vcf_sv_path': {'$exists':True}}
if snv_cases and sv_cases:
query = None
return self.db.case.count_documents(query) | [
"def",
"nr_cases",
"(",
"self",
",",
"snv_cases",
"=",
"None",
",",
"sv_cases",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"snv_cases",
":",
"query",
"=",
"{",
"'vcf_path'",
":",
"{",
"'$exists'",
":",
"True",
"}",
"}",
"if",
"sv_cases",
... | Return the number of cases in the database
Args:
snv_cases(bool): If only snv cases should be searched
sv_cases(bool): If only snv cases should be searched
Returns:
cases (Iterable(Case)): A iterable with mongo cases | [
"Return",
"the",
"number",
"of",
"cases",
"in",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L35-L54 | train | 47,302 |
moonso/loqusdb | loqusdb/plugins/mongo/case.py | CaseMixin.add_case | def add_case(self, case, update=False):
"""Add a case to the case collection
If the case exists and update is False raise error.
Args:
db (MongoClient): A connection to the mongodb
case (dict): A case dictionary
update(bool): If existing case should be updated
Returns:
mongo_case_id(ObjectId)
"""
existing_case = self.case(case)
if existing_case and not update:
raise CaseError("Case {} already exists".format(case['case_id']))
if existing_case:
self.db.case.find_one_and_replace(
{'case_id': case['case_id']},
case,
)
else:
self.db.case.insert_one(case)
return case | python | def add_case(self, case, update=False):
"""Add a case to the case collection
If the case exists and update is False raise error.
Args:
db (MongoClient): A connection to the mongodb
case (dict): A case dictionary
update(bool): If existing case should be updated
Returns:
mongo_case_id(ObjectId)
"""
existing_case = self.case(case)
if existing_case and not update:
raise CaseError("Case {} already exists".format(case['case_id']))
if existing_case:
self.db.case.find_one_and_replace(
{'case_id': case['case_id']},
case,
)
else:
self.db.case.insert_one(case)
return case | [
"def",
"add_case",
"(",
"self",
",",
"case",
",",
"update",
"=",
"False",
")",
":",
"existing_case",
"=",
"self",
".",
"case",
"(",
"case",
")",
"if",
"existing_case",
"and",
"not",
"update",
":",
"raise",
"CaseError",
"(",
"\"Case {} already exists\"",
".... | Add a case to the case collection
If the case exists and update is False raise error.
Args:
db (MongoClient): A connection to the mongodb
case (dict): A case dictionary
update(bool): If existing case should be updated
Returns:
mongo_case_id(ObjectId) | [
"Add",
"a",
"case",
"to",
"the",
"case",
"collection"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L57-L82 | train | 47,303 |
moonso/loqusdb | loqusdb/plugins/mongo/case.py | CaseMixin.delete_case | def delete_case(self, case):
"""Delete case from the database
Delete a case from the database
Args:
case (dict): A case dictionary
"""
mongo_case = self.case(case)
if not mongo_case:
raise CaseError("Tried to delete case {0} but could not find case".format(
case.get('case_id')
))
LOG.info("Removing case {0} from database".format(
mongo_case.get('case_id')
))
self.db.case.delete_one({'_id': mongo_case['_id']})
return | python | def delete_case(self, case):
"""Delete case from the database
Delete a case from the database
Args:
case (dict): A case dictionary
"""
mongo_case = self.case(case)
if not mongo_case:
raise CaseError("Tried to delete case {0} but could not find case".format(
case.get('case_id')
))
LOG.info("Removing case {0} from database".format(
mongo_case.get('case_id')
))
self.db.case.delete_one({'_id': mongo_case['_id']})
return | [
"def",
"delete_case",
"(",
"self",
",",
"case",
")",
":",
"mongo_case",
"=",
"self",
".",
"case",
"(",
"case",
")",
"if",
"not",
"mongo_case",
":",
"raise",
"CaseError",
"(",
"\"Tried to delete case {0} but could not find case\"",
".",
"format",
"(",
"case",
"... | Delete case from the database
Delete a case from the database
Args:
case (dict): A case dictionary | [
"Delete",
"case",
"from",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/case.py#L84-L104 | train | 47,304 |
moonso/loqusdb | loqusdb/build_models/profile_variant.py | build_profile_variant | def build_profile_variant(variant):
"""Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
"""
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
pos = int(variant.POS)
variant_id = get_variant_id(variant)
ref = variant.REF
alt = variant.ALT[0]
maf = get_maf(variant)
profile_variant = ProfileVariant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
ref=ref,
alt=alt,
maf=maf,
id_column = variant.ID
)
return profile_variant | python | def build_profile_variant(variant):
"""Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
"""
chrom = variant.CHROM
if chrom.startswith(('chr', 'CHR', 'Chr')):
chrom = chrom[3:]
pos = int(variant.POS)
variant_id = get_variant_id(variant)
ref = variant.REF
alt = variant.ALT[0]
maf = get_maf(variant)
profile_variant = ProfileVariant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
ref=ref,
alt=alt,
maf=maf,
id_column = variant.ID
)
return profile_variant | [
"def",
"build_profile_variant",
"(",
"variant",
")",
":",
"chrom",
"=",
"variant",
".",
"CHROM",
"if",
"chrom",
".",
"startswith",
"(",
"(",
"'chr'",
",",
"'CHR'",
",",
"'Chr'",
")",
")",
":",
"chrom",
"=",
"chrom",
"[",
"3",
":",
"]",
"pos",
"=",
... | Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant) | [
"Returns",
"a",
"ProfileVariant",
"object"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/profile_variant.py#L24-L57 | train | 47,305 |
moonso/loqusdb | loqusdb/utils/vcf.py | add_headers | def add_headers(vcf_obj, nr_cases=None, sv=False):
"""Add loqus specific information to a VCF header
Args:
vcf_obj(cyvcf2.VCF)
"""
vcf_obj.add_info_to_header(
{
'ID':"Obs",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observations for the variant"}
)
if not sv:
vcf_obj.add_info_to_header(
{
'ID':"Hom",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed homozygotes"}
)
vcf_obj.add_info_to_header(
{
'ID':"Hem",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed hemizygotes"}
)
if nr_cases:
case_header = "##NrCases={}".format(nr_cases)
vcf_obj.add_to_header(case_header)
# head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
return | python | def add_headers(vcf_obj, nr_cases=None, sv=False):
"""Add loqus specific information to a VCF header
Args:
vcf_obj(cyvcf2.VCF)
"""
vcf_obj.add_info_to_header(
{
'ID':"Obs",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observations for the variant"}
)
if not sv:
vcf_obj.add_info_to_header(
{
'ID':"Hom",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed homozygotes"}
)
vcf_obj.add_info_to_header(
{
'ID':"Hem",
'Number': '1',
'Type': 'Integer',
'Description': "The number of observed hemizygotes"}
)
if nr_cases:
case_header = "##NrCases={}".format(nr_cases)
vcf_obj.add_to_header(case_header)
# head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
return | [
"def",
"add_headers",
"(",
"vcf_obj",
",",
"nr_cases",
"=",
"None",
",",
"sv",
"=",
"False",
")",
":",
"vcf_obj",
".",
"add_info_to_header",
"(",
"{",
"'ID'",
":",
"\"Obs\"",
",",
"'Number'",
":",
"'1'",
",",
"'Type'",
":",
"'Integer'",
",",
"'Descriptio... | Add loqus specific information to a VCF header
Args:
vcf_obj(cyvcf2.VCF) | [
"Add",
"loqus",
"specific",
"information",
"to",
"a",
"VCF",
"header"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L12-L45 | train | 47,306 |
moonso/loqusdb | loqusdb/utils/vcf.py | get_file_handle | def get_file_handle(file_path):
"""Return cyvcf2 VCF object
Args:
file_path(str)
Returns:
vcf_obj(cyvcf2.VCF)
"""
LOG.debug("Check if file end is correct")
if not os.path.exists(file_path):
raise IOError("No such file:{0}".format(file_path))
if not os.path.splitext(file_path)[-1] in VALID_ENDINGS:
raise IOError("Not a valid vcf file name: {}".format(file_path))
vcf_obj = VCF(file_path)
return vcf_obj | python | def get_file_handle(file_path):
"""Return cyvcf2 VCF object
Args:
file_path(str)
Returns:
vcf_obj(cyvcf2.VCF)
"""
LOG.debug("Check if file end is correct")
if not os.path.exists(file_path):
raise IOError("No such file:{0}".format(file_path))
if not os.path.splitext(file_path)[-1] in VALID_ENDINGS:
raise IOError("Not a valid vcf file name: {}".format(file_path))
vcf_obj = VCF(file_path)
return vcf_obj | [
"def",
"get_file_handle",
"(",
"file_path",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Check if file end is correct\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"IOError",
"(",
"\"No such file:{0}\"",
".",
"format",
... | Return cyvcf2 VCF object
Args:
file_path(str)
Returns:
vcf_obj(cyvcf2.VCF) | [
"Return",
"cyvcf2",
"VCF",
"object"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L49-L68 | train | 47,307 |
moonso/loqusdb | loqusdb/utils/vcf.py | check_vcf | def check_vcf(vcf_path, expected_type='snv'):
"""Check if there are any problems with the vcf file
Args:
vcf_path(str)
expected_type(str): 'sv' or 'snv'
Returns:
vcf_info(dict): dict like
{
'nr_variants':<INT>,
'variant_type': <STR> in ['snv', 'sv'],
'individuals': <LIST> individual positions in file
}
"""
LOG.info("Check if vcf is on correct format...")
vcf = VCF(vcf_path)
individuals = vcf.samples
variant_type = None
previous_pos = None
previous_chrom = None
posititon_variants = set()
nr_variants = 0
for nr_variants,variant in enumerate(vcf,1):
# Check the type of variant
current_type = 'sv' if variant.var_type == 'sv' else 'snv'
if not variant_type:
variant_type = current_type
# Vcf can not include both snvs and svs
if variant_type != current_type:
raise VcfError("Vcf includes a mix of snvs and svs")
current_chrom = variant.CHROM
current_pos = variant.POS
# We start with a simple id that can be used by SV:s
variant_id = "{0}_{1}".format(current_chrom, current_pos)
# For SNVs we can create a proper variant id with chrom_pos_ref_alt
if variant_type == 'snv':
variant_id = get_variant_id(variant)
# Initiate variables
if not previous_chrom:
previous_chrom = current_chrom
previous_pos = current_pos
posititon_variants = set([variant_id])
continue
# Update variables if new chromosome
if current_chrom != previous_chrom:
previous_chrom = current_chrom
previous_pos = current_pos
posititon_variants = set([variant_id])
continue
if variant_type == 'snv':
# Check if variant is unique
if current_pos == previous_pos:
if variant_id in posititon_variants:
raise VcfError("Variant {0} occurs several times"\
" in vcf".format(variant_id))
else:
posititon_variants.add(variant_id)
# Check if vcf is sorted
else:
if not current_pos >= previous_pos:
raise VcfError("Vcf if not sorted in a correct way")
previous_pos = current_pos
# Reset posititon_variants since we are on a new position
posititon_variants = set([variant_id])
if variant_type != expected_type:
raise VcfError("VCF file does not only include {0}s, please check vcf {1}".format(
expected_type.upper(), vcf_path))
LOG.info("Vcf file %s looks fine", vcf_path)
LOG.info("Nr of variants in vcf: {0}".format(nr_variants))
LOG.info("Type of variants in vcf: {0}".format(variant_type))
vcf_info = {
'nr_variants': nr_variants,
'variant_type': variant_type,
'individuals': individuals,
}
return vcf_info | python | def check_vcf(vcf_path, expected_type='snv'):
"""Check if there are any problems with the vcf file
Args:
vcf_path(str)
expected_type(str): 'sv' or 'snv'
Returns:
vcf_info(dict): dict like
{
'nr_variants':<INT>,
'variant_type': <STR> in ['snv', 'sv'],
'individuals': <LIST> individual positions in file
}
"""
LOG.info("Check if vcf is on correct format...")
vcf = VCF(vcf_path)
individuals = vcf.samples
variant_type = None
previous_pos = None
previous_chrom = None
posititon_variants = set()
nr_variants = 0
for nr_variants,variant in enumerate(vcf,1):
# Check the type of variant
current_type = 'sv' if variant.var_type == 'sv' else 'snv'
if not variant_type:
variant_type = current_type
# Vcf can not include both snvs and svs
if variant_type != current_type:
raise VcfError("Vcf includes a mix of snvs and svs")
current_chrom = variant.CHROM
current_pos = variant.POS
# We start with a simple id that can be used by SV:s
variant_id = "{0}_{1}".format(current_chrom, current_pos)
# For SNVs we can create a proper variant id with chrom_pos_ref_alt
if variant_type == 'snv':
variant_id = get_variant_id(variant)
# Initiate variables
if not previous_chrom:
previous_chrom = current_chrom
previous_pos = current_pos
posititon_variants = set([variant_id])
continue
# Update variables if new chromosome
if current_chrom != previous_chrom:
previous_chrom = current_chrom
previous_pos = current_pos
posititon_variants = set([variant_id])
continue
if variant_type == 'snv':
# Check if variant is unique
if current_pos == previous_pos:
if variant_id in posititon_variants:
raise VcfError("Variant {0} occurs several times"\
" in vcf".format(variant_id))
else:
posititon_variants.add(variant_id)
# Check if vcf is sorted
else:
if not current_pos >= previous_pos:
raise VcfError("Vcf if not sorted in a correct way")
previous_pos = current_pos
# Reset posititon_variants since we are on a new position
posititon_variants = set([variant_id])
if variant_type != expected_type:
raise VcfError("VCF file does not only include {0}s, please check vcf {1}".format(
expected_type.upper(), vcf_path))
LOG.info("Vcf file %s looks fine", vcf_path)
LOG.info("Nr of variants in vcf: {0}".format(nr_variants))
LOG.info("Type of variants in vcf: {0}".format(variant_type))
vcf_info = {
'nr_variants': nr_variants,
'variant_type': variant_type,
'individuals': individuals,
}
return vcf_info | [
"def",
"check_vcf",
"(",
"vcf_path",
",",
"expected_type",
"=",
"'snv'",
")",
":",
"LOG",
".",
"info",
"(",
"\"Check if vcf is on correct format...\"",
")",
"vcf",
"=",
"VCF",
"(",
"vcf_path",
")",
"individuals",
"=",
"vcf",
".",
"samples",
"variant_type",
"="... | Check if there are any problems with the vcf file
Args:
vcf_path(str)
expected_type(str): 'sv' or 'snv'
Returns:
vcf_info(dict): dict like
{
'nr_variants':<INT>,
'variant_type': <STR> in ['snv', 'sv'],
'individuals': <LIST> individual positions in file
} | [
"Check",
"if",
"there",
"are",
"any",
"problems",
"with",
"the",
"vcf",
"file"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/vcf.py#L89-L180 | train | 47,308 |
xi/ldif3 | ldif3.py | is_dn | def is_dn(s):
"""Return True if s is a LDAP DN."""
if s == '':
return True
rm = DN_REGEX.match(s)
return rm is not None and rm.group(0) == s | python | def is_dn(s):
"""Return True if s is a LDAP DN."""
if s == '':
return True
rm = DN_REGEX.match(s)
return rm is not None and rm.group(0) == s | [
"def",
"is_dn",
"(",
"s",
")",
":",
"if",
"s",
"==",
"''",
":",
"return",
"True",
"rm",
"=",
"DN_REGEX",
".",
"match",
"(",
"s",
")",
"return",
"rm",
"is",
"not",
"None",
"and",
"rm",
".",
"group",
"(",
"0",
")",
"==",
"s"
] | Return True if s is a LDAP DN. | [
"Return",
"True",
"if",
"s",
"is",
"a",
"LDAP",
"DN",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L43-L48 | train | 47,309 |
xi/ldif3 | ldif3.py | LDIFWriter._fold_line | def _fold_line(self, line):
"""Write string line as one or more folded lines."""
if len(line) <= self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
pos = self._cols
self._output_file.write(line[0:self._cols])
self._output_file.write(self._line_sep)
while pos < len(line):
self._output_file.write(b' ')
end = min(len(line), pos + self._cols - 1)
self._output_file.write(line[pos:end])
self._output_file.write(self._line_sep)
pos = end | python | def _fold_line(self, line):
"""Write string line as one or more folded lines."""
if len(line) <= self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
pos = self._cols
self._output_file.write(line[0:self._cols])
self._output_file.write(self._line_sep)
while pos < len(line):
self._output_file.write(b' ')
end = min(len(line), pos + self._cols - 1)
self._output_file.write(line[pos:end])
self._output_file.write(self._line_sep)
pos = end | [
"def",
"_fold_line",
"(",
"self",
",",
"line",
")",
":",
"if",
"len",
"(",
"line",
")",
"<=",
"self",
".",
"_cols",
":",
"self",
".",
"_output_file",
".",
"write",
"(",
"line",
")",
"self",
".",
"_output_file",
".",
"write",
"(",
"self",
".",
"_lin... | Write string line as one or more folded lines. | [
"Write",
"string",
"line",
"as",
"one",
"or",
"more",
"folded",
"lines",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L100-L114 | train | 47,310 |
xi/ldif3 | ldif3.py | LDIFWriter._needs_base64_encoding | def _needs_base64_encoding(self, attr_type, attr_value):
"""Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs
"""
return attr_type.lower() in self._base64_attrs or \
isinstance(attr_value, bytes) or \
UNSAFE_STRING_RE.search(attr_value) is not None | python | def _needs_base64_encoding(self, attr_type, attr_value):
"""Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs
"""
return attr_type.lower() in self._base64_attrs or \
isinstance(attr_value, bytes) or \
UNSAFE_STRING_RE.search(attr_value) is not None | [
"def",
"_needs_base64_encoding",
"(",
"self",
",",
"attr_type",
",",
"attr_value",
")",
":",
"return",
"attr_type",
".",
"lower",
"(",
")",
"in",
"self",
".",
"_base64_attrs",
"or",
"isinstance",
"(",
"attr_value",
",",
"bytes",
")",
"or",
"UNSAFE_STRING_RE",
... | Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs | [
"Return",
"True",
"if",
"attr_value",
"has",
"to",
"be",
"base",
"-",
"64",
"encoded",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L116-L124 | train | 47,311 |
xi/ldif3 | ldif3.py | LDIFWriter._unparse_changetype | def _unparse_changetype(self, mod_len):
"""Detect and write the changetype."""
if mod_len == 2:
changetype = 'add'
elif mod_len == 3:
changetype = 'modify'
else:
raise ValueError("modlist item of wrong length")
self._unparse_attr('changetype', changetype) | python | def _unparse_changetype(self, mod_len):
"""Detect and write the changetype."""
if mod_len == 2:
changetype = 'add'
elif mod_len == 3:
changetype = 'modify'
else:
raise ValueError("modlist item of wrong length")
self._unparse_attr('changetype', changetype) | [
"def",
"_unparse_changetype",
"(",
"self",
",",
"mod_len",
")",
":",
"if",
"mod_len",
"==",
"2",
":",
"changetype",
"=",
"'add'",
"elif",
"mod_len",
"==",
"3",
":",
"changetype",
"=",
"'modify'",
"else",
":",
"raise",
"ValueError",
"(",
"\"modlist item of wr... | Detect and write the changetype. | [
"Detect",
"and",
"write",
"the",
"changetype",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L148-L157 | train | 47,312 |
xi/ldif3 | ldif3.py | LDIFWriter.unparse | def unparse(self, dn, record):
"""Write an entry or change record to the output file.
:type dn: string
:param dn: distinguished name
:type record: Union[Dict[string, List[string]], List[Tuple]]
:param record: Either a dictionary holding an entry or a list of
additions (2-tuple) or modifications (3-tuple).
"""
self._unparse_attr('dn', dn)
if isinstance(record, dict):
self._unparse_entry_record(record)
elif isinstance(record, list):
self._unparse_change_record(record)
else:
raise ValueError("Argument record must be dictionary or list")
self._output_file.write(self._line_sep)
self.records_written += 1 | python | def unparse(self, dn, record):
"""Write an entry or change record to the output file.
:type dn: string
:param dn: distinguished name
:type record: Union[Dict[string, List[string]], List[Tuple]]
:param record: Either a dictionary holding an entry or a list of
additions (2-tuple) or modifications (3-tuple).
"""
self._unparse_attr('dn', dn)
if isinstance(record, dict):
self._unparse_entry_record(record)
elif isinstance(record, list):
self._unparse_change_record(record)
else:
raise ValueError("Argument record must be dictionary or list")
self._output_file.write(self._line_sep)
self.records_written += 1 | [
"def",
"unparse",
"(",
"self",
",",
"dn",
",",
"record",
")",
":",
"self",
".",
"_unparse_attr",
"(",
"'dn'",
",",
"dn",
")",
"if",
"isinstance",
"(",
"record",
",",
"dict",
")",
":",
"self",
".",
"_unparse_entry_record",
"(",
"record",
")",
"elif",
... | Write an entry or change record to the output file.
:type dn: string
:param dn: distinguished name
:type record: Union[Dict[string, List[string]], List[Tuple]]
:param record: Either a dictionary holding an entry or a list of
additions (2-tuple) or modifications (3-tuple). | [
"Write",
"an",
"entry",
"or",
"change",
"record",
"to",
"the",
"output",
"file",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L183-L201 | train | 47,313 |
xi/ldif3 | ldif3.py | LDIFParser._strip_line_sep | def _strip_line_sep(self, s):
"""Strip trailing line separators from s, but no other whitespaces."""
if s[-2:] == b'\r\n':
return s[:-2]
elif s[-1:] == b'\n':
return s[:-1]
else:
return s | python | def _strip_line_sep(self, s):
"""Strip trailing line separators from s, but no other whitespaces."""
if s[-2:] == b'\r\n':
return s[:-2]
elif s[-1:] == b'\n':
return s[:-1]
else:
return s | [
"def",
"_strip_line_sep",
"(",
"self",
",",
"s",
")",
":",
"if",
"s",
"[",
"-",
"2",
":",
"]",
"==",
"b'\\r\\n'",
":",
"return",
"s",
"[",
":",
"-",
"2",
"]",
"elif",
"s",
"[",
"-",
"1",
":",
"]",
"==",
"b'\\n'",
":",
"return",
"s",
"[",
":... | Strip trailing line separators from s, but no other whitespaces. | [
"Strip",
"trailing",
"line",
"separators",
"from",
"s",
"but",
"no",
"other",
"whitespaces",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L233-L240 | train | 47,314 |
xi/ldif3 | ldif3.py | LDIFParser._iter_unfolded_lines | def _iter_unfolded_lines(self):
"""Iter input unfoled lines. Skip comments."""
line = self._input_file.readline()
while line:
self.line_counter += 1
self.byte_counter += len(line)
line = self._strip_line_sep(line)
nextline = self._input_file.readline()
while nextline and nextline[:1] == b' ':
line += self._strip_line_sep(nextline)[1:]
nextline = self._input_file.readline()
if not line.startswith(b'#'):
yield line
line = nextline | python | def _iter_unfolded_lines(self):
"""Iter input unfoled lines. Skip comments."""
line = self._input_file.readline()
while line:
self.line_counter += 1
self.byte_counter += len(line)
line = self._strip_line_sep(line)
nextline = self._input_file.readline()
while nextline and nextline[:1] == b' ':
line += self._strip_line_sep(nextline)[1:]
nextline = self._input_file.readline()
if not line.startswith(b'#'):
yield line
line = nextline | [
"def",
"_iter_unfolded_lines",
"(",
"self",
")",
":",
"line",
"=",
"self",
".",
"_input_file",
".",
"readline",
"(",
")",
"while",
"line",
":",
"self",
".",
"line_counter",
"+=",
"1",
"self",
".",
"byte_counter",
"+=",
"len",
"(",
"line",
")",
"line",
... | Iter input unfoled lines. Skip comments. | [
"Iter",
"input",
"unfoled",
"lines",
".",
"Skip",
"comments",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L261-L277 | train | 47,315 |
xi/ldif3 | ldif3.py | LDIFParser._iter_blocks | def _iter_blocks(self):
"""Iter input lines in blocks separated by blank lines."""
lines = []
for line in self._iter_unfolded_lines():
if line:
lines.append(line)
elif lines:
self.records_read += 1
yield lines
lines = []
if lines:
self.records_read += 1
yield lines | python | def _iter_blocks(self):
"""Iter input lines in blocks separated by blank lines."""
lines = []
for line in self._iter_unfolded_lines():
if line:
lines.append(line)
elif lines:
self.records_read += 1
yield lines
lines = []
if lines:
self.records_read += 1
yield lines | [
"def",
"_iter_blocks",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"self",
".",
"_iter_unfolded_lines",
"(",
")",
":",
"if",
"line",
":",
"lines",
".",
"append",
"(",
"line",
")",
"elif",
"lines",
":",
"self",
".",
"records_re... | Iter input lines in blocks separated by blank lines. | [
"Iter",
"input",
"lines",
"in",
"blocks",
"separated",
"by",
"blank",
"lines",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L279-L291 | train | 47,316 |
xi/ldif3 | ldif3.py | LDIFParser._check_dn | def _check_dn(self, dn, attr_value):
"""Check dn attribute for issues."""
if dn is not None:
self._error('Two lines starting with dn: in one record.')
if not is_dn(attr_value):
self._error('No valid string-representation of '
'distinguished name %s.' % attr_value) | python | def _check_dn(self, dn, attr_value):
"""Check dn attribute for issues."""
if dn is not None:
self._error('Two lines starting with dn: in one record.')
if not is_dn(attr_value):
self._error('No valid string-representation of '
'distinguished name %s.' % attr_value) | [
"def",
"_check_dn",
"(",
"self",
",",
"dn",
",",
"attr_value",
")",
":",
"if",
"dn",
"is",
"not",
"None",
":",
"self",
".",
"_error",
"(",
"'Two lines starting with dn: in one record.'",
")",
"if",
"not",
"is_dn",
"(",
"attr_value",
")",
":",
"self",
".",
... | Check dn attribute for issues. | [
"Check",
"dn",
"attribute",
"for",
"issues",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L334-L340 | train | 47,317 |
xi/ldif3 | ldif3.py | LDIFParser._check_changetype | def _check_changetype(self, dn, changetype, attr_value):
"""Check changetype attribute for issues."""
if dn is None:
self._error('Read changetype: before getting valid dn: line.')
if changetype is not None:
self._error('Two lines starting with changetype: in one record.')
if attr_value not in CHANGE_TYPES:
self._error('changetype value %s is invalid.' % attr_value) | python | def _check_changetype(self, dn, changetype, attr_value):
"""Check changetype attribute for issues."""
if dn is None:
self._error('Read changetype: before getting valid dn: line.')
if changetype is not None:
self._error('Two lines starting with changetype: in one record.')
if attr_value not in CHANGE_TYPES:
self._error('changetype value %s is invalid.' % attr_value) | [
"def",
"_check_changetype",
"(",
"self",
",",
"dn",
",",
"changetype",
",",
"attr_value",
")",
":",
"if",
"dn",
"is",
"None",
":",
"self",
".",
"_error",
"(",
"'Read changetype: before getting valid dn: line.'",
")",
"if",
"changetype",
"is",
"not",
"None",
":... | Check changetype attribute for issues. | [
"Check",
"changetype",
"attribute",
"for",
"issues",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L342-L349 | train | 47,318 |
xi/ldif3 | ldif3.py | LDIFParser._parse_entry_record | def _parse_entry_record(self, lines):
"""Parse a single entry record from a list of lines."""
dn = None
entry = OrderedDict()
for line in lines:
attr_type, attr_value = self._parse_attr(line)
if attr_type == 'dn':
self._check_dn(dn, attr_value)
dn = attr_value
elif attr_type == 'version' and dn is None:
pass # version = 1
else:
if dn is None:
self._error('First line of record does not start '
'with "dn:": %s' % attr_type)
if attr_value is not None and \
attr_type.lower() not in self._ignored_attr_types:
if attr_type in entry:
entry[attr_type].append(attr_value)
else:
entry[attr_type] = [attr_value]
return dn, entry | python | def _parse_entry_record(self, lines):
"""Parse a single entry record from a list of lines."""
dn = None
entry = OrderedDict()
for line in lines:
attr_type, attr_value = self._parse_attr(line)
if attr_type == 'dn':
self._check_dn(dn, attr_value)
dn = attr_value
elif attr_type == 'version' and dn is None:
pass # version = 1
else:
if dn is None:
self._error('First line of record does not start '
'with "dn:": %s' % attr_type)
if attr_value is not None and \
attr_type.lower() not in self._ignored_attr_types:
if attr_type in entry:
entry[attr_type].append(attr_value)
else:
entry[attr_type] = [attr_value]
return dn, entry | [
"def",
"_parse_entry_record",
"(",
"self",
",",
"lines",
")",
":",
"dn",
"=",
"None",
"entry",
"=",
"OrderedDict",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"attr_type",
",",
"attr_value",
"=",
"self",
".",
"_parse_attr",
"(",
"line",
")",
"if",
"at... | Parse a single entry record from a list of lines. | [
"Parse",
"a",
"single",
"entry",
"record",
"from",
"a",
"list",
"of",
"lines",
"."
] | debc4222bb48492de0d3edcc3c71fdae5bc612a4 | https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L351-L375 | train | 47,319 |
yjzhang/uncurl_python | uncurl/zip_state_estimation.py | zip_estimate_state | def zip_estimate_state(data, clusters, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Zero-inflated Poisson Mixture model to estimate cell states and
cell state mixing weights.
Args:
data (array): genes x cells
clusters (int): number of mixture components
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M: genes x clusters - state centers
W: clusters x cells - state mixing components for each cell
ll: final log-likelihood
"""
genes, cells = data.shape
# TODO: estimate ZIP parameter?
if init_means is None:
means, assignments = kmeans_pp(data, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
# using zero-inflated parameters...
L, Z = zip_fit_params_mle(data)
# repeat steps 1 and 2 until convergence:
ll = np.inf
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective = _create_w_objective(means, data, Z)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=True, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective = _create_m_objective(w_new, data, Z)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=True, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
ll = m_res.fun
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, ll | python | def zip_estimate_state(data, clusters, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Zero-inflated Poisson Mixture model to estimate cell states and
cell state mixing weights.
Args:
data (array): genes x cells
clusters (int): number of mixture components
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M: genes x clusters - state centers
W: clusters x cells - state mixing components for each cell
ll: final log-likelihood
"""
genes, cells = data.shape
# TODO: estimate ZIP parameter?
if init_means is None:
means, assignments = kmeans_pp(data, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
# using zero-inflated parameters...
L, Z = zip_fit_params_mle(data)
# repeat steps 1 and 2 until convergence:
ll = np.inf
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective = _create_w_objective(means, data, Z)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=True, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective = _create_m_objective(w_new, data, Z)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=True, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
ll = m_res.fun
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, ll | [
"def",
"zip_estimate_state",
"(",
"data",
",",
"clusters",
",",
"init_means",
"=",
"None",
",",
"init_weights",
"=",
"None",
",",
"max_iters",
"=",
"10",
",",
"tol",
"=",
"1e-4",
",",
"disp",
"=",
"True",
",",
"inner_max_iters",
"=",
"400",
",",
"normali... | Uses a Zero-inflated Poisson Mixture model to estimate cell states and
cell state mixing weights.
Args:
data (array): genes x cells
clusters (int): number of mixture components
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M: genes x clusters - state centers
W: clusters x cells - state mixing components for each cell
ll: final log-likelihood | [
"Uses",
"a",
"Zero",
"-",
"inflated",
"Poisson",
"Mixture",
"model",
"to",
"estimate",
"cell",
"states",
"and",
"cell",
"state",
"mixing",
"weights",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_state_estimation.py#L64-L127 | train | 47,320 |
yjzhang/uncurl_python | uncurl/clustering.py | kmeans_pp | def kmeans_pp(data, k, centers=None):
"""
Generates kmeans++ initial centers.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).
Returns:
centers - a genes x k array of cluster means.
assignments - a cells x 1 array of cluster assignments
"""
# TODO: what if there is missing data for a given gene?
# missing data could be if all the entires are -1.
genes, cells = data.shape
if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
data = sparse.csc_matrix(data)
num_known_centers = 0
if centers is None:
centers = np.zeros((genes, k))
else:
num_known_centers = centers.shape[1]
centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1)
distances = np.zeros((cells, k))
distances[:] = np.inf
if num_known_centers == 0:
init = np.random.randint(0, cells)
if sparse.issparse(data):
centers[:,0] = data[:, init].toarray().flatten()
else:
centers[:,0] = data[:, init]
num_known_centers+=1
available_cells = list(range(cells))
for c in range(num_known_centers, k):
c2 = c-1
# use different formulation for distance... if sparse, use lls
# if not sparse, use poisson_dist
if sparse.issparse(data):
lls = poisson_ll(data, centers[:,c2:c2+1]).flatten()
distances[:,c2] = 1 + lls.max() - lls
distances[:,c2] /= distances[:,c2].max()
else:
for cell in range(cells):
distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2])
# choose a new data point as center... probability proportional
# to distance^2
min_distances = np.min(distances, 1)
min_distances = min_distances**2
min_distances = min_distances[available_cells]
# should be sampling without replacement
min_dist = np.random.choice(available_cells,
p=min_distances/min_distances.sum())
available_cells.pop(available_cells.index(min_dist))
if sparse.issparse(data):
centers[:,c] = data[:, min_dist].toarray().flatten()
else:
centers[:,c] = data[:, min_dist]
lls = poisson_ll(data, centers)
new_assignments = np.argmax(lls, 1)
centers[centers==0.0] = eps
return centers, new_assignments | python | def kmeans_pp(data, k, centers=None):
"""
Generates kmeans++ initial centers.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).
Returns:
centers - a genes x k array of cluster means.
assignments - a cells x 1 array of cluster assignments
"""
# TODO: what if there is missing data for a given gene?
# missing data could be if all the entires are -1.
genes, cells = data.shape
if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
data = sparse.csc_matrix(data)
num_known_centers = 0
if centers is None:
centers = np.zeros((genes, k))
else:
num_known_centers = centers.shape[1]
centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1)
distances = np.zeros((cells, k))
distances[:] = np.inf
if num_known_centers == 0:
init = np.random.randint(0, cells)
if sparse.issparse(data):
centers[:,0] = data[:, init].toarray().flatten()
else:
centers[:,0] = data[:, init]
num_known_centers+=1
available_cells = list(range(cells))
for c in range(num_known_centers, k):
c2 = c-1
# use different formulation for distance... if sparse, use lls
# if not sparse, use poisson_dist
if sparse.issparse(data):
lls = poisson_ll(data, centers[:,c2:c2+1]).flatten()
distances[:,c2] = 1 + lls.max() - lls
distances[:,c2] /= distances[:,c2].max()
else:
for cell in range(cells):
distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2])
# choose a new data point as center... probability proportional
# to distance^2
min_distances = np.min(distances, 1)
min_distances = min_distances**2
min_distances = min_distances[available_cells]
# should be sampling without replacement
min_dist = np.random.choice(available_cells,
p=min_distances/min_distances.sum())
available_cells.pop(available_cells.index(min_dist))
if sparse.issparse(data):
centers[:,c] = data[:, min_dist].toarray().flatten()
else:
centers[:,c] = data[:, min_dist]
lls = poisson_ll(data, centers)
new_assignments = np.argmax(lls, 1)
centers[centers==0.0] = eps
return centers, new_assignments | [
"def",
"kmeans_pp",
"(",
"data",
",",
"k",
",",
"centers",
"=",
"None",
")",
":",
"# TODO: what if there is missing data for a given gene?",
"# missing data could be if all the entires are -1.",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"if",
"sparse",
".",
"i... | Generates kmeans++ initial centers.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).
Returns:
centers - a genes x k array of cluster means.
assignments - a cells x 1 array of cluster assignments | [
"Generates",
"kmeans",
"++",
"initial",
"centers",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/clustering.py#L10-L71 | train | 47,321 |
yjzhang/uncurl_python | uncurl/clustering.py | poisson_cluster | def poisson_cluster(data, k, init=None, max_iters=100):
"""
Performs Poisson hard EM on the given data.
Args:
data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format.
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
a tuple of two arrays: a cells x 1 vector of cluster assignments,
and a genes x k array of cluster means.
"""
# TODO: be able to use a combination of fixed and unknown starting points
# e.g., have init values only for certain genes, have a row of all
# zeros indicating that kmeans++ should be used for that row.
genes, cells = data.shape
#print 'starting: ', centers
if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
data = sparse.csc_matrix(data)
init, assignments = kmeans_pp(data, k, centers=init)
centers = np.copy(init)
assignments = np.zeros(cells)
for it in range(max_iters):
lls = poisson_ll(data, centers)
#cluster_dists = np.zeros((cells, k))
new_assignments = np.argmax(lls, 1)
if np.equal(assignments, new_assignments).all():
#print 'ending: ', centers
return new_assignments, centers
for c in range(k):
if sparse.issparse(data):
if data[:,new_assignments==c].shape[0]==0:
# re-initialize centers?
new_c, _ = kmeans_pp(data, k, centers[:,:c])
centers[:,c] = new_c[:,c]
else:
centers[:,c] = np.asarray(data[:,new_assignments==c].mean(1)).flatten()
else:
if len(data[:,new_assignments==c])==0:
new_c, _ = kmeans_pp(data, k, centers[:,:c])
centers[:,c] = new_c[:,c]
else:
centers[:,c] = np.mean(data[:,new_assignments==c], 1)
assignments = new_assignments
return assignments, centers | python | def poisson_cluster(data, k, init=None, max_iters=100):
"""
Performs Poisson hard EM on the given data.
Args:
data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format.
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
a tuple of two arrays: a cells x 1 vector of cluster assignments,
and a genes x k array of cluster means.
"""
# TODO: be able to use a combination of fixed and unknown starting points
# e.g., have init values only for certain genes, have a row of all
# zeros indicating that kmeans++ should be used for that row.
genes, cells = data.shape
#print 'starting: ', centers
if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
data = sparse.csc_matrix(data)
init, assignments = kmeans_pp(data, k, centers=init)
centers = np.copy(init)
assignments = np.zeros(cells)
for it in range(max_iters):
lls = poisson_ll(data, centers)
#cluster_dists = np.zeros((cells, k))
new_assignments = np.argmax(lls, 1)
if np.equal(assignments, new_assignments).all():
#print 'ending: ', centers
return new_assignments, centers
for c in range(k):
if sparse.issparse(data):
if data[:,new_assignments==c].shape[0]==0:
# re-initialize centers?
new_c, _ = kmeans_pp(data, k, centers[:,:c])
centers[:,c] = new_c[:,c]
else:
centers[:,c] = np.asarray(data[:,new_assignments==c].mean(1)).flatten()
else:
if len(data[:,new_assignments==c])==0:
new_c, _ = kmeans_pp(data, k, centers[:,:c])
centers[:,c] = new_c[:,c]
else:
centers[:,c] = np.mean(data[:,new_assignments==c], 1)
assignments = new_assignments
return assignments, centers | [
"def",
"poisson_cluster",
"(",
"data",
",",
"k",
",",
"init",
"=",
"None",
",",
"max_iters",
"=",
"100",
")",
":",
"# TODO: be able to use a combination of fixed and unknown starting points",
"# e.g., have init values only for certain genes, have a row of all",
"# zeros indicatin... | Performs Poisson hard EM on the given data.
Args:
data (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format.
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
a tuple of two arrays: a cells x 1 vector of cluster assignments,
and a genes x k array of cluster means. | [
"Performs",
"Poisson",
"hard",
"EM",
"on",
"the",
"given",
"data",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/clustering.py#L73-L119 | train | 47,322 |
moonso/loqusdb | loqusdb/commands/view.py | cases | def cases(ctx, case_id, to_json):
"""Display cases in the database."""
adapter = ctx.obj['adapter']
cases = []
if case_id:
case_obj = adapter.case({'case_id':case_id})
if not case_obj:
LOG.info("Case {0} does not exist in database".format(case_id))
return
case_obj['_id'] = str(case_obj['_id'])
cases.append(case_obj)
else:
cases = adapter.cases()
if cases.count() == 0:
LOG.info("No cases found in database")
ctx.abort()
if to_json:
click.echo(json.dumps(cases))
return
click.echo("#case_id\tvcf_path")
for case_obj in cases:
click.echo("{0}\t{1}".format(case_obj.get('case_id'), case_obj.get('vcf_path'))) | python | def cases(ctx, case_id, to_json):
"""Display cases in the database."""
adapter = ctx.obj['adapter']
cases = []
if case_id:
case_obj = adapter.case({'case_id':case_id})
if not case_obj:
LOG.info("Case {0} does not exist in database".format(case_id))
return
case_obj['_id'] = str(case_obj['_id'])
cases.append(case_obj)
else:
cases = adapter.cases()
if cases.count() == 0:
LOG.info("No cases found in database")
ctx.abort()
if to_json:
click.echo(json.dumps(cases))
return
click.echo("#case_id\tvcf_path")
for case_obj in cases:
click.echo("{0}\t{1}".format(case_obj.get('case_id'), case_obj.get('vcf_path'))) | [
"def",
"cases",
"(",
"ctx",
",",
"case_id",
",",
"to_json",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"cases",
"=",
"[",
"]",
"if",
"case_id",
":",
"case_obj",
"=",
"adapter",
".",
"case",
"(",
"{",
"'case_id'",
":",
"case... | Display cases in the database. | [
"Display",
"cases",
"in",
"the",
"database",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L19-L45 | train | 47,323 |
moonso/loqusdb | loqusdb/commands/view.py | variants | def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type,
sv_type):
"""Display variants in the database."""
if sv_type:
variant_type = 'sv'
adapter = ctx.obj['adapter']
if (start or end):
if not (chromosome and start and end):
LOG.warning("Regions must be specified with chromosome, start and end")
return
if variant_id:
variant = adapter.get_variant({'_id':variant_id})
if variant:
click.echo(variant)
else:
LOG.info("Variant {0} does not exist in database".format(variant_id))
return
if variant_type == 'snv':
result = adapter.get_variants(
chromosome=chromosome,
start=start,
end=end
)
else:
LOG.info("Search for svs")
result = adapter.get_sv_variants(
chromosome=chromosome,
end_chromosome=end_chromosome,
sv_type=sv_type,
pos=start,
end=end
)
i = 0
for variant in result:
i += 1
pp(variant)
LOG.info("Number of variants found in database: %s", i) | python | def variants(ctx, variant_id, chromosome, end_chromosome, start, end, variant_type,
sv_type):
"""Display variants in the database."""
if sv_type:
variant_type = 'sv'
adapter = ctx.obj['adapter']
if (start or end):
if not (chromosome and start and end):
LOG.warning("Regions must be specified with chromosome, start and end")
return
if variant_id:
variant = adapter.get_variant({'_id':variant_id})
if variant:
click.echo(variant)
else:
LOG.info("Variant {0} does not exist in database".format(variant_id))
return
if variant_type == 'snv':
result = adapter.get_variants(
chromosome=chromosome,
start=start,
end=end
)
else:
LOG.info("Search for svs")
result = adapter.get_sv_variants(
chromosome=chromosome,
end_chromosome=end_chromosome,
sv_type=sv_type,
pos=start,
end=end
)
i = 0
for variant in result:
i += 1
pp(variant)
LOG.info("Number of variants found in database: %s", i) | [
"def",
"variants",
"(",
"ctx",
",",
"variant_id",
",",
"chromosome",
",",
"end_chromosome",
",",
"start",
",",
"end",
",",
"variant_type",
",",
"sv_type",
")",
":",
"if",
"sv_type",
":",
"variant_type",
"=",
"'sv'",
"adapter",
"=",
"ctx",
".",
"obj",
"["... | Display variants in the database. | [
"Display",
"variants",
"in",
"the",
"database",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L77-L119 | train | 47,324 |
moonso/loqusdb | loqusdb/commands/view.py | index | def index(ctx, view):
"""Index the database."""
adapter = ctx.obj['adapter']
if view:
click.echo(adapter.indexes())
return
adapter.ensure_indexes() | python | def index(ctx, view):
"""Index the database."""
adapter = ctx.obj['adapter']
if view:
click.echo(adapter.indexes())
return
adapter.ensure_indexes() | [
"def",
"index",
"(",
"ctx",
",",
"view",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"if",
"view",
":",
"click",
".",
"echo",
"(",
"adapter",
".",
"indexes",
"(",
")",
")",
"return",
"adapter",
".",
"ensure_indexes",
"(",
")... | Index the database. | [
"Index",
"the",
"database",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/view.py#L127-L133 | train | 47,325 |
limix/numpy-sugar | numpy_sugar/linalg/dot.py | ddot | def ddot(L, R, left=None, out=None):
r"""Dot product of a matrix and a diagonal one.
Args:
L (array_like): Left matrix.
R (array_like): Right matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
L = asarray(L, float)
R = asarray(R, float)
if left is None:
ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2
if not ok:
msg = "Wrong array layout. One array should have"
msg += " ndim=1 and the other one ndim=2."
raise ValueError(msg)
left = L.ndim == 1
if left:
if out is None:
out = copy(R)
L = L.reshape(list(L.shape) + [1] * (R.ndim - 1))
return multiply(L, R, out=out)
else:
if out is None:
out = copy(L)
return multiply(L, R, out=out) | python | def ddot(L, R, left=None, out=None):
r"""Dot product of a matrix and a diagonal one.
Args:
L (array_like): Left matrix.
R (array_like): Right matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
L = asarray(L, float)
R = asarray(R, float)
if left is None:
ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2
if not ok:
msg = "Wrong array layout. One array should have"
msg += " ndim=1 and the other one ndim=2."
raise ValueError(msg)
left = L.ndim == 1
if left:
if out is None:
out = copy(R)
L = L.reshape(list(L.shape) + [1] * (R.ndim - 1))
return multiply(L, R, out=out)
else:
if out is None:
out = copy(L)
return multiply(L, R, out=out) | [
"def",
"ddot",
"(",
"L",
",",
"R",
",",
"left",
"=",
"None",
",",
"out",
"=",
"None",
")",
":",
"L",
"=",
"asarray",
"(",
"L",
",",
"float",
")",
"R",
"=",
"asarray",
"(",
"R",
",",
"float",
")",
"if",
"left",
"is",
"None",
":",
"ok",
"=",
... | r"""Dot product of a matrix and a diagonal one.
Args:
L (array_like): Left matrix.
R (array_like): Right matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix. | [
"r",
"Dot",
"product",
"of",
"a",
"matrix",
"and",
"a",
"diagonal",
"one",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L29-L57 | train | 47,326 |
limix/numpy-sugar | numpy_sugar/linalg/dot.py | cdot | def cdot(L, out=None):
r"""Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
"""
L = asarray(L, float)
layout_error = "Wrong matrix layout."
if L.ndim != 2:
raise ValueError(layout_error)
if L.shape[0] != L.shape[1]:
raise ValueError(layout_error)
if out is None:
out = empty((L.shape[0], L.shape[1]), float)
return einsum("ij,kj->ik", L, L, out=out) | python | def cdot(L, out=None):
r"""Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
"""
L = asarray(L, float)
layout_error = "Wrong matrix layout."
if L.ndim != 2:
raise ValueError(layout_error)
if L.shape[0] != L.shape[1]:
raise ValueError(layout_error)
if out is None:
out = empty((L.shape[0], L.shape[1]), float)
return einsum("ij,kj->ik", L, L, out=out) | [
"def",
"cdot",
"(",
"L",
",",
"out",
"=",
"None",
")",
":",
"L",
"=",
"asarray",
"(",
"L",
",",
"float",
")",
"layout_error",
"=",
"\"Wrong matrix layout.\"",
"if",
"L",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"layout_error",
")",
"i... | r"""Product of a Cholesky matrix with itself transposed.
Args:
L (array_like): Cholesky matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`. | [
"r",
"Product",
"of",
"a",
"Cholesky",
"matrix",
"with",
"itself",
"transposed",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L60-L83 | train | 47,327 |
limix/numpy-sugar | numpy_sugar/_rankdata.py | nanrankdata | def nanrankdata(a, axis=-1, inplace=False):
""" Rank data for arrays contaning NaN values.
Parameters
----------
X : array_like
Array of values.
axis : int, optional
Axis value. Defaults to `1`.
inplace : bool, optional
Defaults to `False`.
Returns
-------
array_like
Ranked array.
Examples
--------
.. doctest::
>>> from numpy_sugar import nanrankdata
>>> from numpy import arange
>>>
>>> X = arange(15).reshape((5, 3)).astype(float)
>>> print(nanrankdata(X))
[[1. 1. 1.]
[2. 2. 2.]
[3. 3. 3.]
[4. 4. 4.]
[5. 5. 5.]]
"""
from scipy.stats import rankdata
if hasattr(a, "dtype") and issubdtype(a.dtype, integer):
raise ValueError("Integer type is not supported.")
if isinstance(a, (tuple, list)):
if inplace:
raise ValueError("Can't use `inplace=True` for {}.".format(type(a)))
a = asarray(a, float)
orig_shape = a.shape
if a.ndim == 1:
a = a.reshape(orig_shape + (1,))
if not inplace:
a = a.copy()
def rank1d(x):
idx = ~isnan(x)
x[idx] = rankdata(x[idx])
return x
a = a.swapaxes(1, axis)
a = apply_along_axis(rank1d, 0, a)
a = a.swapaxes(1, axis)
return a.reshape(orig_shape) | python | def nanrankdata(a, axis=-1, inplace=False):
""" Rank data for arrays contaning NaN values.
Parameters
----------
X : array_like
Array of values.
axis : int, optional
Axis value. Defaults to `1`.
inplace : bool, optional
Defaults to `False`.
Returns
-------
array_like
Ranked array.
Examples
--------
.. doctest::
>>> from numpy_sugar import nanrankdata
>>> from numpy import arange
>>>
>>> X = arange(15).reshape((5, 3)).astype(float)
>>> print(nanrankdata(X))
[[1. 1. 1.]
[2. 2. 2.]
[3. 3. 3.]
[4. 4. 4.]
[5. 5. 5.]]
"""
from scipy.stats import rankdata
if hasattr(a, "dtype") and issubdtype(a.dtype, integer):
raise ValueError("Integer type is not supported.")
if isinstance(a, (tuple, list)):
if inplace:
raise ValueError("Can't use `inplace=True` for {}.".format(type(a)))
a = asarray(a, float)
orig_shape = a.shape
if a.ndim == 1:
a = a.reshape(orig_shape + (1,))
if not inplace:
a = a.copy()
def rank1d(x):
idx = ~isnan(x)
x[idx] = rankdata(x[idx])
return x
a = a.swapaxes(1, axis)
a = apply_along_axis(rank1d, 0, a)
a = a.swapaxes(1, axis)
return a.reshape(orig_shape) | [
"def",
"nanrankdata",
"(",
"a",
",",
"axis",
"=",
"-",
"1",
",",
"inplace",
"=",
"False",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"rankdata",
"if",
"hasattr",
"(",
"a",
",",
"\"dtype\"",
")",
"and",
"issubdtype",
"(",
"a",
".",
"dtype",
... | Rank data for arrays containing NaN values.
Parameters
----------
X : array_like
Array of values.
axis : int, optional
Axis value. Defaults to `-1`.
inplace : bool, optional
Defaults to `False`.
Returns
-------
array_like
Ranked array.
Examples
--------
.. doctest::
>>> from numpy_sugar import nanrankdata
>>> from numpy import arange
>>>
>>> X = arange(15).reshape((5, 3)).astype(float)
>>> print(nanrankdata(X))
[[1. 1. 1.]
[2. 2. 2.]
[3. 3. 3.]
[4. 4. 4.]
[5. 5. 5.]] | [
"Rank",
"data",
"for",
"arrays",
"contaning",
"NaN",
"values",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_rankdata.py#L4-L64 | train | 47,328 |
limix/numpy-sugar | numpy_sugar/linalg/det.py | plogdet | def plogdet(K):
r"""Log of the pseudo-determinant.
It assumes that ``K`` is a positive semi-definite matrix.
Args:
K (array_like): matrix.
Returns:
float: log of the pseudo-determinant.
"""
egvals = eigvalsh(K)
return npsum(log(egvals[egvals > epsilon])) | python | def plogdet(K):
r"""Log of the pseudo-determinant.
It assumes that ``K`` is a positive semi-definite matrix.
Args:
K (array_like): matrix.
Returns:
float: log of the pseudo-determinant.
"""
egvals = eigvalsh(K)
return npsum(log(egvals[egvals > epsilon])) | [
"def",
"plogdet",
"(",
"K",
")",
":",
"egvals",
"=",
"eigvalsh",
"(",
"K",
")",
"return",
"npsum",
"(",
"log",
"(",
"egvals",
"[",
"egvals",
">",
"epsilon",
"]",
")",
")"
] | r"""Log of the pseudo-determinant.
It assumes that ``K`` is a positive semi-definite matrix.
Args:
K (array_like): matrix.
Returns:
float: log of the pseudo-determinant. | [
"r",
"Log",
"of",
"the",
"pseudo",
"-",
"determinant",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/det.py#L8-L20 | train | 47,329 |
limix/numpy-sugar | numpy_sugar/linalg/qs.py | economic_qs | def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``.
"""
(S, Q) = eigh(K)
nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon
nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon
if nok:
from scipy.linalg import eigh as sp_eigh
(S, Q) = sp_eigh(K)
ok = S >= epsilon
nok = logical_not(ok)
S0 = S[ok]
Q0 = Q[:, ok]
Q1 = Q[:, nok]
return ((Q0, Q1), S0) | python | def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``.
"""
(S, Q) = eigh(K)
nok = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon
nok = nok and abs(max(K.min(), K.max(), key=abs)) >= epsilon
if nok:
from scipy.linalg import eigh as sp_eigh
(S, Q) = sp_eigh(K)
ok = S >= epsilon
nok = logical_not(ok)
S0 = S[ok]
Q0 = Q[:, ok]
Q1 = Q[:, nok]
return ((Q0, Q1), S0) | [
"def",
"economic_qs",
"(",
"K",
",",
"epsilon",
"=",
"sqrt",
"(",
"finfo",
"(",
"float",
")",
".",
"eps",
")",
")",
":",
"(",
"S",
",",
"Q",
")",
"=",
"eigh",
"(",
"K",
")",
"nok",
"=",
"abs",
"(",
"max",
"(",
"Q",
"[",
"0",
"]",
".",
"mi... | r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``. | [
"r",
"Economic",
"eigen",
"decomposition",
"for",
"symmetric",
"matrices",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/qs.py#L5-L36 | train | 47,330 |
limix/numpy-sugar | numpy_sugar/_array.py | cartesian | def cartesian(shape):
r"""Cartesian indexing.
Returns a sequence of n-tuples indexing each element of a hypothetical
matrix of the given shape.
Args:
shape (tuple): tuple of dimensions.
Returns:
array_like: indices.
Example
-------
.. doctest::
>>> from numpy_sugar import cartesian
>>> print(cartesian((2, 3)))
[[0 0]
[0 1]
[0 2]
[1 0]
[1 1]
[1 2]]
Reference:
[1] http://stackoverflow.com/a/27286794
"""
n = len(shape)
idx = [slice(0, s) for s in shape]
g = rollaxis(mgrid[idx], 0, n + 1)
return g.reshape((prod(shape), n)) | python | def cartesian(shape):
r"""Cartesian indexing.
Returns a sequence of n-tuples indexing each element of a hypothetical
matrix of the given shape.
Args:
shape (tuple): tuple of dimensions.
Returns:
array_like: indices.
Example
-------
.. doctest::
>>> from numpy_sugar import cartesian
>>> print(cartesian((2, 3)))
[[0 0]
[0 1]
[0 2]
[1 0]
[1 1]
[1 2]]
Reference:
[1] http://stackoverflow.com/a/27286794
"""
n = len(shape)
idx = [slice(0, s) for s in shape]
g = rollaxis(mgrid[idx], 0, n + 1)
return g.reshape((prod(shape), n)) | [
"def",
"cartesian",
"(",
"shape",
")",
":",
"n",
"=",
"len",
"(",
"shape",
")",
"idx",
"=",
"[",
"slice",
"(",
"0",
",",
"s",
")",
"for",
"s",
"in",
"shape",
"]",
"g",
"=",
"rollaxis",
"(",
"mgrid",
"[",
"idx",
"]",
",",
"0",
",",
"n",
"+",... | r"""Cartesian indexing.
Returns a sequence of n-tuples indexing each element of a hypothetical
matrix of the given shape.
Args:
shape (tuple): tuple of dimensions.
Returns:
array_like: indices.
Example
-------
.. doctest::
>>> from numpy_sugar import cartesian
>>> print(cartesian((2, 3)))
[[0 0]
[0 1]
[0 2]
[1 0]
[1 1]
[1 2]]
Reference:
[1] http://stackoverflow.com/a/27286794 | [
"r",
"Cartesian",
"indexing",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_array.py#L96-L129 | train | 47,331 |
limix/numpy-sugar | numpy_sugar/_array.py | unique | def unique(ar):
r"""Find the unique elements of an array.
It uses ``dask.array.unique`` if necessary.
Args:
ar (array_like): Input array.
Returns:
array_like: the sorted unique elements.
"""
import dask.array as da
if isinstance(ar, da.core.Array):
return da.unique(ar)
return _unique(ar) | python | def unique(ar):
r"""Find the unique elements of an array.
It uses ``dask.array.unique`` if necessary.
Args:
ar (array_like): Input array.
Returns:
array_like: the sorted unique elements.
"""
import dask.array as da
if isinstance(ar, da.core.Array):
return da.unique(ar)
return _unique(ar) | [
"def",
"unique",
"(",
"ar",
")",
":",
"import",
"dask",
".",
"array",
"as",
"da",
"if",
"isinstance",
"(",
"ar",
",",
"da",
".",
"core",
".",
"Array",
")",
":",
"return",
"da",
".",
"unique",
"(",
"ar",
")",
"return",
"_unique",
"(",
"ar",
")"
] | r"""Find the unique elements of an array.
It uses ``dask.array.unique`` if necessary.
Args:
ar (array_like): Input array.
Returns:
array_like: the sorted unique elements. | [
"r",
"Find",
"the",
"unique",
"elements",
"of",
"an",
"array",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_array.py#L132-L149 | train | 47,332 |
limix/numpy-sugar | numpy_sugar/linalg/lu.py | lu_slogdet | def lu_slogdet(LU):
r"""Natural logarithm of a LU decomposition.
Args:
LU (tuple): LU decomposition.
Returns:
tuple: sign and log-determinant.
"""
LU = (asarray(LU[0], float), asarray(LU[1], float))
adet = _sum(log(_abs(LU[0].diagonal())))
s = prod(sign(LU[0].diagonal()))
nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32"))
odd = nrows_exchange % 2 == 1
if odd:
s *= -1.0
return (s, adet) | python | def lu_slogdet(LU):
r"""Natural logarithm of a LU decomposition.
Args:
LU (tuple): LU decomposition.
Returns:
tuple: sign and log-determinant.
"""
LU = (asarray(LU[0], float), asarray(LU[1], float))
adet = _sum(log(_abs(LU[0].diagonal())))
s = prod(sign(LU[0].diagonal()))
nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype="int32"))
odd = nrows_exchange % 2 == 1
if odd:
s *= -1.0
return (s, adet) | [
"def",
"lu_slogdet",
"(",
"LU",
")",
":",
"LU",
"=",
"(",
"asarray",
"(",
"LU",
"[",
"0",
"]",
",",
"float",
")",
",",
"asarray",
"(",
"LU",
"[",
"1",
"]",
",",
"float",
")",
")",
"adet",
"=",
"_sum",
"(",
"log",
"(",
"_abs",
"(",
"LU",
"["... | r"""Natural logarithm of a LU decomposition.
Args:
LU (tuple): LU decomposition.
Returns:
tuple: sign and log-determinant. | [
"r",
"Natural",
"logarithm",
"of",
"a",
"LU",
"decomposition",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lu.py#L6-L26 | train | 47,333 |
limix/numpy-sugar | numpy_sugar/linalg/lu.py | lu_solve | def lu_solve(LU, b):
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
"""
from scipy.linalg import lu_solve as sp_lu_solve
LU = (asarray(LU[0], float), asarray(LU[1], float))
b = asarray(b, float)
return sp_lu_solve(LU, b, check_finite=False) | python | def lu_solve(LU, b):
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
"""
from scipy.linalg import lu_solve as sp_lu_solve
LU = (asarray(LU[0], float), asarray(LU[1], float))
b = asarray(b, float)
return sp_lu_solve(LU, b, check_finite=False) | [
"def",
"lu_solve",
"(",
"LU",
",",
"b",
")",
":",
"from",
"scipy",
".",
"linalg",
"import",
"lu_solve",
"as",
"sp_lu_solve",
"LU",
"=",
"(",
"asarray",
"(",
"LU",
"[",
"0",
"]",
",",
"float",
")",
",",
"asarray",
"(",
"LU",
"[",
"1",
"]",
",",
... | r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization. | [
"r",
"Solve",
"for",
"LU",
"decomposition",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lu.py#L29-L52 | train | 47,334 |
limix/numpy-sugar | numpy_sugar/linalg/lstsq.py | lstsq | def lstsq(A, b):
r"""Return the least-squares solution to a linear matrix equation.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Least-squares solution.
"""
A = asarray(A, float)
b = asarray(b, float)
if A.ndim == 1:
A = A[:, newaxis]
if A.shape[1] == 1:
return dot(A.T, b) / squeeze(dot(A.T, A))
rcond = finfo(double).eps * max(*A.shape)
return npy_lstsq(A, b, rcond=rcond)[0] | python | def lstsq(A, b):
r"""Return the least-squares solution to a linear matrix equation.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Least-squares solution.
"""
A = asarray(A, float)
b = asarray(b, float)
if A.ndim == 1:
A = A[:, newaxis]
if A.shape[1] == 1:
return dot(A.T, b) / squeeze(dot(A.T, A))
rcond = finfo(double).eps * max(*A.shape)
return npy_lstsq(A, b, rcond=rcond)[0] | [
"def",
"lstsq",
"(",
"A",
",",
"b",
")",
":",
"A",
"=",
"asarray",
"(",
"A",
",",
"float",
")",
"b",
"=",
"asarray",
"(",
"b",
",",
"float",
")",
"if",
"A",
".",
"ndim",
"==",
"1",
":",
"A",
"=",
"A",
"[",
":",
",",
"newaxis",
"]",
"if",
... | r"""Return the least-squares solution to a linear matrix equation.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Least-squares solution. | [
"r",
"Return",
"the",
"least",
"-",
"squares",
"solution",
"to",
"a",
"linear",
"matrix",
"equation",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/lstsq.py#L6-L26 | train | 47,335 |
limix/numpy-sugar | numpy_sugar/linalg/svd.py | economic_svd | def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
r"""Economic Singular Value Decomposition.
Args:
G (array_like): Matrix to be factorized.
epsilon (float): Threshold on the square root of the eigen values.
Default is ``sqrt(finfo(float).eps)``.
Returns:
:class:`numpy.ndarray`: Unitary matrix.
:class:`numpy.ndarray`: Singular values.
:class:`numpy.ndarray`: Unitary matrix.
See Also
--------
numpy.linalg.svd : Cholesky decomposition.
scipy.linalg.svd : Cholesky decomposition.
"""
from scipy.linalg import svd
G = asarray(G, float)
(U, S, V) = svd(G, full_matrices=False, check_finite=False)
ok = S >= epsilon
S = S[ok]
U = U[:, ok]
V = V[ok, :]
return (U, S, V) | python | def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
r"""Economic Singular Value Decomposition.
Args:
G (array_like): Matrix to be factorized.
epsilon (float): Threshold on the square root of the eigen values.
Default is ``sqrt(finfo(float).eps)``.
Returns:
:class:`numpy.ndarray`: Unitary matrix.
:class:`numpy.ndarray`: Singular values.
:class:`numpy.ndarray`: Unitary matrix.
See Also
--------
numpy.linalg.svd : Cholesky decomposition.
scipy.linalg.svd : Cholesky decomposition.
"""
from scipy.linalg import svd
G = asarray(G, float)
(U, S, V) = svd(G, full_matrices=False, check_finite=False)
ok = S >= epsilon
S = S[ok]
U = U[:, ok]
V = V[ok, :]
return (U, S, V) | [
"def",
"economic_svd",
"(",
"G",
",",
"epsilon",
"=",
"sqrt",
"(",
"finfo",
"(",
"float",
")",
".",
"eps",
")",
")",
":",
"from",
"scipy",
".",
"linalg",
"import",
"svd",
"G",
"=",
"asarray",
"(",
"G",
",",
"float",
")",
"(",
"U",
",",
"S",
","... | r"""Economic Singular Value Decomposition.
Args:
G (array_like): Matrix to be factorized.
epsilon (float): Threshold on the square root of the eigen values.
Default is ``sqrt(finfo(float).eps)``.
Returns:
:class:`numpy.ndarray`: Unitary matrix.
:class:`numpy.ndarray`: Singular values.
:class:`numpy.ndarray`: Unitary matrix.
See Also
--------
numpy.linalg.svd : Cholesky decomposition.
scipy.linalg.svd : Cholesky decomposition. | [
"r",
"Economic",
"Singular",
"Value",
"Decomposition",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/svd.py#L4-L30 | train | 47,336 |
limix/numpy-sugar | numpy_sugar/linalg/solve.py | hsolve | def hsolve(A, y):
r"""Solver for the linear equations of two variables and equations only.
It uses Householder reductions to solve ``Ax = y`` in a robust manner.
Parameters
----------
A : array_like
Coefficient matrix.
y : array_like
Ordinate values.
Returns
-------
:class:`numpy.ndarray` Solution ``x``.
"""
n = _norm(A[0, 0], A[1, 0])
u0 = A[0, 0] - n
u1 = A[1, 0]
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
B00 = 1 - 2 * v0 * v0
B01 = 0 - 2 * v0 * v1
B11 = 1 - 2 * v1 * v1
D00 = B00 * A[0, 0] + B01 * A[1, 0]
D01 = B00 * A[0, 1] + B01 * A[1, 1]
D11 = B01 * A[0, 1] + B11 * A[1, 1]
b0 = y[0] - 2 * y[0] * v0 * v0 - 2 * y[1] * v0 * v1
b1 = y[1] - 2 * y[0] * v1 * v0 - 2 * y[1] * v1 * v1
n = _norm(D00, D01)
u0 = D00 - n
u1 = D01
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
E00 = 1 - 2 * v0 * v0
E01 = 0 - 2 * v0 * v1
E11 = 1 - 2 * v1 * v1
F00 = E00 * D00 + E01 * D01
F01 = E01 * D11
F11 = E11 * D11
F11 = (npy_abs(F11) > epsilon.small) * F11
with errstate(divide="ignore", invalid="ignore"):
Fi00 = nan_to_num(F00 / F00 / F00)
Fi11 = nan_to_num(F11 / F11 / F11)
Fi10 = nan_to_num(-(F01 / F00) * Fi11)
c0 = Fi00 * b0
c1 = Fi10 * b0 + Fi11 * b1
x0 = E00 * c0 + E01 * c1
x1 = E01 * c0 + E11 * c1
return array([x0, x1]) | python | def hsolve(A, y):
r"""Solver for the linear equations of two variables and equations only.
It uses Householder reductions to solve ``Ax = y`` in a robust manner.
Parameters
----------
A : array_like
Coefficient matrix.
y : array_like
Ordinate values.
Returns
-------
:class:`numpy.ndarray` Solution ``x``.
"""
n = _norm(A[0, 0], A[1, 0])
u0 = A[0, 0] - n
u1 = A[1, 0]
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
B00 = 1 - 2 * v0 * v0
B01 = 0 - 2 * v0 * v1
B11 = 1 - 2 * v1 * v1
D00 = B00 * A[0, 0] + B01 * A[1, 0]
D01 = B00 * A[0, 1] + B01 * A[1, 1]
D11 = B01 * A[0, 1] + B11 * A[1, 1]
b0 = y[0] - 2 * y[0] * v0 * v0 - 2 * y[1] * v0 * v1
b1 = y[1] - 2 * y[0] * v1 * v0 - 2 * y[1] * v1 * v1
n = _norm(D00, D01)
u0 = D00 - n
u1 = D01
nu = _norm(u0, u1)
with errstate(invalid="ignore", divide="ignore"):
v0 = nan_to_num(u0 / nu)
v1 = nan_to_num(u1 / nu)
E00 = 1 - 2 * v0 * v0
E01 = 0 - 2 * v0 * v1
E11 = 1 - 2 * v1 * v1
F00 = E00 * D00 + E01 * D01
F01 = E01 * D11
F11 = E11 * D11
F11 = (npy_abs(F11) > epsilon.small) * F11
with errstate(divide="ignore", invalid="ignore"):
Fi00 = nan_to_num(F00 / F00 / F00)
Fi11 = nan_to_num(F11 / F11 / F11)
Fi10 = nan_to_num(-(F01 / F00) * Fi11)
c0 = Fi00 * b0
c1 = Fi10 * b0 + Fi11 * b1
x0 = E00 * c0 + E01 * c1
x1 = E01 * c0 + E11 * c1
return array([x0, x1]) | [
"def",
"hsolve",
"(",
"A",
",",
"y",
")",
":",
"n",
"=",
"_norm",
"(",
"A",
"[",
"0",
",",
"0",
"]",
",",
"A",
"[",
"1",
",",
"0",
"]",
")",
"u0",
"=",
"A",
"[",
"0",
",",
"0",
"]",
"-",
"n",
"u1",
"=",
"A",
"[",
"1",
",",
"0",
"]... | r"""Solver for the linear equations of two variables and equations only.
It uses Householder reductions to solve ``Ax = y`` in a robust manner.
Parameters
----------
A : array_like
Coefficient matrix.
y : array_like
Ordinate values.
Returns
-------
:class:`numpy.ndarray` Solution ``x``. | [
"r",
"Solver",
"for",
"the",
"linear",
"equations",
"of",
"two",
"variables",
"and",
"equations",
"only",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L31-L98 | train | 47,337 |
limix/numpy-sugar | numpy_sugar/linalg/solve.py | rsolve | def rsolve(A, b, epsilon=_epsilon):
r"""Robust solve for the linear equations.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``.
"""
A = asarray(A, float)
b = asarray(b, float)
if A.shape[0] == 0:
return zeros((A.shape[1],))
if A.shape[1] == 0:
return zeros((0,))
try:
x = lstsq(A, b, rcond=epsilon)
r = sum(x[3] > epsilon)
if r == 0:
return zeros(A.shape[1])
return x[0]
except (ValueError, LinAlgError) as e:
warnings.warn(str(e), RuntimeWarning)
return solve(A, b) | python | def rsolve(A, b, epsilon=_epsilon):
r"""Robust solve for the linear equations.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``.
"""
A = asarray(A, float)
b = asarray(b, float)
if A.shape[0] == 0:
return zeros((A.shape[1],))
if A.shape[1] == 0:
return zeros((0,))
try:
x = lstsq(A, b, rcond=epsilon)
r = sum(x[3] > epsilon)
if r == 0:
return zeros(A.shape[1])
return x[0]
except (ValueError, LinAlgError) as e:
warnings.warn(str(e), RuntimeWarning)
return solve(A, b) | [
"def",
"rsolve",
"(",
"A",
",",
"b",
",",
"epsilon",
"=",
"_epsilon",
")",
":",
"A",
"=",
"asarray",
"(",
"A",
",",
"float",
")",
"b",
"=",
"asarray",
"(",
"b",
",",
"float",
")",
"if",
"A",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"ret... | r"""Robust solve for the linear equations.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``. | [
"r",
"Robust",
"solve",
"for",
"the",
"linear",
"equations",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/solve.py#L139-L163 | train | 47,338 |
limix/numpy-sugar | numpy_sugar/linalg/_kron.py | kron_dot | def kron_dot(A, B, C, out=None):
r""" Kronecker product followed by dot product.
Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of
dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`.
It computes
.. math::
\text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C))
\in n\times p,
which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`.
Parameters
----------
A : array_like
Matrix A.
B : array_like
Matrix B.
C : array_like
Matrix C.
out : :class:`numpy.ndarray`, optional
Copy result to. Defaults to ``None``.
Returns
-------
:class:`numpy.ndarray`
unvec((A ⊗ B) vec(C))
"""
from numpy import dot, zeros, asarray
A = asarray(A)
B = asarray(B)
C = asarray(C)
if out is None:
out = zeros((B.shape[0], A.shape[0]))
dot(B, dot(C, A.T), out=out)
return out | python | def kron_dot(A, B, C, out=None):
r""" Kronecker product followed by dot product.
Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of
dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`.
It computes
.. math::
\text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C))
\in n\times p,
which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`.
Parameters
----------
A : array_like
Matrix A.
B : array_like
Matrix B.
C : array_like
Matrix C.
out : :class:`numpy.ndarray`, optional
Copy result to. Defaults to ``None``.
Returns
-------
:class:`numpy.ndarray`
unvec((A ⊗ B) vec(C))
"""
from numpy import dot, zeros, asarray
A = asarray(A)
B = asarray(B)
C = asarray(C)
if out is None:
out = zeros((B.shape[0], A.shape[0]))
dot(B, dot(C, A.T), out=out)
return out | [
"def",
"kron_dot",
"(",
"A",
",",
"B",
",",
"C",
",",
"out",
"=",
"None",
")",
":",
"from",
"numpy",
"import",
"dot",
",",
"zeros",
",",
"asarray",
"A",
"=",
"asarray",
"(",
"A",
")",
"B",
"=",
"asarray",
"(",
"B",
")",
"C",
"=",
"asarray",
"... | r""" Kronecker product followed by dot product.
Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of
dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`.
It computes
.. math::
\text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C))
\in n\times p,
which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`.
Parameters
----------
A : array_like
Matrix A.
B : array_like
Matrix B.
C : array_like
Matrix C.
out : :class:`numpy.ndarray`, optional
Copy result to. Defaults to ``None``.
Returns
-------
:class:`numpy.ndarray`
unvec((A ⊗ B) vec(C)) | [
"r",
"Kronecker",
"product",
"followed",
"by",
"dot",
"product",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/_kron.py#L1-L41 | train | 47,339 |
limix/numpy-sugar | numpy_sugar/linalg/property.py | check_semidefinite_positiveness | def check_semidefinite_positiveness(A):
"""Check if ``A`` is a semi-definite positive matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is definite positive; ``False`` otherwise.
"""
B = empty_like(A)
B[:] = A
B[diag_indices_from(B)] += sqrt(finfo(float).eps)
try:
cholesky(B)
except LinAlgError:
return False
return True | python | def check_semidefinite_positiveness(A):
"""Check if ``A`` is a semi-definite positive matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is definite positive; ``False`` otherwise.
"""
B = empty_like(A)
B[:] = A
B[diag_indices_from(B)] += sqrt(finfo(float).eps)
try:
cholesky(B)
except LinAlgError:
return False
return True | [
"def",
"check_semidefinite_positiveness",
"(",
"A",
")",
":",
"B",
"=",
"empty_like",
"(",
"A",
")",
"B",
"[",
":",
"]",
"=",
"A",
"B",
"[",
"diag_indices_from",
"(",
"B",
")",
"]",
"+=",
"sqrt",
"(",
"finfo",
"(",
"float",
")",
".",
"eps",
")",
... | Check if ``A`` is a semi-definite positive matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is definite positive; ``False`` otherwise. | [
"Check",
"if",
"A",
"is",
"a",
"semi",
"-",
"definite",
"positive",
"matrix",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/property.py#L21-L37 | train | 47,340 |
limix/numpy-sugar | numpy_sugar/linalg/property.py | check_symmetry | def check_symmetry(A):
"""Check if ``A`` is a symmetric matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is symmetric; ``False`` otherwise.
"""
A = asanyarray(A)
if A.ndim != 2:
raise ValueError("Checks symmetry only for bi-dimensional arrays.")
if A.shape[0] != A.shape[1]:
return False
return abs(A - A.T).max() < sqrt(finfo(float).eps) | python | def check_symmetry(A):
"""Check if ``A`` is a symmetric matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is symmetric; ``False`` otherwise.
"""
A = asanyarray(A)
if A.ndim != 2:
raise ValueError("Checks symmetry only for bi-dimensional arrays.")
if A.shape[0] != A.shape[1]:
return False
return abs(A - A.T).max() < sqrt(finfo(float).eps) | [
"def",
"check_symmetry",
"(",
"A",
")",
":",
"A",
"=",
"asanyarray",
"(",
"A",
")",
"if",
"A",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"Checks symmetry only for bi-dimensional arrays.\"",
")",
"if",
"A",
".",
"shape",
"[",
"0",
"]",
"!... | Check if ``A`` is a symmetric matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is symmetric; ``False`` otherwise. | [
"Check",
"if",
"A",
"is",
"a",
"symmetric",
"matrix",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/property.py#L40-L56 | train | 47,341 |
limix/numpy-sugar | numpy_sugar/linalg/cho.py | cho_solve | def cho_solve(L, b):
r"""Solve for Cholesky decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the Cholesky factorization of :math:`\mathrm A`.
Args:
L (array_like): Lower triangular matrix.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
numpy.linalg.cholesky : Cholesky decomposition.
scipy.linalg.cho_solve : Solve linear equations given Cholesky
factorization.
"""
from scipy.linalg import cho_solve as sp_cho_solve
L = asarray(L, float)
b = asarray(b, float)
if L.size == 0:
if b.size != 0:
raise ValueError("Dimension mismatch between L and b.")
return empty(b.shape)
return sp_cho_solve((L, True), b, check_finite=False) | python | def cho_solve(L, b):
r"""Solve for Cholesky decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the Cholesky factorization of :math:`\mathrm A`.
Args:
L (array_like): Lower triangular matrix.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
numpy.linalg.cholesky : Cholesky decomposition.
scipy.linalg.cho_solve : Solve linear equations given Cholesky
factorization.
"""
from scipy.linalg import cho_solve as sp_cho_solve
L = asarray(L, float)
b = asarray(b, float)
if L.size == 0:
if b.size != 0:
raise ValueError("Dimension mismatch between L and b.")
return empty(b.shape)
return sp_cho_solve((L, True), b, check_finite=False) | [
"def",
"cho_solve",
"(",
"L",
",",
"b",
")",
":",
"from",
"scipy",
".",
"linalg",
"import",
"cho_solve",
"as",
"sp_cho_solve",
"L",
"=",
"asarray",
"(",
"L",
",",
"float",
")",
"b",
"=",
"asarray",
"(",
"b",
",",
"float",
")",
"if",
"L",
".",
"si... | r"""Solve for Cholesky decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the Cholesky factorization of :math:`\mathrm A`.
Args:
L (array_like): Lower triangular matrix.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
numpy.linalg.cholesky : Cholesky decomposition.
scipy.linalg.cho_solve : Solve linear equations given Cholesky
factorization. | [
"r",
"Solve",
"for",
"Cholesky",
"decomposition",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/cho.py#L4-L32 | train | 47,342 |
opentargets/validator | opentargets_validator/helpers.py | file_or_resource | def file_or_resource(fname=None):
'''get filename and check if in getcwd then get from
the package resources folder
'''
if fname is not None:
filename = os.path.expanduser(fname)
resource_package = opentargets_validator.__name__
resource_path = os.path.sep.join(('resources', filename))
abs_filename = os.path.join(os.path.abspath(os.getcwd()), filename) \
if not os.path.isabs(filename) else filename
return abs_filename if os.path.isfile(abs_filename) \
else res.resource_filename(resource_package, resource_path) | python | def file_or_resource(fname=None):
'''get filename and check if in getcwd then get from
the package resources folder
'''
if fname is not None:
filename = os.path.expanduser(fname)
resource_package = opentargets_validator.__name__
resource_path = os.path.sep.join(('resources', filename))
abs_filename = os.path.join(os.path.abspath(os.getcwd()), filename) \
if not os.path.isabs(filename) else filename
return abs_filename if os.path.isfile(abs_filename) \
else res.resource_filename(resource_package, resource_path) | [
"def",
"file_or_resource",
"(",
"fname",
"=",
"None",
")",
":",
"if",
"fname",
"is",
"not",
"None",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"fname",
")",
"resource_package",
"=",
"opentargets_validator",
".",
"__name__",
"resource_pa... | get filename and check if in getcwd then get from
the package resources folder | [
"get",
"filename",
"and",
"check",
"if",
"in",
"getcwd",
"then",
"get",
"from",
"the",
"package",
"resources",
"folder"
] | 0a80c42fc02237c72e27a32e022c1d5d9f4e25ff | https://github.com/opentargets/validator/blob/0a80c42fc02237c72e27a32e022c1d5d9f4e25ff/opentargets_validator/helpers.py#L157-L171 | train | 47,343 |
DiamondLightSource/python-procrunner | procrunner/__init__.py | run_process_dummy | def run_process_dummy(command, **kwargs):
"""
A stand-in function that returns a valid result dictionary indicating a
successful execution. The external process is not run.
"""
warnings.warn(
"procrunner.run_process_dummy() is deprecated", DeprecationWarning, stacklevel=2
)
time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
logger.info("run_process is disabled. Requested command: %s", command)
result = ReturnObject(
{
"exitcode": 0,
"command": command,
"stdout": "",
"stderr": "",
"timeout": False,
"runtime": 0,
"time_start": time_start,
"time_end": time_start,
}
)
if kwargs.get("stdin") is not None:
result.update(
{"stdin_bytes_sent": len(kwargs["stdin"]), "stdin_bytes_remain": 0}
)
return result | python | def run_process_dummy(command, **kwargs):
"""
A stand-in function that returns a valid result dictionary indicating a
successful execution. The external process is not run.
"""
warnings.warn(
"procrunner.run_process_dummy() is deprecated", DeprecationWarning, stacklevel=2
)
time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
logger.info("run_process is disabled. Requested command: %s", command)
result = ReturnObject(
{
"exitcode": 0,
"command": command,
"stdout": "",
"stderr": "",
"timeout": False,
"runtime": 0,
"time_start": time_start,
"time_end": time_start,
}
)
if kwargs.get("stdin") is not None:
result.update(
{"stdin_bytes_sent": len(kwargs["stdin"]), "stdin_bytes_remain": 0}
)
return result | [
"def",
"run_process_dummy",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"procrunner.run_process_dummy() is deprecated\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"time_start",
"=",
"time",
".",
"strftime"... | A stand-in function that returns a valid result dictionary indicating a
successful execution. The external process is not run. | [
"A",
"stand",
"-",
"in",
"function",
"that",
"returns",
"a",
"valid",
"result",
"dictionary",
"indicating",
"a",
"successful",
"execution",
".",
"The",
"external",
"process",
"is",
"not",
"run",
"."
] | e11c446f97f28abceb507d21403259757f08be0a | https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L593-L621 | train | 47,344 |
DiamondLightSource/python-procrunner | procrunner/__init__.py | run_process | def run_process(*args, **kwargs):
"""API used up to version 0.2.0."""
warnings.warn(
"procrunner.run_process() is deprecated and has been renamed to run()",
DeprecationWarning,
stacklevel=2,
)
return run(*args, **kwargs) | python | def run_process(*args, **kwargs):
"""API used up to version 0.2.0."""
warnings.warn(
"procrunner.run_process() is deprecated and has been renamed to run()",
DeprecationWarning,
stacklevel=2,
)
return run(*args, **kwargs) | [
"def",
"run_process",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"procrunner.run_process() is deprecated and has been renamed to run()\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"return",
"run",
... | API used up to version 0.2.0. | [
"API",
"used",
"up",
"to",
"version",
"0",
".",
"2",
".",
"0",
"."
] | e11c446f97f28abceb507d21403259757f08be0a | https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L624-L631 | train | 47,345 |
DiamondLightSource/python-procrunner | procrunner/__init__.py | _NonBlockingStreamReader.get_output | def get_output(self):
"""
Retrieve the stored data in full.
This call may block if the reading thread has not yet terminated.
"""
self._closing = True
if not self.has_finished():
if self._debug:
# Main thread overtook stream reading thread.
underrun_debug_timer = timeit.default_timer()
logger.warning("NBSR underrun")
self._thread.join()
if not self.has_finished():
if self._debug:
logger.debug(
"NBSR join after %f seconds, underrun not resolved"
% (timeit.default_timer() - underrun_debug_timer)
)
raise Exception("thread did not terminate")
if self._debug:
logger.debug(
"NBSR underrun resolved after %f seconds"
% (timeit.default_timer() - underrun_debug_timer)
)
if self._closed:
raise Exception("streamreader double-closed")
self._closed = True
data = self._buffer.getvalue()
self._buffer.close()
return data | python | def get_output(self):
"""
Retrieve the stored data in full.
This call may block if the reading thread has not yet terminated.
"""
self._closing = True
if not self.has_finished():
if self._debug:
# Main thread overtook stream reading thread.
underrun_debug_timer = timeit.default_timer()
logger.warning("NBSR underrun")
self._thread.join()
if not self.has_finished():
if self._debug:
logger.debug(
"NBSR join after %f seconds, underrun not resolved"
% (timeit.default_timer() - underrun_debug_timer)
)
raise Exception("thread did not terminate")
if self._debug:
logger.debug(
"NBSR underrun resolved after %f seconds"
% (timeit.default_timer() - underrun_debug_timer)
)
if self._closed:
raise Exception("streamreader double-closed")
self._closed = True
data = self._buffer.getvalue()
self._buffer.close()
return data | [
"def",
"get_output",
"(",
"self",
")",
":",
"self",
".",
"_closing",
"=",
"True",
"if",
"not",
"self",
".",
"has_finished",
"(",
")",
":",
"if",
"self",
".",
"_debug",
":",
"# Main thread overtook stream reading thread.",
"underrun_debug_timer",
"=",
"timeit",
... | Retrieve the stored data in full.
This call may block if the reading thread has not yet terminated. | [
"Retrieve",
"the",
"stored",
"data",
"in",
"full",
".",
"This",
"call",
"may",
"block",
"if",
"the",
"reading",
"thread",
"has",
"not",
"yet",
"terminated",
"."
] | e11c446f97f28abceb507d21403259757f08be0a | https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L173-L202 | train | 47,346 |
limix/numpy-sugar | numpy_sugar/linalg/diag.py | sum2diag | def sum2diag(A, D, out=None):
r"""Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
A = asarray(A, float)
D = asarray(D, float)
if out is None:
out = copy(A)
else:
copyto(out, A)
einsum("ii->i", out)[:] += D
return out | python | def sum2diag(A, D, out=None):
r"""Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
"""
A = asarray(A, float)
D = asarray(D, float)
if out is None:
out = copy(A)
else:
copyto(out, A)
einsum("ii->i", out)[:] += D
return out | [
"def",
"sum2diag",
"(",
"A",
",",
"D",
",",
"out",
"=",
"None",
")",
":",
"A",
"=",
"asarray",
"(",
"A",
",",
"float",
")",
"D",
"=",
"asarray",
"(",
"D",
",",
"float",
")",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"copy",
"(",
"A",
")",... | r"""Add values ``D`` to the diagonal of matrix ``A``.
Args:
A (array_like): Left-hand side.
D (array_like or float): Values to add.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix. | [
"r",
"Add",
"values",
"D",
"to",
"the",
"diagonal",
"of",
"matrix",
"A",
"."
] | 4bdfa26913135c76ef3cd542a332f4e5861e948b | https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/diag.py#L29-L47 | train | 47,347 |
ikegami-yukino/jaconv | jaconv/jaconv.py | kata2hira | def kata2hira(text, ignore=''):
"""Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン
"""
if ignore:
k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy())
return _convert(text, k2h_map)
return _convert(text, K2H_TABLE) | python | def kata2hira(text, ignore=''):
"""Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン
"""
if ignore:
k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy())
return _convert(text, k2h_map)
return _convert(text, K2H_TABLE) | [
"def",
"kata2hira",
"(",
"text",
",",
"ignore",
"=",
"''",
")",
":",
"if",
"ignore",
":",
"k2h_map",
"=",
"_exclude_ignorechar",
"(",
"ignore",
",",
"K2H_TABLE",
".",
"copy",
"(",
")",
")",
"return",
"_convert",
"(",
"text",
",",
"k2h_map",
")",
"retur... | Convert Full-width Katakana to Hiragana
Parameters
----------
text : str
Full-width Katakana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Hiragana string.
Examples
--------
>>> print(jaconv.kata2hira('巴マミ'))
巴まみ
>>> print(jaconv.kata2hira('マミサン', ignore='ン'))
まみさン | [
"Convert",
"Full",
"-",
"width",
"Katakana",
"to",
"Hiragana"
] | 5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7 | https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L77-L102 | train | 47,348 |
ibab/matplotlib-hep | matplotlib_hep/__init__.py | histpoints | def histpoints(x, bins=None, xerr=None, yerr='gamma', normed=False, **kwargs):
"""
Plot a histogram as a series of data points.
Compute and draw the histogram of *x* using individual (x,y) points
for the bin contents.
By default, vertical poisson error bars are calculated using the
gamma distribution.
Horizontal error bars are omitted by default.
These can be enabled using the *xerr* argument.
Use ``xerr='binwidth'`` to draw horizontal error bars that indicate
the width of each histogram bin.
Parameters
---------
x : (n,) array or sequence of (n,) arrays
Input values. This takes either a single array or a sequence of
arrays, which are not required to be of the same length.
"""
import matplotlib.pyplot as plt
if bins is None:
bins = calc_nbins(x)
h, bins = np.histogram(x, bins=bins)
width = bins[1] - bins[0]
center = (bins[:-1] + bins[1:]) / 2
area = sum(h * width)
if isinstance(yerr, str):
yerr = poisson_limits(h, yerr)
if xerr == 'binwidth':
xerr = width / 2
if normed:
h = h / area
yerr = yerr / area
area = 1.
if not 'color' in kwargs:
kwargs['color'] = 'black'
if not 'fmt' in kwargs:
kwargs['fmt'] = 'o'
plt.errorbar(center, h, xerr=xerr, yerr=yerr, **kwargs)
return center, (yerr[0], h, yerr[1]), area | python | def histpoints(x, bins=None, xerr=None, yerr='gamma', normed=False, **kwargs):
"""
Plot a histogram as a series of data points.
Compute and draw the histogram of *x* using individual (x,y) points
for the bin contents.
By default, vertical poisson error bars are calculated using the
gamma distribution.
Horizontal error bars are omitted by default.
These can be enabled using the *xerr* argument.
Use ``xerr='binwidth'`` to draw horizontal error bars that indicate
the width of each histogram bin.
Parameters
---------
x : (n,) array or sequence of (n,) arrays
Input values. This takes either a single array or a sequence of
arrays, which are not required to be of the same length.
"""
import matplotlib.pyplot as plt
if bins is None:
bins = calc_nbins(x)
h, bins = np.histogram(x, bins=bins)
width = bins[1] - bins[0]
center = (bins[:-1] + bins[1:]) / 2
area = sum(h * width)
if isinstance(yerr, str):
yerr = poisson_limits(h, yerr)
if xerr == 'binwidth':
xerr = width / 2
if normed:
h = h / area
yerr = yerr / area
area = 1.
if not 'color' in kwargs:
kwargs['color'] = 'black'
if not 'fmt' in kwargs:
kwargs['fmt'] = 'o'
plt.errorbar(center, h, xerr=xerr, yerr=yerr, **kwargs)
return center, (yerr[0], h, yerr[1]), area | [
"def",
"histpoints",
"(",
"x",
",",
"bins",
"=",
"None",
",",
"xerr",
"=",
"None",
",",
"yerr",
"=",
"'gamma'",
",",
"normed",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"bins",
"is",
... | Plot a histogram as a series of data points.
Compute and draw the histogram of *x* using individual (x,y) points
for the bin contents.
By default, vertical poisson error bars are calculated using the
gamma distribution.
Horizontal error bars are omitted by default.
These can be enabled using the *xerr* argument.
Use ``xerr='binwidth'`` to draw horizontal error bars that indicate
the width of each histogram bin.
Parameters
---------
x : (n,) array or sequence of (n,) arrays
Input values. This takes either a single array or a sequence of
arrays, which are not required to be of the same length. | [
"Plot",
"a",
"histogram",
"as",
"a",
"series",
"of",
"data",
"points",
"."
] | 7ff83ffbc059a0ca9326f1ecb39979b13e33b22d | https://github.com/ibab/matplotlib-hep/blob/7ff83ffbc059a0ca9326f1ecb39979b13e33b22d/matplotlib_hep/__init__.py#L32-L84 | train | 47,349 |
kevinconway/daemons | daemons/message/eventlet.py | EventletMessageManager.pool | def pool(self):
"""Get an eventlet pool used to dispatch requests."""
self._pool = self._pool or eventlet.GreenPool(size=self.pool_size)
return self._pool | python | def pool(self):
"""Get an eventlet pool used to dispatch requests."""
self._pool = self._pool or eventlet.GreenPool(size=self.pool_size)
return self._pool | [
"def",
"pool",
"(",
"self",
")",
":",
"self",
".",
"_pool",
"=",
"self",
".",
"_pool",
"or",
"eventlet",
".",
"GreenPool",
"(",
"size",
"=",
"self",
".",
"pool_size",
")",
"return",
"self",
".",
"_pool"
] | Get an eventlet pool used to dispatch requests. | [
"Get",
"an",
"eventlet",
"pool",
"used",
"to",
"dispatch",
"requests",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/message/eventlet.py#L18-L21 | train | 47,350 |
kevinconway/daemons | daemons/startstop/simple.py | SimpleStartStopManager.start | def start(self):
"""Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'.
"""
if self.pid is not None:
LOG.error(
"The process is already running with pid {0}.".format(self.pid)
)
sys.exit(exit.ALREADY_RUNNING)
self.daemonize()
LOG.info("Beginning run loop for process.")
try:
self.run()
except Exception:
LOG.exception("Uncaught exception in the daemon run() method.")
self.stop()
sys.exit(exit.RUN_FAILURE) | python | def start(self):
"""Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'.
"""
if self.pid is not None:
LOG.error(
"The process is already running with pid {0}.".format(self.pid)
)
sys.exit(exit.ALREADY_RUNNING)
self.daemonize()
LOG.info("Beginning run loop for process.")
try:
self.run()
except Exception:
LOG.exception("Uncaught exception in the daemon run() method.")
self.stop()
sys.exit(exit.RUN_FAILURE) | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"pid",
"is",
"not",
"None",
":",
"LOG",
".",
"error",
"(",
"\"The process is already running with pid {0}.\"",
".",
"format",
"(",
"self",
".",
"pid",
")",
")",
"sys",
".",
"exit",
"(",
"exit",
... | Start the process with daemonization.
If the process is already started this call should exit with code
ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then
call 'run'. | [
"Start",
"the",
"process",
"with",
"daemonization",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L24-L49 | train | 47,351 |
kevinconway/daemons | daemons/startstop/simple.py | SimpleStartStopManager.stop | def stop(self):
"""Stop the daemonized process.
If the process is already stopped this call should exit successfully.
If the process cannot be stopped this call should exit with code
STOP_FAILED.
"""
if self.pid is None:
return None
try:
while True:
self.send(signal.SIGTERM)
time.sleep(0.1)
except RuntimeError as err:
if "No such process" in str(err):
LOG.info("Succesfully stopped the process.")
return None
LOG.exception("Failed to stop the process:")
sys.exit(exit.STOP_FAILED)
except TypeError as err:
if "an integer is required" in str(err):
LOG.info("Succesfully stopped the process.")
return None
LOG.exception("Failed to stop the process:")
sys.exit(exit.STOP_FAILED) | python | def stop(self):
"""Stop the daemonized process.
If the process is already stopped this call should exit successfully.
If the process cannot be stopped this call should exit with code
STOP_FAILED.
"""
if self.pid is None:
return None
try:
while True:
self.send(signal.SIGTERM)
time.sleep(0.1)
except RuntimeError as err:
if "No such process" in str(err):
LOG.info("Succesfully stopped the process.")
return None
LOG.exception("Failed to stop the process:")
sys.exit(exit.STOP_FAILED)
except TypeError as err:
if "an integer is required" in str(err):
LOG.info("Succesfully stopped the process.")
return None
LOG.exception("Failed to stop the process:")
sys.exit(exit.STOP_FAILED) | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"pid",
"is",
"None",
":",
"return",
"None",
"try",
":",
"while",
"True",
":",
"self",
".",
"send",
"(",
"signal",
".",
"SIGTERM",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"except",
"Runt... | Stop the daemonized process.
If the process is already stopped this call should exit successfully.
If the process cannot be stopped this call should exit with code
STOP_FAILED. | [
"Stop",
"the",
"daemonized",
"process",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L51-L87 | train | 47,352 |
kevinconway/daemons | daemons/signal/simple.py | SimpleSignalManager.handle | def handle(self, signum, handler):
"""Set a function to run when the given signal is recieved.
Multiple handlers may be assigned to a single signal. The order of
handlers does not need to be preserved.
'signum' must be an integer representing a signal.
'handler' must be a callable.
"""
if not isinstance(signum, int):
raise TypeError(
"Signals must be given as integers. Got {0}.".format(
type(signum),
),
)
if not callable(handler):
raise TypeError(
"Signal handlers must be callable.",
)
signal.signal(signum, self._handle_signals)
self._handlers[signum].append(handler) | python | def handle(self, signum, handler):
"""Set a function to run when the given signal is recieved.
Multiple handlers may be assigned to a single signal. The order of
handlers does not need to be preserved.
'signum' must be an integer representing a signal.
'handler' must be a callable.
"""
if not isinstance(signum, int):
raise TypeError(
"Signals must be given as integers. Got {0}.".format(
type(signum),
),
)
if not callable(handler):
raise TypeError(
"Signal handlers must be callable.",
)
signal.signal(signum, self._handle_signals)
self._handlers[signum].append(handler) | [
"def",
"handle",
"(",
"self",
",",
"signum",
",",
"handler",
")",
":",
"if",
"not",
"isinstance",
"(",
"signum",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Signals must be given as integers. Got {0}.\"",
".",
"format",
"(",
"type",
"(",
"signum",
")... | Set a function to run when the given signal is recieved.
Multiple handlers may be assigned to a single signal. The order of
handlers does not need to be preserved.
'signum' must be an integer representing a signal.
'handler' must be a callable. | [
"Set",
"a",
"function",
"to",
"run",
"when",
"the",
"given",
"signal",
"is",
"recieved",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L43-L68 | train | 47,353 |
kevinconway/daemons | daemons/signal/simple.py | SimpleSignalManager.send | def send(self, signum):
"""Send the given signal to the running process.
If the process is not running a RuntimeError with a message of "No such
process" should be emitted.
"""
if not isinstance(signum, int):
raise TypeError(
"Signals must be given as integers. Got {0}.".format(
type(signum),
),
)
try:
os.kill(self.pid, signum)
except OSError as err:
if "No such process" in err.strerror:
raise RuntimeError("No such process {0}.".format(self.pid))
raise err | python | def send(self, signum):
"""Send the given signal to the running process.
If the process is not running a RuntimeError with a message of "No such
process" should be emitted.
"""
if not isinstance(signum, int):
raise TypeError(
"Signals must be given as integers. Got {0}.".format(
type(signum),
),
)
try:
os.kill(self.pid, signum)
except OSError as err:
if "No such process" in err.strerror:
raise RuntimeError("No such process {0}.".format(self.pid))
raise err | [
"def",
"send",
"(",
"self",
",",
"signum",
")",
":",
"if",
"not",
"isinstance",
"(",
"signum",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Signals must be given as integers. Got {0}.\"",
".",
"format",
"(",
"type",
"(",
"signum",
")",
",",
")",
","... | Send the given signal to the running process.
If the process is not running a RuntimeError with a message of "No such
process" should be emitted. | [
"Send",
"the",
"given",
"signal",
"to",
"the",
"running",
"process",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L70-L94 | train | 47,354 |
kevinconway/daemons | daemons/signal/simple.py | SimpleSignalManager._handle_signals | def _handle_signals(self, signum, frame):
"""Handler for all signals.
This method must be used to handle all signals for the process. It is
responsible for runnin the appropriate signal handlers registered with
the 'handle' method unless they are shutdown signals. Shutdown signals
must trigger the 'shutdown' method.
"""
if signum in self.kill_signals:
return self.shutdown(signum)
for handler in self._handlers[signum]:
handler() | python | def _handle_signals(self, signum, frame):
"""Handler for all signals.
This method must be used to handle all signals for the process. It is
responsible for runnin the appropriate signal handlers registered with
the 'handle' method unless they are shutdown signals. Shutdown signals
must trigger the 'shutdown' method.
"""
if signum in self.kill_signals:
return self.shutdown(signum)
for handler in self._handlers[signum]:
handler() | [
"def",
"_handle_signals",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"if",
"signum",
"in",
"self",
".",
"kill_signals",
":",
"return",
"self",
".",
"shutdown",
"(",
"signum",
")",
"for",
"handler",
"in",
"self",
".",
"_handlers",
"[",
"signum",
... | Handler for all signals.
This method must be used to handle all signals for the process. It is
responsible for runnin the appropriate signal handlers registered with
the 'handle' method unless they are shutdown signals. Shutdown signals
must trigger the 'shutdown' method. | [
"Handler",
"for",
"all",
"signals",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L96-L110 | train | 47,355 |
kevinconway/daemons | daemons/signal/simple.py | SimpleSignalManager.shutdown | def shutdown(self, signum):
"""Handle all signals which trigger a process stop.
This method should run all appropriate signal handlers registered
through the 'handle' method. At the end it should cause the process
to exit with a status code. If any of the handlers raise an exception
the exit code should be SHUTDOWN_FAILED otherwise SUCCESS.
"""
dirty = False
for handler in self._handlers[signum]:
try:
handler()
except:
LOG.exception("A shutdown handler failed to execute:")
dirty = True
del self.pid
if dirty:
sys.exit(exit.SHUTDOWN_FAILED)
return None
sys.exit(exit.SUCCESS)
return None | python | def shutdown(self, signum):
"""Handle all signals which trigger a process stop.
This method should run all appropriate signal handlers registered
through the 'handle' method. At the end it should cause the process
to exit with a status code. If any of the handlers raise an exception
the exit code should be SHUTDOWN_FAILED otherwise SUCCESS.
"""
dirty = False
for handler in self._handlers[signum]:
try:
handler()
except:
LOG.exception("A shutdown handler failed to execute:")
dirty = True
del self.pid
if dirty:
sys.exit(exit.SHUTDOWN_FAILED)
return None
sys.exit(exit.SUCCESS)
return None | [
"def",
"shutdown",
"(",
"self",
",",
"signum",
")",
":",
"dirty",
"=",
"False",
"for",
"handler",
"in",
"self",
".",
"_handlers",
"[",
"signum",
"]",
":",
"try",
":",
"handler",
"(",
")",
"except",
":",
"LOG",
".",
"exception",
"(",
"\"A shutdown handl... | Handle all signals which trigger a process stop.
This method should run all appropriate signal handlers registered
through the 'handle' method. At the end it should cause the process
to exit with a status code. If any of the handlers raise an exception
the exit code should be SHUTDOWN_FAILED otherwise SUCCESS. | [
"Handle",
"all",
"signals",
"which",
"trigger",
"a",
"process",
"stop",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L112-L140 | train | 47,356 |
DEIB-GECO/PyGMQL | gmql/dataset/loaders/Loader.py | load_from_remote | def load_from_remote(remote_name, owner=None):
""" Loads the data from a remote repository.
:param remote_name: The name of the dataset in the remote repository
:param owner: (optional) The owner of the dataset. If nothing is provided, the current user
is used. For public datasets use 'public'.
:return: A new GMQLDataset or a GDataframe
"""
from .. import GMQLDataset
pmg = get_python_manager()
remote_manager = get_remote_manager()
parser = remote_manager.get_dataset_schema(remote_name, owner)
source_table = get_source_table()
id = source_table.search_source(remote=remote_name)
if id is None:
id = source_table.add_source(remote=remote_name, parser=parser)
index = pmg.read_dataset(str(id), parser.get_gmql_parser())
remote_sources = [id]
return GMQLDataset.GMQLDataset(index=index, location="remote", path_or_name=remote_name,
remote_sources=remote_sources) | python | def load_from_remote(remote_name, owner=None):
""" Loads the data from a remote repository.
:param remote_name: The name of the dataset in the remote repository
:param owner: (optional) The owner of the dataset. If nothing is provided, the current user
is used. For public datasets use 'public'.
:return: A new GMQLDataset or a GDataframe
"""
from .. import GMQLDataset
pmg = get_python_manager()
remote_manager = get_remote_manager()
parser = remote_manager.get_dataset_schema(remote_name, owner)
source_table = get_source_table()
id = source_table.search_source(remote=remote_name)
if id is None:
id = source_table.add_source(remote=remote_name, parser=parser)
index = pmg.read_dataset(str(id), parser.get_gmql_parser())
remote_sources = [id]
return GMQLDataset.GMQLDataset(index=index, location="remote", path_or_name=remote_name,
remote_sources=remote_sources) | [
"def",
"load_from_remote",
"(",
"remote_name",
",",
"owner",
"=",
"None",
")",
":",
"from",
".",
".",
"import",
"GMQLDataset",
"pmg",
"=",
"get_python_manager",
"(",
")",
"remote_manager",
"=",
"get_remote_manager",
"(",
")",
"parser",
"=",
"remote_manager",
"... | Loads the data from a remote repository.
:param remote_name: The name of the dataset in the remote repository
:param owner: (optional) The owner of the dataset. If nothing is provided, the current user
is used. For public datasets use 'public'.
:return: A new GMQLDataset or a GDataframe | [
"Loads",
"the",
"data",
"from",
"a",
"remote",
"repository",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Loader.py#L145-L166 | train | 47,357 |
nathan-hoad/python-iwlib | iwlib/iwlist.py | scan | def scan(interface):
"""Perform a scan for access points in the area.
Arguments:
interface - device to use for scanning (e.g. eth1, wlan0).
"""
interface = _get_bytes(interface)
head = ffi.new('wireless_scan_head *')
with iwlib_socket() as sock:
range = _get_range_info(interface, sock=sock)
if iwlib.iw_scan(sock, interface, range.we_version_compiled, head) != 0:
errno = ffi.errno
strerror = "Error while scanning: %s" % os.strerror(errno)
raise OSError(errno, strerror)
results = []
scan = head.result
buf = ffi.new('char []', 1024)
while scan != ffi.NULL:
parsed_scan = {}
if scan.b.has_mode:
parsed_scan['Mode'] = ffi.string(iwlib.iw_operation_mode[scan.b.mode])
if scan.b.essid_on:
parsed_scan['ESSID'] = ffi.string(scan.b.essid)
else:
parsed_scan['ESSID'] = b'Auto'
if scan.has_ap_addr:
iwlib.iw_ether_ntop(
ffi.cast('struct ether_addr *', scan.ap_addr.sa_data), buf)
if scan.b.has_mode and scan.b.mode == iwlib.IW_MODE_ADHOC:
parsed_scan['Cell'] = ffi.string(buf)
else:
parsed_scan['Access Point'] = ffi.string(buf)
if scan.has_maxbitrate:
iwlib.iw_print_bitrate(buf, len(buf), scan.maxbitrate.value)
parsed_scan['BitRate'] = ffi.string(buf)
if scan.has_stats:
parsed_scan['stats'] = _parse_stats(scan.stats)
results.append(parsed_scan)
scan = scan.next
return results | python | def scan(interface):
"""Perform a scan for access points in the area.
Arguments:
interface - device to use for scanning (e.g. eth1, wlan0).
"""
interface = _get_bytes(interface)
head = ffi.new('wireless_scan_head *')
with iwlib_socket() as sock:
range = _get_range_info(interface, sock=sock)
if iwlib.iw_scan(sock, interface, range.we_version_compiled, head) != 0:
errno = ffi.errno
strerror = "Error while scanning: %s" % os.strerror(errno)
raise OSError(errno, strerror)
results = []
scan = head.result
buf = ffi.new('char []', 1024)
while scan != ffi.NULL:
parsed_scan = {}
if scan.b.has_mode:
parsed_scan['Mode'] = ffi.string(iwlib.iw_operation_mode[scan.b.mode])
if scan.b.essid_on:
parsed_scan['ESSID'] = ffi.string(scan.b.essid)
else:
parsed_scan['ESSID'] = b'Auto'
if scan.has_ap_addr:
iwlib.iw_ether_ntop(
ffi.cast('struct ether_addr *', scan.ap_addr.sa_data), buf)
if scan.b.has_mode and scan.b.mode == iwlib.IW_MODE_ADHOC:
parsed_scan['Cell'] = ffi.string(buf)
else:
parsed_scan['Access Point'] = ffi.string(buf)
if scan.has_maxbitrate:
iwlib.iw_print_bitrate(buf, len(buf), scan.maxbitrate.value)
parsed_scan['BitRate'] = ffi.string(buf)
if scan.has_stats:
parsed_scan['stats'] = _parse_stats(scan.stats)
results.append(parsed_scan)
scan = scan.next
return results | [
"def",
"scan",
"(",
"interface",
")",
":",
"interface",
"=",
"_get_bytes",
"(",
"interface",
")",
"head",
"=",
"ffi",
".",
"new",
"(",
"'wireless_scan_head *'",
")",
"with",
"iwlib_socket",
"(",
")",
"as",
"sock",
":",
"range",
"=",
"_get_range_info",
"(",... | Perform a scan for access points in the area.
Arguments:
interface - device to use for scanning (e.g. eth1, wlan0). | [
"Perform",
"a",
"scan",
"for",
"access",
"points",
"in",
"the",
"area",
"."
] | f7604de0a27709fca139c4bada58263bdce4f08e | https://github.com/nathan-hoad/python-iwlib/blob/f7604de0a27709fca139c4bada58263bdce4f08e/iwlib/iwlist.py#L21-L74 | train | 47,358 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.logout | def logout(self):
""" Logout from the remote account
:return: None
"""
url = self.address + "/logout"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error"))) | python | def logout(self):
""" Logout from the remote account
:return: None
"""
url = self.address + "/logout"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error"))) | [
"def",
"logout",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"address",
"+",
"\"/logout\"",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"header",
")",
"if... | Logout from the remote account
:return: None | [
"Logout",
"from",
"the",
"remote",
"account"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L168-L177 | train | 47,359 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.get_dataset_list | def get_dataset_list(self):
""" Returns the list of available datasets for the current user.
:return: a pandas Dataframe
"""
url = self.address + "/datasets"
header = self.__check_authentication()
response = requests.get(url, headers=header)
response = response.json()
datasets = response.get("datasets")
res = pd.DataFrame.from_dict(datasets)
return self.process_info_list(res, "info") | python | def get_dataset_list(self):
""" Returns the list of available datasets for the current user.
:return: a pandas Dataframe
"""
url = self.address + "/datasets"
header = self.__check_authentication()
response = requests.get(url, headers=header)
response = response.json()
datasets = response.get("datasets")
res = pd.DataFrame.from_dict(datasets)
return self.process_info_list(res, "info") | [
"def",
"get_dataset_list",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"address",
"+",
"\"/datasets\"",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"header",
... | Returns the list of available datasets for the current user.
:return: a pandas Dataframe | [
"Returns",
"the",
"list",
"of",
"available",
"datasets",
"for",
"the",
"current",
"user",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L196-L207 | train | 47,360 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.get_dataset_samples | def get_dataset_samples(self, dataset_name, owner=None):
""" Get the list of samples of a specific remote dataset.
:param dataset_name: the dataset name
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a pandas Dataframe
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
header = self.__check_authentication()
url = self.address + "/datasets/" + dataset_name
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
samples = response.get("samples")
if len(samples) == 0:
return None
res = pd.DataFrame.from_dict(samples)
return self.process_info_list(res, "info") | python | def get_dataset_samples(self, dataset_name, owner=None):
""" Get the list of samples of a specific remote dataset.
:param dataset_name: the dataset name
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a pandas Dataframe
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
header = self.__check_authentication()
url = self.address + "/datasets/" + dataset_name
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
samples = response.get("samples")
if len(samples) == 0:
return None
res = pd.DataFrame.from_dict(samples)
return self.process_info_list(res, "info") | [
"def",
"get_dataset_samples",
"(",
"self",
",",
"dataset_name",
",",
"owner",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"owner",
",",
"str",
")",
":",
"owner",
"=",
"owner",
".",
"lower",
"(",
")",
"dataset_name",
"=",
"owner",
"+",
"\".\"",
"+",... | Get the list of samples of a specific remote dataset.
:param dataset_name: the dataset name
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a pandas Dataframe | [
"Get",
"the",
"list",
"of",
"samples",
"of",
"a",
"specific",
"remote",
"dataset",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L209-L232 | train | 47,361 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.get_dataset_schema | def get_dataset_schema(self, dataset_name, owner=None):
""" Given a dataset name, it returns a BedParser coherent with the schema of it
:param dataset_name: a dataset name on the repository
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a BedParser
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
url = self.address + "/datasets/" + dataset_name+"/schema"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
name = response.get("name")
schemaType = response.get("type")
coordinates_system = response.get("coordinate_system")
fields = response.get("fields")
i = 0
chrPos, startPos, stopPos, strandPos = None, None, None, None
otherPos = []
if schemaType == GTF:
chrPos = 0 # seqname
startPos = 3 # start
stopPos = 4 # end
strandPos = 6 # strand
otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'),
(5, 'score', 'float'), (7, 'frame', 'string')]
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() not in {'seqname', 'start', 'end', 'strand',
'source', 'feature', 'score', 'frame'}:
otherPos.append((i, fieldName, fieldType))
i += 1
else:
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() in chr_aliases and chrPos is None:
chrPos = i
elif fieldName.lower() in start_aliases and startPos is None:
startPos = i
elif fieldName.lower() in stop_aliases and stopPos is None:
stopPos = i
elif fieldName.lower() in strand_aliases and strandPos is None:
strandPos = i
else: # other positions
otherPos.append((i, fieldName, fieldType))
i += 1
if len(otherPos) == 0:
otherPos = None
return RegionParser(chrPos=chrPos,
startPos=startPos,
stopPos=stopPos,
strandPos=strandPos,
otherPos=otherPos,
schema_format=schemaType,
coordinate_system=coordinates_system,
delimiter="\t", parser_name=name) | python | def get_dataset_schema(self, dataset_name, owner=None):
""" Given a dataset name, it returns a BedParser coherent with the schema of it
:param dataset_name: a dataset name on the repository
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a BedParser
"""
if isinstance(owner, str):
owner = owner.lower()
dataset_name = owner + "." + dataset_name
url = self.address + "/datasets/" + dataset_name+"/schema"
header = self.__check_authentication()
response = requests.get(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
response = response.json()
name = response.get("name")
schemaType = response.get("type")
coordinates_system = response.get("coordinate_system")
fields = response.get("fields")
i = 0
chrPos, startPos, stopPos, strandPos = None, None, None, None
otherPos = []
if schemaType == GTF:
chrPos = 0 # seqname
startPos = 3 # start
stopPos = 4 # end
strandPos = 6 # strand
otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'),
(5, 'score', 'float'), (7, 'frame', 'string')]
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() not in {'seqname', 'start', 'end', 'strand',
'source', 'feature', 'score', 'frame'}:
otherPos.append((i, fieldName, fieldType))
i += 1
else:
for field in fields:
fieldName = field.get("name")
fieldType = field.get("type").lower()
if fieldName.lower() in chr_aliases and chrPos is None:
chrPos = i
elif fieldName.lower() in start_aliases and startPos is None:
startPos = i
elif fieldName.lower() in stop_aliases and stopPos is None:
stopPos = i
elif fieldName.lower() in strand_aliases and strandPos is None:
strandPos = i
else: # other positions
otherPos.append((i, fieldName, fieldType))
i += 1
if len(otherPos) == 0:
otherPos = None
return RegionParser(chrPos=chrPos,
startPos=startPos,
stopPos=stopPos,
strandPos=strandPos,
otherPos=otherPos,
schema_format=schemaType,
coordinate_system=coordinates_system,
delimiter="\t", parser_name=name) | [
"def",
"get_dataset_schema",
"(",
"self",
",",
"dataset_name",
",",
"owner",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"owner",
",",
"str",
")",
":",
"owner",
"=",
"owner",
".",
"lower",
"(",
")",
"dataset_name",
"=",
"owner",
"+",
"\".\"",
"+",
... | Given a dataset name, it returns a BedParser coherent with the schema of it
:param dataset_name: a dataset name on the repository
:param owner: (optional) who owns the dataset. If it is not specified, the current user
is used. For public dataset use 'public'.
:return: a BedParser | [
"Given",
"a",
"dataset",
"name",
"it",
"returns",
"a",
"BedParser",
"coherent",
"with",
"the",
"schema",
"of",
"it"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L234-L304 | train | 47,362 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.upload_dataset | def upload_dataset(self, dataset, dataset_name, schema_path=None):
""" Upload to the repository an entire dataset from a local path
:param dataset: the local path of the dataset
:param dataset_name: the name you want to assign to the dataset remotely
:return: None
"""
url = self.address + "/datasets/" + dataset_name + "/uploadSample"
header = self.__check_authentication()
fields = dict()
remove = False
if isinstance(dataset, GDataframe):
tmp_path = TempFileManager.get_new_dataset_tmp_folder()
dataset.to_dataset_files(local_path=tmp_path)
dataset = tmp_path
remove = True
# a path is provided
if not isinstance(dataset, str):
raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset)))
file_paths, schema_path_found = Loader.get_file_paths(dataset)
if schema_path is None:
schema_path = schema_path_found
fields['schema'] = (os.path.basename(schema_path), open(schema_path, "rb"), 'application/octet-stream')
for i, file in enumerate(file_paths):
fields["file"+str(i + 1)] = (os.path.basename(file), open(file, "rb"), 'application/octet-stream')
encoder = MultipartEncoder(fields)
callback = create_callback(encoder, len(fields))
m_encoder = MultipartEncoderMonitor(encoder, callback)
header['Content-Type'] = m_encoder.content_type
self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name))
response = requests.post(url, data=m_encoder,
headers=header)
# closing files
for fn in fields.keys():
_, f, _ = fields[fn]
f.close()
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.content))
if remove:
TempFileManager.delete_tmp_dataset(dataset) | python | def upload_dataset(self, dataset, dataset_name, schema_path=None):
""" Upload to the repository an entire dataset from a local path
:param dataset: the local path of the dataset
:param dataset_name: the name you want to assign to the dataset remotely
:return: None
"""
url = self.address + "/datasets/" + dataset_name + "/uploadSample"
header = self.__check_authentication()
fields = dict()
remove = False
if isinstance(dataset, GDataframe):
tmp_path = TempFileManager.get_new_dataset_tmp_folder()
dataset.to_dataset_files(local_path=tmp_path)
dataset = tmp_path
remove = True
# a path is provided
if not isinstance(dataset, str):
raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset)))
file_paths, schema_path_found = Loader.get_file_paths(dataset)
if schema_path is None:
schema_path = schema_path_found
fields['schema'] = (os.path.basename(schema_path), open(schema_path, "rb"), 'application/octet-stream')
for i, file in enumerate(file_paths):
fields["file"+str(i + 1)] = (os.path.basename(file), open(file, "rb"), 'application/octet-stream')
encoder = MultipartEncoder(fields)
callback = create_callback(encoder, len(fields))
m_encoder = MultipartEncoderMonitor(encoder, callback)
header['Content-Type'] = m_encoder.content_type
self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name))
response = requests.post(url, data=m_encoder,
headers=header)
# closing files
for fn in fields.keys():
_, f, _ = fields[fn]
f.close()
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.content))
if remove:
TempFileManager.delete_tmp_dataset(dataset) | [
"def",
"upload_dataset",
"(",
"self",
",",
"dataset",
",",
"dataset_name",
",",
"schema_path",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"address",
"+",
"\"/datasets/\"",
"+",
"dataset_name",
"+",
"\"/uploadSample\"",
"header",
"=",
"self",
".",
"__che... | Upload to the repository an entire dataset from a local path
:param dataset: the local path of the dataset
:param dataset_name: the name you want to assign to the dataset remotely
:return: None | [
"Upload",
"to",
"the",
"repository",
"an",
"entire",
"dataset",
"from",
"a",
"local",
"path"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L306-L356 | train | 47,363 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.delete_dataset | def delete_dataset(self, dataset_name):
""" Deletes the dataset having the specified name
:param dataset_name: the name that the dataset has on the repository
:return: None
"""
url = self.address + "/datasets/" + dataset_name
header = self.__check_authentication()
response = requests.delete(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name)) | python | def delete_dataset(self, dataset_name):
""" Deletes the dataset having the specified name
:param dataset_name: the name that the dataset has on the repository
:return: None
"""
url = self.address + "/datasets/" + dataset_name
header = self.__check_authentication()
response = requests.delete(url, headers=header)
if response.status_code != 200:
raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name)) | [
"def",
"delete_dataset",
"(",
"self",
",",
"dataset_name",
")",
":",
"url",
"=",
"self",
".",
"address",
"+",
"\"/datasets/\"",
"+",
"dataset_name",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"response",
"=",
"requests",
".",
"delete",
"... | Deletes the dataset having the specified name
:param dataset_name: the name that the dataset has on the repository
:return: None | [
"Deletes",
"the",
"dataset",
"having",
"the",
"specified",
"name"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L358-L369 | train | 47,364 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.download_dataset | def download_dataset(self, dataset_name, local_path, how="stream"):
""" It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
downloads the dataset sample by sample
:return: None
"""
if not os.path.isdir(local_path):
os.makedirs(local_path)
else:
raise ValueError("Path {} already exists!".format(local_path))
local_path = os.path.join(local_path, FILES_FOLDER)
os.makedirs(local_path)
if how == 'zip':
return self.download_as_zip(dataset_name, local_path)
elif how == 'stream':
return self.download_as_stream(dataset_name, local_path)
else:
raise ValueError("how must be {'zip', 'stream'}") | python | def download_dataset(self, dataset_name, local_path, how="stream"):
""" It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
downloads the dataset sample by sample
:return: None
"""
if not os.path.isdir(local_path):
os.makedirs(local_path)
else:
raise ValueError("Path {} already exists!".format(local_path))
local_path = os.path.join(local_path, FILES_FOLDER)
os.makedirs(local_path)
if how == 'zip':
return self.download_as_zip(dataset_name, local_path)
elif how == 'stream':
return self.download_as_stream(dataset_name, local_path)
else:
raise ValueError("how must be {'zip', 'stream'}") | [
"def",
"download_dataset",
"(",
"self",
",",
"dataset_name",
",",
"local_path",
",",
"how",
"=",
"\"stream\"",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"local_path",
")",
":",
"os",
".",
"makedirs",
"(",
"local_path",
")",
"else",
"... | It downloads from the repository the specified dataset and puts it
in the specified local folder
:param dataset_name: the name the dataset has in the repository
:param local_path: where you want to save the dataset
:param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream'
downloads the dataset sample by sample
:return: None | [
"It",
"downloads",
"from",
"the",
"repository",
"the",
"specified",
"dataset",
"and",
"puts",
"it",
"in",
"the",
"specified",
"local",
"folder"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L375-L398 | train | 47,365 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.query | def query(self, query, output_path=None, file_name="query", output="tab"):
""" Execute a GMQL textual query on the remote server.
:param query: the string containing the query
:param output_path (optional): where to store the results locally. If specified
the results are downloaded locally
:param file_name (optional): the name of the query
:param output (optional): how to save the results. It can be "tab" or "gtf"
:return: a pandas dataframe with the dictionary ids of the results
"""
header = self.__check_authentication()
header['Content-Type'] = "text/plain"
output = output.lower()
if output not in ['tab', 'gtf']:
raise ValueError("output must be 'tab' or 'gtf'")
url = self.address + "/queries/run/" + file_name + '/' + output
response = requests.post(url, data=query, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
response = response.json()
jobid = response.get("id")
self.logger.debug("JobId: {}. Waiting for the result".format(jobid))
status_resp = self._wait_for_result(jobid)
datasets = status_resp.get("datasets")
return self.__process_result_datasets(datasets, output_path) | python | def query(self, query, output_path=None, file_name="query", output="tab"):
""" Execute a GMQL textual query on the remote server.
:param query: the string containing the query
:param output_path (optional): where to store the results locally. If specified
the results are downloaded locally
:param file_name (optional): the name of the query
:param output (optional): how to save the results. It can be "tab" or "gtf"
:return: a pandas dataframe with the dictionary ids of the results
"""
header = self.__check_authentication()
header['Content-Type'] = "text/plain"
output = output.lower()
if output not in ['tab', 'gtf']:
raise ValueError("output must be 'tab' or 'gtf'")
url = self.address + "/queries/run/" + file_name + '/' + output
response = requests.post(url, data=query, headers=header)
if response.status_code != 200:
raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
response = response.json()
jobid = response.get("id")
self.logger.debug("JobId: {}. Waiting for the result".format(jobid))
status_resp = self._wait_for_result(jobid)
datasets = status_resp.get("datasets")
return self.__process_result_datasets(datasets, output_path) | [
"def",
"query",
"(",
"self",
",",
"query",
",",
"output_path",
"=",
"None",
",",
"file_name",
"=",
"\"query\"",
",",
"output",
"=",
"\"tab\"",
")",
":",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"header",
"[",
"'Content-Type'",
"]",
... | Execute a GMQL textual query on the remote server.
:param query: the string containing the query
:param output_path (optional): where to store the results locally. If specified
the results are downloaded locally
:param file_name (optional): the name of the query
:param output (optional): how to save the results. It can be "tab" or "gtf"
:return: a pandas dataframe with the dictionary ids of the results | [
"Execute",
"a",
"GMQL",
"textual",
"query",
"on",
"the",
"remote",
"server",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L490-L516 | train | 47,366 |
DEIB-GECO/PyGMQL | gmql/RemoteConnection/RemoteManager.py | RemoteManager.trace_job | def trace_job(self, jobId):
""" Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information
"""
header = self.__check_authentication()
status_url = self.address + "/jobs/" + jobId + "/trace"
status_resp = requests.get(status_url, headers=header)
if status_resp.status_code != 200:
raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error")))
return status_resp.json() | python | def trace_job(self, jobId):
""" Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information
"""
header = self.__check_authentication()
status_url = self.address + "/jobs/" + jobId + "/trace"
status_resp = requests.get(status_url, headers=header)
if status_resp.status_code != 200:
raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error")))
return status_resp.json() | [
"def",
"trace_job",
"(",
"self",
",",
"jobId",
")",
":",
"header",
"=",
"self",
".",
"__check_authentication",
"(",
")",
"status_url",
"=",
"self",
".",
"address",
"+",
"\"/jobs/\"",
"+",
"jobId",
"+",
"\"/trace\"",
"status_resp",
"=",
"requests",
".",
"ge... | Get information about the specified remote job
:param jobId: the job identifier
:return: a dictionary with the information | [
"Get",
"information",
"about",
"the",
"specified",
"remote",
"job"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L611-L622 | train | 47,367 |
DEIB-GECO/PyGMQL | gmql/settings.py | set_mode | def set_mode(how):
""" Sets the behavior of the API
:param how: if 'remote' all the execution is performed on the remote server; if 'local' all
it is executed locally. Default = 'local'
:return: None
"""
global __mode
if how == "local":
__mode = how
elif how == "remote":
__mode = how
else:
raise ValueError("how must be 'local' or 'remote'") | python | def set_mode(how):
""" Sets the behavior of the API
:param how: if 'remote' all the execution is performed on the remote server; if 'local' all
it is executed locally. Default = 'local'
:return: None
"""
global __mode
if how == "local":
__mode = how
elif how == "remote":
__mode = how
else:
raise ValueError("how must be 'local' or 'remote'") | [
"def",
"set_mode",
"(",
"how",
")",
":",
"global",
"__mode",
"if",
"how",
"==",
"\"local\"",
":",
"__mode",
"=",
"how",
"elif",
"how",
"==",
"\"remote\"",
":",
"__mode",
"=",
"how",
"else",
":",
"raise",
"ValueError",
"(",
"\"how must be 'local' or 'remote'\... | Sets the behavior of the API
:param how: if 'remote' all the execution is performed on the remote server; if 'local' all
it is executed locally. Default = 'local'
:return: None | [
"Sets",
"the",
"behavior",
"of",
"the",
"API"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L79-L92 | train | 47,368 |
DEIB-GECO/PyGMQL | gmql/settings.py | set_progress | def set_progress(how):
""" Enables or disables the progress bars for the loading, writing and downloading
of datasets
:param how: True if you want the progress bar, False otherwise
:return: None
Example::
import gmql as gl
gl.set_progress(True) # abilitates progress bars
# ....do something...
gl.set_progress(False) # removes progress bars
# ....do something...
"""
global __progress_bar
if isinstance(how, bool):
__progress_bar = how
else:
raise ValueError(
"how must be a boolean. {} was found".format(type(how))) | python | def set_progress(how):
""" Enables or disables the progress bars for the loading, writing and downloading
of datasets
:param how: True if you want the progress bar, False otherwise
:return: None
Example::
import gmql as gl
gl.set_progress(True) # abilitates progress bars
# ....do something...
gl.set_progress(False) # removes progress bars
# ....do something...
"""
global __progress_bar
if isinstance(how, bool):
__progress_bar = how
else:
raise ValueError(
"how must be a boolean. {} was found".format(type(how))) | [
"def",
"set_progress",
"(",
"how",
")",
":",
"global",
"__progress_bar",
"if",
"isinstance",
"(",
"how",
",",
"bool",
")",
":",
"__progress_bar",
"=",
"how",
"else",
":",
"raise",
"ValueError",
"(",
"\"how must be a boolean. {} was found\"",
".",
"format",
"(",
... | Enables or disables the progress bars for the loading, writing and downloading
of datasets
:param how: True if you want the progress bar, False otherwise
:return: None
Example::
import gmql as gl
gl.set_progress(True) # abilitates progress bars
# ....do something...
gl.set_progress(False) # removes progress bars
# ....do something... | [
"Enables",
"or",
"disables",
"the",
"progress",
"bars",
"for",
"the",
"loading",
"writing",
"and",
"downloading",
"of",
"datasets"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L107-L128 | train | 47,369 |
DEIB-GECO/PyGMQL | gmql/settings.py | set_meta_profiling | def set_meta_profiling(how):
""" Enables or disables the profiling of metadata at the loading of a GMQLDataset
:param how: True if you want to analyze the metadata when a GMQLDataset is created
by a load_from_*. False otherwise. (Default=True)
:return: None
"""
global __metadata_profiling
if isinstance(how, bool):
__metadata_profiling = how
else:
raise TypeError("how must be boolean. {} was provided".format(type(how))) | python | def set_meta_profiling(how):
""" Enables or disables the profiling of metadata at the loading of a GMQLDataset
:param how: True if you want to analyze the metadata when a GMQLDataset is created
by a load_from_*. False otherwise. (Default=True)
:return: None
"""
global __metadata_profiling
if isinstance(how, bool):
__metadata_profiling = how
else:
raise TypeError("how must be boolean. {} was provided".format(type(how))) | [
"def",
"set_meta_profiling",
"(",
"how",
")",
":",
"global",
"__metadata_profiling",
"if",
"isinstance",
"(",
"how",
",",
"bool",
")",
":",
"__metadata_profiling",
"=",
"how",
"else",
":",
"raise",
"TypeError",
"(",
"\"how must be boolean. {} was provided\"",
".",
... | Enables or disables the profiling of metadata at the loading of a GMQLDataset
:param how: True if you want to analyze the metadata when a GMQLDataset is created
by a load_from_*. False otherwise. (Default=True)
:return: None | [
"Enables",
"or",
"disables",
"the",
"profiling",
"of",
"metadata",
"at",
"the",
"loading",
"of",
"a",
"GMQLDataset"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L136-L147 | train | 47,370 |
DEIB-GECO/PyGMQL | gmql/dataset/parsers/RegionParser.py | RegionParser.parse_regions | def parse_regions(self, path):
""" Given a file path, it loads it into memory as a Pandas dataframe
:param path: file path
:return: a Pandas Dataframe
"""
if self.schema_format.lower() == GTF.lower():
res = self._parse_gtf_regions(path)
else:
res = self._parse_tab_regions(path)
return res | python | def parse_regions(self, path):
""" Given a file path, it loads it into memory as a Pandas dataframe
:param path: file path
:return: a Pandas Dataframe
"""
if self.schema_format.lower() == GTF.lower():
res = self._parse_gtf_regions(path)
else:
res = self._parse_tab_regions(path)
return res | [
"def",
"parse_regions",
"(",
"self",
",",
"path",
")",
":",
"if",
"self",
".",
"schema_format",
".",
"lower",
"(",
")",
"==",
"GTF",
".",
"lower",
"(",
")",
":",
"res",
"=",
"self",
".",
"_parse_gtf_regions",
"(",
"path",
")",
"else",
":",
"res",
"... | Given a file path, it loads it into memory as a Pandas dataframe
:param path: file path
:return: a Pandas Dataframe | [
"Given",
"a",
"file",
"path",
"it",
"loads",
"it",
"into",
"memory",
"as",
"a",
"Pandas",
"dataframe"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L101-L111 | train | 47,371 |
DEIB-GECO/PyGMQL | gmql/dataset/parsers/RegionParser.py | RegionParser.get_attributes | def get_attributes(self):
""" Returns the unordered list of attributes
:return: list of strings
"""
attr = ['chr', 'start', 'stop']
if self.strandPos is not None:
attr.append('strand')
if self.otherPos:
for i, o in enumerate(self.otherPos):
attr.append(o[1])
return attr | python | def get_attributes(self):
""" Returns the unordered list of attributes
:return: list of strings
"""
attr = ['chr', 'start', 'stop']
if self.strandPos is not None:
attr.append('strand')
if self.otherPos:
for i, o in enumerate(self.otherPos):
attr.append(o[1])
return attr | [
"def",
"get_attributes",
"(",
"self",
")",
":",
"attr",
"=",
"[",
"'chr'",
",",
"'start'",
",",
"'stop'",
"]",
"if",
"self",
".",
"strandPos",
"is",
"not",
"None",
":",
"attr",
".",
"append",
"(",
"'strand'",
")",
"if",
"self",
".",
"otherPos",
":",
... | Returns the unordered list of attributes
:return: list of strings | [
"Returns",
"the",
"unordered",
"list",
"of",
"attributes"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L153-L165 | train | 47,372 |
DEIB-GECO/PyGMQL | gmql/dataset/parsers/RegionParser.py | RegionParser.get_ordered_attributes | def get_ordered_attributes(self):
""" Returns the ordered list of attributes
:return: list of strings
"""
attrs = self.get_attributes()
attr_arr = np.array(attrs)
poss = [self.chrPos, self.startPos, self.stopPos]
if self.strandPos is not None:
poss.append(self.strandPos)
if self.otherPos:
for o in self.otherPos:
poss.append(o[0])
idx_sort = np.array(poss).argsort()
return attr_arr[idx_sort].tolist() | python | def get_ordered_attributes(self):
""" Returns the ordered list of attributes
:return: list of strings
"""
attrs = self.get_attributes()
attr_arr = np.array(attrs)
poss = [self.chrPos, self.startPos, self.stopPos]
if self.strandPos is not None:
poss.append(self.strandPos)
if self.otherPos:
for o in self.otherPos:
poss.append(o[0])
idx_sort = np.array(poss).argsort()
return attr_arr[idx_sort].tolist() | [
"def",
"get_ordered_attributes",
"(",
"self",
")",
":",
"attrs",
"=",
"self",
".",
"get_attributes",
"(",
")",
"attr_arr",
"=",
"np",
".",
"array",
"(",
"attrs",
")",
"poss",
"=",
"[",
"self",
".",
"chrPos",
",",
"self",
".",
"startPos",
",",
"self",
... | Returns the ordered list of attributes
:return: list of strings | [
"Returns",
"the",
"ordered",
"list",
"of",
"attributes"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L167-L182 | train | 47,373 |
DEIB-GECO/PyGMQL | gmql/dataset/parsers/RegionParser.py | RegionParser.get_types | def get_types(self):
""" Returns the unordered list of data types
:return: list of data types
"""
types = [str, int, int]
if self.strandPos is not None:
types.append(str)
if self.otherPos:
for o in self.otherPos:
types.append(o[2])
return types | python | def get_types(self):
""" Returns the unordered list of data types
:return: list of data types
"""
types = [str, int, int]
if self.strandPos is not None:
types.append(str)
if self.otherPos:
for o in self.otherPos:
types.append(o[2])
return types | [
"def",
"get_types",
"(",
"self",
")",
":",
"types",
"=",
"[",
"str",
",",
"int",
",",
"int",
"]",
"if",
"self",
".",
"strandPos",
"is",
"not",
"None",
":",
"types",
".",
"append",
"(",
"str",
")",
"if",
"self",
".",
"otherPos",
":",
"for",
"o",
... | Returns the unordered list of data types
:return: list of data types | [
"Returns",
"the",
"unordered",
"list",
"of",
"data",
"types"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L184-L196 | train | 47,374 |
DEIB-GECO/PyGMQL | gmql/dataset/parsers/RegionParser.py | RegionParser.get_ordered_types | def get_ordered_types(self):
""" Returns the ordered list of data types
:return: list of data types
"""
types = self.get_types()
types_arr = np.array(types)
poss = [self.chrPos, self.startPos, self.stopPos]
if self.strandPos is not None:
poss.append(self.strandPos)
if self.otherPos:
for o in self.otherPos:
poss.append(o[0])
idx_sort = np.array(poss).argsort()
return types_arr[idx_sort].tolist() | python | def get_ordered_types(self):
""" Returns the ordered list of data types
:return: list of data types
"""
types = self.get_types()
types_arr = np.array(types)
poss = [self.chrPos, self.startPos, self.stopPos]
if self.strandPos is not None:
poss.append(self.strandPos)
if self.otherPos:
for o in self.otherPos:
poss.append(o[0])
idx_sort = np.array(poss).argsort()
return types_arr[idx_sort].tolist() | [
"def",
"get_ordered_types",
"(",
"self",
")",
":",
"types",
"=",
"self",
".",
"get_types",
"(",
")",
"types_arr",
"=",
"np",
".",
"array",
"(",
"types",
")",
"poss",
"=",
"[",
"self",
".",
"chrPos",
",",
"self",
".",
"startPos",
",",
"self",
".",
"... | Returns the ordered list of data types
:return: list of data types | [
"Returns",
"the",
"ordered",
"list",
"of",
"data",
"types"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L212-L226 | train | 47,375 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.xmeans | def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False):
"""
Constructor of the x-means clustering.rst algorithm
:param initial_centers: Initial coordinates of centers of clusters that are represented by list: [center1, center2, ...]
Note: The dimensions of the initial centers should be same as of the dataset.
:param kmax: Maximum number of clusters that can be allocated.
:param tolerance: Stop condition for each iteration: if maximum value of change of centers of clusters is less than tolerance than algorithm will stop processing
:param criterion: Type of splitting creation.
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: returns the clustering.rst object
"""
model = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore)
return cls(model) | python | def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False):
"""
Constructor of the x-means clustering.rst algorithm
:param initial_centers: Initial coordinates of centers of clusters that are represented by list: [center1, center2, ...]
Note: The dimensions of the initial centers should be same as of the dataset.
:param kmax: Maximum number of clusters that can be allocated.
:param tolerance: Stop condition for each iteration: if maximum value of change of centers of clusters is less than tolerance than algorithm will stop processing
:param criterion: Type of splitting creation.
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: returns the clustering.rst object
"""
model = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore)
return cls(model) | [
"def",
"xmeans",
"(",
"cls",
",",
"initial_centers",
"=",
"None",
",",
"kmax",
"=",
"20",
",",
"tolerance",
"=",
"0.025",
",",
"criterion",
"=",
"splitting_type",
".",
"BAYESIAN_INFORMATION_CRITERION",
",",
"ccore",
"=",
"False",
")",
":",
"model",
"=",
"x... | Constructor of the x-means clustering.rst algorithm
:param initial_centers: Initial coordinates of centers of clusters that are represented by list: [center1, center2, ...]
Note: The dimensions of the initial centers should be same as of the dataset.
:param kmax: Maximum number of clusters that can be allocated.
:param tolerance: Stop condition for each iteration: if maximum value of change of centers of clusters is less than tolerance than algorithm will stop processing
:param criterion: Type of splitting creation.
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: returns the clustering.rst object | [
"Constructor",
"of",
"the",
"x",
"-",
"means",
"clustering",
".",
"rst",
"algorithm"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L26-L39 | train | 47,376 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.clarans | def clarans(cls, number_clusters, num_local, max_neighbour):
"""
Constructor of the CLARANS clustering.rst algorithm
:param number_clusters: the number of clusters to be allocated
:param num_local: the number of local minima obtained (amount of iterations for solving the problem).
:param max_neighbour: the number of local minima obtained (amount of iterations for solving the problem).
:return: the resulting clustering.rst object
"""
model = clarans(None, number_clusters, num_local, max_neighbour)
return cls(model) | python | def clarans(cls, number_clusters, num_local, max_neighbour):
"""
Constructor of the CLARANS clustering.rst algorithm
:param number_clusters: the number of clusters to be allocated
:param num_local: the number of local minima obtained (amount of iterations for solving the problem).
:param max_neighbour: the number of local minima obtained (amount of iterations for solving the problem).
:return: the resulting clustering.rst object
"""
model = clarans(None, number_clusters, num_local, max_neighbour)
return cls(model) | [
"def",
"clarans",
"(",
"cls",
",",
"number_clusters",
",",
"num_local",
",",
"max_neighbour",
")",
":",
"model",
"=",
"clarans",
"(",
"None",
",",
"number_clusters",
",",
"num_local",
",",
"max_neighbour",
")",
"return",
"cls",
"(",
"model",
")"
] | Constructor of the CLARANS clustering.rst algorithm
:param number_clusters: the number of clusters to be allocated
:param num_local: the number of local minima obtained (amount of iterations for solving the problem).
:param max_neighbour: the number of local minima obtained (amount of iterations for solving the problem).
:return: the resulting clustering.rst object | [
"Constructor",
"of",
"the",
"CLARANS",
"clustering",
".",
"rst",
"algorithm"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L42-L52 | train | 47,377 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.rock | def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False):
"""
Constructor of the ROCK cluster analysis algorithm
:param eps: Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius
:param number_clusters: Defines number of clusters that should be allocated from the input data set
:param threshold: Value that defines degree of normalization that influences on choice of clusters for merging during processing
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: The resulting clustering.rst object
"""
data = cls.input_preprocess(data)
model = rock(data, eps, number_clusters, threshold, ccore)
return cls(model) | python | def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False):
"""
Constructor of the ROCK cluster analysis algorithm
:param eps: Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius
:param number_clusters: Defines number of clusters that should be allocated from the input data set
:param threshold: Value that defines degree of normalization that influences on choice of clusters for merging during processing
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: The resulting clustering.rst object
"""
data = cls.input_preprocess(data)
model = rock(data, eps, number_clusters, threshold, ccore)
return cls(model) | [
"def",
"rock",
"(",
"cls",
",",
"data",
",",
"eps",
",",
"number_clusters",
",",
"threshold",
"=",
"0.5",
",",
"ccore",
"=",
"False",
")",
":",
"data",
"=",
"cls",
".",
"input_preprocess",
"(",
"data",
")",
"model",
"=",
"rock",
"(",
"data",
",",
"... | Constructor of the ROCK cluster analysis algorithm
:param eps: Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius
:param number_clusters: Defines number of clusters that should be allocated from the input data set
:param threshold: Value that defines degree of normalization that influences on choice of clusters for merging during processing
:param ccore: Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
:return: The resulting clustering.rst object | [
"Constructor",
"of",
"the",
"ROCK",
"cluster",
"analysis",
"algorithm"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L55-L67 | train | 47,378 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.optics | def optics(cls, data, eps, minpts, ccore=False):
"""
Constructor of OPTICS clustering.rst algorithm
:param data: Input data that is presented as a list of points (objects), where each point is represented by list or tuple
:param eps: Connectivity radius between points, points may be connected if distance between them less than the radius
:param minpts: Minimum number of shared neighbors that is required for establishing links between points
:param amount_clusters: Optional parameter where amount of clusters that should be allocated is specified.
In case of usage 'amount_clusters' connectivity radius can be greater than real, in other words, there is place for mistake
in connectivity radius usage.
:param ccore: if True than DLL CCORE (C++ solution) will be used for solving the problem
:return: the resulting clustering.rst object
"""
data = cls.input_preprocess(data)
model = optics(data, eps, minpts)
return cls(model) | python | def optics(cls, data, eps, minpts, ccore=False):
"""
Constructor of OPTICS clustering.rst algorithm
:param data: Input data that is presented as a list of points (objects), where each point is represented by list or tuple
:param eps: Connectivity radius between points, points may be connected if distance between them less than the radius
:param minpts: Minimum number of shared neighbors that is required for establishing links between points
:param amount_clusters: Optional parameter where amount of clusters that should be allocated is specified.
In case of usage 'amount_clusters' connectivity radius can be greater than real, in other words, there is place for mistake
in connectivity radius usage.
:param ccore: if True than DLL CCORE (C++ solution) will be used for solving the problem
:return: the resulting clustering.rst object
"""
data = cls.input_preprocess(data)
model = optics(data, eps, minpts)
return cls(model) | [
"def",
"optics",
"(",
"cls",
",",
"data",
",",
"eps",
",",
"minpts",
",",
"ccore",
"=",
"False",
")",
":",
"data",
"=",
"cls",
".",
"input_preprocess",
"(",
"data",
")",
"model",
"=",
"optics",
"(",
"data",
",",
"eps",
",",
"minpts",
")",
"return",... | Constructor of OPTICS clustering.rst algorithm
:param data: Input data that is presented as a list of points (objects), where each point is represented by list or tuple
:param eps: Connectivity radius between points, points may be connected if distance between them less than the radius
:param minpts: Minimum number of shared neighbors that is required for establishing links between points
:param amount_clusters: Optional parameter where amount of clusters that should be allocated is specified.
In case of usage 'amount_clusters' connectivity radius can be greater than real, in other words, there is place for mistake
in connectivity radius usage.
:param ccore: if True than DLL CCORE (C++ solution) will be used for solving the problem
:return: the resulting clustering.rst object | [
"Constructor",
"of",
"OPTICS",
"clustering",
".",
"rst",
"algorithm"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L78-L93 | train | 47,379 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.is_pyclustering_instance | def is_pyclustering_instance(model):
"""
Checks if the clustering.rst algorithm belongs to pyclustering
:param model: the clustering.rst algorithm model
:return: the truth value (Boolean)
"""
return any(isinstance(model, i) for i in [xmeans, clarans, rock, optics]) | python | def is_pyclustering_instance(model):
"""
Checks if the clustering.rst algorithm belongs to pyclustering
:param model: the clustering.rst algorithm model
:return: the truth value (Boolean)
"""
return any(isinstance(model, i) for i in [xmeans, clarans, rock, optics]) | [
"def",
"is_pyclustering_instance",
"(",
"model",
")",
":",
"return",
"any",
"(",
"isinstance",
"(",
"model",
",",
"i",
")",
"for",
"i",
"in",
"[",
"xmeans",
",",
"clarans",
",",
"rock",
",",
"optics",
"]",
")"
] | Checks if the clustering.rst algorithm belongs to pyclustering
:param model: the clustering.rst algorithm model
:return: the truth value (Boolean) | [
"Checks",
"if",
"the",
"clustering",
".",
"rst",
"algorithm",
"belongs",
"to",
"pyclustering"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L205-L212 | train | 47,380 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.fit | def fit(self, data=None):
"""
Performs clustering.rst
:param data: Data to be fit
:return: the clustering.rst object
"""
if self.is_pyclustering_instance(self.model):
if isinstance(self.model, xmeans):
data = self.input_preprocess(data)
self.model._xmeans__pointer_data = data
elif isinstance(self.model, clarans):
data = self.input_preprocess(data)
self.model._clarans__pointer_data = data
self.model.process()
else:
self.model.fit(data)
return self | python | def fit(self, data=None):
"""
Performs clustering.rst
:param data: Data to be fit
:return: the clustering.rst object
"""
if self.is_pyclustering_instance(self.model):
if isinstance(self.model, xmeans):
data = self.input_preprocess(data)
self.model._xmeans__pointer_data = data
elif isinstance(self.model, clarans):
data = self.input_preprocess(data)
self.model._clarans__pointer_data = data
self.model.process()
else:
self.model.fit(data)
return self | [
"def",
"fit",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"if",
"self",
".",
"is_pyclustering_instance",
"(",
"self",
".",
"model",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"model",
",",
"xmeans",
")",
":",
"data",
"=",
"self",
".",
"in... | Performs clustering.rst
:param data: Data to be fit
:return: the clustering.rst object | [
"Performs",
"clustering",
".",
"rst"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L214-L232 | train | 47,381 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering._labels_from_pyclusters | def _labels_from_pyclusters(self):
"""
Computes and returns the list of labels indicating the data points and the corresponding cluster ids.
:return: The list of labels
"""
clusters = self.model.get_clusters()
labels = []
for i in range(0, len(clusters)):
for j in clusters[i]:
labels.insert(int(j), i)
return labels | python | def _labels_from_pyclusters(self):
"""
Computes and returns the list of labels indicating the data points and the corresponding cluster ids.
:return: The list of labels
"""
clusters = self.model.get_clusters()
labels = []
for i in range(0, len(clusters)):
for j in clusters[i]:
labels.insert(int(j), i)
return labels | [
"def",
"_labels_from_pyclusters",
"(",
"self",
")",
":",
"clusters",
"=",
"self",
".",
"model",
".",
"get_clusters",
"(",
")",
"labels",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"clusters",
")",
")",
":",
"for",
"j",
"in... | Computes and returns the list of labels indicating the data points and the corresponding cluster ids.
:return: The list of labels | [
"Computes",
"and",
"returns",
"the",
"list",
"of",
"labels",
"indicating",
"the",
"data",
"points",
"and",
"the",
"corresponding",
"cluster",
"ids",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L235-L246 | train | 47,382 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.retrieve_cluster | def retrieve_cluster(self, df, cluster_no):
"""
Extracts the cluster at the given index from the input dataframe
:param df: the dataframe that contains the clusters
:param cluster_no: the cluster number
:return: returns the extracted cluster
"""
if self.is_pyclustering_instance(self.model):
clusters = self.model.get_clusters()
mask = []
for i in range(0, df.shape[0]):
mask.append(i in clusters[cluster_no])
else:
mask = self.model.labels_ == cluster_no # a boolean mask
return df[mask] | python | def retrieve_cluster(self, df, cluster_no):
"""
Extracts the cluster at the given index from the input dataframe
:param df: the dataframe that contains the clusters
:param cluster_no: the cluster number
:return: returns the extracted cluster
"""
if self.is_pyclustering_instance(self.model):
clusters = self.model.get_clusters()
mask = []
for i in range(0, df.shape[0]):
mask.append(i in clusters[cluster_no])
else:
mask = self.model.labels_ == cluster_no # a boolean mask
return df[mask] | [
"def",
"retrieve_cluster",
"(",
"self",
",",
"df",
",",
"cluster_no",
")",
":",
"if",
"self",
".",
"is_pyclustering_instance",
"(",
"self",
".",
"model",
")",
":",
"clusters",
"=",
"self",
".",
"model",
".",
"get_clusters",
"(",
")",
"mask",
"=",
"[",
... | Extracts the cluster at the given index from the input dataframe
:param df: the dataframe that contains the clusters
:param cluster_no: the cluster number
:return: returns the extracted cluster | [
"Extracts",
"the",
"cluster",
"at",
"the",
"given",
"index",
"from",
"the",
"input",
"dataframe"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L248-L263 | train | 47,383 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.get_labels | def get_labels(obj):
"""
Retrieve the labels of a clustering.rst object
:param obj: the clustering.rst object
:return: the resulting labels
"""
if Clustering.is_pyclustering_instance(obj.model):
return obj._labels_from_pyclusters
else:
return obj.model.labels_ | python | def get_labels(obj):
"""
Retrieve the labels of a clustering.rst object
:param obj: the clustering.rst object
:return: the resulting labels
"""
if Clustering.is_pyclustering_instance(obj.model):
return obj._labels_from_pyclusters
else:
return obj.model.labels_ | [
"def",
"get_labels",
"(",
"obj",
")",
":",
"if",
"Clustering",
".",
"is_pyclustering_instance",
"(",
"obj",
".",
"model",
")",
":",
"return",
"obj",
".",
"_labels_from_pyclusters",
"else",
":",
"return",
"obj",
".",
"model",
".",
"labels_"
] | Retrieve the labels of a clustering.rst object
:param obj: the clustering.rst object
:return: the resulting labels | [
"Retrieve",
"the",
"labels",
"of",
"a",
"clustering",
".",
"rst",
"object"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L266-L276 | train | 47,384 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/clustering.py | Clustering.silhouette_n_clusters | def silhouette_n_clusters(data, k_min, k_max, distance='euclidean'):
"""
Computes and plot the silhouette score vs number of clusters graph to help selecting the number of clusters visually
:param data: The data object
:param k_min: lowerbound of the cluster range
:param k_max: upperbound of the cluster range
:param distance: the distance metric, 'euclidean' by default
:return:
"""
k_range = range(k_min, k_max)
k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range]
silhouette_scores = [obj.silhouette_score(data=data, metric=distance) for obj in k_means_var]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(k_range, silhouette_scores, 'b*-')
ax.set_ylim((-1, 1))
plt.grid(True)
plt.xlabel('n_clusters')
plt.ylabel('The silhouette score')
plt.title('Silhouette score vs. k')
plt.show() | python | def silhouette_n_clusters(data, k_min, k_max, distance='euclidean'):
"""
Computes and plot the silhouette score vs number of clusters graph to help selecting the number of clusters visually
:param data: The data object
:param k_min: lowerbound of the cluster range
:param k_max: upperbound of the cluster range
:param distance: the distance metric, 'euclidean' by default
:return:
"""
k_range = range(k_min, k_max)
k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range]
silhouette_scores = [obj.silhouette_score(data=data, metric=distance) for obj in k_means_var]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(k_range, silhouette_scores, 'b*-')
ax.set_ylim((-1, 1))
plt.grid(True)
plt.xlabel('n_clusters')
plt.ylabel('The silhouette score')
plt.title('Silhouette score vs. k')
plt.show() | [
"def",
"silhouette_n_clusters",
"(",
"data",
",",
"k_min",
",",
"k_max",
",",
"distance",
"=",
"'euclidean'",
")",
":",
"k_range",
"=",
"range",
"(",
"k_min",
",",
"k_max",
")",
"k_means_var",
"=",
"[",
"Clustering",
".",
"kmeans",
"(",
"k",
")",
".",
... | Computes and plot the silhouette score vs number of clusters graph to help selecting the number of clusters visually
:param data: The data object
:param k_min: lowerbound of the cluster range
:param k_max: upperbound of the cluster range
:param distance: the distance metric, 'euclidean' by default
:return: | [
"Computes",
"and",
"plot",
"the",
"silhouette",
"score",
"vs",
"number",
"of",
"clusters",
"graph",
"to",
"help",
"selecting",
"the",
"number",
"of",
"clusters",
"visually"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L279-L303 | train | 47,385 |
DEIB-GECO/PyGMQL | gmql/dataset/loaders/Materializations.py | materialize | def materialize(datasets):
""" Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
The engine will perform all the materializations at the same time, if an output path is provided,
while will perform each operation separately if the output_path is not specified.
:param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
:return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
"""
from .. import GMQLDataset
if isinstance(datasets, dict):
result = dict()
for output_path in datasets.keys():
dataset = datasets[output_path]
if not isinstance(dataset, GMQLDataset.GMQLDataset):
raise TypeError("The values of the dictionary must be GMQLDataset."
" {} was given".format(type(dataset)))
gframe = dataset.materialize(output_path)
result[output_path] = gframe
elif isinstance(datasets, list):
result = []
for dataset in datasets:
if not isinstance(dataset, GMQLDataset.GMQLDataset):
raise TypeError("The values of the list must be GMQLDataset."
" {} was given".format(type(dataset)))
gframe = dataset.materialize()
result.append(gframe)
else:
raise TypeError("The input must be a dictionary of a list. "
"{} was given".format(type(datasets)))
return result | python | def materialize(datasets):
""" Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
The engine will perform all the materializations at the same time, if an output path is provided,
while will perform each operation separately if the output_path is not specified.
:param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
:return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
"""
from .. import GMQLDataset
if isinstance(datasets, dict):
result = dict()
for output_path in datasets.keys():
dataset = datasets[output_path]
if not isinstance(dataset, GMQLDataset.GMQLDataset):
raise TypeError("The values of the dictionary must be GMQLDataset."
" {} was given".format(type(dataset)))
gframe = dataset.materialize(output_path)
result[output_path] = gframe
elif isinstance(datasets, list):
result = []
for dataset in datasets:
if not isinstance(dataset, GMQLDataset.GMQLDataset):
raise TypeError("The values of the list must be GMQLDataset."
" {} was given".format(type(dataset)))
gframe = dataset.materialize()
result.append(gframe)
else:
raise TypeError("The input must be a dictionary of a list. "
"{} was given".format(type(datasets)))
return result | [
"def",
"materialize",
"(",
"datasets",
")",
":",
"from",
".",
".",
"import",
"GMQLDataset",
"if",
"isinstance",
"(",
"datasets",
",",
"dict",
")",
":",
"result",
"=",
"dict",
"(",
")",
"for",
"output_path",
"in",
"datasets",
".",
"keys",
"(",
")",
":",... | Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
The engine will perform all the materializations at the same time, if an output path is provided,
while will perform each operation separately if the output_path is not specified.
:param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
:return: a list of GDataframe or a dictionary {'output_path' : GDataframe} | [
"Multiple",
"materializations",
".",
"Enables",
"the",
"user",
"to",
"specify",
"a",
"set",
"of",
"GMQLDataset",
"to",
"be",
"materialized",
".",
"The",
"engine",
"will",
"perform",
"all",
"the",
"materializations",
"at",
"the",
"same",
"time",
"if",
"an",
"... | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Materializations.py#L9-L38 | train | 47,386 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/biclustering.py | Biclustering.retrieve_bicluster | def retrieve_bicluster(self, df, row_no, column_no):
"""
Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe.
:param df: the input dataframe whose values were biclustered
:param row_no: the number of the row bicluster
:param column_no: the number of the column bicluster
:return: the extracted bicluster from the dataframe
"""
res = df[self.model.biclusters_[0][row_no]]
bicluster = res[res.columns[self.model.biclusters_[1][column_no]]]
return bicluster | python | def retrieve_bicluster(self, df, row_no, column_no):
"""
Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe.
:param df: the input dataframe whose values were biclustered
:param row_no: the number of the row bicluster
:param column_no: the number of the column bicluster
:return: the extracted bicluster from the dataframe
"""
res = df[self.model.biclusters_[0][row_no]]
bicluster = res[res.columns[self.model.biclusters_[1][column_no]]]
return bicluster | [
"def",
"retrieve_bicluster",
"(",
"self",
",",
"df",
",",
"row_no",
",",
"column_no",
")",
":",
"res",
"=",
"df",
"[",
"self",
".",
"model",
".",
"biclusters_",
"[",
"0",
"]",
"[",
"row_no",
"]",
"]",
"bicluster",
"=",
"res",
"[",
"res",
".",
"colu... | Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe.
:param df: the input dataframe whose values were biclustered
:param row_no: the number of the row bicluster
:param column_no: the number of the column bicluster
:return: the extracted bicluster from the dataframe | [
"Extracts",
"the",
"bicluster",
"at",
"the",
"given",
"row",
"bicluster",
"number",
"and",
"the",
"column",
"bicluster",
"number",
"from",
"the",
"input",
"dataframe",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/biclustering.py#L50-L61 | train | 47,387 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/biclustering.py | Biclustering.bicluster_similarity | def bicluster_similarity(self, reference_model):
"""
Calculates the similarity between the current model of biclusters and the reference model of biclusters
:param reference_model: The reference model of biclusters
:return: Returns the consensus score(Hochreiter et. al., 2010), i.e. the similarity of two sets of biclusters.
"""
similarity_score = consensus_score(self.model.biclusters_, reference_model.biclusters_)
return similarity_score | python | def bicluster_similarity(self, reference_model):
"""
Calculates the similarity between the current model of biclusters and the reference model of biclusters
:param reference_model: The reference model of biclusters
:return: Returns the consensus score(Hochreiter et. al., 2010), i.e. the similarity of two sets of biclusters.
"""
similarity_score = consensus_score(self.model.biclusters_, reference_model.biclusters_)
return similarity_score | [
"def",
"bicluster_similarity",
"(",
"self",
",",
"reference_model",
")",
":",
"similarity_score",
"=",
"consensus_score",
"(",
"self",
".",
"model",
".",
"biclusters_",
",",
"reference_model",
".",
"biclusters_",
")",
"return",
"similarity_score"
] | Calculates the similarity between the current model of biclusters and the reference model of biclusters
:param reference_model: The reference model of biclusters
:return: Returns the consensus score(Hochreiter et. al., 2010), i.e. the similarity of two sets of biclusters. | [
"Calculates",
"the",
"similarity",
"between",
"the",
"current",
"model",
"of",
"biclusters",
"and",
"the",
"reference",
"model",
"of",
"biclusters"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/biclustering.py#L63-L71 | train | 47,388 |
DEIB-GECO/PyGMQL | gmql/ml/multi_ref_model.py | MultiRefModel.merge | def merge(self, samples_uuid):
"""
The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe
"""
all_meta_data = pd.DataFrame()
for dm in self.data_model:
all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
group = all_meta_data.groupby([samples_uuid])['sample']
sample_sets = group.apply(list).values
merged_df = pd.DataFrame()
multi_index = list(map(list, zip(*sample_sets)))
multi_index_names = list(range(0, len(sample_sets[0])))
i = 1
for pair in sample_sets:
i += 1
numbers = list(range(0, len(pair)))
df_temp = pd.DataFrame()
for n in numbers:
try: # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
except:
pass
merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
multi_index = np.asarray(multi_index)
multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
merged_df.index = multi_index
return merged_df | python | def merge(self, samples_uuid):
"""
The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe
"""
all_meta_data = pd.DataFrame()
for dm in self.data_model:
all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
group = all_meta_data.groupby([samples_uuid])['sample']
sample_sets = group.apply(list).values
merged_df = pd.DataFrame()
multi_index = list(map(list, zip(*sample_sets)))
multi_index_names = list(range(0, len(sample_sets[0])))
i = 1
for pair in sample_sets:
i += 1
numbers = list(range(0, len(pair)))
df_temp = pd.DataFrame()
for n in numbers:
try: # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
except:
pass
merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
multi_index = np.asarray(multi_index)
multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
merged_df.index = multi_index
return merged_df | [
"def",
"merge",
"(",
"self",
",",
"samples_uuid",
")",
":",
"all_meta_data",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"dm",
"in",
"self",
".",
"data_model",
":",
"all_meta_data",
"=",
"pd",
".",
"concat",
"(",
"[",
"all_meta_data",
",",
"dm",
".",... | The method to merge the datamodels belonging to different references
:param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references
:return: Returns the merged dataframe | [
"The",
"method",
"to",
"merge",
"the",
"datamodels",
"belonging",
"to",
"different",
"references"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/multi_ref_model.py#L57-L91 | train | 47,389 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/preprocessing.py | Preprocessing.impute_using_statistics | def impute_using_statistics(df, method='min'):
"""
Imputes the missing values by the selected statistical property of each column
:param df: The input dataframe that contains missing values
:param method: The imputation method (min by default)
"zero": fill missing entries with zeros
"mean": fill with column means
"median" : fill with column medians
"min": fill with min value per column
"random": fill with gaussian noise according to mean/std of column
:return: the imputed dataframe
"""
sf = SimpleFill(method)
imputed_matrix = sf.complete(df.values)
imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
return imputed_df | python | def impute_using_statistics(df, method='min'):
"""
Imputes the missing values by the selected statistical property of each column
:param df: The input dataframe that contains missing values
:param method: The imputation method (min by default)
"zero": fill missing entries with zeros
"mean": fill with column means
"median" : fill with column medians
"min": fill with min value per column
"random": fill with gaussian noise according to mean/std of column
:return: the imputed dataframe
"""
sf = SimpleFill(method)
imputed_matrix = sf.complete(df.values)
imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
return imputed_df | [
"def",
"impute_using_statistics",
"(",
"df",
",",
"method",
"=",
"'min'",
")",
":",
"sf",
"=",
"SimpleFill",
"(",
"method",
")",
"imputed_matrix",
"=",
"sf",
".",
"complete",
"(",
"df",
".",
"values",
")",
"imputed_df",
"=",
"pd",
".",
"DataFrame",
"(",
... | Imputes the missing values by the selected statistical property of each column
:param df: The input dataframe that contains missing values
:param method: The imputation method (min by default)
"zero": fill missing entries with zeros
"mean": fill with column means
"median" : fill with column medians
"min": fill with min value per column
"random": fill with gaussian noise according to mean/std of column
:return: the imputed dataframe | [
"Imputes",
"the",
"missing",
"values",
"by",
"the",
"selected",
"statistical",
"property",
"of",
"each",
"column"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L49-L65 | train | 47,390 |
DEIB-GECO/PyGMQL | gmql/ml/algorithms/preprocessing.py | Preprocessing.impute_knn | def impute_knn(df, k=3):
"""
Nearest neighbour imputations which weights samples using the mean squared difference on features for which two rows both have observed data.
:param df: The input dataframe that contains missing values
:param k: The number of neighbours
:return: the imputed dataframe
"""
imputed_matrix = KNN(k=k).complete(df.values)
imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
return imputed_df | python | def impute_knn(df, k=3):
"""
Nearest neighbour imputations which weights samples using the mean squared difference on features for which two rows both have observed data.
:param df: The input dataframe that contains missing values
:param k: The number of neighbours
:return: the imputed dataframe
"""
imputed_matrix = KNN(k=k).complete(df.values)
imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
return imputed_df | [
"def",
"impute_knn",
"(",
"df",
",",
"k",
"=",
"3",
")",
":",
"imputed_matrix",
"=",
"KNN",
"(",
"k",
"=",
"k",
")",
".",
"complete",
"(",
"df",
".",
"values",
")",
"imputed_df",
"=",
"pd",
".",
"DataFrame",
"(",
"imputed_matrix",
",",
"df",
".",
... | Nearest neighbour imputations which weights samples using the mean squared difference on features for which two rows both have observed data.
:param df: The input dataframe that contains missing values
:param k: The number of neighbours
:return: the imputed dataframe | [
"Nearest",
"neighbour",
"imputations",
"which",
"weights",
"samples",
"using",
"the",
"mean",
"squared",
"difference",
"on",
"features",
"for",
"which",
"two",
"rows",
"both",
"have",
"observed",
"data",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L68-L78 | train | 47,391 |
DEIB-GECO/PyGMQL | gmql/dataset/DataStructures/MetaField.py | MetaField.isin | def isin(self, values):
""" Selects the samples having the metadata attribute between the values provided
as input
:param values: a list of elements
:return a new complex condition
"""
if not isinstance(values, list):
raise TypeError("Input should be a string. {} was provided".format(type(values)))
if not (self.name.startswith("(") and self.name.endswith(")")):
first = True
new_condition = None
for v in values:
if first:
first = False
new_condition = self.__eq__(v)
else:
new_condition = new_condition.__or__(self.__eq__(v))
return new_condition
else:
raise SyntaxError("You cannot use 'isin' with a complex condition") | python | def isin(self, values):
""" Selects the samples having the metadata attribute between the values provided
as input
:param values: a list of elements
:return a new complex condition
"""
if not isinstance(values, list):
raise TypeError("Input should be a string. {} was provided".format(type(values)))
if not (self.name.startswith("(") and self.name.endswith(")")):
first = True
new_condition = None
for v in values:
if first:
first = False
new_condition = self.__eq__(v)
else:
new_condition = new_condition.__or__(self.__eq__(v))
return new_condition
else:
raise SyntaxError("You cannot use 'isin' with a complex condition") | [
"def",
"isin",
"(",
"self",
",",
"values",
")",
":",
"if",
"not",
"isinstance",
"(",
"values",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"Input should be a string. {} was provided\"",
".",
"format",
"(",
"type",
"(",
"values",
")",
")",
")",
"if"... | Selects the samples having the metadata attribute between the values provided
as input
:param values: a list of elements
:return a new complex condition | [
"Selects",
"the",
"samples",
"having",
"the",
"metadata",
"attribute",
"between",
"the",
"values",
"provided",
"as",
"input"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/DataStructures/MetaField.py#L75-L95 | train | 47,392 |
kevinconway/daemons | daemons/daemonize/simple.py | SimpleDaemonizeManager.daemonize | def daemonize(self):
"""Double fork and set the pid."""
self._double_fork()
# Write pidfile.
self.pid = os.getpid()
LOG.info(
"Succesfully daemonized process {0}.".format(self.pid)
) | python | def daemonize(self):
"""Double fork and set the pid."""
self._double_fork()
# Write pidfile.
self.pid = os.getpid()
LOG.info(
"Succesfully daemonized process {0}.".format(self.pid)
) | [
"def",
"daemonize",
"(",
"self",
")",
":",
"self",
".",
"_double_fork",
"(",
")",
"# Write pidfile.",
"self",
".",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"LOG",
".",
"info",
"(",
"\"Succesfully daemonized process {0}.\"",
".",
"format",
"(",
"self",
".... | Double fork and set the pid. | [
"Double",
"fork",
"and",
"set",
"the",
"pid",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/daemonize/simple.py#L22-L31 | train | 47,393 |
kevinconway/daemons | daemons/daemonize/simple.py | SimpleDaemonizeManager._double_fork | def _double_fork(self):
"""Do the UNIX double-fork magic.
See Stevens' "Advanced Programming in the UNIX Environment" for details
(ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent.
sys.exit(0)
return None
except OSError as err:
LOG.exception(
"Fork #1 failed: {0} ({1})".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None
# Decouple from parent environment.
os.chdir("/")
os.setsid()
os.umask(0)
# Do second fork.
try:
pid = os.fork()
if pid > 0:
# Exit from second parent.
sys.exit(0)
except OSError as err:
LOG.exception(
"Fork #2 failed: {0} ({1})".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None | python | def _double_fork(self):
"""Do the UNIX double-fork magic.
See Stevens' "Advanced Programming in the UNIX Environment" for details
(ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent.
sys.exit(0)
return None
except OSError as err:
LOG.exception(
"Fork #1 failed: {0} ({1})".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None
# Decouple from parent environment.
os.chdir("/")
os.setsid()
os.umask(0)
# Do second fork.
try:
pid = os.fork()
if pid > 0:
# Exit from second parent.
sys.exit(0)
except OSError as err:
LOG.exception(
"Fork #2 failed: {0} ({1})".format(
err.errno,
err.strerror,
),
)
sys.exit(exit.DAEMONIZE_FAILED)
return None | [
"def",
"_double_fork",
"(",
"self",
")",
":",
"try",
":",
"pid",
"=",
"os",
".",
"fork",
"(",
")",
"if",
"pid",
">",
"0",
":",
"# Exit first parent.",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"None",
"except",
"OSError",
"as",
"err",
":",
"LOG"... | Do the UNIX double-fork magic.
See Stevens' "Advanced Programming in the UNIX Environment" for details
(ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 | [
"Do",
"the",
"UNIX",
"double",
"-",
"fork",
"magic",
"."
] | b0fe0db5821171a35aa9078596d19d630c570b38 | https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/daemonize/simple.py#L33-L83 | train | 47,394 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.from_memory | def from_memory(cls, data, meta):
"""
Overloaded constructor to create the GenometricSpace object from memory data and meta variables.
The indexes of the data and meta dataframes should be the same.
:param data: The data model
:param meta: The metadata
:return: A GenometricSpace object
"""
obj = cls()
obj.data = data
obj.meta = meta
return obj | python | def from_memory(cls, data, meta):
"""
Overloaded constructor to create the GenometricSpace object from memory data and meta variables.
The indexes of the data and meta dataframes should be the same.
:param data: The data model
:param meta: The metadata
:return: A GenometricSpace object
"""
obj = cls()
obj.data = data
obj.meta = meta
return obj | [
"def",
"from_memory",
"(",
"cls",
",",
"data",
",",
"meta",
")",
":",
"obj",
"=",
"cls",
"(",
")",
"obj",
".",
"data",
"=",
"data",
"obj",
".",
"meta",
"=",
"meta",
"return",
"obj"
] | Overloaded constructor to create the GenometricSpace object from memory data and meta variables.
The indexes of the data and meta dataframes should be the same.
:param data: The data model
:param meta: The metadata
:return: A GenometricSpace object | [
"Overloaded",
"constructor",
"to",
"create",
"the",
"GenometricSpace",
"object",
"from",
"memory",
"data",
"and",
"meta",
"variables",
".",
"The",
"indexes",
"of",
"the",
"data",
"and",
"meta",
"dataframes",
"should",
"be",
"the",
"same",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L27-L41 | train | 47,395 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.load | def load(self, _path, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False, file_extension="gdm"):
"""Parses and loads the data into instance attributes.
The indexes of the data and meta dataframes should be the same.
:param path: The path to the dataset on the filesystem
:param regs: the regions that are to be analyzed
:param meta: the meta-data that are to be analyzed
:param values: the values that are to be selected
:param full_load: Specifies the method of parsing the data. If False then parser omits the parsing of zero(0)
values in order to speed up and save memory. However, while creating the matrix, those zero values are going to be put into the matrix.
(unless a row contains "all zero columns". This parsing is strongly recommended for sparse datasets.
If the full_load parameter is True then all the zero(0) data are going to be read.
"""
if not full_load:
warnings.warn("\n\nYou are using the optimized loading technique. "
"All-zero rows are not going to be loaded into memory. "
"To load all the data please set the full_load parameter equal to True.")
p = Parser(_path)
self.meta = p.parse_meta(meta)
self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension)
self._path = _path | python | def load(self, _path, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False, file_extension="gdm"):
"""Parses and loads the data into instance attributes.
The indexes of the data and meta dataframes should be the same.
:param path: The path to the dataset on the filesystem
:param regs: the regions that are to be analyzed
:param meta: the meta-data that are to be analyzed
:param values: the values that are to be selected
:param full_load: Specifies the method of parsing the data. If False then parser omits the parsing of zero(0)
values in order to speed up and save memory. However, while creating the matrix, those zero values are going to be put into the matrix.
(unless a row contains "all zero columns". This parsing is strongly recommended for sparse datasets.
If the full_load parameter is True then all the zero(0) data are going to be read.
"""
if not full_load:
warnings.warn("\n\nYou are using the optimized loading technique. "
"All-zero rows are not going to be loaded into memory. "
"To load all the data please set the full_load parameter equal to True.")
p = Parser(_path)
self.meta = p.parse_meta(meta)
self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension)
self._path = _path | [
"def",
"load",
"(",
"self",
",",
"_path",
",",
"regs",
"=",
"[",
"'chr'",
",",
"'left'",
",",
"'right'",
",",
"'strand'",
"]",
",",
"meta",
"=",
"[",
"]",
",",
"values",
"=",
"[",
"]",
",",
"full_load",
"=",
"False",
",",
"file_extension",
"=",
"... | Parses and loads the data into instance attributes.
The indexes of the data and meta dataframes should be the same.
:param path: The path to the dataset on the filesystem
:param regs: the regions that are to be analyzed
:param meta: the meta-data that are to be analyzed
:param values: the values that are to be selected
:param full_load: Specifies the method of parsing the data. If False then parser omits the parsing of zero(0)
values in order to speed up and save memory. However, while creating the matrix, those zero values are going to be put into the matrix.
(unless a row contains "all zero columns". This parsing is strongly recommended for sparse datasets.
If the full_load parameter is True then all the zero(0) data are going to be read. | [
"Parses",
"and",
"loads",
"the",
"data",
"into",
"instance",
"attributes",
".",
"The",
"indexes",
"of",
"the",
"data",
"and",
"meta",
"dataframes",
"should",
"be",
"the",
"same",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L43-L64 | train | 47,396 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.set_meta | def set_meta(self, selected_meta):
"""Sets one axis of the 2D multi-indexed dataframe
index to the selected meta data.
:param selected_meta: The list of the metadata users want to index with.
"""
meta_names = list(selected_meta)
meta_names.append('sample')
meta_index = []
# To set the index for existing samples in the region dataframe.
# The index size of the region dataframe does not necessarily be equal to that of metadata df.
warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
"In case of single index, the index itself should be the sample_id")
for x in meta_names:
meta_index.append(self.meta.ix[self.data.index.get_level_values(-1)][x].values)
meta_index = np.asarray(meta_index)
multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
self.data.index = multi_meta_index | python | def set_meta(self, selected_meta):
"""Sets one axis of the 2D multi-indexed dataframe
index to the selected meta data.
:param selected_meta: The list of the metadata users want to index with.
"""
meta_names = list(selected_meta)
meta_names.append('sample')
meta_index = []
# To set the index for existing samples in the region dataframe.
# The index size of the region dataframe does not necessarily be equal to that of metadata df.
warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
"In case of single index, the index itself should be the sample_id")
for x in meta_names:
meta_index.append(self.meta.ix[self.data.index.get_level_values(-1)][x].values)
meta_index = np.asarray(meta_index)
multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
self.data.index = multi_meta_index | [
"def",
"set_meta",
"(",
"self",
",",
"selected_meta",
")",
":",
"meta_names",
"=",
"list",
"(",
"selected_meta",
")",
"meta_names",
".",
"append",
"(",
"'sample'",
")",
"meta_index",
"=",
"[",
"]",
"# To set the index for existing samples in the region dataframe.",
... | Sets one axis of the 2D multi-indexed dataframe
index to the selected meta data.
:param selected_meta: The list of the metadata users want to index with. | [
"Sets",
"one",
"axis",
"of",
"the",
"2D",
"multi",
"-",
"indexed",
"dataframe",
"index",
"to",
"the",
"selected",
"meta",
"data",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L66-L84 | train | 47,397 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.to_matrix | def to_matrix(self, values, selected_regions, default_value=0):
"""Creates a 2D multi-indexed matrix representation of the data.
This representation allows the data to be sent to the machine learning algorithms.
Args:
:param values: The value or values that are going to fill the matrix.
:param selected_regions: The index to one axis of the matrix.
:param default_value: The default fill value of the matrix
"""
if isinstance(values, list):
for v in values:
try:
self.data[v] = self.data[v].map(float)
except:
print(self.data[v])
else:
self.data[values] = self.data[values].map(float)
print("started pivoting")
self.data = pd.pivot_table(self.data,
values=values, columns=selected_regions, index=['sample'],
fill_value=default_value)
print("end of pivoting") | python | def to_matrix(self, values, selected_regions, default_value=0):
"""Creates a 2D multi-indexed matrix representation of the data.
This representation allows the data to be sent to the machine learning algorithms.
Args:
:param values: The value or values that are going to fill the matrix.
:param selected_regions: The index to one axis of the matrix.
:param default_value: The default fill value of the matrix
"""
if isinstance(values, list):
for v in values:
try:
self.data[v] = self.data[v].map(float)
except:
print(self.data[v])
else:
self.data[values] = self.data[values].map(float)
print("started pivoting")
self.data = pd.pivot_table(self.data,
values=values, columns=selected_regions, index=['sample'],
fill_value=default_value)
print("end of pivoting") | [
"def",
"to_matrix",
"(",
"self",
",",
"values",
",",
"selected_regions",
",",
"default_value",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"list",
")",
":",
"for",
"v",
"in",
"values",
":",
"try",
":",
"self",
".",
"data",
"[",
"v",
... | Creates a 2D multi-indexed matrix representation of the data.
This representation allows the data to be sent to the machine learning algorithms.
Args:
:param values: The value or values that are going to fill the matrix.
:param selected_regions: The index to one axis of the matrix.
:param default_value: The default fill value of the matrix | [
"Creates",
"a",
"2D",
"multi",
"-",
"indexed",
"matrix",
"representation",
"of",
"the",
"data",
".",
"This",
"representation",
"allows",
"the",
"data",
"to",
"be",
"sent",
"to",
"the",
"machine",
"learning",
"algorithms",
"."
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L86-L108 | train | 47,398 |
DEIB-GECO/PyGMQL | gmql/ml/genometric_space.py | GenometricSpace.get_values | def get_values(self, set, selected_meta):
"""
Retrieves the selected metadata values of the given set
:param set: cluster that contains the data
:param selected_meta: the values of the selected_meta
:return: the values of the selected meta of the cluster
"""
warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
"In case of single index, the index itself should be the sample_id")
sample_ids = set.index.get_level_values(-1)
corresponding_meta = self.meta.loc[sample_ids]
values = corresponding_meta[selected_meta]
try:
values = values.astype(float)
except ValueError:
print("the values should be numeric")
return values | python | def get_values(self, set, selected_meta):
"""
Retrieves the selected metadata values of the given set
:param set: cluster that contains the data
:param selected_meta: the values of the selected_meta
:return: the values of the selected meta of the cluster
"""
warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
"In case of single index, the index itself should be the sample_id")
sample_ids = set.index.get_level_values(-1)
corresponding_meta = self.meta.loc[sample_ids]
values = corresponding_meta[selected_meta]
try:
values = values.astype(float)
except ValueError:
print("the values should be numeric")
return values | [
"def",
"get_values",
"(",
"self",
",",
"set",
",",
"selected_meta",
")",
":",
"warnings",
".",
"warn",
"(",
"\"\\n\\nThis method assumes that the last level of the index is the sample_id.\\n\"",
"\"In case of single index, the index itself should be the sample_id\"",
")",
"sample_i... | Retrieves the selected metadata values of the given set
:param set: cluster that contains the data
:param selected_meta: the values of the selected_meta
:return: the values of the selected meta of the cluster | [
"Retrieves",
"the",
"selected",
"metadata",
"values",
"of",
"the",
"given",
"set"
] | e58b2f9402a86056dcda484a32e3de0bb06ed991 | https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L110-L128 | train | 47,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.