repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
yjzhang/uncurl_python | uncurl/ensemble.py | state_estimation_ensemble | def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):
"""
Runs an ensemble method on the list of M results...
Args:
data: genes x cells array
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
M_new
W_new
ll
"""
if len(M_list)==0:
M_list = []
for i in range(n_runs):
M, W, ll = poisson_estimate_state(data, k, **se_params)
M_list.append(M)
M_stacked = np.hstack(M_list)
M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)
W_new = np.dot(data.T, M_new)
W_new = W_new/W_new.sum(0)
return M_new, W_new, ll | python | def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):
"""
Runs an ensemble method on the list of M results...
Args:
data: genes x cells array
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
M_new
W_new
ll
"""
if len(M_list)==0:
M_list = []
for i in range(n_runs):
M, W, ll = poisson_estimate_state(data, k, **se_params)
M_list.append(M)
M_stacked = np.hstack(M_list)
M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)
W_new = np.dot(data.T, M_new)
W_new = W_new/W_new.sum(0)
return M_new, W_new, ll | [
"def",
"state_estimation_ensemble",
"(",
"data",
",",
"k",
",",
"n_runs",
"=",
"10",
",",
"M_list",
"=",
"[",
"]",
",",
"*",
"*",
"se_params",
")",
":",
"if",
"len",
"(",
"M_list",
")",
"==",
"0",
":",
"M_list",
"=",
"[",
"]",
"for",
"i",
"in",
... | Runs an ensemble method on the list of M results...
Args:
data: genes x cells array
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
M_new
W_new
ll | [
"Runs",
"an",
"ensemble",
"method",
"on",
"the",
"list",
"of",
"M",
"results",
"..."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L22-L47 | train | 47,200 |
yjzhang/uncurl_python | uncurl/ensemble.py | nmf_ensemble | def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):
"""
Runs an ensemble method on the list of NMF W matrices...
Args:
data: genes x cells array (should be log + cell-normalized)
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
W_new
H_new
"""
nmf = NMF(k)
if len(W_list)==0:
W_list = []
for i in range(n_runs):
W = nmf.fit_transform(data)
W_list.append(W)
W_stacked = np.hstack(W_list)
nmf_w = nmf.fit_transform(W_stacked)
nmf_h = nmf.components_
H_new = data.T.dot(nmf_w).T
nmf2 = NMF(k, init='custom')
nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
H_new = nmf2.components_
#W_new = W_new/W_new.sum(0)
# alternatively, use nmf_w and h_new as initializations for another NMF round?
return nmf_w, H_new | python | def nmf_ensemble(data, k, n_runs=10, W_list=[], **nmf_params):
"""
Runs an ensemble method on the list of NMF W matrices...
Args:
data: genes x cells array (should be log + cell-normalized)
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
W_new
H_new
"""
nmf = NMF(k)
if len(W_list)==0:
W_list = []
for i in range(n_runs):
W = nmf.fit_transform(data)
W_list.append(W)
W_stacked = np.hstack(W_list)
nmf_w = nmf.fit_transform(W_stacked)
nmf_h = nmf.components_
H_new = data.T.dot(nmf_w).T
nmf2 = NMF(k, init='custom')
nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
H_new = nmf2.components_
#W_new = W_new/W_new.sum(0)
# alternatively, use nmf_w and h_new as initializations for another NMF round?
return nmf_w, H_new | [
"def",
"nmf_ensemble",
"(",
"data",
",",
"k",
",",
"n_runs",
"=",
"10",
",",
"W_list",
"=",
"[",
"]",
",",
"*",
"*",
"nmf_params",
")",
":",
"nmf",
"=",
"NMF",
"(",
"k",
")",
"if",
"len",
"(",
"W_list",
")",
"==",
"0",
":",
"W_list",
"=",
"["... | Runs an ensemble method on the list of NMF W matrices...
Args:
data: genes x cells array (should be log + cell-normalized)
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
W_new
H_new | [
"Runs",
"an",
"ensemble",
"method",
"on",
"the",
"list",
"of",
"NMF",
"W",
"matrices",
"..."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L49-L79 | train | 47,201 |
yjzhang/uncurl_python | uncurl/ensemble.py | nmf_tsne | def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
"""
runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run
"""
clusters = []
nmf = NMF(k)
tsne = TSNE(2)
km = KMeans(k)
for i in range(n_runs):
w = nmf.fit_transform(data)
h = nmf.components_
tsne_wh = tsne.fit_transform(w.dot(h).T)
clust = km.fit_predict(tsne_wh)
clusters.append(clust)
clusterings = np.vstack(clusters)
consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
nmf_new = NMF(k, init='custom')
# TODO: find an initialization for the consensus W and H
init_w, init_h = nmf_init(data, consensus, k, init)
W = nmf_new.fit_transform(data, W=init_w, H=init_h)
H = nmf_new.components_
return W, H | python | def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
"""
runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run
"""
clusters = []
nmf = NMF(k)
tsne = TSNE(2)
km = KMeans(k)
for i in range(n_runs):
w = nmf.fit_transform(data)
h = nmf.components_
tsne_wh = tsne.fit_transform(w.dot(h).T)
clust = km.fit_predict(tsne_wh)
clusters.append(clust)
clusterings = np.vstack(clusters)
consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
nmf_new = NMF(k, init='custom')
# TODO: find an initialization for the consensus W and H
init_w, init_h = nmf_init(data, consensus, k, init)
W = nmf_new.fit_transform(data, W=init_w, H=init_h)
H = nmf_new.components_
return W, H | [
"def",
"nmf_tsne",
"(",
"data",
",",
"k",
",",
"n_runs",
"=",
"10",
",",
"init",
"=",
"'enhanced'",
",",
"*",
"*",
"params",
")",
":",
"clusters",
"=",
"[",
"]",
"nmf",
"=",
"NMF",
"(",
"k",
")",
"tsne",
"=",
"TSNE",
"(",
"2",
")",
"km",
"=",... | runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run | [
"runs",
"tsne",
"-",
"consensus",
"-",
"NMF"
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L150-L177 | train | 47,202 |
yjzhang/uncurl_python | uncurl/ensemble.py | poisson_consensus_se | def poisson_consensus_se(data, k, n_runs=10, **se_params):
"""
Initializes Poisson State Estimation using a consensus Poisson clustering.
"""
clusters = []
for i in range(n_runs):
assignments, means = poisson_cluster(data, k)
clusters.append(assignments)
clusterings = np.vstack(clusters)
consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
init_m, init_w = nmf_init(data, consensus, k, 'basic')
M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params)
return M, W, ll | python | def poisson_consensus_se(data, k, n_runs=10, **se_params):
"""
Initializes Poisson State Estimation using a consensus Poisson clustering.
"""
clusters = []
for i in range(n_runs):
assignments, means = poisson_cluster(data, k)
clusters.append(assignments)
clusterings = np.vstack(clusters)
consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
init_m, init_w = nmf_init(data, consensus, k, 'basic')
M, W, ll = poisson_estimate_state(data, k, init_means=init_m, init_weights=init_w, **se_params)
return M, W, ll | [
"def",
"poisson_consensus_se",
"(",
"data",
",",
"k",
",",
"n_runs",
"=",
"10",
",",
"*",
"*",
"se_params",
")",
":",
"clusters",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_runs",
")",
":",
"assignments",
",",
"means",
"=",
"poisson_cluster",
... | Initializes Poisson State Estimation using a consensus Poisson clustering. | [
"Initializes",
"Poisson",
"State",
"Estimation",
"using",
"a",
"consensus",
"Poisson",
"clustering",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L235-L247 | train | 47,203 |
bachya/py17track | py17track/profile.py | Profile.login | async def login(self, email: str, password: str) -> bool:
"""Login to the profile."""
login_resp = await self._request(
'post',
API_URL_USER,
json={
'version': '1.0',
'method': 'Signin',
'param': {
'Email': email,
'Password': password,
'CaptchaCode': ''
},
'sourcetype': 0
})
_LOGGER.debug('Login response: %s', login_resp)
if login_resp.get('Code') != 0:
return False
self.account_id = login_resp['Json']['gid']
return True | python | async def login(self, email: str, password: str) -> bool:
"""Login to the profile."""
login_resp = await self._request(
'post',
API_URL_USER,
json={
'version': '1.0',
'method': 'Signin',
'param': {
'Email': email,
'Password': password,
'CaptchaCode': ''
},
'sourcetype': 0
})
_LOGGER.debug('Login response: %s', login_resp)
if login_resp.get('Code') != 0:
return False
self.account_id = login_resp['Json']['gid']
return True | [
"async",
"def",
"login",
"(",
"self",
",",
"email",
":",
"str",
",",
"password",
":",
"str",
")",
"->",
"bool",
":",
"login_resp",
"=",
"await",
"self",
".",
"_request",
"(",
"'post'",
",",
"API_URL_USER",
",",
"json",
"=",
"{",
"'version'",
":",
"'1... | Login to the profile. | [
"Login",
"to",
"the",
"profile",
"."
] | e6e64f2a79571433df7ee702cb4ebc4127b7ad6d | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L22-L45 | train | 47,204 |
bachya/py17track | py17track/profile.py | Profile.packages | async def packages(
self, package_state: Union[int, str] = '',
show_archived: bool = False) -> list:
"""Get the list of packages associated with the account."""
packages_resp = await self._request(
'post',
API_URL_BUYER,
json={
'version': '1.0',
'method': 'GetTrackInfoList',
'param': {
'IsArchived': show_archived,
'Item': '',
'Page': 1,
'PerPage': 40,
'PackageState': package_state,
'Sequence': '0'
},
'sourcetype': 0
})
_LOGGER.debug('Packages response: %s', packages_resp)
packages = []
for package in packages_resp.get('Json', []):
last_event = package.get('FLastEvent')
if last_event:
event = json.loads(last_event)
else:
event = {}
kwargs = {
'destination_country': package.get('FSecondCountry', 0),
'friendly_name': package.get('FRemark'),
'info_text': event.get('z'),
'location': event.get('c'),
'origin_country': package.get('FFirstCountry', 0),
'package_type': package.get('FTrackStateType', 0),
'status': package.get('FPackageState', 0)
}
packages.append(Package(package['FTrackNo'], **kwargs))
return packages | python | async def packages(
self, package_state: Union[int, str] = '',
show_archived: bool = False) -> list:
"""Get the list of packages associated with the account."""
packages_resp = await self._request(
'post',
API_URL_BUYER,
json={
'version': '1.0',
'method': 'GetTrackInfoList',
'param': {
'IsArchived': show_archived,
'Item': '',
'Page': 1,
'PerPage': 40,
'PackageState': package_state,
'Sequence': '0'
},
'sourcetype': 0
})
_LOGGER.debug('Packages response: %s', packages_resp)
packages = []
for package in packages_resp.get('Json', []):
last_event = package.get('FLastEvent')
if last_event:
event = json.loads(last_event)
else:
event = {}
kwargs = {
'destination_country': package.get('FSecondCountry', 0),
'friendly_name': package.get('FRemark'),
'info_text': event.get('z'),
'location': event.get('c'),
'origin_country': package.get('FFirstCountry', 0),
'package_type': package.get('FTrackStateType', 0),
'status': package.get('FPackageState', 0)
}
packages.append(Package(package['FTrackNo'], **kwargs))
return packages | [
"async",
"def",
"packages",
"(",
"self",
",",
"package_state",
":",
"Union",
"[",
"int",
",",
"str",
"]",
"=",
"''",
",",
"show_archived",
":",
"bool",
"=",
"False",
")",
"->",
"list",
":",
"packages_resp",
"=",
"await",
"self",
".",
"_request",
"(",
... | Get the list of packages associated with the account. | [
"Get",
"the",
"list",
"of",
"packages",
"associated",
"with",
"the",
"account",
"."
] | e6e64f2a79571433df7ee702cb4ebc4127b7ad6d | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L47-L88 | train | 47,205 |
bachya/py17track | py17track/profile.py | Profile.summary | async def summary(self, show_archived: bool = False) -> dict:
"""Get a quick summary of how many packages are in an account."""
summary_resp = await self._request(
'post',
API_URL_BUYER,
json={
'version': '1.0',
'method': 'GetIndexData',
'param': {
'IsArchived': show_archived
},
'sourcetype': 0
})
_LOGGER.debug('Summary response: %s', summary_resp)
results = {}
for kind in summary_resp.get('Json', {}).get('eitem', []):
results[PACKAGE_STATUS_MAP[kind['e']]] = kind['ec']
return results | python | async def summary(self, show_archived: bool = False) -> dict:
"""Get a quick summary of how many packages are in an account."""
summary_resp = await self._request(
'post',
API_URL_BUYER,
json={
'version': '1.0',
'method': 'GetIndexData',
'param': {
'IsArchived': show_archived
},
'sourcetype': 0
})
_LOGGER.debug('Summary response: %s', summary_resp)
results = {}
for kind in summary_resp.get('Json', {}).get('eitem', []):
results[PACKAGE_STATUS_MAP[kind['e']]] = kind['ec']
return results | [
"async",
"def",
"summary",
"(",
"self",
",",
"show_archived",
":",
"bool",
"=",
"False",
")",
"->",
"dict",
":",
"summary_resp",
"=",
"await",
"self",
".",
"_request",
"(",
"'post'",
",",
"API_URL_BUYER",
",",
"json",
"=",
"{",
"'version'",
":",
"'1.0'",... | Get a quick summary of how many packages are in an account. | [
"Get",
"a",
"quick",
"summary",
"of",
"how",
"many",
"packages",
"are",
"in",
"an",
"account",
"."
] | e6e64f2a79571433df7ee702cb4ebc4127b7ad6d | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L90-L109 | train | 47,206 |
markperdue/pyvesync | home_assistant/custom_components/switch.py | setup_platform | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the VeSync switch platform."""
if discovery_info is None:
return
switches = []
manager = hass.data[DOMAIN]['manager']
if manager.outlets is not None and manager.outlets:
if len(manager.outlets) == 1:
count_string = 'switch'
else:
count_string = 'switches'
_LOGGER.info("Discovered %d VeSync %s",
len(manager.outlets), count_string)
if len(manager.outlets) > 1:
for switch in manager.outlets:
switch._energy_update_interval = ENERGY_UPDATE_INT
switches.append(VeSyncSwitchHA(switch))
_LOGGER.info("Added a VeSync switch named '%s'",
switch.device_name)
else:
switches.append(VeSyncSwitchHA(manager.outlets))
else:
_LOGGER.info("No VeSync switches found")
add_entities(switches) | python | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the VeSync switch platform."""
if discovery_info is None:
return
switches = []
manager = hass.data[DOMAIN]['manager']
if manager.outlets is not None and manager.outlets:
if len(manager.outlets) == 1:
count_string = 'switch'
else:
count_string = 'switches'
_LOGGER.info("Discovered %d VeSync %s",
len(manager.outlets), count_string)
if len(manager.outlets) > 1:
for switch in manager.outlets:
switch._energy_update_interval = ENERGY_UPDATE_INT
switches.append(VeSyncSwitchHA(switch))
_LOGGER.info("Added a VeSync switch named '%s'",
switch.device_name)
else:
switches.append(VeSyncSwitchHA(manager.outlets))
else:
_LOGGER.info("No VeSync switches found")
add_entities(switches) | [
"def",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"if",
"discovery_info",
"is",
"None",
":",
"return",
"switches",
"=",
"[",
"]",
"manager",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]... | Set up the VeSync switch platform. | [
"Set",
"up",
"the",
"VeSync",
"switch",
"platform",
"."
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/home_assistant/custom_components/switch.py#L12-L41 | train | 47,207 |
markperdue/pyvesync | home_assistant/custom_components/switch.py | VeSyncSwitchHA.device_state_attributes | def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
attr['active_time'] = self.smartplug.active_time
attr['voltage'] = self.smartplug.voltage
attr['active_time'] = self.smartplug.active_time
attr['weekly_energy_total'] = self.smartplug.weekly_energy_total
attr['monthly_energy_total'] = self.smartplug.monthly_energy_total
attr['yearly_energy_total'] = self.smartplug.yearly_energy_total
return attr | python | def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
attr['active_time'] = self.smartplug.active_time
attr['voltage'] = self.smartplug.voltage
attr['active_time'] = self.smartplug.active_time
attr['weekly_energy_total'] = self.smartplug.weekly_energy_total
attr['monthly_energy_total'] = self.smartplug.monthly_energy_total
attr['yearly_energy_total'] = self.smartplug.yearly_energy_total
return attr | [
"def",
"device_state_attributes",
"(",
"self",
")",
":",
"attr",
"=",
"{",
"}",
"attr",
"[",
"'active_time'",
"]",
"=",
"self",
".",
"smartplug",
".",
"active_time",
"attr",
"[",
"'voltage'",
"]",
"=",
"self",
".",
"smartplug",
".",
"voltage",
"attr",
"[... | Return the state attributes of the device. | [
"Return",
"the",
"state",
"attributes",
"of",
"the",
"device",
"."
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/home_assistant/custom_components/switch.py#L62-L71 | train | 47,208 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.get_variants | def get_variants(self, chromosome=None, start=None, end=None):
"""Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if start:
query['start'] = {'$lte': end}
query['end'] = {'$gte': start}
LOG.info("Find all variants {}".format(query))
return self.db.variant.find(query).sort([('start', ASCENDING)]) | python | def get_variants(self, chromosome=None, start=None, end=None):
"""Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if start:
query['start'] = {'$lte': end}
query['end'] = {'$gte': start}
LOG.info("Find all variants {}".format(query))
return self.db.variant.find(query).sort([('start', ASCENDING)]) | [
"def",
"get_variants",
"(",
"self",
",",
"chromosome",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"chromosome",
":",
"query",
"[",
"'chrom'",
"]",
"=",
"chromosome",
"if",
"start",
":",
... | Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant)) | [
"Return",
"all",
"variants",
"in",
"the",
"database",
"If",
"no",
"region",
"is",
"specified",
"all",
"variants",
"will",
"be",
"returned",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L137-L157 | train | 47,209 |
moonso/loqusdb | loqusdb/plugins/mongo/variant.py | VariantMixin.delete_variant | def delete_variant(self, variant):
"""Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
"""
mongo_variant = self.get_variant(variant)
if mongo_variant:
if mongo_variant['observations'] == 1:
LOG.debug("Removing variant {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.delete_one({'_id': variant['_id']})
else:
LOG.debug("Decreasing observations for {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.update_one({
'_id': mongo_variant['_id']
},{
'$inc': {
'observations': -1,
'homozygote': - (variant.get('homozygote', 0)),
'hemizygote': - (variant.get('hemizygote', 0)),
},
'$pull': {
'families': variant.get('case_id')
}
}, upsert=False)
return | python | def delete_variant(self, variant):
"""Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
"""
mongo_variant = self.get_variant(variant)
if mongo_variant:
if mongo_variant['observations'] == 1:
LOG.debug("Removing variant {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.delete_one({'_id': variant['_id']})
else:
LOG.debug("Decreasing observations for {0}".format(
mongo_variant.get('_id')
))
message = self.db.variant.update_one({
'_id': mongo_variant['_id']
},{
'$inc': {
'observations': -1,
'homozygote': - (variant.get('homozygote', 0)),
'hemizygote': - (variant.get('hemizygote', 0)),
},
'$pull': {
'families': variant.get('case_id')
}
}, upsert=False)
return | [
"def",
"delete_variant",
"(",
"self",
",",
"variant",
")",
":",
"mongo_variant",
"=",
"self",
".",
"get_variant",
"(",
"variant",
")",
"if",
"mongo_variant",
":",
"if",
"mongo_variant",
"[",
"'observations'",
"]",
"==",
"1",
":",
"LOG",
".",
"debug",
"(",
... | Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary | [
"Delete",
"observation",
"in",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/variant.py#L159-L196 | train | 47,210 |
yjzhang/uncurl_python | uncurl/sampling.py | downsample | def downsample(data, percent):
"""
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
"""
n_genes = data.shape[0]
n_cells = data.shape[1]
new_data = data.copy()
total_count = float(data.sum())
to_remove = total_count*percent
# sum of read counts per cell
cell_sums = data.sum(0).astype(float)
# probability of selecting genes per cell
cell_gene_probs = data/cell_sums
# probability of selecting cells
cell_probs = np.array(cell_sums/total_count).flatten()
cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
for i, num_selected in enumerate(cells_selected):
cell_gene = np.array(cell_gene_probs[:,i]).flatten()
genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
if sparse.issparse(data):
genes_selected = sparse.csc_matrix(genes_selected).T
new_data[:,i] -= genes_selected
new_data[new_data < 0] = 0
return new_data | python | def downsample(data, percent):
"""
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
"""
n_genes = data.shape[0]
n_cells = data.shape[1]
new_data = data.copy()
total_count = float(data.sum())
to_remove = total_count*percent
# sum of read counts per cell
cell_sums = data.sum(0).astype(float)
# probability of selecting genes per cell
cell_gene_probs = data/cell_sums
# probability of selecting cells
cell_probs = np.array(cell_sums/total_count).flatten()
cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
for i, num_selected in enumerate(cells_selected):
cell_gene = np.array(cell_gene_probs[:,i]).flatten()
genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
if sparse.issparse(data):
genes_selected = sparse.csc_matrix(genes_selected).T
new_data[:,i] -= genes_selected
new_data[new_data < 0] = 0
return new_data | [
"def",
"downsample",
"(",
"data",
",",
"percent",
")",
":",
"n_genes",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"n_cells",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"new_data",
"=",
"data",
".",
"copy",
"(",
")",
"total_count",
"=",
"float",
"(",
... | downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1 | [
"downsample",
"the",
"data",
"by",
"removing",
"a",
"given",
"percentage",
"of",
"the",
"reads",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/sampling.py#L7-L34 | train | 47,211 |
yjzhang/uncurl_python | uncurl/nb_state_estimation.py | nb_estimate_state | def nb_estimate_state(data, clusters, R=None, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration
"""
# TODO: deal with non-NB data... just ignore it? or do something else?
data_subset = data.copy()
genes, cells = data_subset.shape
# 1. use nb_fit to get inital Rs
if R is None:
nb_indices = find_nb_genes(data)
data_subset = data[nb_indices, :]
if init_means is not None and len(init_means) > sum(nb_indices):
init_means = init_means[nb_indices, :]
genes, cells = data_subset.shape
R = np.zeros(genes)
P, R = nb_fit(data_subset)
if init_means is None:
means, assignments = kmeans_pp(data_subset, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
ll = np.inf
# repeat steps 1 and 2 until convergence:
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective, w_deriv = _create_w_objective(means, data_subset, R)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=w_deriv, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective, m_deriv = _create_m_objective(w_new, data_subset, R)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=m_deriv, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
ll = m_res.fun
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, R, ll | python | def nb_estimate_state(data, clusters, R=None, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration
"""
# TODO: deal with non-NB data... just ignore it? or do something else?
data_subset = data.copy()
genes, cells = data_subset.shape
# 1. use nb_fit to get inital Rs
if R is None:
nb_indices = find_nb_genes(data)
data_subset = data[nb_indices, :]
if init_means is not None and len(init_means) > sum(nb_indices):
init_means = init_means[nb_indices, :]
genes, cells = data_subset.shape
R = np.zeros(genes)
P, R = nb_fit(data_subset)
if init_means is None:
means, assignments = kmeans_pp(data_subset, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
ll = np.inf
# repeat steps 1 and 2 until convergence:
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective, w_deriv = _create_w_objective(means, data_subset, R)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=w_deriv, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective, m_deriv = _create_m_objective(w_new, data_subset, R)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=m_deriv, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
ll = m_res.fun
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, R, ll | [
"def",
"nb_estimate_state",
"(",
"data",
",",
"clusters",
",",
"R",
"=",
"None",
",",
"init_means",
"=",
"None",
",",
"init_weights",
"=",
"None",
",",
"max_iters",
"=",
"10",
",",
"tol",
"=",
"1e-4",
",",
"disp",
"=",
"True",
",",
"inner_max_iters",
"... | Uses a Negative Binomial Mixture model to estimate cell states and
cell state mixing weights.
If some of the genes do not fit a negative binomial distribution
(mean > var), then the genes are discarded from the analysis.
Args:
data (array): genes x cells
clusters (int): number of mixture components
R (array, optional): vector of length genes containing the dispersion estimates for each gene. Default: use nb_fit
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M (array): genes x clusters - state centers
W (array): clusters x cells - state mixing components for each cell
R (array): 1 x genes - NB dispersion parameter for each gene
ll (float): Log-likelihood of final iteration | [
"Uses",
"a",
"Negative",
"Binomial",
"Mixture",
"model",
"to",
"estimate",
"cell",
"states",
"and",
"cell",
"state",
"mixing",
"weights",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_state_estimation.py#L71-L147 | train | 47,212 |
moonso/loqusdb | scripts/load_files.py | cli | def cli(ctx, directory, uri, verbose, count):
"""Load all files in a directory."""
# configure root logger to print to STDERR
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
p = Path(directory)
if not p.is_dir():
LOG.warning("{0} is not a valid directory".format(directory))
ctx.abort()
start_time = datetime.now()
# Make sure that the database is indexed
index_call = ['loqusdb', 'index']
base_call = ['loqusdb']
if uri:
base_call.append('--uri')
base_call.append(uri)
index_call.append('--uri')
index_call.append(uri)
subprocess.run(index_call)
base_call.append('load')
nr_files = 0
for nr_files,file_name in enumerate(list(p.glob('*.vcf')),1):
call = deepcopy(base_call)
case_id = file_name.stem.split('.')[0]
call.append('--sv-variants')
call.append(str(file_name))
call.append('--case-id')
call.append(case_id)
if count:
continue
try:
subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
LOG.warning("Failed to load file %s", filename)
LOG.info("Continue with files...")
if nr_files % 100:
LOG.info("%s files loaded", nr_files)
LOG.info("%s files inserted", nr_files)
LOG.info("Time to insert files: {}".format(datetime.now()-start_time)) | python | def cli(ctx, directory, uri, verbose, count):
"""Load all files in a directory."""
# configure root logger to print to STDERR
loglevel = "INFO"
if verbose:
loglevel = "DEBUG"
coloredlogs.install(level=loglevel)
p = Path(directory)
if not p.is_dir():
LOG.warning("{0} is not a valid directory".format(directory))
ctx.abort()
start_time = datetime.now()
# Make sure that the database is indexed
index_call = ['loqusdb', 'index']
base_call = ['loqusdb']
if uri:
base_call.append('--uri')
base_call.append(uri)
index_call.append('--uri')
index_call.append(uri)
subprocess.run(index_call)
base_call.append('load')
nr_files = 0
for nr_files,file_name in enumerate(list(p.glob('*.vcf')),1):
call = deepcopy(base_call)
case_id = file_name.stem.split('.')[0]
call.append('--sv-variants')
call.append(str(file_name))
call.append('--case-id')
call.append(case_id)
if count:
continue
try:
subprocess.run(call, check=True)
except subprocess.CalledProcessError as err:
LOG.warning(err)
LOG.warning("Failed to load file %s", filename)
LOG.info("Continue with files...")
if nr_files % 100:
LOG.info("%s files loaded", nr_files)
LOG.info("%s files inserted", nr_files)
LOG.info("Time to insert files: {}".format(datetime.now()-start_time)) | [
"def",
"cli",
"(",
"ctx",
",",
"directory",
",",
"uri",
",",
"verbose",
",",
"count",
")",
":",
"# configure root logger to print to STDERR",
"loglevel",
"=",
"\"INFO\"",
"if",
"verbose",
":",
"loglevel",
"=",
"\"DEBUG\"",
"coloredlogs",
".",
"install",
"(",
"... | Load all files in a directory. | [
"Load",
"all",
"files",
"in",
"a",
"directory",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/scripts/load_files.py#L24-L72 | train | 47,213 |
yjzhang/uncurl_python | uncurl/nmf_wrapper.py | nmf_init | def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_m.T)
init_w = init_w_.T
return init_m, init_w | python | def nmf_init(data, clusters, k, init='enhanced'):
"""
Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant.
"""
init_m = np.zeros((data.shape[0], k))
if sparse.issparse(data):
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].toarray().flatten()
else:
init_m[:,i] = np.array(data[:,clusters==i].mean(1)).flatten()
else:
for i in range(k):
if data[:,clusters==i].shape[1]==0:
point = np.random.randint(0, data.shape[1])
init_m[:,i] = data[:,point].flatten()
else:
init_m[:,i] = data[:,clusters==i].mean(1)
init_w = np.zeros((k, data.shape[1]))
if init == 'enhanced':
distances = np.zeros((k, data.shape[1]))
for i in range(k):
for j in range(data.shape[1]):
distances[i,j] = np.sqrt(((data[:,j] - init_m[:,i])**2).sum())
for i in range(k):
for j in range(data.shape[1]):
init_w[i,j] = 1/((distances[:,j]/distances[i,j])**(-2)).sum()
elif init == 'basic':
init_w = initialize_from_assignments(clusters, k)
elif init == 'nmf':
init_w_, _, n_iter = non_negative_factorization(data.T, n_components=k, init='custom', update_W=False, W=init_m.T)
init_w = init_w_.T
return init_m, init_w | [
"def",
"nmf_init",
"(",
"data",
",",
"clusters",
",",
"k",
",",
"init",
"=",
"'enhanced'",
")",
":",
"init_m",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"k",
")",
")",
"if",
"sparse",
".",
"issparse",
"(",
"da... | Generates initial M and W given a data set and an array of cluster labels.
There are 3 options for init:
enhanced - uses EIn-NMF from Gong 2013
basic - uses means for M, assigns W such that the chosen cluster for a given cell has value 0.75 and all others have 0.25/(k-1).
nmf - uses means for M, and assigns W using the NMF objective while holding M constant. | [
"Generates",
"initial",
"M",
"and",
"W",
"given",
"a",
"data",
"set",
"and",
"an",
"array",
"of",
"cluster",
"labels",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nmf_wrapper.py#L10-L48 | train | 47,214 |
moonso/loqusdb | loqusdb/build_models/variant.py | get_variant_id | def get_variant_id(variant):
"""Get a variant id on the format chrom_pos_ref_alt"""
variant_id = '_'.join([
str(variant.CHROM),
str(variant.POS),
str(variant.REF),
str(variant.ALT[0])
]
)
return variant_id | python | def get_variant_id(variant):
"""Get a variant id on the format chrom_pos_ref_alt"""
variant_id = '_'.join([
str(variant.CHROM),
str(variant.POS),
str(variant.REF),
str(variant.ALT[0])
]
)
return variant_id | [
"def",
"get_variant_id",
"(",
"variant",
")",
":",
"variant_id",
"=",
"'_'",
".",
"join",
"(",
"[",
"str",
"(",
"variant",
".",
"CHROM",
")",
",",
"str",
"(",
"variant",
".",
"POS",
")",
",",
"str",
"(",
"variant",
".",
"REF",
")",
",",
"str",
"(... | Get a variant id on the format chrom_pos_ref_alt | [
"Get",
"a",
"variant",
"id",
"on",
"the",
"format",
"chrom_pos_ref_alt"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/variant.py#L35-L44 | train | 47,215 |
moonso/loqusdb | loqusdb/commands/migrate.py | migrate | def migrate(ctx,):
"""Migrate an old loqusdb instance to 1.0
"""
adapter = ctx.obj['adapter']
start_time = datetime.now()
nr_updated = migrate_database(adapter)
LOG.info("All variants updated, time to complete migration: {}".format(
datetime.now() - start_time))
LOG.info("Nr variants that where updated: %s", nr_updated) | python | def migrate(ctx,):
"""Migrate an old loqusdb instance to 1.0
"""
adapter = ctx.obj['adapter']
start_time = datetime.now()
nr_updated = migrate_database(adapter)
LOG.info("All variants updated, time to complete migration: {}".format(
datetime.now() - start_time))
LOG.info("Nr variants that where updated: %s", nr_updated) | [
"def",
"migrate",
"(",
"ctx",
",",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"start_time",
"=",
"datetime",
".",
"now",
"(",
")",
"nr_updated",
"=",
"migrate_database",
"(",
"adapter",
")",
"LOG",
".",
"info",
"(",
"\"All vari... | Migrate an old loqusdb instance to 1.0 | [
"Migrate",
"an",
"old",
"loqusdb",
"instance",
"to",
"1",
".",
"0"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/migrate.py#L14-L25 | train | 47,216 |
moonso/loqusdb | loqusdb/commands/export.py | export | def export(ctx, outfile, variant_type):
"""Export the variants of a loqus db
The variants are exported to a vcf file
"""
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Export the variants from {0}".format(adapter))
nr_cases = 0
is_sv = variant_type == 'sv'
existing_chromosomes = set(adapter.get_chromosomes(sv=is_sv))
ordered_chromosomes = []
for chrom in CHROMOSOME_ORDER:
if chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
existing_chromosomes.remove(chrom)
for chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
nr_cases = adapter.cases().count()
LOG.info("Found {0} cases in database".format(nr_cases))
head = HeaderParser()
head.add_fileformat("VCFv4.3")
head.add_meta_line("NrCases", nr_cases)
head.add_info("Obs", '1', 'Integer', "The number of observations for the variant")
head.add_info("Hom", '1', 'Integer', "The number of observed homozygotes")
head.add_info("Hem", '1', 'Integer', "The number of observed hemizygotes")
head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
if variant_type == 'sv':
head.add_info("END", '1', 'Integer', "End position of the variant")
head.add_info("SVTYPE", '1', 'String', "Type of structural variant")
head.add_info("SVLEN", '1', 'Integer', "Length of structural variant")
for chrom in ordered_chromosomes:
length = adapter.get_max_position(chrom)
head.add_contig(contig_id=chrom, length=str(length))
print_headers(head, outfile=outfile)
for chrom in ordered_chromosomes:
if variant_type == 'snv':
LOG.info("Collecting all SNV variants")
variants = adapter.get_variants(chromosome=chrom)
else:
LOG.info("Collecting all SV variants")
variants = adapter.get_sv_variants(chromosome=chrom)
LOG.info("{} variants found".format(variants.count()))
for variant in variants:
variant_line = format_variant(variant, variant_type=variant_type)
# chrom = variant['chrom']
# pos = variant['start']
# ref = variant['ref']
# alt = variant['alt']
# observations = variant['observations']
# homozygotes = variant['homozygote']
# hemizygotes = variant['hemizygote']
# info = "Obs={0}".format(observations)
# if homozygotes:
# info += ";Hom={0}".format(homozygotes)
# if hemizygotes:
# info += ";Hem={0}".format(hemizygotes)
# variant_line = "{0}\t{1}\t.\t{2}\t{3}\t.\t.\t{4}\n".format(
# chrom, pos, ref, alt, info)
print_variant(variant_line=variant_line, outfile=outfile) | python | def export(ctx, outfile, variant_type):
"""Export the variants of a loqus db
The variants are exported to a vcf file
"""
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Export the variants from {0}".format(adapter))
nr_cases = 0
is_sv = variant_type == 'sv'
existing_chromosomes = set(adapter.get_chromosomes(sv=is_sv))
ordered_chromosomes = []
for chrom in CHROMOSOME_ORDER:
if chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
existing_chromosomes.remove(chrom)
for chrom in existing_chromosomes:
ordered_chromosomes.append(chrom)
nr_cases = adapter.cases().count()
LOG.info("Found {0} cases in database".format(nr_cases))
head = HeaderParser()
head.add_fileformat("VCFv4.3")
head.add_meta_line("NrCases", nr_cases)
head.add_info("Obs", '1', 'Integer', "The number of observations for the variant")
head.add_info("Hom", '1', 'Integer', "The number of observed homozygotes")
head.add_info("Hem", '1', 'Integer', "The number of observed hemizygotes")
head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
if variant_type == 'sv':
head.add_info("END", '1', 'Integer', "End position of the variant")
head.add_info("SVTYPE", '1', 'String', "Type of structural variant")
head.add_info("SVLEN", '1', 'Integer', "Length of structural variant")
for chrom in ordered_chromosomes:
length = adapter.get_max_position(chrom)
head.add_contig(contig_id=chrom, length=str(length))
print_headers(head, outfile=outfile)
for chrom in ordered_chromosomes:
if variant_type == 'snv':
LOG.info("Collecting all SNV variants")
variants = adapter.get_variants(chromosome=chrom)
else:
LOG.info("Collecting all SV variants")
variants = adapter.get_sv_variants(chromosome=chrom)
LOG.info("{} variants found".format(variants.count()))
for variant in variants:
variant_line = format_variant(variant, variant_type=variant_type)
# chrom = variant['chrom']
# pos = variant['start']
# ref = variant['ref']
# alt = variant['alt']
# observations = variant['observations']
# homozygotes = variant['homozygote']
# hemizygotes = variant['hemizygote']
# info = "Obs={0}".format(observations)
# if homozygotes:
# info += ";Hom={0}".format(homozygotes)
# if hemizygotes:
# info += ";Hem={0}".format(hemizygotes)
# variant_line = "{0}\t{1}\t.\t{2}\t{3}\t.\t.\t{4}\n".format(
# chrom, pos, ref, alt, info)
print_variant(variant_line=variant_line, outfile=outfile) | [
"def",
"export",
"(",
"ctx",
",",
"outfile",
",",
"variant_type",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"version",
"=",
"ctx",
".",
"obj",
"[",
"'version'",
"]",
"LOG",
".",
"info",
"(",
"\"Export the variants from {0}\"",
"... | Export the variants of a loqus db
The variants are exported to a vcf file | [
"Export",
"the",
"variants",
"of",
"a",
"loqus",
"db",
"The",
"variants",
"are",
"exported",
"to",
"a",
"vcf",
"file"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/export.py#L28-L97 | train | 47,217 |
moonso/loqusdb | loqusdb/utils/load.py | load_database | def load_database(adapter, variant_file=None, sv_file=None, family_file=None,
family_type='ped', skip_case_id=False, gq_treshold=None,
case_id=None, max_window = 3000, profile_file=None,
hard_threshold=0.95, soft_threshold=0.9):
"""Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int)
"""
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
vcf_files.append(variant_file)
# Get the indivuduals that are present in vcf file
vcf_individuals = vcf_info['individuals']
nr_sv_variants = None
sv_individuals = None
if sv_file:
vcf_info = check_vcf(sv_file, 'sv')
nr_sv_variants = vcf_info['nr_variants']
vcf_files.append(sv_file)
sv_individuals = vcf_info['individuals']
profiles = None
matches = None
if profile_file:
profiles = get_profiles(adapter, profile_file)
###Check if any profile already exists
matches = profile_match(adapter,
profiles,
hard_threshold=hard_threshold,
soft_threshold=soft_threshold)
# If a gq treshold is used the variants needs to have GQ
for _vcf_file in vcf_files:
# Get a cyvcf2.VCF object
vcf = get_vcf(_vcf_file)
if gq_treshold:
if not vcf.contains('GQ'):
LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
raise SyntaxError('GQ is not defined in vcf header')
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
LOG.info("Loading family from %s", family_file)
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
# Convert infromation to a loqusdb Case object
case_obj = build_case(
case=family,
case_id=case_id,
vcf_path=variant_file,
vcf_individuals=vcf_individuals,
nr_variants=nr_variants,
vcf_sv_path=sv_file,
sv_individuals=sv_individuals,
nr_sv_variants=nr_sv_variants,
profiles=profiles,
matches=matches,
profile_path=profile_file
)
# Build and load a new case, or update an existing one
load_case(
adapter=adapter,
case_obj=case_obj,
)
nr_inserted = 0
# If case was succesfully added we can store the variants
for file_type in ['vcf_path','vcf_sv_path']:
variant_type = 'snv'
if file_type == 'vcf_sv_path':
variant_type = 'sv'
if case_obj.get(file_type) is None:
continue
vcf_obj = get_vcf(case_obj[file_type])
try:
nr_inserted += load_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
skip_case_id=skip_case_id,
gq_treshold=gq_treshold,
max_window=max_window,
variant_type=variant_type,
)
except Exception as err:
# If something went wrong do a rollback
LOG.warning(err)
delete(
adapter=adapter,
case_obj=case_obj,
)
raise err
return nr_inserted | python | def load_database(adapter, variant_file=None, sv_file=None, family_file=None,
family_type='ped', skip_case_id=False, gq_treshold=None,
case_id=None, max_window = 3000, profile_file=None,
hard_threshold=0.95, soft_threshold=0.9):
"""Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int)
"""
vcf_files = []
nr_variants = None
vcf_individuals = None
if variant_file:
vcf_info = check_vcf(variant_file)
nr_variants = vcf_info['nr_variants']
variant_type = vcf_info['variant_type']
vcf_files.append(variant_file)
# Get the indivuduals that are present in vcf file
vcf_individuals = vcf_info['individuals']
nr_sv_variants = None
sv_individuals = None
if sv_file:
vcf_info = check_vcf(sv_file, 'sv')
nr_sv_variants = vcf_info['nr_variants']
vcf_files.append(sv_file)
sv_individuals = vcf_info['individuals']
profiles = None
matches = None
if profile_file:
profiles = get_profiles(adapter, profile_file)
###Check if any profile already exists
matches = profile_match(adapter,
profiles,
hard_threshold=hard_threshold,
soft_threshold=soft_threshold)
# If a gq treshold is used the variants needs to have GQ
for _vcf_file in vcf_files:
# Get a cyvcf2.VCF object
vcf = get_vcf(_vcf_file)
if gq_treshold:
if not vcf.contains('GQ'):
LOG.warning('Set gq-treshold to 0 or add info to vcf {0}'.format(_vcf_file))
raise SyntaxError('GQ is not defined in vcf header')
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
LOG.info("Loading family from %s", family_file)
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
# Convert infromation to a loqusdb Case object
case_obj = build_case(
case=family,
case_id=case_id,
vcf_path=variant_file,
vcf_individuals=vcf_individuals,
nr_variants=nr_variants,
vcf_sv_path=sv_file,
sv_individuals=sv_individuals,
nr_sv_variants=nr_sv_variants,
profiles=profiles,
matches=matches,
profile_path=profile_file
)
# Build and load a new case, or update an existing one
load_case(
adapter=adapter,
case_obj=case_obj,
)
nr_inserted = 0
# If case was succesfully added we can store the variants
for file_type in ['vcf_path','vcf_sv_path']:
variant_type = 'snv'
if file_type == 'vcf_sv_path':
variant_type = 'sv'
if case_obj.get(file_type) is None:
continue
vcf_obj = get_vcf(case_obj[file_type])
try:
nr_inserted += load_variants(
adapter=adapter,
vcf_obj=vcf_obj,
case_obj=case_obj,
skip_case_id=skip_case_id,
gq_treshold=gq_treshold,
max_window=max_window,
variant_type=variant_type,
)
except Exception as err:
# If something went wrong do a rollback
LOG.warning(err)
delete(
adapter=adapter,
case_obj=case_obj,
)
raise err
return nr_inserted | [
"def",
"load_database",
"(",
"adapter",
",",
"variant_file",
"=",
"None",
",",
"sv_file",
"=",
"None",
",",
"family_file",
"=",
"None",
",",
"family_type",
"=",
"'ped'",
",",
"skip_case_id",
"=",
"False",
",",
"gq_treshold",
"=",
"None",
",",
"case_id",
"=... | Load the database with a case and its variants
Args:
adapter: Connection to database
variant_file(str): Path to variant file
sv_file(str): Path to sv variant file
family_file(str): Path to family file
family_type(str): Format of family file
skip_case_id(bool): If no case information should be added to variants
gq_treshold(int): If only quality variants should be considered
case_id(str): If different case id than the one in family file should be used
max_window(int): Specify the max size for sv windows
check_profile(bool): Does profile check if True
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
nr_inserted(int) | [
"Load",
"the",
"database",
"with",
"a",
"case",
"and",
"its",
"variants"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L26-L151 | train | 47,218 |
moonso/loqusdb | loqusdb/utils/load.py | load_case | def load_case(adapter, case_obj, update=False):
"""Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case)
"""
# Check if the case already exists in database.
existing_case = adapter.case(case_obj)
if existing_case:
if not update:
raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
case_obj = update_case(case_obj, existing_case)
# Add the case to database
try:
adapter.add_case(case_obj, update=update)
except CaseError as err:
raise err
return case_obj | python | def load_case(adapter, case_obj, update=False):
"""Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case)
"""
# Check if the case already exists in database.
existing_case = adapter.case(case_obj)
if existing_case:
if not update:
raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
case_obj = update_case(case_obj, existing_case)
# Add the case to database
try:
adapter.add_case(case_obj, update=update)
except CaseError as err:
raise err
return case_obj | [
"def",
"load_case",
"(",
"adapter",
",",
"case_obj",
",",
"update",
"=",
"False",
")",
":",
"# Check if the case already exists in database.",
"existing_case",
"=",
"adapter",
".",
"case",
"(",
"case_obj",
")",
"if",
"existing_case",
":",
"if",
"not",
"update",
... | Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case) | [
"Load",
"a",
"case",
"to",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L153-L177 | train | 47,219 |
moonso/loqusdb | loqusdb/utils/load.py | load_variants | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | python | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | [
"def",
"load_variants",
"(",
"adapter",
",",
"vcf_obj",
",",
"case_obj",
",",
"skip_case_id",
"=",
"False",
",",
"gq_treshold",
"=",
"None",
",",
"max_window",
"=",
"3000",
",",
"variant_type",
"=",
"'snv'",
")",
":",
"if",
"variant_type",
"==",
"'snv'",
"... | Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int) | [
"Load",
"variants",
"for",
"a",
"family",
"into",
"the",
"database",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L179-L222 | train | 47,220 |
yjzhang/uncurl_python | uncurl/preprocessing.py | max_variance_genes | def max_variance_genes(data, nbins=5, frac=0.2):
"""
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 10.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1
Returns:
list of gene indices (list of ints)
"""
# TODO: profile, make more efficient for large matrices
# 8000 cells: 0.325 seconds
# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar
# 73233 cells: 5.347 seconds, 4.762 s in sparse_var
# csc_tocsr: 1.736 s
# copy: 1.028 s
# astype: 0.999 s
# there is almost certainly something superlinear in this method
# maybe it's to_csr?
indices = []
if sparse.issparse(data):
means, var = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int(data.shape[0]/nbins)
frac_elements = int(n_elements*frac)
for i in range(nbins):
bin_i = mean_indices[i*n_elements : (i+1)*n_elements]
if i==nbins-1:
bin_i = mean_indices[i*n_elements :]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[len(bin_i) - frac_elements:]
ind = bin_i[top_var_indices]
# filter out genes with zero variance
ind = [index for index in ind if var[index]>0]
indices.extend(ind)
return indices | python | def max_variance_genes(data, nbins=5, frac=0.2):
"""
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
        nbins (int): number of bins to sort genes by mean expression level. Default: 5.
        frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.2
Returns:
list of gene indices (list of ints)
"""
# TODO: profile, make more efficient for large matrices
# 8000 cells: 0.325 seconds
# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar
# 73233 cells: 5.347 seconds, 4.762 s in sparse_var
# csc_tocsr: 1.736 s
# copy: 1.028 s
# astype: 0.999 s
# there is almost certainly something superlinear in this method
# maybe it's to_csr?
indices = []
if sparse.issparse(data):
means, var = sparse_mean_var(data)
else:
means = data.mean(1)
var = data.var(1)
mean_indices = means.argsort()
n_elements = int(data.shape[0]/nbins)
frac_elements = int(n_elements*frac)
for i in range(nbins):
bin_i = mean_indices[i*n_elements : (i+1)*n_elements]
if i==nbins-1:
bin_i = mean_indices[i*n_elements :]
var_i = var[bin_i]
var_sorted = var_i.argsort()
top_var_indices = var_sorted[len(bin_i) - frac_elements:]
ind = bin_i[top_var_indices]
# filter out genes with zero variance
ind = [index for index in ind if var[index]>0]
indices.extend(ind)
return indices | [
"def",
"max_variance_genes",
"(",
"data",
",",
"nbins",
"=",
"5",
",",
"frac",
"=",
"0.2",
")",
":",
"# TODO: profile, make more efficient for large matrices",
"# 8000 cells: 0.325 seconds",
"# top time: sparse.csc_tocsr, csc_matvec, astype, copy, mul_scalar",
"# 73233 cells: 5.347... | This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
        nbins (int): number of bins to sort genes by mean expression level. Default: 5.
        frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.2
Returns:
list of gene indices (list of ints) | [
"This",
"function",
"identifies",
"the",
"genes",
"that",
"have",
"the",
"max",
"variance",
"across",
"a",
"number",
"of",
"bins",
"sorted",
"by",
"mean",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L25-L67 | train | 47,221 |
yjzhang/uncurl_python | uncurl/preprocessing.py | cell_normalize | def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm | python | def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm | [
"def",
"cell_normalize",
"(",
"data",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"data",
")",
":",
"data",
"=",
"sparse",
".",
"csc_matrix",
"(",
"data",
".",
"astype",
"(",
"float",
")",
")",
"# normalize in-place",
"sparse_cell_normalize",
"(",
"dat... | Returns the data where the expression is normalized so that the total
count per cell is equal. | [
"Returns",
"the",
"data",
"where",
"the",
"expression",
"is",
"normalized",
"so",
"that",
"the",
"total",
"count",
"per",
"cell",
"is",
"equal",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L69-L91 | train | 47,222 |
moonso/loqusdb | loqusdb/build_models/case.py | get_individual_positions | def get_individual_positions(individuals):
"""Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position
"""
ind_pos = {}
if individuals:
for i, ind in enumerate(individuals):
ind_pos[ind] = i
return ind_pos | python | def get_individual_positions(individuals):
"""Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position
"""
ind_pos = {}
if individuals:
for i, ind in enumerate(individuals):
ind_pos[ind] = i
return ind_pos | [
"def",
"get_individual_positions",
"(",
"individuals",
")",
":",
"ind_pos",
"=",
"{",
"}",
"if",
"individuals",
":",
"for",
"i",
",",
"ind",
"in",
"enumerate",
"(",
"individuals",
")",
":",
"ind_pos",
"[",
"ind",
"]",
"=",
"i",
"return",
"ind_pos"
] | Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position | [
"Return",
"a",
"dictionary",
"with",
"individual",
"positions"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/case.py#L8-L21 | train | 47,223 |
moonso/loqusdb | loqusdb/build_models/case.py | build_case | def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None,
vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None,
matches=None, profile_path=None):
"""Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case)
"""
# Create a dict that maps the ind ids to the position they have in vcf
individual_positions = get_individual_positions(vcf_individuals)
sv_individual_positions = get_individual_positions(sv_individuals)
family_id = None
if case:
if not case.affected_individuals:
LOG.warning("No affected individuals could be found in ped file")
family_id = case.family_id
# If case id is given manually we use that one
case_id = case_id or family_id
if case_id is None:
raise CaseError
case_obj = Case(
case_id=case_id,
)
if vcf_path:
case_obj['vcf_path'] = vcf_path
case_obj['nr_variants'] = nr_variants
if vcf_sv_path:
case_obj['vcf_sv_path'] = vcf_sv_path
case_obj['nr_sv_variants'] = nr_sv_variants
if profile_path:
case_obj['profile_path'] = profile_path
ind_objs = []
if case:
if individual_positions:
_ind_pos = individual_positions
else:
_ind_pos = sv_individual_positions
for ind_id in case.individuals:
individual = case.individuals[ind_id]
try:
#If a profile dict exists, get the profile for ind_id
profile = profiles[ind_id] if profiles else None
#If matching samples are found, get these samples for ind_id
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id=ind_id,
case_id=case_id,
ind_index=_ind_pos[ind_id],
sex=individual.sex,
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
except KeyError:
raise CaseError("Ind %s in ped file does not exist in VCF", ind_id)
else:
        # If there were no family file we can create individuals from what we know
for ind_id in individual_positions:
profile = profiles[ind_id] if profiles else None
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id = ind_id,
case_id = case_id,
ind_index=individual_positions[ind_id],
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
# Add individuals to the correct variant type
for ind_obj in ind_objs:
if vcf_sv_path:
case_obj['sv_individuals'].append(dict(ind_obj))
case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
if vcf_path:
case_obj['individuals'].append(dict(ind_obj))
case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
return case_obj | python | def build_case(case, vcf_individuals=None, case_id=None, vcf_path=None, sv_individuals=None,
vcf_sv_path=None, nr_variants=None, nr_sv_variants=None, profiles=None,
matches=None, profile_path=None):
"""Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case)
"""
# Create a dict that maps the ind ids to the position they have in vcf
individual_positions = get_individual_positions(vcf_individuals)
sv_individual_positions = get_individual_positions(sv_individuals)
family_id = None
if case:
if not case.affected_individuals:
LOG.warning("No affected individuals could be found in ped file")
family_id = case.family_id
# If case id is given manually we use that one
case_id = case_id or family_id
if case_id is None:
raise CaseError
case_obj = Case(
case_id=case_id,
)
if vcf_path:
case_obj['vcf_path'] = vcf_path
case_obj['nr_variants'] = nr_variants
if vcf_sv_path:
case_obj['vcf_sv_path'] = vcf_sv_path
case_obj['nr_sv_variants'] = nr_sv_variants
if profile_path:
case_obj['profile_path'] = profile_path
ind_objs = []
if case:
if individual_positions:
_ind_pos = individual_positions
else:
_ind_pos = sv_individual_positions
for ind_id in case.individuals:
individual = case.individuals[ind_id]
try:
#If a profile dict exists, get the profile for ind_id
profile = profiles[ind_id] if profiles else None
#If matching samples are found, get these samples for ind_id
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id=ind_id,
case_id=case_id,
ind_index=_ind_pos[ind_id],
sex=individual.sex,
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
except KeyError:
raise CaseError("Ind %s in ped file does not exist in VCF", ind_id)
else:
        # If there were no family file we can create individuals from what we know
for ind_id in individual_positions:
profile = profiles[ind_id] if profiles else None
similar_samples = matches[ind_id] if matches else None
ind_obj = Individual(
ind_id = ind_id,
case_id = case_id,
ind_index=individual_positions[ind_id],
profile=profile,
similar_samples=similar_samples
)
ind_objs.append(dict(ind_obj))
# Add individuals to the correct variant type
for ind_obj in ind_objs:
if vcf_sv_path:
case_obj['sv_individuals'].append(dict(ind_obj))
case_obj['_sv_inds'][ind_obj['ind_id']] = dict(ind_obj)
if vcf_path:
case_obj['individuals'].append(dict(ind_obj))
case_obj['_inds'][ind_obj['ind_id']] = dict(ind_obj)
return case_obj | [
"def",
"build_case",
"(",
"case",
",",
"vcf_individuals",
"=",
"None",
",",
"case_id",
"=",
"None",
",",
"vcf_path",
"=",
"None",
",",
"sv_individuals",
"=",
"None",
",",
"vcf_sv_path",
"=",
"None",
",",
"nr_variants",
"=",
"None",
",",
"nr_sv_variants",
"... | Build a Case from the given information
Args:
case(ped_parser.Family): A family object
vcf_individuals(list): Show the order of inds in vcf file
case_id(str): If another name than the one in family file should be used
vcf_path(str)
sv_individuals(list): Show the order of inds in vcf file
vcf_sv_path(str)
nr_variants(int)
nr_sv_variants(int)
profiles(dict): The profiles for each sample in vcf
matches(dict(list)): list of similar samples for each sample in vcf.
Returns:
case_obj(models.Case) | [
"Build",
"a",
"Case",
"from",
"the",
"given",
"information"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/build_models/case.py#L23-L122 | train | 47,224 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_poisson_data | def generate_poisson_data(centers, n_cells, cluster_probs=None):
"""
Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = centers.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.random.poisson(centers[:,c])
return output, np.array(labels) | python | def generate_poisson_data(centers, n_cells, cluster_probs=None):
"""
Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = centers.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.random.poisson(centers[:,c])
return output, np.array(labels) | [
"def",
"generate_poisson_data",
"(",
"centers",
",",
"n_cells",
",",
"cluster_probs",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"centers",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"... | Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels | [
"Generates",
"poisson",
"-",
"distributed",
"data",
"given",
"a",
"set",
"of",
"means",
"for",
"each",
"cluster",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L5-L28 | train | 47,225 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_zip_data | def generate_zip_data(M, L, n_cells, cluster_probs=None):
"""
Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = M.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
zip_p = np.random.random((genes, n_cells))
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))
return output, np.array(labels) | python | def generate_zip_data(M, L, n_cells, cluster_probs=None):
"""
Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
"""
genes, clusters = M.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
zip_p = np.random.random((genes, n_cells))
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))
return output, np.array(labels) | [
"def",
"generate_zip_data",
"(",
"M",
",",
"L",
",",
"n_cells",
",",
"cluster_probs",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"M",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"cl... | Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels | [
"Generates",
"zero",
"-",
"inflated",
"poisson",
"-",
"distributed",
"data",
"given",
"a",
"set",
"of",
"means",
"and",
"zero",
"probs",
"for",
"each",
"cluster",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L30-L55 | train | 47,226 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_state_data | def generate_state_data(means, weights):
"""
Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
return sample.astype(float) | python | def generate_state_data(means, weights):
"""
Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
return sample.astype(float) | [
"def",
"generate_state_data",
"(",
"means",
",",
"weights",
")",
":",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"sample",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"x_true",
")",
"return",
"sample",
".",
"astype",
"(",
... | Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Poisson",
"Convex",
"Mixture",
"Model",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L58-L71 | train | 47,227 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_zip_state_data | def generate_zip_state_data(means, weights, z):
"""
Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
random = np.random.random(x_true.shape)
x_true[random < z] = 0
return sample.astype(float) | python | def generate_zip_state_data(means, weights, z):
"""
Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells
"""
x_true = np.dot(means, weights)
sample = np.random.poisson(x_true)
random = np.random.random(x_true.shape)
x_true[random < z] = 0
return sample.astype(float) | [
"def",
"generate_zip_state_data",
"(",
"means",
",",
"weights",
",",
"z",
")",
":",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"sample",
"=",
"np",
".",
"random",
".",
"poisson",
"(",
"x_true",
")",
"random",
"=",
"np",
".",
... | Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Zero",
"-",
"inflated",
"Poisson",
"Convex",
"Mixture",
"Model",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L73-L89 | train | 47,228 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_nb_state_data | def generate_nb_state_data(means, weights, R):
"""
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
"""
cells = weights.shape[1]
# x_true = true means
x_true = np.dot(means, weights)
# convert means into P
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float) | python | def generate_nb_state_data(means, weights, R):
"""
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
"""
cells = weights.shape[1]
# x_true = true means
x_true = np.dot(means, weights)
# convert means into P
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float) | [
"def",
"generate_nb_state_data",
"(",
"means",
",",
"weights",
",",
"R",
")",
":",
"cells",
"=",
"weights",
".",
"shape",
"[",
"1",
"]",
"# x_true = true means",
"x_true",
"=",
"np",
".",
"dot",
"(",
"means",
",",
"weights",
")",
"# convert means into P",
... | Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells | [
"Generates",
"data",
"according",
"to",
"the",
"Negative",
"Binomial",
"Convex",
"Mixture",
"Model",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L91-L110 | train | 47,229 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_poisson_lineage | def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):
"""
Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells
"""
# means...
M = np.random.random((n_genes, n_states))*means
center = M.mean(1)
W = np.zeros((n_states, n_cells_per_cluster*n_states))
# TODO
# start at a center where all the clusters have equal probability, and for
# each cluster, interpolate linearly towards the cluster.
index = 0
means = np.array([1.0/n_states]*n_states)
for c in range(n_states):
for i in range(n_cells_per_cluster):
w = np.copy(means)
new_value = w[c] + i*(1.0 - 1.0/n_states)/n_cells_per_cluster
w[:] = (1.0 - new_value)/(n_states - 1.0)
w[c] = new_value
W[:, index] = w
index += 1
return M, W | python | def generate_poisson_lineage(n_states, n_cells_per_cluster, n_genes, means=300):
"""
Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells
"""
# means...
M = np.random.random((n_genes, n_states))*means
center = M.mean(1)
W = np.zeros((n_states, n_cells_per_cluster*n_states))
# TODO
# start at a center where all the clusters have equal probability, and for
# each cluster, interpolate linearly towards the cluster.
index = 0
means = np.array([1.0/n_states]*n_states)
for c in range(n_states):
for i in range(n_cells_per_cluster):
w = np.copy(means)
new_value = w[c] + i*(1.0 - 1.0/n_states)/n_cells_per_cluster
w[:] = (1.0 - new_value)/(n_states - 1.0)
w[c] = new_value
W[:, index] = w
index += 1
return M, W | [
"def",
"generate_poisson_lineage",
"(",
"n_states",
",",
"n_cells_per_cluster",
",",
"n_genes",
",",
"means",
"=",
"300",
")",
":",
"# means...",
"M",
"=",
"np",
".",
"random",
".",
"random",
"(",
"(",
"n_genes",
",",
"n_states",
")",
")",
"*",
"means",
... | Generates a lineage for each state- assumes that each state has a common
ancestor.
Returns:
M - genes x clusters
W - clusters x cells | [
"Generates",
"a",
"lineage",
"for",
"each",
"state",
"-",
"assumes",
"that",
"each",
"state",
"has",
"a",
"common",
"ancestor",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L154-L180 | train | 47,230 |
yjzhang/uncurl_python | uncurl/simulation.py | generate_nb_data | def generate_nb_data(P, R, n_cells, assignments=None):
"""
Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels
"""
genes, clusters = P.shape
output = np.zeros((genes, n_cells))
if assignments is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
if assignments is None:
c = np.random.choice(range(clusters), p=cluster_probs)
else:
c = assignments[i]
labels.append(c)
        # because of numpy's negative binomial parameterization, r is the number of successes
output[:,i] = np.random.negative_binomial(R[:,c], 1.0-P[:,c])
return output, np.array(labels) | python | def generate_nb_data(P, R, n_cells, assignments=None):
"""
Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels
"""
genes, clusters = P.shape
output = np.zeros((genes, n_cells))
if assignments is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
if assignments is None:
c = np.random.choice(range(clusters), p=cluster_probs)
else:
c = assignments[i]
labels.append(c)
        # because of numpy's negative binomial parameterization, r is the number of successes
output[:,i] = np.random.negative_binomial(R[:,c], 1.0-P[:,c])
return output, np.array(labels) | [
"def",
"generate_nb_data",
"(",
"P",
",",
"R",
",",
"n_cells",
",",
"assignments",
"=",
"None",
")",
":",
"genes",
",",
"clusters",
"=",
"P",
".",
"shape",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"genes",
",",
"n_cells",
")",
")",
"if",
"assig... | Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels | [
"Generates",
"negative",
"binomial",
"data"
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/simulation.py#L182-L210 | train | 47,231 |
yjzhang/uncurl_python | uncurl/vis.py | visualize_poisson_w | def visualize_poisson_w(w, labels, filename, method='pca', figsize=(18,10), title='', **scatter_options):
"""
Saves a scatter plot of a visualization of W, the result from Poisson SE.
"""
if method == 'pca':
pca = PCA(2)
r_dim_red = pca.fit_transform(w.T).T
elif method == 'tsne':
pass
else:
print("Method is not available. use 'pca' (default) or 'tsne'.")
return
visualize_dim_red(r_dim_red, labels, filename, figsize, title, **scatter_options) | python | def visualize_poisson_w(w, labels, filename, method='pca', figsize=(18,10), title='', **scatter_options):
"""
Saves a scatter plot of a visualization of W, the result from Poisson SE.
"""
if method == 'pca':
pca = PCA(2)
r_dim_red = pca.fit_transform(w.T).T
elif method == 'tsne':
pass
else:
print("Method is not available. use 'pca' (default) or 'tsne'.")
return
visualize_dim_red(r_dim_red, labels, filename, figsize, title, **scatter_options) | [
"def",
"visualize_poisson_w",
"(",
"w",
",",
"labels",
",",
"filename",
",",
"method",
"=",
"'pca'",
",",
"figsize",
"=",
"(",
"18",
",",
"10",
")",
",",
"title",
"=",
"''",
",",
"*",
"*",
"scatter_options",
")",
":",
"if",
"method",
"==",
"'pca'",
... | Saves a scatter plot of a visualization of W, the result from Poisson SE. | [
"Saves",
"a",
"scatter",
"plot",
"of",
"a",
"visualization",
"of",
"W",
"the",
"result",
"from",
"Poisson",
"SE",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/vis.py#L6-L18 | train | 47,232 |
yjzhang/uncurl_python | uncurl/experiment_runner.py | generate_visualizations | def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
"""
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
"""
plt.figure(figsize=figsize)
for method in methods:
preproc= method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
# TODO: cluster labels
print(name)
# if it's 2d, just display it... else, do tsne to reduce to 2d
if r.shape[0]==2:
r_dim_red = r
else:
# sometimes the data is too big to do tsne... (for sklearn)
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options) | python | def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
"""
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
"""
plt.figure(figsize=figsize)
for method in methods:
preproc= method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
# TODO: cluster labels
print(name)
# if it's 2d, just display it... else, do tsne to reduce to 2d
if r.shape[0]==2:
r_dim_red = r
else:
# sometimes the data is too big to do tsne... (for sklearn)
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options) | [
"def",
"generate_visualizations",
"(",
"methods",
",",
"data",
",",
"true_labels",
",",
"base_dir",
"=",
"'visualizations'",
",",
"figsize",
"=",
"(",
"18",
",",
"10",
")",
",",
"*",
"*",
"scatter_options",
")",
":",
"plt",
".",
"figure",
"(",
"figsize",
... | Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter | [
"Generates",
"visualization",
"scatters",
"for",
"all",
"the",
"methods",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/experiment_runner.py#L1058-L1128 | train | 47,233 |
markperdue/pyvesync | src/pyvesync/helpers.py | Helpers.resolve_updates | def resolve_updates(orig_list, updated_list):
"""Merges changes from one list of devices against another"""
if updated_list is not None and updated_list:
if orig_list is None:
orig_list = updated_list
else:
# Add new devices not in list but found in the update
for new_device in updated_list:
was_found = False
for device in orig_list:
if new_device.cid == device.cid:
was_found = True
break
if not was_found:
orig_list.append(new_device)
# Remove old devices in the list not found in the update
for device in orig_list:
should_remove = True
for new_device in updated_list:
if device.cid == new_device.cid:
should_remove = False
break
if should_remove:
orig_list.remove(device)
# Call update on each device in the list
[device.update() for device in orig_list]
return orig_list | python | def resolve_updates(orig_list, updated_list):
"""Merges changes from one list of devices against another"""
if updated_list is not None and updated_list:
if orig_list is None:
orig_list = updated_list
else:
# Add new devices not in list but found in the update
for new_device in updated_list:
was_found = False
for device in orig_list:
if new_device.cid == device.cid:
was_found = True
break
if not was_found:
orig_list.append(new_device)
# Remove old devices in the list not found in the update
for device in orig_list:
should_remove = True
for new_device in updated_list:
if device.cid == new_device.cid:
should_remove = False
break
if should_remove:
orig_list.remove(device)
# Call update on each device in the list
[device.update() for device in orig_list]
return orig_list | [
"def",
"resolve_updates",
"(",
"orig_list",
",",
"updated_list",
")",
":",
"if",
"updated_list",
"is",
"not",
"None",
"and",
"updated_list",
":",
"if",
"orig_list",
"is",
"None",
":",
"orig_list",
"=",
"updated_list",
"else",
":",
"# Add new devices not in list bu... | Merges changes from one list of devices against another | [
"Merges",
"changes",
"from",
"one",
"list",
"of",
"devices",
"against",
"another"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/helpers.py#L222-L256 | train | 47,234 |
moonso/loqusdb | loqusdb/utils/profiling.py | get_profiles | def get_profiles(adapter, vcf_file):
"""Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
"""
vcf = get_file_handle(vcf_file)
individuals = vcf.samples
profiles = {individual: [] for individual in individuals}
for profile_variant in adapter.profile_variants():
ref = profile_variant['ref']
alt = profile_variant['alt']
pos = profile_variant['pos']
end = pos + 1
chrom = profile_variant['chrom']
region = f"{chrom}:{pos}-{end}"
#Find variants in region
found_variant = False
for variant in vcf(region):
variant_id = get_variant_id(variant)
#If variant id i.e. chrom_pos_ref_alt matches
if variant_id == profile_variant['_id']:
found_variant = True
#find genotype for each individual in vcf
for i, individual in enumerate(individuals):
genotype = GENOTYPE_MAP[variant.gt_types[i]]
if genotype == 'hom_alt':
gt_str = f"{alt}{alt}"
elif genotype == 'het':
gt_str = f"{ref}{alt}"
else:
gt_str = f"{ref}{ref}"
#Append genotype to profile string of individual
profiles[individual].append(gt_str)
#Break loop if variant is found in region
break
#If no call was found for variant, give all samples a hom ref genotype
if not found_variant:
for individual in individuals: profiles[individual].append(f"{ref}{ref}")
return profiles | python | def get_profiles(adapter, vcf_file):
"""Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
"""
vcf = get_file_handle(vcf_file)
individuals = vcf.samples
profiles = {individual: [] for individual in individuals}
for profile_variant in adapter.profile_variants():
ref = profile_variant['ref']
alt = profile_variant['alt']
pos = profile_variant['pos']
end = pos + 1
chrom = profile_variant['chrom']
region = f"{chrom}:{pos}-{end}"
#Find variants in region
found_variant = False
for variant in vcf(region):
variant_id = get_variant_id(variant)
#If variant id i.e. chrom_pos_ref_alt matches
if variant_id == profile_variant['_id']:
found_variant = True
#find genotype for each individual in vcf
for i, individual in enumerate(individuals):
genotype = GENOTYPE_MAP[variant.gt_types[i]]
if genotype == 'hom_alt':
gt_str = f"{alt}{alt}"
elif genotype == 'het':
gt_str = f"{ref}{alt}"
else:
gt_str = f"{ref}{ref}"
#Append genotype to profile string of individual
profiles[individual].append(gt_str)
#Break loop if variant is found in region
break
#If no call was found for variant, give all samples a hom ref genotype
if not found_variant:
for individual in individuals: profiles[individual].append(f"{ref}{ref}")
return profiles | [
"def",
"get_profiles",
"(",
"adapter",
",",
"vcf_file",
")",
":",
"vcf",
"=",
"get_file_handle",
"(",
"vcf_file",
")",
"individuals",
"=",
"vcf",
".",
"samples",
"profiles",
"=",
"{",
"individual",
":",
"[",
"]",
"for",
"individual",
"in",
"individuals",
"... | Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf. | [
"Given",
"a",
"vcf",
"get",
"a",
"profile",
"string",
"for",
"each",
"sample",
"in",
"the",
"vcf",
"based",
"on",
"the",
"profile",
"variants",
"in",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L15-L76 | train | 47,235 |
moonso/loqusdb | loqusdb/utils/profiling.py | profile_match | def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
"""
given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf.
"""
matches = {sample: [] for sample in profiles.keys()}
for case in adapter.cases():
for individual in case['individuals']:
for sample in profiles.keys():
if individual.get('profile'):
similarity = compare_profiles(
profiles[sample], individual['profile']
)
if similarity >= hard_threshold:
msg = (
f"individual {sample} has a {similarity} similarity "
f"with individual {individual['ind_id']} in case "
f"{case['case_id']}"
)
LOG.critical(msg)
#Raise some exception
raise ProfileError
if similarity >= soft_threshold:
match = f"{case['case_id']}.{individual['ind_id']}"
matches[sample].append(match)
return matches | python | def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
"""
given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf.
"""
matches = {sample: [] for sample in profiles.keys()}
for case in adapter.cases():
for individual in case['individuals']:
for sample in profiles.keys():
if individual.get('profile'):
similarity = compare_profiles(
profiles[sample], individual['profile']
)
if similarity >= hard_threshold:
msg = (
f"individual {sample} has a {similarity} similarity "
f"with individual {individual['ind_id']} in case "
f"{case['case_id']}"
)
LOG.critical(msg)
#Raise some exception
raise ProfileError
if similarity >= soft_threshold:
match = f"{case['case_id']}.{individual['ind_id']}"
matches[sample].append(match)
return matches | [
"def",
"profile_match",
"(",
"adapter",
",",
"profiles",
",",
"hard_threshold",
"=",
"0.95",
",",
"soft_threshold",
"=",
"0.9",
")",
":",
"matches",
"=",
"{",
"sample",
":",
"[",
"]",
"for",
"sample",
"in",
"profiles",
".",
"keys",
"(",
")",
"}",
"for"... | given a dict of profiles, searches through all the samples in the DB
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf. | [
"given",
"a",
"dict",
"of",
"profiles",
"searches",
"through",
"all",
"the",
"samples",
"in",
"the",
"DB",
"for",
"a",
"match",
".",
"If",
"a",
"matching",
"sample",
"is",
"found",
"an",
"exception",
"is",
"raised",
"and",
"the",
"variants",
"will",
"not... | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L78-L124 | train | 47,236 |
moonso/loqusdb | loqusdb/utils/profiling.py | compare_profiles | def compare_profiles(profile1, profile2):
"""
Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1)
"""
length = len(profile1)
profile1 = np.array(list(profile1))
profile2 = np.array(list(profile2))
similarity_array = profile1 == profile2
matches = np.sum(similarity_array)
similarity_ratio = matches/length
return similarity_ratio | python | def compare_profiles(profile1, profile2):
"""
Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1)
"""
length = len(profile1)
profile1 = np.array(list(profile1))
profile2 = np.array(list(profile2))
similarity_array = profile1 == profile2
matches = np.sum(similarity_array)
similarity_ratio = matches/length
return similarity_ratio | [
"def",
"compare_profiles",
"(",
"profile1",
",",
"profile2",
")",
":",
"length",
"=",
"len",
"(",
"profile1",
")",
"profile1",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"profile1",
")",
")",
"profile2",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"pr... | Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similiarity (0-1) | [
"Given",
"two",
"profiles",
"determine",
"the",
"ratio",
"of",
"similarity",
"i",
".",
"e",
".",
"the",
"hamming",
"distance",
"between",
"the",
"strings",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L128-L151 | train | 47,237 |
moonso/loqusdb | loqusdb/utils/profiling.py | update_profiles | def update_profiles(adapter):
"""
For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb
"""
for case in adapter.cases():
#If the case has a vcf_path, get the profiles and update the
#case with new profiled individuals.
if case.get('profile_path'):
profiles = get_profiles(adapter, case['profile_path'])
profiled_individuals = deepcopy(case['individuals'])
for individual in profiled_individuals:
ind_id = individual['ind_id']
try:
profile = profiles[ind_id]
individual['profile'] = profile
except KeyError:
LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
updated_case = deepcopy(case)
updated_case['individuals'] = profiled_individuals
adapter.add_case(updated_case, update=True) | python | def update_profiles(adapter):
"""
For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb
"""
for case in adapter.cases():
#If the case has a vcf_path, get the profiles and update the
#case with new profiled individuals.
if case.get('profile_path'):
profiles = get_profiles(adapter, case['profile_path'])
profiled_individuals = deepcopy(case['individuals'])
for individual in profiled_individuals:
ind_id = individual['ind_id']
try:
profile = profiles[ind_id]
individual['profile'] = profile
except KeyError:
LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
updated_case = deepcopy(case)
updated_case['individuals'] = profiled_individuals
adapter.add_case(updated_case, update=True) | [
"def",
"update_profiles",
"(",
"adapter",
")",
":",
"for",
"case",
"in",
"adapter",
".",
"cases",
"(",
")",
":",
"#If the case has a vcf_path, get the profiles and update the",
"#case with new profiled individuals.",
"if",
"case",
".",
"get",
"(",
"'profile_path'",
")",... | For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb | [
"For",
"all",
"cases",
"having",
"vcf_path",
"update",
"the",
"profile",
"string",
"for",
"the",
"samples"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L154-L186 | train | 47,238 |
moonso/loqusdb | loqusdb/utils/profiling.py | profile_stats | def profile_stats(adapter, threshold = 0.9):
"""
Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values.
"""
profiles = []
samples = []
#Instatiate the distance dictionary with a count 0 for all the ranges
distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
for case in adapter.cases():
for individual in case['individuals']:
if individual.get('profile'):
#Make sample name <case_id>.<sample_id>
sample_id = f"{case['case_id']}.{individual['ind_id']}"
ind_profile = individual['profile']
#Numpy array to hold all the distances for this samples profile
distance_array = np.array([], dtype=np.float)
for sample, profile in zip(samples, profiles):
#Get distance and append to distance array
distance = compare_profiles(ind_profile, profile)
distance_array = np.append(distance_array, distance)
#Issue warning if above threshold
if distance >= threshold:
LOG.warning(f"{sample_id} is {distance} similar to {sample}")
#Check number of distances in each range and add to distance_dict
for key,range in HAMMING_RANGES.items():
#Calculate the number of hamming distances found within the
#range for current individual
distance_dict[key] += np.sum(
(distance_array >= range[0]) & (distance_array < range[1])
)
#Append profile and sample_id for this sample for the next
#iteration
profiles.append(ind_profile)
samples.append(sample_id)
return distance_dict | python | def profile_stats(adapter, threshold = 0.9):
"""
Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values.
"""
profiles = []
samples = []
#Instatiate the distance dictionary with a count 0 for all the ranges
distance_dict = {key: 0 for key in HAMMING_RANGES.keys()}
for case in adapter.cases():
for individual in case['individuals']:
if individual.get('profile'):
#Make sample name <case_id>.<sample_id>
sample_id = f"{case['case_id']}.{individual['ind_id']}"
ind_profile = individual['profile']
#Numpy array to hold all the distances for this samples profile
distance_array = np.array([], dtype=np.float)
for sample, profile in zip(samples, profiles):
#Get distance and append to distance array
distance = compare_profiles(ind_profile, profile)
distance_array = np.append(distance_array, distance)
#Issue warning if above threshold
if distance >= threshold:
LOG.warning(f"{sample_id} is {distance} similar to {sample}")
#Check number of distances in each range and add to distance_dict
for key,range in HAMMING_RANGES.items():
#Calculate the number of hamming distances found within the
#range for current individual
distance_dict[key] += np.sum(
(distance_array >= range[0]) & (distance_array < range[1])
)
#Append profile and sample_id for this sample for the next
#iteration
profiles.append(ind_profile)
samples.append(sample_id)
return distance_dict | [
"def",
"profile_stats",
"(",
"adapter",
",",
"threshold",
"=",
"0.9",
")",
":",
"profiles",
"=",
"[",
"]",
"samples",
"=",
"[",
"]",
"#Instatiate the distance dictionary with a count 0 for all the ranges",
"distance_dict",
"=",
"{",
"key",
":",
"0",
"for",
"key",
... | Compares the pairwise hamming distances for all the sample profiles in
the database. Returns a table of the number of distances within given
ranges.
Args:
adapter (MongoAdapter): Adapter to mongodb
threshold (float): If any distance is found above this threshold
a warning will be given, stating the two matching samples.
Returns:
distance_dict (dict): dictionary with ranges as keys, and the number
of distances that are within these ranges as values. | [
"Compares",
"the",
"pairwise",
"hamming",
"distances",
"for",
"all",
"the",
"sample",
"profiles",
"in",
"the",
"database",
".",
"Returns",
"a",
"table",
"of",
"the",
"number",
"of",
"distances",
"within",
"given",
"ranges",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/profiling.py#L189-L248 | train | 47,239 |
yjzhang/uncurl_python | uncurl/evaluation.py | purity | def purity(labels, true_labels):
"""
Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better.
"""
purity = 0.0
for i in set(labels):
indices = (labels==i)
true_clusters = true_labels[indices]
if len(true_clusters)==0:
continue
counts = Counter(true_clusters)
lab, count = counts.most_common()[0]
purity += count
return float(purity)/len(labels) | python | def purity(labels, true_labels):
"""
Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better.
"""
purity = 0.0
for i in set(labels):
indices = (labels==i)
true_clusters = true_labels[indices]
if len(true_clusters)==0:
continue
counts = Counter(true_clusters)
lab, count = counts.most_common()[0]
purity += count
return float(purity)/len(labels) | [
"def",
"purity",
"(",
"labels",
",",
"true_labels",
")",
":",
"purity",
"=",
"0.0",
"for",
"i",
"in",
"set",
"(",
"labels",
")",
":",
"indices",
"=",
"(",
"labels",
"==",
"i",
")",
"true_clusters",
"=",
"true_labels",
"[",
"indices",
"]",
"if",
"len"... | Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float bewteen 0 and 1. Closer to 1 is better. | [
"Calculates",
"the",
"purity",
"score",
"for",
"the",
"given",
"labels",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/evaluation.py#L6-L26 | train | 47,240 |
yjzhang/uncurl_python | uncurl/evaluation.py | mdl | def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
"""
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
"""
N, m = data.shape
cost = ll + (N*m + m*k)*(np.log(data.sum()/(N*k)))
return cost | python | def mdl(ll, k, data):
"""
Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better...
"""
"""
N - no. of genes
n - no. of cells
k - no. of cell types
R - sum(Dataset) i.e. total no. of reads
function TotCost = TotBits(N,m,p,R,C)
# C is the cost from the cost function
TotCost = C + (N*m + m*p)*(log(R/(N*p)));
"""
N, m = data.shape
cost = ll + (N*m + m*k)*(np.log(data.sum()/(N*k)))
return cost | [
"def",
"mdl",
"(",
"ll",
",",
"k",
",",
"data",
")",
":",
"\"\"\"\n N - no. of genes\n n - no. of cells \n k - no. of cell types\n R - sum(Dataset) i.e. total no. of reads\n\n function TotCost = TotBits(N,m,p,R,C)\n # C is the cost from the cost function\n TotCost = ... | Returns the minimum description length score of the model given its
log-likelihood and k, the number of cell types.
a lower cost is better... | [
"Returns",
"the",
"minimum",
"description",
"length",
"score",
"of",
"the",
"model",
"given",
"its",
"log",
"-",
"likelihood",
"and",
"k",
"the",
"number",
"of",
"cell",
"types",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/evaluation.py#L51-L71 | train | 47,241 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | find_nb_genes | def find_nb_genes(data):
"""
Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans.
"""
data_means = data.mean(1)
data_vars = data.var(1)
nb_indices = data_means < 0.9*data_vars
return nb_indices | python | def find_nb_genes(data):
"""
Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans.
"""
data_means = data.mean(1)
data_vars = data.var(1)
nb_indices = data_means < 0.9*data_vars
return nb_indices | [
"def",
"find_nb_genes",
"(",
"data",
")",
":",
"data_means",
"=",
"data",
".",
"mean",
"(",
"1",
")",
"data_vars",
"=",
"data",
".",
"var",
"(",
"1",
")",
"nb_indices",
"=",
"data_means",
"<",
"0.9",
"*",
"data_vars",
"return",
"nb_indices"
] | Finds the indices of all genes in the dataset that have
a mean < 0.9 variance. Returns an array of booleans. | [
"Finds",
"the",
"indices",
"of",
"all",
"genes",
"in",
"the",
"dataset",
"that",
"have",
"a",
"mean",
"<",
"0",
".",
"9",
"variance",
".",
"Returns",
"an",
"array",
"of",
"booleans",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L12-L20 | train | 47,242 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_ll | def nb_ll(data, P, R):
"""
Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods
"""
# TODO: include factorial...
#data = data + eps
genes, cells = data.shape
clusters = P.shape[1]
lls = np.zeros((cells, clusters))
for c in range(clusters):
P_c = P[:,c].reshape((genes, 1))
R_c = R[:,c].reshape((genes, 1))
# don't need constant factors...
ll = gammaln(R_c + data) - gammaln(R_c) #- gammaln(data + 1)
ll += data*np.log(P_c) + xlog1py(R_c, -P_c)
#new_ll = np.sum(nbinom.logpmf(data, R_c, P_c), 0)
lls[:,c] = ll.sum(0)
return lls | python | def nb_ll(data, P, R):
"""
Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods
"""
# TODO: include factorial...
#data = data + eps
genes, cells = data.shape
clusters = P.shape[1]
lls = np.zeros((cells, clusters))
for c in range(clusters):
P_c = P[:,c].reshape((genes, 1))
R_c = R[:,c].reshape((genes, 1))
# don't need constant factors...
ll = gammaln(R_c + data) - gammaln(R_c) #- gammaln(data + 1)
ll += data*np.log(P_c) + xlog1py(R_c, -P_c)
#new_ll = np.sum(nbinom.logpmf(data, R_c, P_c), 0)
lls[:,c] = ll.sum(0)
return lls | [
"def",
"nb_ll",
"(",
"data",
",",
"P",
",",
"R",
")",
":",
"# TODO: include factorial...",
"#data = data + eps",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"P",
".",
"shape",
"[",
"1",
"]",
"lls",
"=",
"np",
".",
"zeros",
"(",
... | Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods | [
"Returns",
"the",
"negative",
"binomial",
"log",
"-",
"likelihood",
"of",
"the",
"data",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L36-L61 | train | 47,243 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | zinb_ll | def zinb_ll(data, P, R, Z):
"""
Returns the zero-inflated negative binomial log-likelihood of the data.
"""
lls = nb_ll(data, P, R)
clusters = P.shape[1]
for c in range(clusters):
pass
return lls | python | def zinb_ll(data, P, R, Z):
"""
Returns the zero-inflated negative binomial log-likelihood of the data.
"""
lls = nb_ll(data, P, R)
clusters = P.shape[1]
for c in range(clusters):
pass
return lls | [
"def",
"zinb_ll",
"(",
"data",
",",
"P",
",",
"R",
",",
"Z",
")",
":",
"lls",
"=",
"nb_ll",
"(",
"data",
",",
"P",
",",
"R",
")",
"clusters",
"=",
"P",
".",
"shape",
"[",
"1",
"]",
"for",
"c",
"in",
"range",
"(",
"clusters",
")",
":",
"pass... | Returns the zero-inflated negative binomial log-likelihood of the data. | [
"Returns",
"the",
"zero",
"-",
"inflated",
"negative",
"binomial",
"log",
"-",
"likelihood",
"of",
"the",
"data",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L63-L71 | train | 47,244 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_ll_row | def nb_ll_row(params, data_row):
"""
returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row
"""
p = params[0]
r = params[1]
n = len(data_row)
ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
ll -= n*gammaln(r)
ll += np.sum(data_row)*np.log(p)
ll += n*r*np.log(1-p)
return -ll | python | def nb_ll_row(params, data_row):
"""
returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row
"""
p = params[0]
r = params[1]
n = len(data_row)
ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
ll -= n*gammaln(r)
ll += np.sum(data_row)*np.log(p)
ll += n*r*np.log(1-p)
return -ll | [
"def",
"nb_ll_row",
"(",
"params",
",",
"data_row",
")",
":",
"p",
"=",
"params",
"[",
"0",
"]",
"r",
"=",
"params",
"[",
"1",
"]",
"n",
"=",
"len",
"(",
"data_row",
")",
"ll",
"=",
"np",
".",
"sum",
"(",
"gammaln",
"(",
"data_row",
"+",
"r",
... | returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row | [
"returns",
"the",
"negative",
"LL",
"of",
"a",
"single",
"row",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L73-L91 | train | 47,245 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_fit | def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
# method of moments
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return P,R | python | def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
"""
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
"""
means = data.mean(1)
variances = data.var(1)
if (means > variances).any():
raise ValueError("For NB fit, means must be less than variances")
genes, cells = data.shape
# method of moments
P = 1.0 - means/variances
R = means*(1-P)/P
for i in range(genes):
result = minimize(nb_ll_row, [P[i], R[i]], args=(data[i,:],),
bounds = [(0, 1), (eps, None)])
params = result.x
P[i] = params[0]
R[i] = params[1]
#R[i] = fsolve(nb_r_deriv, R[i], args = (data[i,:],))
#P[i] = data[i,:].mean()/(data[i,:].mean() + R[i])
return P,R | [
"def",
"nb_fit",
"(",
"data",
",",
"P_init",
"=",
"None",
",",
"R_init",
"=",
"None",
",",
"epsilon",
"=",
"1e-8",
",",
"max_iters",
"=",
"100",
")",
":",
"means",
"=",
"data",
".",
"mean",
"(",
"1",
")",
"variances",
"=",
"data",
".",
"var",
"("... | Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data | [
"Fits",
"the",
"NB",
"distribution",
"to",
"data",
"using",
"method",
"of",
"moments",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L105-L133 | train | 47,246 |
yjzhang/uncurl_python | uncurl/nb_clustering.py | nb_cluster | def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
"""
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var
"""
genes, cells = data.shape
if P_init is None:
P_init = np.random.random((genes, k))
if R_init is None:
R_init = np.random.randint(1, data.max(), (genes, k))
R_init = R_init.astype(float)
if assignments is None:
_, assignments = kmeans_pp(data, k, means)
means = np.zeros((genes, k))
#assignments = np.array([np.random.randint(0,k) for i in range(cells)])
old_assignments = np.copy(assignments)
# If mean > variance, then fall back to Poisson, since NB
# distribution can't handle that case.
for i in range(max_iters):
# estimate params from assigned cells
nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
# re-calculate assignments
lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices,:], R_init[nb_gene_indices,:])
lls += pois_ll.poisson_ll(data[~nb_gene_indices,:], means[~nb_gene_indices,:])
# set NB params to failure values
P_init[~nb_gene_indices,:] = 0
R_init[~nb_gene_indices,:] = np.inf
for c in range(cells):
assignments[c] = np.argmax(lls[c,:])
if np.equal(assignments,old_assignments).all():
break
old_assignments = np.copy(assignments)
return assignments, P_init, R_init | python | def nb_cluster(data, k, P_init=None, R_init=None, assignments=None, means=None, max_iters=10):
"""
Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var
"""
genes, cells = data.shape
if P_init is None:
P_init = np.random.random((genes, k))
if R_init is None:
R_init = np.random.randint(1, data.max(), (genes, k))
R_init = R_init.astype(float)
if assignments is None:
_, assignments = kmeans_pp(data, k, means)
means = np.zeros((genes, k))
#assignments = np.array([np.random.randint(0,k) for i in range(cells)])
old_assignments = np.copy(assignments)
# If mean > variance, then fall back to Poisson, since NB
# distribution can't handle that case.
for i in range(max_iters):
# estimate params from assigned cells
nb_gene_indices = fit_cluster(data, assignments, k, P_init, R_init, means)
# re-calculate assignments
lls = nb_ll(data[nb_gene_indices, :], P_init[nb_gene_indices,:], R_init[nb_gene_indices,:])
lls += pois_ll.poisson_ll(data[~nb_gene_indices,:], means[~nb_gene_indices,:])
# set NB params to failure values
P_init[~nb_gene_indices,:] = 0
R_init[~nb_gene_indices,:] = np.inf
for c in range(cells):
assignments[c] = np.argmax(lls[c,:])
if np.equal(assignments,old_assignments).all():
break
old_assignments = np.copy(assignments)
return assignments, P_init, R_init | [
"def",
"nb_cluster",
"(",
"data",
",",
"k",
",",
"P_init",
"=",
"None",
",",
"R_init",
"=",
"None",
",",
"assignments",
"=",
"None",
",",
"means",
"=",
"None",
",",
"max_iters",
"=",
"10",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",... | Performs negative binomial clustering on the given data. If some genes have mean > variance, then these genes are fitted to a Poisson distribution.
Args:
data (array): genes x cells
k (int): number of clusters
P_init (array): NB success prob param - genes x k. Default: random
R_init (array): NB stopping param - genes x k. Default: random
assignments (array): cells x 1 array of integers 0...k-1. Default: kmeans-pp (poisson)
means (array): initial cluster means (for use with kmeans-pp to create initial assignments). Default: None
max_iters (int): default: 100
Returns:
assignments (array): 1d array of length cells, containing integers 0...k-1
P (array): genes x k - value is 0 for genes with mean > var
R (array): genes x k - value is inf for genes with mean > var | [
"Performs",
"negative",
"binomial",
"clustering",
"on",
"the",
"given",
"data",
".",
"If",
"some",
"genes",
"have",
"mean",
">",
"variance",
"then",
"these",
"genes",
"are",
"fitted",
"to",
"a",
"Poisson",
"distribution",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/nb_clustering.py#L141-L186 | train | 47,247 |
yjzhang/uncurl_python | uncurl/zip_utils.py | zip_ll | def zip_ll(data, means, M):
"""
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
"""
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
d0 = (data==0)
d1 = (data>0)
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose()
L_i = np.tile(M[:,i], (cells, 1))
L_i = L_i.transpose()
ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))
ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)
# not including constant factors
ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
ll_0 = np.where(d0, ll_0, 0.0)
ll_1 = np.where(d1, ll_1, 0.0)
ll[:,i] = np.sum(ll_0 + ll_1, 0)
return ll | python | def zip_ll(data, means, M):
"""
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
"""
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
d0 = (data==0)
d1 = (data>0)
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose()
L_i = np.tile(M[:,i], (cells, 1))
L_i = L_i.transpose()
ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))
ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)
# not including constant factors
ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
ll_0 = np.where(d0, ll_0, 0.0)
ll_1 = np.where(d1, ll_1, 0.0)
ll[:,i] = np.sum(ll_0 + ll_1, 0)
return ll | [
"def",
"zip_ll",
"(",
"data",
",",
"means",
",",
"M",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"means",
".",
"shape",
"[",
"1",
"]",
"ll",
"=",
"np",
".",
"zeros",
"(",
"(",
"cells",
",",
"clusters",
")",
")"... | Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair. | [
"Calculates",
"the",
"zero",
"-",
"inflated",
"Poisson",
"log",
"-",
"likelihood",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_utils.py#L9-L38 | train | 47,248 |
yjzhang/uncurl_python | uncurl/zip_utils.py | zip_ll_row | def zip_ll_row(params, data_row):
"""
Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood
"""
l = params[0]
pi = params[1]
d0 = (data_row==0)
likelihood = d0*pi + (1-pi)*poisson.pmf(data_row, l)
return -np.log(likelihood+eps).sum() | python | def zip_ll_row(params, data_row):
"""
Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood
"""
l = params[0]
pi = params[1]
d0 = (data_row==0)
likelihood = d0*pi + (1-pi)*poisson.pmf(data_row, l)
return -np.log(likelihood+eps).sum() | [
"def",
"zip_ll_row",
"(",
"params",
",",
"data_row",
")",
":",
"l",
"=",
"params",
"[",
"0",
"]",
"pi",
"=",
"params",
"[",
"1",
"]",
"d0",
"=",
"(",
"data_row",
"==",
"0",
")",
"likelihood",
"=",
"d0",
"*",
"pi",
"+",
"(",
"1",
"-",
"pi",
")... | Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood | [
"Returns",
"the",
"negative",
"log",
"-",
"likelihood",
"of",
"a",
"row",
"given",
"ZIP",
"data",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_utils.py#L40-L55 | train | 47,249 |
yjzhang/uncurl_python | uncurl/gap_score.py | preproc_data | def preproc_data(data, gene_subset=False, **kwargs):
"""
basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection...
"""
import uncurl
from uncurl.preprocessing import log1p, cell_normalize
from sklearn.decomposition import TruncatedSVD
data_subset = data
if gene_subset:
gene_subset = uncurl.max_variance_genes(data)
data_subset = data[gene_subset, :]
tsvd = TruncatedSVD(min(8, data_subset.shape[0] - 1))
data_tsvd = tsvd.fit_transform(log1p(cell_normalize(data_subset)).T)
return data_tsvd | python | def preproc_data(data, gene_subset=False, **kwargs):
"""
basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection...
"""
import uncurl
from uncurl.preprocessing import log1p, cell_normalize
from sklearn.decomposition import TruncatedSVD
data_subset = data
if gene_subset:
gene_subset = uncurl.max_variance_genes(data)
data_subset = data[gene_subset, :]
tsvd = TruncatedSVD(min(8, data_subset.shape[0] - 1))
data_tsvd = tsvd.fit_transform(log1p(cell_normalize(data_subset)).T)
return data_tsvd | [
"def",
"preproc_data",
"(",
"data",
",",
"gene_subset",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"uncurl",
"from",
"uncurl",
".",
"preprocessing",
"import",
"log1p",
",",
"cell_normalize",
"from",
"sklearn",
".",
"decomposition",
"import",
... | basic data preprocessing before running gap score
Assumes that data is a matrix of shape (genes, cells).
Returns a matrix of shape (cells, 8), using the first 8 SVD
components. Why 8? It's an arbitrary selection... | [
"basic",
"data",
"preprocessing",
"before",
"running",
"gap",
"score"
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L7-L25 | train | 47,250 |
yjzhang/uncurl_python | uncurl/gap_score.py | calculate_bounding_box | def calculate_bounding_box(data):
"""
Returns a 2 x m array indicating the min and max along each
dimension.
"""
mins = data.min(0)
maxes = data.max(0)
return mins, maxes | python | def calculate_bounding_box(data):
"""
Returns a 2 x m array indicating the min and max along each
dimension.
"""
mins = data.min(0)
maxes = data.max(0)
return mins, maxes | [
"def",
"calculate_bounding_box",
"(",
"data",
")",
":",
"mins",
"=",
"data",
".",
"min",
"(",
"0",
")",
"maxes",
"=",
"data",
".",
"max",
"(",
"0",
")",
"return",
"mins",
",",
"maxes"
] | Returns a 2 x m array indicating the min and max along each
dimension. | [
"Returns",
"a",
"2",
"x",
"m",
"array",
"indicating",
"the",
"min",
"and",
"max",
"along",
"each",
"dimension",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L27-L34 | train | 47,251 |
yjzhang/uncurl_python | uncurl/gap_score.py | run_gap_k_selection | def run_gap_k_selection(data, k_min=1, k_max=50, B=5,
skip=5, **kwargs):
"""
Runs gap score for all k from k_min to k_max.
"""
if k_min == k_max:
return k_min
gap_vals = []
sk_vals = []
k_range = list(range(k_min, k_max, skip))
min_k = 0
min_i = 0
for i, k in enumerate(k_range):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - (skip+1)*sk:
min_i = i
min_k = k_range[i-1]
break
#return k_range[-1], gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
if min_k == 0:
min_k = k_max
if skip == 1:
return min_k, gap_vals, sk_vals
gap_vals = []
sk_vals = []
for k in range(min_k - skip, min_k + skip):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - sk:
min_k = k-1
return min_k, gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
return k, gap_vals, sk_vals | python | def run_gap_k_selection(data, k_min=1, k_max=50, B=5,
skip=5, **kwargs):
"""
Runs gap score for all k from k_min to k_max.
"""
if k_min == k_max:
return k_min
gap_vals = []
sk_vals = []
k_range = list(range(k_min, k_max, skip))
min_k = 0
min_i = 0
for i, k in enumerate(k_range):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - (skip+1)*sk:
min_i = i
min_k = k_range[i-1]
break
#return k_range[-1], gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
if min_k == 0:
min_k = k_max
if skip == 1:
return min_k, gap_vals, sk_vals
gap_vals = []
sk_vals = []
for k in range(min_k - skip, min_k + skip):
km = KMeans(k)
clusters = km.fit_predict(data)
gap, sk = calculate_gap(data, clusters, km, B=B)
if len(gap_vals) > 1:
if gap_vals[-1] >= gap - sk:
min_k = k-1
return min_k, gap_vals, sk_vals
gap_vals.append(gap)
sk_vals.append(sk)
return k, gap_vals, sk_vals | [
"def",
"run_gap_k_selection",
"(",
"data",
",",
"k_min",
"=",
"1",
",",
"k_max",
"=",
"50",
",",
"B",
"=",
"5",
",",
"skip",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"k_min",
"==",
"k_max",
":",
"return",
"k_min",
"gap_vals",
"=",
"[",
... | Runs gap score for all k from k_min to k_max. | [
"Runs",
"gap",
"score",
"for",
"all",
"k",
"from",
"k_min",
"to",
"k_max",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/gap_score.py#L61-L101 | train | 47,252 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.get_devices | def get_devices(self) -> list:
"""Return list of VeSync devices"""
if not self.enabled:
return None
self.in_process = True
response, _ = helpers.call_api(
'/cloud/v1/deviceManaged/devices',
'post',
headers=helpers.req_headers(self),
json=helpers.req_body(self, 'devicelist')
)
if response and helpers.check_response(response, 'get_devices'):
if 'result' in response and 'list' in response['result']:
device_list = response['result']['list']
outlets, switches, fans = self.process_devices(device_list)
else:
logger.error('Device list in response not found')
else:
logger.error('Error retrieving device list')
self.in_process = False
return (outlets, switches, fans) | python | def get_devices(self) -> list:
"""Return list of VeSync devices"""
if not self.enabled:
return None
self.in_process = True
response, _ = helpers.call_api(
'/cloud/v1/deviceManaged/devices',
'post',
headers=helpers.req_headers(self),
json=helpers.req_body(self, 'devicelist')
)
if response and helpers.check_response(response, 'get_devices'):
if 'result' in response and 'list' in response['result']:
device_list = response['result']['list']
outlets, switches, fans = self.process_devices(device_list)
else:
logger.error('Device list in response not found')
else:
logger.error('Error retrieving device list')
self.in_process = False
return (outlets, switches, fans) | [
"def",
"get_devices",
"(",
"self",
")",
"->",
"list",
":",
"if",
"not",
"self",
".",
"enabled",
":",
"return",
"None",
"self",
".",
"in_process",
"=",
"True",
"response",
",",
"_",
"=",
"helpers",
".",
"call_api",
"(",
"'/cloud/v1/deviceManaged/devices'",
... | Return list of VeSync devices | [
"Return",
"list",
"of",
"VeSync",
"devices"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L106-L132 | train | 47,253 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.login | def login(self) -> bool:
"""Return True if log in request succeeds"""
user_check = isinstance(self.username, str) and len(self.username) > 0
pass_check = isinstance(self.password, str) and len(self.password) > 0
if user_check and pass_check:
response, _ = helpers.call_api(
'/cloud/v1/user/login',
'post',
json=helpers.req_body(self, 'login')
)
if response and helpers.check_response(response, 'login'):
self.token = response['result']['token']
self.account_id = response['result']['accountID']
self.enabled = True
return True
else:
logger.error('Error logging in with username and password')
return False
else:
if user_check is False:
logger.error('Username invalid')
if pass_check is False:
logger.error('Password invalid')
return False | python | def login(self) -> bool:
"""Return True if log in request succeeds"""
user_check = isinstance(self.username, str) and len(self.username) > 0
pass_check = isinstance(self.password, str) and len(self.password) > 0
if user_check and pass_check:
response, _ = helpers.call_api(
'/cloud/v1/user/login',
'post',
json=helpers.req_body(self, 'login')
)
if response and helpers.check_response(response, 'login'):
self.token = response['result']['token']
self.account_id = response['result']['accountID']
self.enabled = True
return True
else:
logger.error('Error logging in with username and password')
return False
else:
if user_check is False:
logger.error('Username invalid')
if pass_check is False:
logger.error('Password invalid')
return False | [
"def",
"login",
"(",
"self",
")",
"->",
"bool",
":",
"user_check",
"=",
"isinstance",
"(",
"self",
".",
"username",
",",
"str",
")",
"and",
"len",
"(",
"self",
".",
"username",
")",
">",
"0",
"pass_check",
"=",
"isinstance",
"(",
"self",
".",
"passwo... | Return True if log in request succeeds | [
"Return",
"True",
"if",
"log",
"in",
"request",
"succeeds"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L134-L162 | train | 47,254 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.update | def update(self):
"""Fetch updated information about devices"""
if self.device_time_check():
if not self.in_process:
outlets, switches, fans = self.get_devices()
self.outlets = helpers.resolve_updates(self.outlets, outlets)
self.switches = helpers.resolve_updates(
self.switches, switches)
self.fans = helpers.resolve_updates(self.fans, fans)
self.last_update_ts = time.time() | python | def update(self):
"""Fetch updated information about devices"""
if self.device_time_check():
if not self.in_process:
outlets, switches, fans = self.get_devices()
self.outlets = helpers.resolve_updates(self.outlets, outlets)
self.switches = helpers.resolve_updates(
self.switches, switches)
self.fans = helpers.resolve_updates(self.fans, fans)
self.last_update_ts = time.time() | [
"def",
"update",
"(",
"self",
")",
":",
"if",
"self",
".",
"device_time_check",
"(",
")",
":",
"if",
"not",
"self",
".",
"in_process",
":",
"outlets",
",",
"switches",
",",
"fans",
"=",
"self",
".",
"get_devices",
"(",
")",
"self",
".",
"outlets",
"=... | Fetch updated information about devices | [
"Fetch",
"updated",
"information",
"about",
"devices"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L171-L184 | train | 47,255 |
markperdue/pyvesync | src/pyvesync/vesync.py | VeSync.update_energy | def update_energy(self, bypass_check=False):
"""Fetch updated energy information about devices"""
for outlet in self.outlets:
outlet.update_energy(bypass_check) | python | def update_energy(self, bypass_check=False):
"""Fetch updated energy information about devices"""
for outlet in self.outlets:
outlet.update_energy(bypass_check) | [
"def",
"update_energy",
"(",
"self",
",",
"bypass_check",
"=",
"False",
")",
":",
"for",
"outlet",
"in",
"self",
".",
"outlets",
":",
"outlet",
".",
"update_energy",
"(",
"bypass_check",
")"
] | Fetch updated energy information about devices | [
"Fetch",
"updated",
"energy",
"information",
"about",
"devices"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesync.py#L186-L189 | train | 47,256 |
yjzhang/uncurl_python | uncurl/fit_dist_data.py | DistFitDataset | def DistFitDataset(Dat):
"""
Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
"""
#Assumes data to be in the form of a numpy matrix
(r,c) = Dat.shape
Poiss = np.zeros(r)
Norm = np.zeros(r)
LogNorm = np.zeros(r)
for i in range(r):
temp = GetDistFitError(Dat[i])
Poiss[i] = temp['poiss']
Norm[i] = temp['norm']
LogNorm[i] = temp['lognorm']
d = {}
d['poiss'] = Poiss
d['norm'] = Norm
d['lognorm'] = LogNorm
return d | python | def DistFitDataset(Dat):
"""
Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
"""
#Assumes data to be in the form of a numpy matrix
(r,c) = Dat.shape
Poiss = np.zeros(r)
Norm = np.zeros(r)
LogNorm = np.zeros(r)
for i in range(r):
temp = GetDistFitError(Dat[i])
Poiss[i] = temp['poiss']
Norm[i] = temp['norm']
LogNorm[i] = temp['lognorm']
d = {}
d['poiss'] = Poiss
d['norm'] = Norm
d['lognorm'] = LogNorm
return d | [
"def",
"DistFitDataset",
"(",
"Dat",
")",
":",
"#Assumes data to be in the form of a numpy matrix ",
"(",
"r",
",",
"c",
")",
"=",
"Dat",
".",
"shape",
"Poiss",
"=",
"np",
".",
"zeros",
"(",
"r",
")",
"Norm",
"=",
"np",
".",
"zeros",
"(",
"r",
")",
"Lo... | Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution. | [
"Given",
"a",
"data",
"matrix",
"this",
"returns",
"the",
"per",
"-",
"gene",
"fit",
"error",
"for",
"the",
"Poisson",
"Normal",
"and",
"Log",
"-",
"Normal",
"distributions",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/fit_dist_data.py#L55-L80 | train | 47,257 |
moonso/loqusdb | loqusdb/commands/annotate.py | annotate | def annotate(ctx, variant_file, sv):
"""Annotate the variants in a VCF
"""
adapter = ctx.obj['adapter']
variant_path = os.path.abspath(variant_file)
expected_type = 'snv'
if sv:
expected_type = 'sv'
if 'sv':
nr_cases = adapter.nr_cases(sv_cases=True)
else:
nr_cases = adapter.nr_cases(snv_cases=True)
LOG.info("Found {0} {1} cases in database".format(nr_cases, expected_type))
vcf_obj = get_file_handle(variant_path)
add_headers(vcf_obj, nr_cases=nr_cases, sv=sv)
# Print the headers
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) == 0:
continue
click.echo(header_line)
start_inserting = datetime.now()
if sv:
annotated_variants = annotate_svs(adapter, vcf_obj)
else:
annotated_variants = annotate_snvs(adapter, vcf_obj)
# try:
for variant in annotated_variants:
click.echo(str(variant).rstrip()) | python | def annotate(ctx, variant_file, sv):
"""Annotate the variants in a VCF
"""
adapter = ctx.obj['adapter']
variant_path = os.path.abspath(variant_file)
expected_type = 'snv'
if sv:
expected_type = 'sv'
if 'sv':
nr_cases = adapter.nr_cases(sv_cases=True)
else:
nr_cases = adapter.nr_cases(snv_cases=True)
LOG.info("Found {0} {1} cases in database".format(nr_cases, expected_type))
vcf_obj = get_file_handle(variant_path)
add_headers(vcf_obj, nr_cases=nr_cases, sv=sv)
# Print the headers
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) == 0:
continue
click.echo(header_line)
start_inserting = datetime.now()
if sv:
annotated_variants = annotate_svs(adapter, vcf_obj)
else:
annotated_variants = annotate_snvs(adapter, vcf_obj)
# try:
for variant in annotated_variants:
click.echo(str(variant).rstrip()) | [
"def",
"annotate",
"(",
"ctx",
",",
"variant_file",
",",
"sv",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"variant_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"variant_file",
")",
"expected_type",
"=",
"'snv'",
"if",
"s... | Annotate the variants in a VCF | [
"Annotate",
"the",
"variants",
"in",
"a",
"VCF"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/annotate.py#L25-L59 | train | 47,258 |
bachya/py17track | py17track/track.py | Track.find | async def find(self, *tracking_numbers: str) -> list:
"""Get tracking info for one or more tracking numbers."""
data = {'data': [{'num': num} for num in tracking_numbers]}
tracking_resp = await self._request('post', API_URL_TRACK, json=data)
print(tracking_resp)
if not tracking_resp.get('dat'):
raise InvalidTrackingNumberError('Invalid data')
packages = []
for info in tracking_resp['dat']:
package_info = info.get('track', {})
if not package_info:
continue
kwargs = {
'destination_country': package_info.get('c'),
'info_text': package_info.get('z0', {}).get('z'),
'location': package_info.get('z0', {}).get('c'),
'origin_country': package_info.get('b'),
'package_type': package_info.get('d', 0),
'status': package_info.get('e', 0),
'tracking_info_language': package_info.get('ln1', 'Unknown')
}
packages.append(Package(info['no'], **kwargs))
return packages | python | async def find(self, *tracking_numbers: str) -> list:
"""Get tracking info for one or more tracking numbers."""
data = {'data': [{'num': num} for num in tracking_numbers]}
tracking_resp = await self._request('post', API_URL_TRACK, json=data)
print(tracking_resp)
if not tracking_resp.get('dat'):
raise InvalidTrackingNumberError('Invalid data')
packages = []
for info in tracking_resp['dat']:
package_info = info.get('track', {})
if not package_info:
continue
kwargs = {
'destination_country': package_info.get('c'),
'info_text': package_info.get('z0', {}).get('z'),
'location': package_info.get('z0', {}).get('c'),
'origin_country': package_info.get('b'),
'package_type': package_info.get('d', 0),
'status': package_info.get('e', 0),
'tracking_info_language': package_info.get('ln1', 'Unknown')
}
packages.append(Package(info['no'], **kwargs))
return packages | [
"async",
"def",
"find",
"(",
"self",
",",
"*",
"tracking_numbers",
":",
"str",
")",
"->",
"list",
":",
"data",
"=",
"{",
"'data'",
":",
"[",
"{",
"'num'",
":",
"num",
"}",
"for",
"num",
"in",
"tracking_numbers",
"]",
"}",
"tracking_resp",
"=",
"await... | Get tracking info for one or more tracking numbers. | [
"Get",
"tracking",
"info",
"for",
"one",
"or",
"more",
"tracking",
"numbers",
"."
] | e6e64f2a79571433df7ee702cb4ebc4127b7ad6d | https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/track.py#L17-L44 | train | 47,259 |
yjzhang/uncurl_python | uncurl/qual2quant.py | binarize | def binarize(qualitative):
"""
binarizes an expression dataset.
"""
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(8,1)
return binarized.astype(int) | python | def binarize(qualitative):
"""
binarizes an expression dataset.
"""
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
binarized = qualitative > thresholds.reshape((len(thresholds), 1)).repeat(8,1)
return binarized.astype(int) | [
"def",
"binarize",
"(",
"qualitative",
")",
":",
"thresholds",
"=",
"qualitative",
".",
"min",
"(",
"1",
")",
"+",
"(",
"qualitative",
".",
"max",
"(",
"1",
")",
"-",
"qualitative",
".",
"min",
"(",
"1",
")",
")",
"/",
"2.0",
"binarized",
"=",
"qua... | binarizes an expression dataset. | [
"binarizes",
"an",
"expression",
"dataset",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L43-L49 | train | 47,260 |
yjzhang/uncurl_python | uncurl/qual2quant.py | qualNorm_filter_genes | def qualNorm_filter_genes(data, qualitative, pval_threshold=0.05, smoothing=1e-5, eps=1e-5):
"""
Does qualNorm but returns a filtered gene set, based on a p-value threshold.
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
genes_included = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
pvals = np.zeros(genes)
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
# do a p-value test
p_val = poisson_test(data_i[assignments==low_i], data_i[assignments==high_i], smoothing=smoothing)
pvals[i] = p_val
if p_val <= pval_threshold:
genes_included.append(i)
else:
continue
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i]) + eps
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
output = output[genes_included,:]
pvals = pvals[genes_included]
return output, pvals, genes_included | python | def qualNorm_filter_genes(data, qualitative, pval_threshold=0.05, smoothing=1e-5, eps=1e-5):
"""
Does qualNorm but returns a filtered gene set, based on a p-value threshold.
"""
genes, cells = data.shape
clusters = qualitative.shape[1]
output = np.zeros((genes, clusters))
missing_indices = []
genes_included = []
qual_indices = []
thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
pvals = np.zeros(genes)
for i in range(genes):
if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
missing_indices.append(i)
continue
qual_indices.append(i)
threshold = thresholds[i]
data_i = data[i,:]
if sparse.issparse(data):
data_i = data_i.toarray().flatten()
assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
means = means.flatten()
high_i = 1
low_i = 0
if means[0]>means[1]:
high_i = 0
low_i = 1
# do a p-value test
p_val = poisson_test(data_i[assignments==low_i], data_i[assignments==high_i], smoothing=smoothing)
pvals[i] = p_val
if p_val <= pval_threshold:
genes_included.append(i)
else:
continue
high_mean = np.median(data_i[assignments==high_i])
low_mean = np.median(data_i[assignments==low_i]) + eps
for k in range(clusters):
if qualitative[i,k]>threshold:
output[i,k] = high_mean
else:
output[i,k] = low_mean
output = output[genes_included,:]
pvals = pvals[genes_included]
return output, pvals, genes_included | [
"def",
"qualNorm_filter_genes",
"(",
"data",
",",
"qualitative",
",",
"pval_threshold",
"=",
"0.05",
",",
"smoothing",
"=",
"1e-5",
",",
"eps",
"=",
"1e-5",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"qualitative",
".",
... | Does qualNorm but returns a filtered gene set, based on a p-value threshold. | [
"Does",
"qualNorm",
"but",
"returns",
"a",
"filtered",
"gene",
"set",
"based",
"on",
"a",
"p",
"-",
"value",
"threshold",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/qual2quant.py#L51-L95 | train | 47,261 |
OCHA-DAP/hdx-python-country | setup.py | script_dir | def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | python | def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | [
"def",
"script_dir",
"(",
"pyobject",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"if",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
":",
"# py2exe, PyInstaller, cx_Freeze",
"path",
"=",
"abspath",
"(",
"sys",
".",
"executable",
")",
"else",... | Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory | [
"Get",
"current",
"script",
"s",
"directory"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L11-L27 | train | 47,262 |
OCHA-DAP/hdx-python-country | setup.py | script_dir_plus_file | def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
"""Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
"""
return join(script_dir(pyobject, follow_symlinks), filename) | python | def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
"""Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
"""
return join(script_dir(pyobject, follow_symlinks), filename) | [
"def",
"script_dir_plus_file",
"(",
"filename",
",",
"pyobject",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"return",
"join",
"(",
"script_dir",
"(",
"pyobject",
",",
"follow_symlinks",
")",
",",
"filename",
")"
] | Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended | [
"Get",
"current",
"script",
"s",
"directory",
"and",
"then",
"append",
"a",
"filename"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L30-L41 | train | 47,263 |
moonso/loqusdb | loqusdb/commands/identity.py | identity | def identity(ctx, variant_id):
"""Check how well SVs are working in the database
"""
if not variant_id:
LOG.warning("Please provide a variant id")
ctx.abort()
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Search variants {0}".format(adapter))
result = adapter.get_clusters(variant_id)
if result.count() == 0:
LOG.info("No hits for variant %s", variant_id)
return
for res in result:
click.echo(res) | python | def identity(ctx, variant_id):
"""Check how well SVs are working in the database
"""
if not variant_id:
LOG.warning("Please provide a variant id")
ctx.abort()
adapter = ctx.obj['adapter']
version = ctx.obj['version']
LOG.info("Search variants {0}".format(adapter))
result = adapter.get_clusters(variant_id)
if result.count() == 0:
LOG.info("No hits for variant %s", variant_id)
return
for res in result:
click.echo(res) | [
"def",
"identity",
"(",
"ctx",
",",
"variant_id",
")",
":",
"if",
"not",
"variant_id",
":",
"LOG",
".",
"warning",
"(",
"\"Please provide a variant id\"",
")",
"ctx",
".",
"abort",
"(",
")",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"vers... | Check how well SVs are working in the database | [
"Check",
"how",
"well",
"SVs",
"are",
"working",
"in",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/identity.py#L13-L32 | train | 47,264 |
ggravlingen/pygleif | pygleif/gleif.py | GLEIFEntity.registration_authority_entity_id | def registration_authority_entity_id(self):
"""
Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data.
"""
if ATTR_ENTITY_REGISTRATION_AUTHORITY in self.raw:
try:
return self.raw[
ATTR_ENTITY_REGISTRATION_AUTHORITY][
ATTR_ENTITY_REGISTRATION_AUTHORITY_ENTITY_ID][
ATTR_DOLLAR_SIGN]
except KeyError:
pass | python | def registration_authority_entity_id(self):
"""
Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data.
"""
if ATTR_ENTITY_REGISTRATION_AUTHORITY in self.raw:
try:
return self.raw[
ATTR_ENTITY_REGISTRATION_AUTHORITY][
ATTR_ENTITY_REGISTRATION_AUTHORITY_ENTITY_ID][
ATTR_DOLLAR_SIGN]
except KeyError:
pass | [
"def",
"registration_authority_entity_id",
"(",
"self",
")",
":",
"if",
"ATTR_ENTITY_REGISTRATION_AUTHORITY",
"in",
"self",
".",
"raw",
":",
"try",
":",
"return",
"self",
".",
"raw",
"[",
"ATTR_ENTITY_REGISTRATION_AUTHORITY",
"]",
"[",
"ATTR_ENTITY_REGISTRATION_AUTHORIT... | Some entities return the register entity id,
but other do not. Unsure if this is a bug or
inconsistently registered data. | [
"Some",
"entities",
"return",
"the",
"register",
"entity",
"id",
"but",
"other",
"do",
"not",
".",
"Unsure",
"if",
"this",
"is",
"a",
"bug",
"or",
"inconsistently",
"registered",
"data",
"."
] | f0f62f1a2878fce45fedcc2260264153808429f9 | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L127-L141 | train | 47,265 |
ggravlingen/pygleif | pygleif/gleif.py | GLEIFEntity.legal_form | def legal_form(self):
"""In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries"""
if ATTR_ENTITY_LEGAL_FORM in self.raw:
try:
return LEGAL_FORMS[self.legal_jurisdiction][
self.raw[ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
]
except KeyError:
legal_form = self.raw[
ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
if len(legal_form) == 4:
# If this is returned, the ELF should
# be added to the constants.
return 'ELF code: ' + legal_form
else:
return legal_form | python | def legal_form(self):
"""In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries"""
if ATTR_ENTITY_LEGAL_FORM in self.raw:
try:
return LEGAL_FORMS[self.legal_jurisdiction][
self.raw[ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
]
except KeyError:
legal_form = self.raw[
ATTR_ENTITY_LEGAL_FORM][
ATTR_ENTITY_LEGAL_FORM_CODE][ATTR_DOLLAR_SIGN]
if len(legal_form) == 4:
# If this is returned, the ELF should
# be added to the constants.
return 'ELF code: ' + legal_form
else:
return legal_form | [
"def",
"legal_form",
"(",
"self",
")",
":",
"if",
"ATTR_ENTITY_LEGAL_FORM",
"in",
"self",
".",
"raw",
":",
"try",
":",
"return",
"LEGAL_FORMS",
"[",
"self",
".",
"legal_jurisdiction",
"]",
"[",
"self",
".",
"raw",
"[",
"ATTR_ENTITY_LEGAL_FORM",
"]",
"[",
"... | In some cases, the legal form is stored in the JSON-data.
In other cases, an ELF-code, consisting of mix of exactly
four letters and numbers are stored. This ELF-code
can be looked up in a registry where a code maps to
a organizational type. ELF-codes are not unique,
it can reoccur under different names in different
countries | [
"In",
"some",
"cases",
"the",
"legal",
"form",
"is",
"stored",
"in",
"the",
"JSON",
"-",
"data",
".",
"In",
"other",
"cases",
"an",
"ELF",
"-",
"code",
"consisting",
"of",
"mix",
"of",
"exactly",
"four",
"letters",
"and",
"numbers",
"are",
"stored",
".... | f0f62f1a2878fce45fedcc2260264153808429f9 | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L157-L182 | train | 47,266 |
ggravlingen/pygleif | pygleif/gleif.py | DirectChild.valid_child_records | def valid_child_records(self):
child_lei = list()
"""Loop through data to find a valid record. Return list of LEI."""
for d in self.raw['data']:
# We're not very greedy here, but it seems some records have
# lapsed even through the issuer is active
if d['attributes']['relationship']['status'] in ['ACTIVE']:
child_lei.append(
d['attributes']['relationship']['startNode']['id'])
return child_lei | python | def valid_child_records(self):
child_lei = list()
"""Loop through data to find a valid record. Return list of LEI."""
for d in self.raw['data']:
# We're not very greedy here, but it seems some records have
# lapsed even through the issuer is active
if d['attributes']['relationship']['status'] in ['ACTIVE']:
child_lei.append(
d['attributes']['relationship']['startNode']['id'])
return child_lei | [
"def",
"valid_child_records",
"(",
"self",
")",
":",
"child_lei",
"=",
"list",
"(",
")",
"for",
"d",
"in",
"self",
".",
"raw",
"[",
"'data'",
"]",
":",
"# We're not very greedy here, but it seems some records have",
"# lapsed even through the issuer is active",
"if",
... | Loop through data to find a valid record. Return list of LEI. | [
"Loop",
"through",
"data",
"to",
"find",
"a",
"valid",
"record",
".",
"Return",
"list",
"of",
"LEI",
"."
] | f0f62f1a2878fce45fedcc2260264153808429f9 | https://github.com/ggravlingen/pygleif/blob/f0f62f1a2878fce45fedcc2260264153808429f9/pygleif/gleif.py#L305-L317 | train | 47,267 |
bosth/plpygis | plpygis/geometry.py | Geometry.from_geojson | def from_geojson(geojson, srid=4326):
"""
Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326.
"""
type_ = geojson["type"].lower()
if type_ == "geometrycollection":
geometries = []
for geometry in geojson["geometries"]:
geometries.append(Geometry.from_geojson(geometry, srid=None))
return GeometryCollection(geometries, srid)
elif type_ == "point":
return Point(geojson["coordinates"], srid=srid)
elif type_ == "linestring":
return LineString(geojson["coordinates"], srid=srid)
elif type_ == "polygon":
return Polygon(geojson["coordinates"], srid=srid)
elif type_ == "multipoint":
geometries = _MultiGeometry._multi_from_geojson(geojson, Point)
return MultiPoint(geometries, srid=srid)
elif type_ == "multilinestring":
geometries = _MultiGeometry._multi_from_geojson(geojson, LineString)
return MultiLineString(geometries, srid=srid)
elif type_ == "multipolygon":
geometries = _MultiGeometry._multi_from_geojson(geojson, Polygon)
return MultiPolygon(geometries, srid=srid) | python | def from_geojson(geojson, srid=4326):
"""
Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326.
"""
type_ = geojson["type"].lower()
if type_ == "geometrycollection":
geometries = []
for geometry in geojson["geometries"]:
geometries.append(Geometry.from_geojson(geometry, srid=None))
return GeometryCollection(geometries, srid)
elif type_ == "point":
return Point(geojson["coordinates"], srid=srid)
elif type_ == "linestring":
return LineString(geojson["coordinates"], srid=srid)
elif type_ == "polygon":
return Polygon(geojson["coordinates"], srid=srid)
elif type_ == "multipoint":
geometries = _MultiGeometry._multi_from_geojson(geojson, Point)
return MultiPoint(geometries, srid=srid)
elif type_ == "multilinestring":
geometries = _MultiGeometry._multi_from_geojson(geojson, LineString)
return MultiLineString(geometries, srid=srid)
elif type_ == "multipolygon":
geometries = _MultiGeometry._multi_from_geojson(geojson, Polygon)
return MultiPolygon(geometries, srid=srid) | [
"def",
"from_geojson",
"(",
"geojson",
",",
"srid",
"=",
"4326",
")",
":",
"type_",
"=",
"geojson",
"[",
"\"type\"",
"]",
".",
"lower",
"(",
")",
"if",
"type_",
"==",
"\"geometrycollection\"",
":",
"geometries",
"=",
"[",
"]",
"for",
"geometry",
"in",
... | Create a Geometry from a GeoJSON. The SRID can be overridden from the
expected 4326. | [
"Create",
"a",
"Geometry",
"from",
"a",
"GeoJSON",
".",
"The",
"SRID",
"can",
"be",
"overridden",
"from",
"the",
"expected",
"4326",
"."
] | 9469cc469df4c8cd407de158903d5465cda804ea | https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L77-L102 | train | 47,268 |
bosth/plpygis | plpygis/geometry.py | Geometry.from_shapely | def from_shapely(sgeom, srid=None):
"""
Create a Geometry from a Shapely geometry and the specified SRID.
The Shapely geometry will not be modified.
"""
if SHAPELY:
WKBWriter.defaults["include_srid"] = True
if srid:
lgeos.GEOSSetSRID(sgeom._geom, srid)
return Geometry(sgeom.wkb_hex)
else:
raise DependencyError("Shapely") | python | def from_shapely(sgeom, srid=None):
"""
Create a Geometry from a Shapely geometry and the specified SRID.
The Shapely geometry will not be modified.
"""
if SHAPELY:
WKBWriter.defaults["include_srid"] = True
if srid:
lgeos.GEOSSetSRID(sgeom._geom, srid)
return Geometry(sgeom.wkb_hex)
else:
raise DependencyError("Shapely") | [
"def",
"from_shapely",
"(",
"sgeom",
",",
"srid",
"=",
"None",
")",
":",
"if",
"SHAPELY",
":",
"WKBWriter",
".",
"defaults",
"[",
"\"include_srid\"",
"]",
"=",
"True",
"if",
"srid",
":",
"lgeos",
".",
"GEOSSetSRID",
"(",
"sgeom",
".",
"_geom",
",",
"sr... | Create a Geometry from a Shapely geometry and the specified SRID.
The Shapely geometry will not be modified. | [
"Create",
"a",
"Geometry",
"from",
"a",
"Shapely",
"geometry",
"and",
"the",
"specified",
"SRID",
"."
] | 9469cc469df4c8cd407de158903d5465cda804ea | https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L105-L117 | train | 47,269 |
bosth/plpygis | plpygis/geometry.py | Geometry.postgis_type | def postgis_type(self):
"""
Get the type of the geometry in PostGIS format, including additional
dimensions and SRID if they exist.
"""
dimz = "Z" if self.dimz else ""
dimm = "M" if self.dimm else ""
if self.srid:
return "geometry({}{}{},{})".format(self.type, dimz, dimm, self.srid)
else:
return "geometry({}{}{})".format(self.type, dimz, dimm) | python | def postgis_type(self):
"""
Get the type of the geometry in PostGIS format, including additional
dimensions and SRID if they exist.
"""
dimz = "Z" if self.dimz else ""
dimm = "M" if self.dimm else ""
if self.srid:
return "geometry({}{}{},{})".format(self.type, dimz, dimm, self.srid)
else:
return "geometry({}{}{})".format(self.type, dimz, dimm) | [
"def",
"postgis_type",
"(",
"self",
")",
":",
"dimz",
"=",
"\"Z\"",
"if",
"self",
".",
"dimz",
"else",
"\"\"",
"dimm",
"=",
"\"M\"",
"if",
"self",
".",
"dimm",
"else",
"\"\"",
"if",
"self",
".",
"srid",
":",
"return",
"\"geometry({}{}{},{})\"",
".",
"f... | Get the type of the geometry in PostGIS format, including additional
dimensions and SRID if they exist. | [
"Get",
"the",
"type",
"of",
"the",
"geometry",
"in",
"PostGIS",
"format",
"including",
"additional",
"dimensions",
"and",
"SRID",
"if",
"they",
"exist",
"."
] | 9469cc469df4c8cd407de158903d5465cda804ea | https://github.com/bosth/plpygis/blob/9469cc469df4c8cd407de158903d5465cda804ea/plpygis/geometry.py#L177-L187 | train | 47,270 |
yjzhang/uncurl_python | uncurl/pois_ll.py | poisson_ll | def poisson_ll(data, means):
"""
Calculates the Poisson log-likelihood.
Args:
data (array): 2d numpy array of genes x cells
means (array): 2d numpy array of genes x k
Returns:
cells x k array of log-likelihood for each cell/cluster pair
"""
if sparse.issparse(data):
return sparse_poisson_ll(data, means)
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose() + eps
#ll[:,i] = np.sum(xlogy(data, means_i) - gammaln(data+1) - means_i, 0)
ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0)
return ll | python | def poisson_ll(data, means):
"""
Calculates the Poisson log-likelihood.
Args:
data (array): 2d numpy array of genes x cells
means (array): 2d numpy array of genes x k
Returns:
cells x k array of log-likelihood for each cell/cluster pair
"""
if sparse.issparse(data):
return sparse_poisson_ll(data, means)
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose() + eps
#ll[:,i] = np.sum(xlogy(data, means_i) - gammaln(data+1) - means_i, 0)
ll[:,i] = np.sum(xlogy(data, means_i) - means_i, 0)
return ll | [
"def",
"poisson_ll",
"(",
"data",
",",
"means",
")",
":",
"if",
"sparse",
".",
"issparse",
"(",
"data",
")",
":",
"return",
"sparse_poisson_ll",
"(",
"data",
",",
"means",
")",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"clusters",
"=",
"means",... | Calculates the Poisson log-likelihood.
Args:
data (array): 2d numpy array of genes x cells
means (array): 2d numpy array of genes x k
Returns:
cells x k array of log-likelihood for each cell/cluster pair | [
"Calculates",
"the",
"Poisson",
"log",
"-",
"likelihood",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/pois_ll.py#L22-L43 | train | 47,271 |
yjzhang/uncurl_python | uncurl/pois_ll.py | poisson_dist | def poisson_dist(p1, p2):
"""
Calculates the Poisson distance between two vectors.
p1 can be a sparse matrix, while p2 has to be a dense matrix.
"""
# ugh...
p1_ = p1 + eps
p2_ = p2 + eps
return np.dot(p1_-p2_, np.log(p1_/p2_)) | python | def poisson_dist(p1, p2):
"""
Calculates the Poisson distance between two vectors.
p1 can be a sparse matrix, while p2 has to be a dense matrix.
"""
# ugh...
p1_ = p1 + eps
p2_ = p2 + eps
return np.dot(p1_-p2_, np.log(p1_/p2_)) | [
"def",
"poisson_dist",
"(",
"p1",
",",
"p2",
")",
":",
"# ugh...",
"p1_",
"=",
"p1",
"+",
"eps",
"p2_",
"=",
"p2",
"+",
"eps",
"return",
"np",
".",
"dot",
"(",
"p1_",
"-",
"p2_",
",",
"np",
".",
"log",
"(",
"p1_",
"/",
"p2_",
")",
")"
] | Calculates the Poisson distance between two vectors.
p1 can be a sparse matrix, while p2 has to be a dense matrix. | [
"Calculates",
"the",
"Poisson",
"distance",
"between",
"two",
"vectors",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/pois_ll.py#L53-L62 | train | 47,272 |
moonso/loqusdb | loqusdb/commands/delete.py | delete | def delete(ctx, family_file, family_type, case_id):
"""Delete the variants of a case."""
if not (family_file or case_id):
LOG.error("Please provide a family file")
ctx.abort()
adapter = ctx.obj['adapter']
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
if not case_id:
LOG.warning("Please provide a case id")
ctx.abort()
existing_case = adapter.case({'case_id': case_id})
if not existing_case:
LOG.warning("Case %s does not exist in database" %case_id)
context.abort
start_deleting = datetime.now()
try:
delete_command(
adapter=adapter,
case_obj=existing_case,
)
except (CaseError, IOError) as error:
LOG.warning(error)
ctx.abort() | python | def delete(ctx, family_file, family_type, case_id):
"""Delete the variants of a case."""
if not (family_file or case_id):
LOG.error("Please provide a family file")
ctx.abort()
adapter = ctx.obj['adapter']
# Get a ped_parser.Family object from family file
family = None
family_id = None
if family_file:
with open(family_file, 'r') as family_lines:
family = get_case(
family_lines=family_lines,
family_type=family_type
)
family_id = family.family_id
# There has to be a case_id or a family at this stage.
case_id = case_id or family_id
if not case_id:
LOG.warning("Please provide a case id")
ctx.abort()
existing_case = adapter.case({'case_id': case_id})
if not existing_case:
LOG.warning("Case %s does not exist in database" %case_id)
context.abort
start_deleting = datetime.now()
try:
delete_command(
adapter=adapter,
case_obj=existing_case,
)
except (CaseError, IOError) as error:
LOG.warning(error)
ctx.abort() | [
"def",
"delete",
"(",
"ctx",
",",
"family_file",
",",
"family_type",
",",
"case_id",
")",
":",
"if",
"not",
"(",
"family_file",
"or",
"case_id",
")",
":",
"LOG",
".",
"error",
"(",
"\"Please provide a family file\"",
")",
"ctx",
".",
"abort",
"(",
")",
"... | Delete the variants of a case. | [
"Delete",
"the",
"variants",
"of",
"a",
"case",
"."
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/delete.py#L28-L68 | train | 47,273 |
markperdue/pyvesync | src/pyvesync/vesyncoutlet.py | VeSyncOutlet.update_energy | def update_energy(self, bypass_check: bool = False):
"""Builds weekly, monthly and yearly dictionaries"""
if bypass_check or (not bypass_check and self.update_time_check):
self.get_weekly_energy()
if 'week' in self.energy:
self.get_monthly_energy()
self.get_yearly_energy()
if not bypass_check:
self.update_energy_ts = time.time() | python | def update_energy(self, bypass_check: bool = False):
"""Builds weekly, monthly and yearly dictionaries"""
if bypass_check or (not bypass_check and self.update_time_check):
self.get_weekly_energy()
if 'week' in self.energy:
self.get_monthly_energy()
self.get_yearly_energy()
if not bypass_check:
self.update_energy_ts = time.time() | [
"def",
"update_energy",
"(",
"self",
",",
"bypass_check",
":",
"bool",
"=",
"False",
")",
":",
"if",
"bypass_check",
"or",
"(",
"not",
"bypass_check",
"and",
"self",
".",
"update_time_check",
")",
":",
"self",
".",
"get_weekly_energy",
"(",
")",
"if",
"'we... | Builds weekly, monthly and yearly dictionaries | [
"Builds",
"weekly",
"monthly",
"and",
"yearly",
"dictionaries"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncoutlet.py#L61-L69 | train | 47,274 |
markperdue/pyvesync | src/pyvesync/vesyncoutlet.py | VeSyncOutlet15A.turn_on_nightlight | def turn_on_nightlight(self):
"""Turn on nightlight"""
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
body['mode'] = 'auto'
response, _ = helpers.call_api(
'/15a/v1/device/nightlightstatus',
'put',
headers=helpers.req_headers(self.manager),
json=body
)
return helpers.check_response(response, '15a_ntlight') | python | def turn_on_nightlight(self):
"""Turn on nightlight"""
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
body['mode'] = 'auto'
response, _ = helpers.call_api(
'/15a/v1/device/nightlightstatus',
'put',
headers=helpers.req_headers(self.manager),
json=body
)
return helpers.check_response(response, '15a_ntlight') | [
"def",
"turn_on_nightlight",
"(",
"self",
")",
":",
"body",
"=",
"helpers",
".",
"req_body",
"(",
"self",
".",
"manager",
",",
"'devicestatus'",
")",
"body",
"[",
"'uuid'",
"]",
"=",
"self",
".",
"uuid",
"body",
"[",
"'mode'",
"]",
"=",
"'auto'",
"resp... | Turn on nightlight | [
"Turn",
"on",
"nightlight"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncoutlet.py#L441-L454 | train | 47,275 |
johncosta/django-like-button | like_button/templatetags/like_button.py | like_button_js_tag | def like_button_js_tag(context):
""" This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed.
"""
if FACEBOOK_APP_ID is None:
log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
# make sure FACEBOOK_APP_ID is setup correct and user is authenticated
if FACEBOOK_APP_ID:
request = context.get('request', None)
if request:
return {"LIKE_BUTTON_IS_VALID": True,
"facebook_app_id": FACEBOOK_APP_ID,
"channel_base_url": request.get_host()}
# if it is here, it isn't a valid setup, return False to not show the tag.
return {"LIKE_BUTTON_IS_VALID": False} | python | def like_button_js_tag(context):
""" This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed.
"""
if FACEBOOK_APP_ID is None:
log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
# make sure FACEBOOK_APP_ID is setup correct and user is authenticated
if FACEBOOK_APP_ID:
request = context.get('request', None)
if request:
return {"LIKE_BUTTON_IS_VALID": True,
"facebook_app_id": FACEBOOK_APP_ID,
"channel_base_url": request.get_host()}
# if it is here, it isn't a valid setup, return False to not show the tag.
return {"LIKE_BUTTON_IS_VALID": False} | [
"def",
"like_button_js_tag",
"(",
"context",
")",
":",
"if",
"FACEBOOK_APP_ID",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"FACEBOOK_APP_ID isn't setup correctly in your settings\"",
")",
"# make sure FACEBOOK_APP_ID is setup correct and user is authenticated",
"if",
"FAC... | This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed. | [
"This",
"tag",
"will",
"check",
"to",
"see",
"if",
"they",
"have",
"the",
"FACEBOOK_LIKE_APP_ID",
"setup",
"correctly",
"in",
"the",
"django",
"settings",
"if",
"so",
"then",
"it",
"will",
"pass",
"the",
"data",
"along",
"to",
"the",
"intercom_tag",
"templat... | c93a1be9c041d76e8de9a26f424ad4f836ab97bd | https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L34-L55 | train | 47,276 |
johncosta/django-like-button | like_button/templatetags/like_button.py | like_button_tag | def like_button_tag(context):
""" This tag will check to see if they have the FACEBOOK_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed.
s
"""
if FACEBOOK_APP_ID is None:
log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
# make sure INTERCOM_APPID is setup correct and user is authenticated
if FACEBOOK_APP_ID:
request = context.get('request', None)
if request:
path_to_like = (
"http://" + request.get_host() + request.get_full_path())
show_send = true_false_converter(FACEBOOK_SHOW_SEND)
like_width = FACEBOOK_LIKE_WIDTH
show_faces = true_false_converter(FACEBOOK_SHOW_FACES)
font = FACEBOOK_FONT
return {"LIKE_BUTTON_IS_VALID": True,
"path_to_like": path_to_like,
"show_send": show_send,
"like_width": like_width,
"show_faces": show_faces,
"font": font,
"like_layout": FACEBOOK_LIKE_LAYOUT}
# if it is here, it isn't a valid setup, return False to not show the tag.
return {"LIKE_BUTTON_IS_VALID": False} | python | def like_button_tag(context):
""" This tag will check to see if they have the FACEBOOK_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed.
s
"""
if FACEBOOK_APP_ID is None:
log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")
# make sure INTERCOM_APPID is setup correct and user is authenticated
if FACEBOOK_APP_ID:
request = context.get('request', None)
if request:
path_to_like = (
"http://" + request.get_host() + request.get_full_path())
show_send = true_false_converter(FACEBOOK_SHOW_SEND)
like_width = FACEBOOK_LIKE_WIDTH
show_faces = true_false_converter(FACEBOOK_SHOW_FACES)
font = FACEBOOK_FONT
return {"LIKE_BUTTON_IS_VALID": True,
"path_to_like": path_to_like,
"show_send": show_send,
"like_width": like_width,
"show_faces": show_faces,
"font": font,
"like_layout": FACEBOOK_LIKE_LAYOUT}
# if it is here, it isn't a valid setup, return False to not show the tag.
return {"LIKE_BUTTON_IS_VALID": False} | [
"def",
"like_button_tag",
"(",
"context",
")",
":",
"if",
"FACEBOOK_APP_ID",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"FACEBOOK_APP_ID isn't setup correctly in your settings\"",
")",
"# make sure INTERCOM_APPID is setup correct and user is authenticated",
"if",
"FACEBOO... | This tag will check to see if they have the FACEBOOK_APP_ID setup
correctly in the django settings, if so then it will pass the data
along to the intercom_tag template to be displayed.
If something isn't perfect we will return False, which will then not
install the javascript since it isn't needed.
s | [
"This",
"tag",
"will",
"check",
"to",
"see",
"if",
"they",
"have",
"the",
"FACEBOOK_APP_ID",
"setup",
"correctly",
"in",
"the",
"django",
"settings",
"if",
"so",
"then",
"it",
"will",
"pass",
"the",
"data",
"along",
"to",
"the",
"intercom_tag",
"template",
... | c93a1be9c041d76e8de9a26f424ad4f836ab97bd | https://github.com/johncosta/django-like-button/blob/c93a1be9c041d76e8de9a26f424ad4f836ab97bd/like_button/templatetags/like_button.py#L59-L94 | train | 47,277 |
moonso/loqusdb | loqusdb/plugins/mongo/structural_variant.py | SVMixin.get_structural_variant | def get_structural_variant(self, variant):
"""Check if there are any overlapping sv clusters
Search the sv variants with chrom start end_chrom end and sv_type
Args:
variant (dict): A variant dictionary
Returns:
variant (dict): A variant dictionary
"""
# Create a query for the database
# This will include more variants than we want
# The rest of the calculations will be done in python
query = {
'chrom': variant['chrom'],
'end_chrom': variant['end_chrom'],
'sv_type': variant['sv_type'],
'$and': [
{'pos_left': {'$lte': variant['pos']}},
{'pos_right': {'$gte': variant['pos']}},
]
}
res = self.db.structural_variant.find(query).sort('pos_left',1)
match = None
distance = None
closest_hit = None
# First we check that the coordinates are correct
# Then we count the distance to mean on both ends to see which variant is closest
for hit in res:
# We know from the query that the variants position is larger than the left most part of
# the cluster.
# If the right most part of the cluster is smaller than the variant position they do
# not overlap
if hit['end_left'] > variant['end']:
continue
if hit['end_right'] < variant['end']:
continue
# We need to calculate the distance to see what cluster that was closest to the variant
distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) +
abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2))
# If we have no cluster yet we set the curent to be the hit
if closest_hit is None:
match = hit
closest_hit = distance
continue
# If the distance is closer than previous we choose current cluster
if distance < closest_hit:
# Set match to the current closest hit
match = hit
# Update the closest distance
closest_hit = distance
return match | python | def get_structural_variant(self, variant):
"""Check if there are any overlapping sv clusters
Search the sv variants with chrom start end_chrom end and sv_type
Args:
variant (dict): A variant dictionary
Returns:
variant (dict): A variant dictionary
"""
# Create a query for the database
# This will include more variants than we want
# The rest of the calculations will be done in python
query = {
'chrom': variant['chrom'],
'end_chrom': variant['end_chrom'],
'sv_type': variant['sv_type'],
'$and': [
{'pos_left': {'$lte': variant['pos']}},
{'pos_right': {'$gte': variant['pos']}},
]
}
res = self.db.structural_variant.find(query).sort('pos_left',1)
match = None
distance = None
closest_hit = None
# First we check that the coordinates are correct
# Then we count the distance to mean on both ends to see which variant is closest
for hit in res:
# We know from the query that the variants position is larger than the left most part of
# the cluster.
# If the right most part of the cluster is smaller than the variant position they do
# not overlap
if hit['end_left'] > variant['end']:
continue
if hit['end_right'] < variant['end']:
continue
# We need to calculate the distance to see what cluster that was closest to the variant
distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) +
abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2))
# If we have no cluster yet we set the curent to be the hit
if closest_hit is None:
match = hit
closest_hit = distance
continue
# If the distance is closer than previous we choose current cluster
if distance < closest_hit:
# Set match to the current closest hit
match = hit
# Update the closest distance
closest_hit = distance
return match | [
"def",
"get_structural_variant",
"(",
"self",
",",
"variant",
")",
":",
"# Create a query for the database",
"# This will include more variants than we want",
"# The rest of the calculations will be done in python",
"query",
"=",
"{",
"'chrom'",
":",
"variant",
"[",
"'chrom'",
... | Check if there are any overlapping sv clusters
Search the sv variants with chrom start end_chrom end and sv_type
Args:
variant (dict): A variant dictionary
Returns:
variant (dict): A variant dictionary | [
"Check",
"if",
"there",
"are",
"any",
"overlapping",
"sv",
"clusters"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L145-L202 | train | 47,278 |
moonso/loqusdb | loqusdb/plugins/mongo/structural_variant.py | SVMixin.get_sv_variants | def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None,
pos=None, end=None):
"""Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if end_chromosome:
query['end_chrom'] = end_chromosome
if sv_type:
query['sv_type'] = sv_type
if pos:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'pos_left': {'$lte': pos}})
query['$and'].append({'pos_right': {'$gte': pos}})
if end:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'end_left': {'$lte': end}})
query['$and'].append({'end_right': {'$gte': end}})
LOG.info("Find all sv variants {}".format(query))
return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)]) | python | def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None,
pos=None, end=None):
"""Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant))
"""
query = {}
if chromosome:
query['chrom'] = chromosome
if end_chromosome:
query['end_chrom'] = end_chromosome
if sv_type:
query['sv_type'] = sv_type
if pos:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'pos_left': {'$lte': pos}})
query['$and'].append({'pos_right': {'$gte': pos}})
if end:
if not '$and' in query:
query['$and'] = []
query['$and'].append({'end_left': {'$lte': end}})
query['$and'].append({'end_right': {'$gte': end}})
LOG.info("Find all sv variants {}".format(query))
return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)]) | [
"def",
"get_sv_variants",
"(",
"self",
",",
"chromosome",
"=",
"None",
",",
"end_chromosome",
"=",
"None",
",",
"sv_type",
"=",
"None",
",",
"pos",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"query",
"=",
"{",
"}",
"if",
"chromosome",
":",
"quer... | Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant)) | [
"Return",
"all",
"structural",
"variants",
"in",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/structural_variant.py#L204-L240 | train | 47,279 |
markperdue/pyvesync | src/pyvesync/vesyncfan.py | VeSyncAir131.get_details | def get_details(self):
"""Build details dictionary"""
body = helpers.req_body(self.manager, 'devicedetail')
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api('/131airpurifier/v1/device/deviceDetail',
method='post', headers=head, json=body)
if r is not None and helpers.check_response(r, 'airpur_detail'):
self.device_status = r.get('deviceStatus', 'unknown')
self.connection_status = r.get('connectionStatus', 'unknown')
self.details['active_time'] = r.get('activeTime', 0)
self.details['filter_life'] = r.get('filterLife', {})
self.details['screeen_status'] = r.get('screenStatus', 'unknown')
self.details['mode'] = r.get('mode', 'unknown')
self.details['level'] = r.get('level', None) | python | def get_details(self):
"""Build details dictionary"""
body = helpers.req_body(self.manager, 'devicedetail')
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api('/131airpurifier/v1/device/deviceDetail',
method='post', headers=head, json=body)
if r is not None and helpers.check_response(r, 'airpur_detail'):
self.device_status = r.get('deviceStatus', 'unknown')
self.connection_status = r.get('connectionStatus', 'unknown')
self.details['active_time'] = r.get('activeTime', 0)
self.details['filter_life'] = r.get('filterLife', {})
self.details['screeen_status'] = r.get('screenStatus', 'unknown')
self.details['mode'] = r.get('mode', 'unknown')
self.details['level'] = r.get('level', None) | [
"def",
"get_details",
"(",
"self",
")",
":",
"body",
"=",
"helpers",
".",
"req_body",
"(",
"self",
".",
"manager",
",",
"'devicedetail'",
")",
"head",
"=",
"helpers",
".",
"req_headers",
"(",
"self",
".",
"manager",
")",
"r",
",",
"_",
"=",
"helpers",
... | Build details dictionary | [
"Build",
"details",
"dictionary"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L17-L32 | train | 47,280 |
markperdue/pyvesync | src/pyvesync/vesyncfan.py | VeSyncAir131.turn_on | def turn_on(self):
"""Turn Air Purifier on"""
if self.device_status != 'on':
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
body['status'] = 'on'
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api('/131airPurifier/v1/device/deviceStatus',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.device_status = 'on'
return True
else:
return False | python | def turn_on(self):
"""Turn Air Purifier on"""
if self.device_status != 'on':
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
body['status'] = 'on'
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api('/131airPurifier/v1/device/deviceStatus',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.device_status = 'on'
return True
else:
return False | [
"def",
"turn_on",
"(",
"self",
")",
":",
"if",
"self",
".",
"device_status",
"!=",
"'on'",
":",
"body",
"=",
"helpers",
".",
"req_body",
"(",
"self",
".",
"manager",
",",
"'devicestatus'",
")",
"body",
"[",
"'uuid'",
"]",
"=",
"self",
".",
"uuid",
"b... | Turn Air Purifier on | [
"Turn",
"Air",
"Purifier",
"on"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L44-L59 | train | 47,281 |
markperdue/pyvesync | src/pyvesync/vesyncfan.py | VeSyncAir131.fan_speed | def fan_speed(self, speed: int = None) -> bool:
"""Adjust Fan Speed by Specifying 1,2,3 as argument or cycle
through speeds increasing by one"""
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
if self.details.get('mode') != 'manual':
self.mode_toggle('manual')
else:
if speed is not None:
level = int(self.details.get('level'))
if speed == level:
return False
elif speed in [1, 2, 3]:
body['level'] = speed
else:
if (level + 1) > 3:
body['level'] = 1
else:
body['level'] = int(level + 1)
r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.details['level'] = body['level']
return True
else:
return False | python | def fan_speed(self, speed: int = None) -> bool:
"""Adjust Fan Speed by Specifying 1,2,3 as argument or cycle
through speeds increasing by one"""
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
if self.details.get('mode') != 'manual':
self.mode_toggle('manual')
else:
if speed is not None:
level = int(self.details.get('level'))
if speed == level:
return False
elif speed in [1, 2, 3]:
body['level'] = speed
else:
if (level + 1) > 3:
body['level'] = 1
else:
body['level'] = int(level + 1)
r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.details['level'] = body['level']
return True
else:
return False | [
"def",
"fan_speed",
"(",
"self",
",",
"speed",
":",
"int",
"=",
"None",
")",
"->",
"bool",
":",
"body",
"=",
"helpers",
".",
"req_body",
"(",
"self",
".",
"manager",
",",
"'devicestatus'",
")",
"body",
"[",
"'uuid'",
"]",
"=",
"self",
".",
"uuid",
... | Adjust Fan Speed by Specifying 1,2,3 as argument or cycle
through speeds increasing by one | [
"Adjust",
"Fan",
"Speed",
"by",
"Specifying",
"1",
"2",
"3",
"as",
"argument",
"or",
"cycle",
"through",
"speeds",
"increasing",
"by",
"one"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L90-L118 | train | 47,282 |
markperdue/pyvesync | src/pyvesync/vesyncfan.py | VeSyncAir131.mode_toggle | def mode_toggle(self, mode: str) -> bool:
"""Set mode to manual, auto or sleep"""
head = helpers.req_headers(self.manager)
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
if mode != body['mode'] and mode in ['sleep', 'auto', 'manual']:
body['mode'] = mode
if mode == 'manual':
body['level'] = 1
r, _ = helpers.call_api('/131airPurifier/v1/device/updateMode',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.details['mode'] = mode
return True
return False | python | def mode_toggle(self, mode: str) -> bool:
"""Set mode to manual, auto or sleep"""
head = helpers.req_headers(self.manager)
body = helpers.req_body(self.manager, 'devicestatus')
body['uuid'] = self.uuid
if mode != body['mode'] and mode in ['sleep', 'auto', 'manual']:
body['mode'] = mode
if mode == 'manual':
body['level'] = 1
r, _ = helpers.call_api('/131airPurifier/v1/device/updateMode',
'put', json=body, headers=head)
if r is not None and helpers.check_response(r, 'airpur_status'):
self.details['mode'] = mode
return True
return False | [
"def",
"mode_toggle",
"(",
"self",
",",
"mode",
":",
"str",
")",
"->",
"bool",
":",
"head",
"=",
"helpers",
".",
"req_headers",
"(",
"self",
".",
"manager",
")",
"body",
"=",
"helpers",
".",
"req_body",
"(",
"self",
".",
"manager",
",",
"'devicestatus'... | Set mode to manual, auto or sleep | [
"Set",
"mode",
"to",
"manual",
"auto",
"or",
"sleep"
] | 7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc | https://github.com/markperdue/pyvesync/blob/7552dd1a6dd5ebc452acf78e33fd8f6e721e8cfc/src/pyvesync/vesyncfan.py#L120-L137 | train | 47,283 |
yjzhang/uncurl_python | uncurl/lineage.py | fourier_series | def fourier_series(x, *a):
"""
Arbitrary dimensionality fourier series.
The first parameter is a_0, and the second parameter is the interval/scale
parameter.
The parameters are altering sin and cos paramters.
n = (len(a)-2)/2
"""
output = 0
output += a[0]/2
w = a[1]
for n in range(2, len(a), 2):
n_ = n/2
val1 = a[n]
val2 = a[n+1]
output += val1*np.sin(n_*x*w)
output += val2*np.cos(n_*x*w)
return output | python | def fourier_series(x, *a):
"""
Arbitrary dimensionality fourier series.
The first parameter is a_0, and the second parameter is the interval/scale
parameter.
The parameters are altering sin and cos paramters.
n = (len(a)-2)/2
"""
output = 0
output += a[0]/2
w = a[1]
for n in range(2, len(a), 2):
n_ = n/2
val1 = a[n]
val2 = a[n+1]
output += val1*np.sin(n_*x*w)
output += val2*np.cos(n_*x*w)
return output | [
"def",
"fourier_series",
"(",
"x",
",",
"*",
"a",
")",
":",
"output",
"=",
"0",
"output",
"+=",
"a",
"[",
"0",
"]",
"/",
"2",
"w",
"=",
"a",
"[",
"1",
"]",
"for",
"n",
"in",
"range",
"(",
"2",
",",
"len",
"(",
"a",
")",
",",
"2",
")",
"... | Arbitrary dimensionality fourier series.
The first parameter is a_0, and the second parameter is the interval/scale
parameter.
The parameters are altering sin and cos paramters.
n = (len(a)-2)/2 | [
"Arbitrary",
"dimensionality",
"fourier",
"series",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L10-L30 | train | 47,284 |
yjzhang/uncurl_python | uncurl/lineage.py | poly_curve | def poly_curve(x, *a):
"""
Arbitrary dimension polynomial.
"""
output = 0.0
for n in range(0, len(a)):
output += a[n]*x**n
return output | python | def poly_curve(x, *a):
"""
Arbitrary dimension polynomial.
"""
output = 0.0
for n in range(0, len(a)):
output += a[n]*x**n
return output | [
"def",
"poly_curve",
"(",
"x",
",",
"*",
"a",
")",
":",
"output",
"=",
"0.0",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"a",
")",
")",
":",
"output",
"+=",
"a",
"[",
"n",
"]",
"*",
"x",
"**",
"n",
"return",
"output"
] | Arbitrary dimension polynomial. | [
"Arbitrary",
"dimension",
"polynomial",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L65-L72 | train | 47,285 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.set_ocha_url | def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url | python | def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url | [
"def",
"set_ocha_url",
"(",
"cls",
",",
"url",
"=",
"None",
")",
":",
"# type: (str) -> None",
"if",
"url",
"is",
"None",
":",
"url",
"=",
"cls",
".",
"_ochaurl_int",
"cls",
".",
"_ochaurl",
"=",
"url"
] | Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None | [
"Set",
"World",
"Bank",
"url",
"from",
"which",
"to",
"retrieve",
"countries",
"data"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L170-L183 | train | 47,286 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_info_from_iso3 | def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None | python | def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None | [
"def",
"get_country_info_from_iso3",
"(",
"cls",
",",
"iso3",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]",
"countriesdata",
"=",
"cls",
".",
"countriesdata",
"(",
"use... | Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information | [
"Get",
"country",
"information",
"from",
"ISO3",
"code"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L186-L205 | train | 47,287 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_name_from_iso3 | def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None | python | def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None | [
"def",
"get_country_name_from_iso3",
"(",
"cls",
",",
"iso3",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]",
"countryinfo",
"=",
"cls",
".",
"get_country_info_from_iso3",
"(",
... | Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name | [
"Get",
"country",
"name",
"from",
"ISO3",
"code"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L208-L223 | train | 47,288 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso2_from_iso3 | def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None | python | def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None | [
"def",
"get_iso2_from_iso3",
"(",
"cls",
",",
"iso3",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]",
"countriesdata",
"=",
"cls",
".",
"countriesdata",
"(",
"use_live",
"=",... | Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code | [
"Get",
"ISO2",
"from",
"ISO3",
"code"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L226-L245 | train | 47,289 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_m49_from_iso3 | def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None | python | def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None | [
"def",
"get_m49_from_iso3",
"(",
"cls",
",",
"iso3",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]",
"countriesdata",
"=",
"cls",
".",
"countriesdata",
"(",
"use_live",
"=",
... | Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code | [
"Get",
"M49",
"from",
"ISO3",
"code"
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L306-L325 | train | 47,290 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.simplify_countryname | def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words | python | def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words | [
"def",
"simplify_countryname",
"(",
"cls",
",",
"country",
")",
":",
"# type: (str) -> (str, List[str])",
"countryupper",
"=",
"country",
".",
"upper",
"(",
")",
"words",
"=",
"get_words_in_sentence",
"(",
"countryupper",
")",
"index",
"=",
"countryupper",
".",
"f... | Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words | [
"Simplifies",
"country",
"name",
"by",
"removing",
"descriptive",
"text",
"eg",
".",
"DEMOCRATIC",
"REPUBLIC",
"OF",
"etc",
"."
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L409-L446 | train | 47,291 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso3_country_code | def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None | python | def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None | [
"def",
"get_iso3_country_code",
"(",
"cls",
",",
"country",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]",
"countriesdata",
"=",
"cls",
".",
"countriesdata",
"(",
"use_live",
... | Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None | [
"Get",
"ISO3",
"code",
"for",
"cls",
".",
"Only",
"exact",
"matches",
"or",
"None",
"are",
"returned",
"."
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L449-L483 | train | 47,292 |
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso3_country_code_fuzzy | def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False | python | def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False | [
"def",
"get_iso3_country_code_fuzzy",
"(",
"cls",
",",
"country",
",",
"use_live",
"=",
"True",
",",
"exception",
"=",
"None",
")",
":",
"# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]",
"countriesdata",
"=",
"cls",
".",
"countriesdata",
... | Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False). | [
"Get",
"ISO3",
"code",
"for",
"cls",
".",
"A",
"tuple",
"is",
"returned",
"with",
"the",
"first",
"value",
"being",
"the",
"ISO3",
"code",
"and",
"the",
"second",
"showing",
"if",
"the",
"match",
"is",
"exact",
"or",
"not",
"."
] | e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6 | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L486-L556 | train | 47,293 |
moonso/loqusdb | loqusdb/commands/load_profile.py | load_profile | def load_profile(ctx, variant_file, update, stats, profile_threshold):
"""
Command for profiling of samples. User may upload variants used in profiling
from a vcf, update the profiles for all samples, and get some stats
from the profiles in the database.
Profiling is used to monitor duplicates in the database. The profile is
based on the variants in the 'profile_variant' collection, assessing
the genotypes for each sample at the position of these variants.
"""
adapter = ctx.obj['adapter']
if variant_file:
load_profile_variants(adapter, variant_file)
if update:
update_profiles(adapter)
if stats:
distance_dict = profile_stats(adapter, threshold=profile_threshold)
click.echo(table_from_dict(distance_dict)) | python | def load_profile(ctx, variant_file, update, stats, profile_threshold):
"""
Command for profiling of samples. User may upload variants used in profiling
from a vcf, update the profiles for all samples, and get some stats
from the profiles in the database.
Profiling is used to monitor duplicates in the database. The profile is
based on the variants in the 'profile_variant' collection, assessing
the genotypes for each sample at the position of these variants.
"""
adapter = ctx.obj['adapter']
if variant_file:
load_profile_variants(adapter, variant_file)
if update:
update_profiles(adapter)
if stats:
distance_dict = profile_stats(adapter, threshold=profile_threshold)
click.echo(table_from_dict(distance_dict)) | [
"def",
"load_profile",
"(",
"ctx",
",",
"variant_file",
",",
"update",
",",
"stats",
",",
"profile_threshold",
")",
":",
"adapter",
"=",
"ctx",
".",
"obj",
"[",
"'adapter'",
"]",
"if",
"variant_file",
":",
"load_profile_variants",
"(",
"adapter",
",",
"varia... | Command for profiling of samples. User may upload variants used in profiling
from a vcf, update the profiles for all samples, and get some stats
from the profiles in the database.
Profiling is used to monitor duplicates in the database. The profile is
based on the variants in the 'profile_variant' collection, assessing
the genotypes for each sample at the position of these variants. | [
"Command",
"for",
"profiling",
"of",
"samples",
".",
"User",
"may",
"upload",
"variants",
"used",
"in",
"profiling",
"from",
"a",
"vcf",
"update",
"the",
"profiles",
"for",
"all",
"samples",
"and",
"get",
"some",
"stats",
"from",
"the",
"profiles",
"in",
"... | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/commands/load_profile.py#L36-L59 | train | 47,294 |
moonso/loqusdb | loqusdb/plugins/mongo/profile_variant.py | ProfileVariantMixin.add_profile_variants | def add_profile_variants(self, profile_variants):
"""Add several variants to the profile_variant collection in the
database
Args:
profile_variants(list(models.ProfileVariant))
"""
results = self.db.profile_variant.insert_many(profile_variants)
return results | python | def add_profile_variants(self, profile_variants):
"""Add several variants to the profile_variant collection in the
database
Args:
profile_variants(list(models.ProfileVariant))
"""
results = self.db.profile_variant.insert_many(profile_variants)
return results | [
"def",
"add_profile_variants",
"(",
"self",
",",
"profile_variants",
")",
":",
"results",
"=",
"self",
".",
"db",
".",
"profile_variant",
".",
"insert_many",
"(",
"profile_variants",
")",
"return",
"results"
] | Add several variants to the profile_variant collection in the
database
Args:
profile_variants(list(models.ProfileVariant)) | [
"Add",
"several",
"variants",
"to",
"the",
"profile_variant",
"collection",
"in",
"the",
"database"
] | 792dcd0d461aff5adc703c49eebf58964913a513 | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/plugins/mongo/profile_variant.py#L7-L20 | train | 47,295 |
yjzhang/uncurl_python | uncurl/zip_clustering.py | zip_fit_params | def zip_fit_params(data):
"""
Returns the ZIP parameters that best fit a given data set.
Args:
data (array): 2d array of genes x cells belonging to a given cluster
Returns:
L (array): 1d array of means
M (array): 1d array of zero-inflation parameter
"""
genes, cells = data.shape
m = data.mean(1)
v = data.var(1)
M = (v-m)/(m**2+v-m)
#M = v/(v+m**2)
#M[np.isnan(M)] = 0.0
M = np.array([min(1.0, max(0.0, x)) for x in M])
L = m + v/m - 1.0
#L = (v + m**2)/m
L[np.isnan(L)] = 0.0
L = np.array([max(0.0, x) for x in L])
return L, M | python | def zip_fit_params(data):
"""
Returns the ZIP parameters that best fit a given data set.
Args:
data (array): 2d array of genes x cells belonging to a given cluster
Returns:
L (array): 1d array of means
M (array): 1d array of zero-inflation parameter
"""
genes, cells = data.shape
m = data.mean(1)
v = data.var(1)
M = (v-m)/(m**2+v-m)
#M = v/(v+m**2)
#M[np.isnan(M)] = 0.0
M = np.array([min(1.0, max(0.0, x)) for x in M])
L = m + v/m - 1.0
#L = (v + m**2)/m
L[np.isnan(L)] = 0.0
L = np.array([max(0.0, x) for x in L])
return L, M | [
"def",
"zip_fit_params",
"(",
"data",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"m",
"=",
"data",
".",
"mean",
"(",
"1",
")",
"v",
"=",
"data",
".",
"var",
"(",
"1",
")",
"M",
"=",
"(",
"v",
"-",
"m",
")",
"/",
"(",
"m",
... | Returns the ZIP parameters that best fit a given data set.
Args:
data (array): 2d array of genes x cells belonging to a given cluster
Returns:
L (array): 1d array of means
M (array): 1d array of zero-inflation parameter | [
"Returns",
"the",
"ZIP",
"parameters",
"that",
"best",
"fit",
"a",
"given",
"data",
"set",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_clustering.py#L11-L33 | train | 47,296 |
yjzhang/uncurl_python | uncurl/zip_clustering.py | zip_cluster | def zip_cluster(data, k, init=None, max_iters=100):
"""
Performs hard EM clustering using the zero-inflated Poisson distribution.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
assignments (array): integer assignments of cells to clusters (length cells)
L (array): Poisson parameter (genes x k)
M (array): zero-inflation parameter (genes x k)
"""
genes, cells = data.shape
init, new_assignments = kmeans_pp(data+eps, k, centers=init)
centers = np.copy(init)
M = np.zeros(centers.shape)
assignments = new_assignments
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
for it in range(max_iters):
lls = zip_ll(data, centers, M)
new_assignments = np.argmax(lls, 1)
if np.equal(assignments, new_assignments).all():
return assignments, centers, M
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
assignments = new_assignments
return assignments, centers, M | python | def zip_cluster(data, k, init=None, max_iters=100):
"""
Performs hard EM clustering using the zero-inflated Poisson distribution.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
assignments (array): integer assignments of cells to clusters (length cells)
L (array): Poisson parameter (genes x k)
M (array): zero-inflation parameter (genes x k)
"""
genes, cells = data.shape
init, new_assignments = kmeans_pp(data+eps, k, centers=init)
centers = np.copy(init)
M = np.zeros(centers.shape)
assignments = new_assignments
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
for it in range(max_iters):
lls = zip_ll(data, centers, M)
new_assignments = np.argmax(lls, 1)
if np.equal(assignments, new_assignments).all():
return assignments, centers, M
for c in range(k):
centers[:,c], M[:,c] = zip_fit_params_mle(data[:, assignments==c])
assignments = new_assignments
return assignments, centers, M | [
"def",
"zip_cluster",
"(",
"data",
",",
"k",
",",
"init",
"=",
"None",
",",
"max_iters",
"=",
"100",
")",
":",
"genes",
",",
"cells",
"=",
"data",
".",
"shape",
"init",
",",
"new_assignments",
"=",
"kmeans_pp",
"(",
"data",
"+",
"eps",
",",
"k",
",... | Performs hard EM clustering using the zero-inflated Poisson distribution.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
assignments (array): integer assignments of cells to clusters (length cells)
L (array): Poisson parameter (genes x k)
M (array): zero-inflation parameter (genes x k) | [
"Performs",
"hard",
"EM",
"clustering",
"using",
"the",
"zero",
"-",
"inflated",
"Poisson",
"distribution",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/zip_clustering.py#L46-L76 | train | 47,297 |
yjzhang/uncurl_python | uncurl/dimensionality_reduction.py | diffusion_mds | def diffusion_mds(means, weights, d, diffusion_rounds=10):
"""
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | python | def diffusion_mds(means, weights, d, diffusion_rounds=10):
"""
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | [
"def",
"diffusion_mds",
"(",
"means",
",",
"weights",
",",
"d",
",",
"diffusion_rounds",
"=",
"10",
")",
":",
"for",
"i",
"in",
"range",
"(",
"diffusion_rounds",
")",
":",
"weights",
"=",
"weights",
"*",
"weights",
"weights",
"=",
"weights",
"/",
"weight... | Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) | [
"Dimensionality",
"reduction",
"using",
"MDS",
"while",
"running",
"diffusion",
"on",
"W",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L9-L28 | train | 47,298 |
yjzhang/uncurl_python | uncurl/dimensionality_reduction.py | mds | def mds(means, weights, d):
"""
Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | python | def mds(means, weights, d):
"""
Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | [
"def",
"mds",
"(",
"means",
",",
"weights",
",",
"d",
")",
":",
"X",
"=",
"dim_reduce",
"(",
"means",
",",
"weights",
",",
"d",
")",
"if",
"X",
".",
"shape",
"[",
"0",
"]",
"==",
"2",
":",
"return",
"X",
".",
"dot",
"(",
"weights",
")",
"else... | Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) | [
"Dimensionality",
"reduction",
"using",
"MDS",
"."
] | 55c58ca5670f87699d3bd5752fdfa4baa07724dd | https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/dimensionality_reduction.py#L31-L47 | train | 47,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.