body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
d66445ca3f2820ec71632b3770dda3dd0572493d205b175221aac6123bd88487 | def getSliceDiv(self, rowmax=None, colmax=None):
'\n Determine how to slice the arrays.\n\n Args:\n rowmax (int): Maximum number of rows in each slice; default None\n uses entire row.\n colmax (int): Maximum number of columns in each slice; default\n None uses entire column.\n\n Returns:\n tuple: rowstarts, rowends, colstarts, colends.\n '
numrows = self.gdict.ny
numcols = self.gdict.nx
if ((rowmax is None) or (rowmax > numrows)):
rowmax = numrows
if ((colmax is None) or (colmax > numcols)):
colmax = numcols
(numrowslice, rmrow) = divmod(numrows, rowmax)
(numcolslice, rmcol) = divmod(numcols, colmax)
rowst = np.arange(0, (numrowslice * rowmax), rowmax)
rowen = np.arange(rowmax, ((numrowslice + 1) * rowmax), rowmax)
if (rmrow > 0):
rowst = np.hstack([rowst, (numrowslice * rowmax)])
rowen = np.hstack([rowen, None])
else:
rowen = np.hstack([rowen[:(- 1)], None])
colst = np.arange(0, (numcolslice * colmax), colmax)
colen = np.arange(colmax, ((numcolslice + 1) * colmax), colmax)
if (rmcol > 0):
colst = np.hstack([colst, (numcolslice * colmax)])
colen = np.hstack([colen, None])
else:
colen = np.hstack([colen[:(- 1)], None])
rowstarts = np.tile(rowst, len(colst))
colstarts = np.repeat(colst, len(rowst))
rowends = np.tile(rowen, len(colen))
colends = np.repeat(colen, len(rowen))
return (rowstarts, rowends, colstarts, colends) | Determine how to slice the arrays.
Args:
rowmax (int): Maximum number of rows in each slice; default None
uses entire row.
colmax (int): Maximum number of columns in each slice; default
None uses entire column.
Returns:
tuple: rowstarts, rowends, colstarts, colends. | gfail/temphdf.py | getSliceDiv | mhearne-usgs/groundfailure | 9 | python | def getSliceDiv(self, rowmax=None, colmax=None):
'\n Determine how to slice the arrays.\n\n Args:\n rowmax (int): Maximum number of rows in each slice; default None\n uses entire row.\n colmax (int): Maximum number of columns in each slice; default\n None uses entire column.\n\n Returns:\n tuple: rowstarts, rowends, colstarts, colends.\n '
numrows = self.gdict.ny
numcols = self.gdict.nx
if ((rowmax is None) or (rowmax > numrows)):
rowmax = numrows
if ((colmax is None) or (colmax > numcols)):
colmax = numcols
(numrowslice, rmrow) = divmod(numrows, rowmax)
(numcolslice, rmcol) = divmod(numcols, colmax)
rowst = np.arange(0, (numrowslice * rowmax), rowmax)
rowen = np.arange(rowmax, ((numrowslice + 1) * rowmax), rowmax)
if (rmrow > 0):
rowst = np.hstack([rowst, (numrowslice * rowmax)])
rowen = np.hstack([rowen, None])
else:
rowen = np.hstack([rowen[:(- 1)], None])
colst = np.arange(0, (numcolslice * colmax), colmax)
colen = np.arange(colmax, ((numcolslice + 1) * colmax), colmax)
if (rmcol > 0):
colst = np.hstack([colst, (numcolslice * colmax)])
colen = np.hstack([colen, None])
else:
colen = np.hstack([colen[:(- 1)], None])
rowstarts = np.tile(rowst, len(colst))
colstarts = np.repeat(colst, len(rowst))
rowends = np.tile(rowen, len(colen))
colends = np.repeat(colen, len(rowen))
return (rowstarts, rowends, colstarts, colends) | def getSliceDiv(self, rowmax=None, colmax=None):
'\n Determine how to slice the arrays.\n\n Args:\n rowmax (int): Maximum number of rows in each slice; default None\n uses entire row.\n colmax (int): Maximum number of columns in each slice; default\n None uses entire column.\n\n Returns:\n tuple: rowstarts, rowends, colstarts, colends.\n '
numrows = self.gdict.ny
numcols = self.gdict.nx
if ((rowmax is None) or (rowmax > numrows)):
rowmax = numrows
if ((colmax is None) or (colmax > numcols)):
colmax = numcols
(numrowslice, rmrow) = divmod(numrows, rowmax)
(numcolslice, rmcol) = divmod(numcols, colmax)
rowst = np.arange(0, (numrowslice * rowmax), rowmax)
rowen = np.arange(rowmax, ((numrowslice + 1) * rowmax), rowmax)
if (rmrow > 0):
rowst = np.hstack([rowst, (numrowslice * rowmax)])
rowen = np.hstack([rowen, None])
else:
rowen = np.hstack([rowen[:(- 1)], None])
colst = np.arange(0, (numcolslice * colmax), colmax)
colen = np.arange(colmax, ((numcolslice + 1) * colmax), colmax)
if (rmcol > 0):
colst = np.hstack([colst, (numcolslice * colmax)])
colen = np.hstack([colen, None])
else:
colen = np.hstack([colen[:(- 1)], None])
rowstarts = np.tile(rowst, len(colst))
colstarts = np.repeat(colst, len(rowst))
rowends = np.tile(rowen, len(colen))
colends = np.repeat(colen, len(rowen))
return (rowstarts, rowends, colstarts, colends)<|docstring|>Determine how to slice the arrays.
Args:
rowmax (int): Maximum number of rows in each slice; default None
uses entire row.
colmax (int): Maximum number of columns in each slice; default
None uses entire column.
Returns:
tuple: rowstarts, rowends, colstarts, colends.<|endoftext|> |
6ce8efd4d140b0281f1ef5ba981985b3f723a2fdaf5ae57dfa91d137a9651464 | def train_pmf(X_train, params, X_val=None, fit_params=None):
'\n Train a PMF model with given training and validation data\n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model | Train a PMF model with given training and validation data | src/single_species_mf_hp_search.py | train_pmf | lrgr/xsmf | 2 | python | def train_pmf(X_train, params, X_val=None, fit_params=None):
'\n \n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model | def train_pmf(X_train, params, X_val=None, fit_params=None):
'\n \n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model<|docstring|>Train a PMF model with given training and validation data<|endoftext|> |
d0b5e3c4c92ccb5168d7bd2a12e3dbe88182d6a132a1987523def5c4ab34122e | def pmf_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmf(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters | src/single_species_mf_hp_search.py | pmf_objective | lrgr/xsmf | 2 | python | def pmf_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmf(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | def pmf_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmf(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj<|docstring|>Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters<|endoftext|> |
9534430bda1677c8a93ae167981d084b8becc181c24160d957c7dbfe400b74da | def train_pmfb(X_train, params, X_val=None, fit_params=None):
'\n Train a PMF model with given training and validation data\n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF_b(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model | Train a PMF model with given training and validation data | src/single_species_mf_hp_search.py | train_pmfb | lrgr/xsmf | 2 | python | def train_pmfb(X_train, params, X_val=None, fit_params=None):
'\n \n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF_b(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model | def train_pmfb(X_train, params, X_val=None, fit_params=None):
'\n \n '
assert (fit_params is None)
if ('rank' in params):
params['rank'] = np.int(params['rank'])
pmf_model = PMF_b(**params)
pmf_model.fit(X_train, X_val=X_val)
return pmf_model<|docstring|>Train a PMF model with given training and validation data<|endoftext|> |
cd8b33e751a697a784f8de92287a2e79cff8bbcfb7dee9d6daaea6608ecf7829 | def pmfb_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmfb(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters | src/single_species_mf_hp_search.py | pmfb_objective | lrgr/xsmf | 2 | python | def pmfb_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmfb(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | def pmfb_objective(X_train, X_val, fit_params=None):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is None)
def obj(params):
pmf_model = train_pmfb(X_train, params, X_val=X_val)
r2_loss = pmf_model.score(X_val)
params = {**params, **pmf_model.get_params()}
params['max_iter'] = pmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj<|docstring|>Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters<|endoftext|> |
6571c41622db8e6147f74d61dda49679fb55517711ab7a8b3aec558e516bd138 | def train_kpmfb(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmfb_model = KPMF_b(**_params)
kpmfb_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmfb_model | Train a PMF model with given training and validation data.
`fit_params`: contains dictionary containing side information (kernel)
used sat training time. | src/single_species_mf_hp_search.py | train_kpmfb | lrgr/xsmf | 2 | python | def train_kpmfb(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmfb_model = KPMF_b(**_params)
kpmfb_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmfb_model | def train_kpmfb(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmfb_model = KPMF_b(**_params)
kpmfb_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmfb_model<|docstring|>Train a PMF model with given training and validation data.
`fit_params`: contains dictionary containing side information (kernel)
used sat training time.<|endoftext|> |
35460f4af25e1158d85317f04bfb76ac1223cb7b0477ec0900f99bd5e8605350 | def kpmfb_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmfb_model = train_kpmfb(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmfb_model.score(X_val)
params = {**params, **kpmfb_model.get_params()}
print(params)
params['max_iter'] = kpmfb_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters | src/single_species_mf_hp_search.py | kpmfb_objective | lrgr/xsmf | 2 | python | def kpmfb_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmfb_model = train_kpmfb(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmfb_model.score(X_val)
params = {**params, **kpmfb_model.get_params()}
print(params)
params['max_iter'] = kpmfb_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | def kpmfb_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmfb_model = train_kpmfb(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmfb_model.score(X_val)
params = {**params, **kpmfb_model.get_params()}
print(params)
params['max_iter'] = kpmfb_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj<|docstring|>Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters<|endoftext|> |
c559b61131847cdffbf484be841a30c0e7c7d677cf976bcaa36e720249bfd2cc | def train_kpmf(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmf_model = KPMF(**_params)
kpmf_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmf_model | Train a PMF model with given training and validation data.
`fit_params`: contains dictionary containing side information (kernel)
used sat training time. | src/single_species_mf_hp_search.py | train_kpmf | lrgr/xsmf | 2 | python | def train_kpmf(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmf_model = KPMF(**_params)
kpmf_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmf_model | def train_kpmf(X_train, params, fit_params, X_val=None):
'\n Train a PMF model with given training and validation data.\n `fit_params`: contains dictionary containing side information (kernel)\n used sat training time.\n '
_params = params.copy()
L = fit_params['L']
rl_lambda = _params['rl_lambda']
RL = np.linalg.inv((np.eye(len(L)) + (rl_lambda * L)))
_params.pop('rl_lambda', None)
if ('rank' in _params):
_params['rank'] = np.int(_params['rank'])
kpmf_model = KPMF(**_params)
kpmf_model.fit(X_train, X_val=X_val, F_kernel=RL)
return kpmf_model<|docstring|>Train a PMF model with given training and validation data.
`fit_params`: contains dictionary containing side information (kernel)
used sat training time.<|endoftext|> |
5d5d5d053c933f25dc7574aa5ae9d5b4bec4994481d7536fa2631d4b254fff9f | def kpmf_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmf_model = train_kpmf(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmf_model.score(X_val)
params = {**params, **kpmf_model.get_params()}
params['max_iter'] = kpmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters | src/single_species_mf_hp_search.py | kpmf_objective | lrgr/xsmf | 2 | python | def kpmf_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmf_model = train_kpmf(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmf_model.score(X_val)
params = {**params, **kpmf_model.get_params()}
params['max_iter'] = kpmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | def kpmf_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n PMF model on validation data given model hyperparameters\n '
assert (fit_params is not None)
def obj(params):
kpmf_model = train_kpmf(X_train, params, fit_params=fit_params, X_val=X_val)
r2_loss = kpmf_model.score(X_val)
params = {**params, **kpmf_model.get_params()}
params['max_iter'] = kpmf_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj<|docstring|>Return an objective function that that computes R2 of the trained
PMF model on validation data given model hyperparameters<|endoftext|> |
fed8f53ea5985e47db2c2e842c410cac634105b964a3725202fc68dd4422ce9e | def ngmc_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n NGMC model on validation data given model hyperparameters\n '
def obj(params):
ngmc_model = train_ngmc(X_train, params, fit_params, X_val=X_val)
r2_loss = ngmc_model.score(X_val)
params = {**params, **ngmc_model.get_params()}
params['max_iter'] = ngmc_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | Return an objective function that that computes R2 of the trained
NGMC model on validation data given model hyperparameters | src/single_species_mf_hp_search.py | ngmc_objective | lrgr/xsmf | 2 | python | def ngmc_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n NGMC model on validation data given model hyperparameters\n '
def obj(params):
ngmc_model = train_ngmc(X_train, params, fit_params, X_val=X_val)
r2_loss = ngmc_model.score(X_val)
params = {**params, **ngmc_model.get_params()}
params['max_iter'] = ngmc_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj | def ngmc_objective(X_train, X_val, fit_params):
'\n Return an objective function that that computes R2 of the trained\n NGMC model on validation data given model hyperparameters\n '
def obj(params):
ngmc_model = train_ngmc(X_train, params, fit_params, X_val=X_val)
r2_loss = ngmc_model.score(X_val)
params = {**params, **ngmc_model.get_params()}
params['max_iter'] = ngmc_model.iters_ran
return {'loss': (1 - r2_loss), 'status': hyperopt.STATUS_OK, 'params': params}
return obj<|docstring|>Return an objective function that that computes R2 of the trained
NGMC model on validation data given model hyperparameters<|endoftext|> |
1756edb0ead0cf3d5bb803708037ab0259c20249a1bcbc051c6b5ec88c941a44 | def compute_training_curve(train_model, X_train, X_val, params, fit_params=None):
'\n Compute training curve given a function that that trains a model\n '
model = train_model(X_train, params, X_val=X_val, fit_params=fit_params)
return dict(train=np.asarray(model.train_r2_hist), val=np.asarray(model.val_r2_hist)) | Compute training curve given a function that that trains a model | src/single_species_mf_hp_search.py | compute_training_curve | lrgr/xsmf | 2 | python | def compute_training_curve(train_model, X_train, X_val, params, fit_params=None):
'\n \n '
model = train_model(X_train, params, X_val=X_val, fit_params=fit_params)
return dict(train=np.asarray(model.train_r2_hist), val=np.asarray(model.val_r2_hist)) | def compute_training_curve(train_model, X_train, X_val, params, fit_params=None):
'\n \n '
model = train_model(X_train, params, X_val=X_val, fit_params=fit_params)
return dict(train=np.asarray(model.train_r2_hist), val=np.asarray(model.val_r2_hist))<|docstring|>Compute training curve given a function that that trains a model<|endoftext|> |
a85e1adaacd0302e1c957c2d30b283eff0d30d80e88d61de394020ec7f728f72 | def __init__(self, dataset, unprivileged_groups=None, privileged_groups=None):
'\n Args:\n dataset (BinaryLabelDataset): A BinaryLabelDataset.\n privileged_groups (list(dict)): Privileged groups. Format is a list\n of `dicts` where the keys are `protected_attribute_names` and\n the values are values in `protected_attributes`. Each `dict`\n element describes a single group. See examples for more details.\n unprivileged_groups (list(dict)): Unprivileged groups in the same\n format as `privileged_groups`.\n\n Raises:\n TypeError: `dataset` must be a\n :obj:`~aif360.datasets.BinaryLabelDataset` type.\n '
if (not isinstance(dataset, BinaryLabelDataset)):
raise TypeError("'dataset' should be a BinaryLabelDataset")
super(BinaryLabelDatasetMetric, self).__init__(dataset, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) | Args:
dataset (BinaryLabelDataset): A BinaryLabelDataset.
privileged_groups (list(dict)): Privileged groups. Format is a list
of `dicts` where the keys are `protected_attribute_names` and
the values are values in `protected_attributes`. Each `dict`
element describes a single group. See examples for more details.
unprivileged_groups (list(dict)): Unprivileged groups in the same
format as `privileged_groups`.
Raises:
TypeError: `dataset` must be a
:obj:`~aif360.datasets.BinaryLabelDataset` type. | aif360/metrics/binary_label_dataset_metric.py | __init__ | kcha/AIF360 | 3 | python | def __init__(self, dataset, unprivileged_groups=None, privileged_groups=None):
'\n Args:\n dataset (BinaryLabelDataset): A BinaryLabelDataset.\n privileged_groups (list(dict)): Privileged groups. Format is a list\n of `dicts` where the keys are `protected_attribute_names` and\n the values are values in `protected_attributes`. Each `dict`\n element describes a single group. See examples for more details.\n unprivileged_groups (list(dict)): Unprivileged groups in the same\n format as `privileged_groups`.\n\n Raises:\n TypeError: `dataset` must be a\n :obj:`~aif360.datasets.BinaryLabelDataset` type.\n '
if (not isinstance(dataset, BinaryLabelDataset)):
raise TypeError("'dataset' should be a BinaryLabelDataset")
super(BinaryLabelDatasetMetric, self).__init__(dataset, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups) | def __init__(self, dataset, unprivileged_groups=None, privileged_groups=None):
'\n Args:\n dataset (BinaryLabelDataset): A BinaryLabelDataset.\n privileged_groups (list(dict)): Privileged groups. Format is a list\n of `dicts` where the keys are `protected_attribute_names` and\n the values are values in `protected_attributes`. Each `dict`\n element describes a single group. See examples for more details.\n unprivileged_groups (list(dict)): Unprivileged groups in the same\n format as `privileged_groups`.\n\n Raises:\n TypeError: `dataset` must be a\n :obj:`~aif360.datasets.BinaryLabelDataset` type.\n '
if (not isinstance(dataset, BinaryLabelDataset)):
raise TypeError("'dataset' should be a BinaryLabelDataset")
super(BinaryLabelDatasetMetric, self).__init__(dataset, unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)<|docstring|>Args:
dataset (BinaryLabelDataset): A BinaryLabelDataset.
privileged_groups (list(dict)): Privileged groups. Format is a list
of `dicts` where the keys are `protected_attribute_names` and
the values are values in `protected_attributes`. Each `dict`
element describes a single group. See examples for more details.
unprivileged_groups (list(dict)): Unprivileged groups in the same
format as `privileged_groups`.
Raises:
TypeError: `dataset` must be a
:obj:`~aif360.datasets.BinaryLabelDataset` type.<|endoftext|> |
b4cb2163de3525340c7432d767b1cbac73841dcee1ca6bff21898ccae14051ea | def num_positives(self, privileged=None):
'Compute the number of positives,\n :math:`P = \\sum_{i=1}^n \\mathbb{1}[y_i = 1]`,\n optionally conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition) | Compute the number of positives,
:math:`P = \sum_{i=1}^n \mathbb{1}[y_i = 1]`,
optionally conditioned on protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Raises:
AttributeError: `privileged_groups` or `unprivileged_groups` must be
must be provided at initialization to condition on them. | aif360/metrics/binary_label_dataset_metric.py | num_positives | kcha/AIF360 | 3 | python | def num_positives(self, privileged=None):
'Compute the number of positives,\n :math:`P = \\sum_{i=1}^n \\mathbb{1}[y_i = 1]`,\n optionally conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition) | def num_positives(self, privileged=None):
'Compute the number of positives,\n :math:`P = \\sum_{i=1}^n \\mathbb{1}[y_i = 1]`,\n optionally conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition)<|docstring|>Compute the number of positives,
:math:`P = \sum_{i=1}^n \mathbb{1}[y_i = 1]`,
optionally conditioned on protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Raises:
AttributeError: `privileged_groups` or `unprivileged_groups` must be
must be provided at initialization to condition on them.<|endoftext|> |
9fc10c5c61ed3e825c5168be1a2208ad84c201272b9a3503867a05a4ad002452 | def num_negatives(self, privileged=None):
'Compute the number of negatives,\n :math:`N = \\sum_{i=1}^n \\mathbb{1}[y_i = 0]`, optionally conditioned on\n protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.unfavorable_label, condition=condition) | Compute the number of negatives,
:math:`N = \sum_{i=1}^n \mathbb{1}[y_i = 0]`, optionally conditioned on
protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Raises:
AttributeError: `privileged_groups` or `unprivileged_groups` must be
must be provided at initialization to condition on them. | aif360/metrics/binary_label_dataset_metric.py | num_negatives | kcha/AIF360 | 3 | python | def num_negatives(self, privileged=None):
'Compute the number of negatives,\n :math:`N = \\sum_{i=1}^n \\mathbb{1}[y_i = 0]`, optionally conditioned on\n protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.unfavorable_label, condition=condition) | def num_negatives(self, privileged=None):
'Compute the number of negatives,\n :math:`N = \\sum_{i=1}^n \\mathbb{1}[y_i = 0]`, optionally conditioned on\n protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n\n Raises:\n AttributeError: `privileged_groups` or `unprivileged_groups` must be\n must be provided at initialization to condition on them.\n '
condition = self._to_condition(privileged)
return utils.compute_num_pos_neg(self.dataset.protected_attributes, self.dataset.labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.unfavorable_label, condition=condition)<|docstring|>Compute the number of negatives,
:math:`N = \sum_{i=1}^n \mathbb{1}[y_i = 0]`, optionally conditioned on
protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Raises:
AttributeError: `privileged_groups` or `unprivileged_groups` must be
must be provided at initialization to condition on them.<|endoftext|> |
325498c112e67387cc917dac80d20b3d212990e39d73f611e22dbf4c598b1a54 | def base_rate(self, privileged=None):
'Compute the base rate, :math:`Pr(Y = 1) = P/(P+N)`, optionally\n conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n Returns:\n float: Base rate (optionally conditioned).\n '
return (self.num_positives(privileged=privileged) / self.num_instances(privileged=privileged)) | Compute the base rate, :math:`Pr(Y = 1) = P/(P+N)`, optionally
conditioned on protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Returns:
float: Base rate (optionally conditioned). | aif360/metrics/binary_label_dataset_metric.py | base_rate | kcha/AIF360 | 3 | python | def base_rate(self, privileged=None):
'Compute the base rate, :math:`Pr(Y = 1) = P/(P+N)`, optionally\n conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n Returns:\n float: Base rate (optionally conditioned).\n '
return (self.num_positives(privileged=privileged) / self.num_instances(privileged=privileged)) | def base_rate(self, privileged=None):
'Compute the base rate, :math:`Pr(Y = 1) = P/(P+N)`, optionally\n conditioned on protected attributes.\n\n Args:\n privileged (bool, optional): Boolean prescribing whether to\n condition this metric on the `privileged_groups`, if `True`, or\n the `unprivileged_groups`, if `False`. Defaults to `None`\n meaning this metric is computed over the entire dataset.\n Returns:\n float: Base rate (optionally conditioned).\n '
return (self.num_positives(privileged=privileged) / self.num_instances(privileged=privileged))<|docstring|>Compute the base rate, :math:`Pr(Y = 1) = P/(P+N)`, optionally
conditioned on protected attributes.
Args:
privileged (bool, optional): Boolean prescribing whether to
condition this metric on the `privileged_groups`, if `True`, or
the `unprivileged_groups`, if `False`. Defaults to `None`
meaning this metric is computed over the entire dataset.
Returns:
float: Base rate (optionally conditioned).<|endoftext|> |
709c8dfafb888879c44876318d4fe52bc432552a853c1c228b9b1dc8905f19e1 | def disparate_impact(self):
'\n .. math::\n \\frac{Pr(Y = 1 | D = \\text{unprivileged})}\n {Pr(Y = 1 | D = \\text{privileged})}\n '
return self.ratio(self.base_rate) | .. math::
\frac{Pr(Y = 1 | D = \text{unprivileged})}
{Pr(Y = 1 | D = \text{privileged})} | aif360/metrics/binary_label_dataset_metric.py | disparate_impact | kcha/AIF360 | 3 | python | def disparate_impact(self):
'\n .. math::\n \\frac{Pr(Y = 1 | D = \\text{unprivileged})}\n {Pr(Y = 1 | D = \\text{privileged})}\n '
return self.ratio(self.base_rate) | def disparate_impact(self):
'\n .. math::\n \\frac{Pr(Y = 1 | D = \\text{unprivileged})}\n {Pr(Y = 1 | D = \\text{privileged})}\n '
return self.ratio(self.base_rate)<|docstring|>.. math::
\frac{Pr(Y = 1 | D = \text{unprivileged})}
{Pr(Y = 1 | D = \text{privileged})}<|endoftext|> |
430fe26e77c3a9a731e4e638625195394bcaba43b3654dd324ec729fb5f54de1 | def statistical_parity_difference(self):
'\n .. math::\n Pr(Y = 1 | D = \\text{unprivileged})\n - Pr(Y = 1 | D = \\text{privileged})\n '
return self.difference(self.base_rate) | .. math::
Pr(Y = 1 | D = \text{unprivileged})
- Pr(Y = 1 | D = \text{privileged}) | aif360/metrics/binary_label_dataset_metric.py | statistical_parity_difference | kcha/AIF360 | 3 | python | def statistical_parity_difference(self):
'\n .. math::\n Pr(Y = 1 | D = \\text{unprivileged})\n - Pr(Y = 1 | D = \\text{privileged})\n '
return self.difference(self.base_rate) | def statistical_parity_difference(self):
'\n .. math::\n Pr(Y = 1 | D = \\text{unprivileged})\n - Pr(Y = 1 | D = \\text{privileged})\n '
return self.difference(self.base_rate)<|docstring|>.. math::
Pr(Y = 1 | D = \text{unprivileged})
- Pr(Y = 1 | D = \text{privileged})<|endoftext|> |
93136761076ec85b7ee3f32f15f61831f9637d83e4bace92f7c091b780601c7f | def consistency(self, n_neighbors=5):
'Individual fairness metric from [1]_ that measures how similar the\n labels are for similar instances.\n\n .. math::\n 1 - \\frac{1}{n\\cdot\\text{n_neighbors}}\\sum_{i=1}^n |\\hat{y}_i -\n \\sum_{j\\in\\mathcal{N}_{\\text{n_neighbors}}(x_i)} \\hat{y}_j|\n\n Args:\n n_neighbors (int, optional): Number of neighbors for the knn\n computation.\n\n References:\n .. [1] R. Zemel, Y. Wu, K. Swersky, T. Pitassi, and C. Dwork,\n "Learning Fair Representations,"\n International Conference on Machine Learning, 2013.\n '
X = self.dataset.features
num_samples = X.shape[0]
y = self.dataset.labels
nbrs = NearestNeighbors(n_neighbors, algorithm='ball_tree').fit(X)
(_, indices) = nbrs.kneighbors(X)
consistency = 0.0
for i in range(num_samples):
consistency += np.abs((y[i] - np.mean(y[indices[i]])))
consistency = (1.0 - (consistency / num_samples))
return consistency | Individual fairness metric from [1]_ that measures how similar the
labels are for similar instances.
.. math::
1 - \frac{1}{n\cdot\text{n_neighbors}}\sum_{i=1}^n |\hat{y}_i -
\sum_{j\in\mathcal{N}_{\text{n_neighbors}}(x_i)} \hat{y}_j|
Args:
n_neighbors (int, optional): Number of neighbors for the knn
computation.
References:
.. [1] R. Zemel, Y. Wu, K. Swersky, T. Pitassi, and C. Dwork,
"Learning Fair Representations,"
International Conference on Machine Learning, 2013. | aif360/metrics/binary_label_dataset_metric.py | consistency | kcha/AIF360 | 3 | python | def consistency(self, n_neighbors=5):
'Individual fairness metric from [1]_ that measures how similar the\n labels are for similar instances.\n\n .. math::\n 1 - \\frac{1}{n\\cdot\\text{n_neighbors}}\\sum_{i=1}^n |\\hat{y}_i -\n \\sum_{j\\in\\mathcal{N}_{\\text{n_neighbors}}(x_i)} \\hat{y}_j|\n\n Args:\n n_neighbors (int, optional): Number of neighbors for the knn\n computation.\n\n References:\n .. [1] R. Zemel, Y. Wu, K. Swersky, T. Pitassi, and C. Dwork,\n "Learning Fair Representations,"\n International Conference on Machine Learning, 2013.\n '
X = self.dataset.features
num_samples = X.shape[0]
y = self.dataset.labels
nbrs = NearestNeighbors(n_neighbors, algorithm='ball_tree').fit(X)
(_, indices) = nbrs.kneighbors(X)
consistency = 0.0
for i in range(num_samples):
consistency += np.abs((y[i] - np.mean(y[indices[i]])))
consistency = (1.0 - (consistency / num_samples))
return consistency | def consistency(self, n_neighbors=5):
'Individual fairness metric from [1]_ that measures how similar the\n labels are for similar instances.\n\n .. math::\n 1 - \\frac{1}{n\\cdot\\text{n_neighbors}}\\sum_{i=1}^n |\\hat{y}_i -\n \\sum_{j\\in\\mathcal{N}_{\\text{n_neighbors}}(x_i)} \\hat{y}_j|\n\n Args:\n n_neighbors (int, optional): Number of neighbors for the knn\n computation.\n\n References:\n .. [1] R. Zemel, Y. Wu, K. Swersky, T. Pitassi, and C. Dwork,\n "Learning Fair Representations,"\n International Conference on Machine Learning, 2013.\n '
X = self.dataset.features
num_samples = X.shape[0]
y = self.dataset.labels
nbrs = NearestNeighbors(n_neighbors, algorithm='ball_tree').fit(X)
(_, indices) = nbrs.kneighbors(X)
consistency = 0.0
for i in range(num_samples):
consistency += np.abs((y[i] - np.mean(y[indices[i]])))
consistency = (1.0 - (consistency / num_samples))
return consistency<|docstring|>Individual fairness metric from [1]_ that measures how similar the
labels are for similar instances.
.. math::
1 - \frac{1}{n\cdot\text{n_neighbors}}\sum_{i=1}^n |\hat{y}_i -
\sum_{j\in\mathcal{N}_{\text{n_neighbors}}(x_i)} \hat{y}_j|
Args:
n_neighbors (int, optional): Number of neighbors for the knn
computation.
References:
.. [1] R. Zemel, Y. Wu, K. Swersky, T. Pitassi, and C. Dwork,
"Learning Fair Representations,"
International Conference on Machine Learning, 2013.<|endoftext|> |
bdfe2995d1dc1dcc83cec9f9dc3c4fff5939b5d6408321fda11c2b652d652c42 | def _smoothed_base_rates(self, labels, concentration=1.0):
'Dirichlet-smoothed base rates for each intersecting group in the\n dataset.\n '
if (concentration < 0):
raise ValueError('Concentration parameter must be non-negative.')
num_classes = 2
dirichlet_alpha = (concentration / num_classes)
intersect_groups = np.unique(self.dataset.protected_attributes, axis=0)
num_intersects = len(intersect_groups)
counts_pos = np.zeros(num_intersects)
counts_total = np.zeros(num_intersects)
for i in range(num_intersects):
condition = [dict(zip(self.dataset.protected_attribute_names, intersect_groups[i]))]
counts_total[i] = utils.compute_num_instances(self.dataset.protected_attributes, self.dataset.instance_weights, self.dataset.protected_attribute_names, condition=condition)
counts_pos[i] = utils.compute_num_pos_neg(self.dataset.protected_attributes, labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition)
return ((counts_pos + dirichlet_alpha) / (counts_total + concentration)) | Dirichlet-smoothed base rates for each intersecting group in the
dataset. | aif360/metrics/binary_label_dataset_metric.py | _smoothed_base_rates | kcha/AIF360 | 3 | python | def _smoothed_base_rates(self, labels, concentration=1.0):
'Dirichlet-smoothed base rates for each intersecting group in the\n dataset.\n '
if (concentration < 0):
raise ValueError('Concentration parameter must be non-negative.')
num_classes = 2
dirichlet_alpha = (concentration / num_classes)
intersect_groups = np.unique(self.dataset.protected_attributes, axis=0)
num_intersects = len(intersect_groups)
counts_pos = np.zeros(num_intersects)
counts_total = np.zeros(num_intersects)
for i in range(num_intersects):
condition = [dict(zip(self.dataset.protected_attribute_names, intersect_groups[i]))]
counts_total[i] = utils.compute_num_instances(self.dataset.protected_attributes, self.dataset.instance_weights, self.dataset.protected_attribute_names, condition=condition)
counts_pos[i] = utils.compute_num_pos_neg(self.dataset.protected_attributes, labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition)
return ((counts_pos + dirichlet_alpha) / (counts_total + concentration)) | def _smoothed_base_rates(self, labels, concentration=1.0):
'Dirichlet-smoothed base rates for each intersecting group in the\n dataset.\n '
if (concentration < 0):
raise ValueError('Concentration parameter must be non-negative.')
num_classes = 2
dirichlet_alpha = (concentration / num_classes)
intersect_groups = np.unique(self.dataset.protected_attributes, axis=0)
num_intersects = len(intersect_groups)
counts_pos = np.zeros(num_intersects)
counts_total = np.zeros(num_intersects)
for i in range(num_intersects):
condition = [dict(zip(self.dataset.protected_attribute_names, intersect_groups[i]))]
counts_total[i] = utils.compute_num_instances(self.dataset.protected_attributes, self.dataset.instance_weights, self.dataset.protected_attribute_names, condition=condition)
counts_pos[i] = utils.compute_num_pos_neg(self.dataset.protected_attributes, labels, self.dataset.instance_weights, self.dataset.protected_attribute_names, self.dataset.favorable_label, condition=condition)
return ((counts_pos + dirichlet_alpha) / (counts_total + concentration))<|docstring|>Dirichlet-smoothed base rates for each intersecting group in the
dataset.<|endoftext|> |
09bb80245cf35788717dc329449ce2908bf2811216945b6491aa4a930ad86634 | def smoothed_empirical_differential_fairness(self, concentration=1.0):
'Smoothed EDF from [#foulds18]_.\n\n Args:\n concentration (float, optional): Concentration parameter for\n Dirichlet smoothing. Must be non-negative.\n\n Examples:\n To use with non-binary protected attributes, the column must be\n converted to ordinal:\n \n >>> mapping = {\'Black\': 0, \'White\': 1, \'Asian-Pac-Islander\': 2,\n ... \'Amer-Indian-Eskimo\': 3, \'Other\': 4}\n >>> def map_race(df):\n ... df[\'race-num\'] = df.race.map(mapping)\n ... return df\n ...\n >>> adult = AdultDataset(protected_attribute_names=[\'sex\',\n ... \'race-num\'], privileged_classes=[[\'Male\'], [1]],\n ... categorical_features=[\'workclass\', \'education\',\n ... \'marital-status\', \'occupation\', \'relationship\',\n ... \'native-country\', \'race\'], custom_preprocessing=map_race)\n >>> metric = BinaryLabelDatasetMetric(adult)\n >>> metric.smoothed_empirical_differential_fairness()\n 1.7547611985549287\n\n References:\n .. [#foulds18] J. R. Foulds, R. Islam, K. N. Keya, and S. Pan,\n "An Intersectional Definition of Fairness," arXiv preprint\n arXiv:1807.08362, 2018.\n '
sbr = self._smoothed_base_rates(self.dataset.labels, concentration)
def pos_ratio(i, j):
return abs((np.log(sbr[i]) - np.log(sbr[j])))
def neg_ratio(i, j):
return abs((np.log((1 - sbr[i])) - np.log((1 - sbr[j]))))
return max((max(pos_ratio(i, j), neg_ratio(i, j)) for i in range(len(sbr)) for j in range(len(sbr)) if (i != j))) | Smoothed EDF from [#foulds18]_.
Args:
concentration (float, optional): Concentration parameter for
Dirichlet smoothing. Must be non-negative.
Examples:
To use with non-binary protected attributes, the column must be
converted to ordinal:
>>> mapping = {'Black': 0, 'White': 1, 'Asian-Pac-Islander': 2,
... 'Amer-Indian-Eskimo': 3, 'Other': 4}
>>> def map_race(df):
... df['race-num'] = df.race.map(mapping)
... return df
...
>>> adult = AdultDataset(protected_attribute_names=['sex',
... 'race-num'], privileged_classes=[['Male'], [1]],
... categorical_features=['workclass', 'education',
... 'marital-status', 'occupation', 'relationship',
... 'native-country', 'race'], custom_preprocessing=map_race)
>>> metric = BinaryLabelDatasetMetric(adult)
>>> metric.smoothed_empirical_differential_fairness()
1.7547611985549287
References:
.. [#foulds18] J. R. Foulds, R. Islam, K. N. Keya, and S. Pan,
"An Intersectional Definition of Fairness," arXiv preprint
arXiv:1807.08362, 2018. | aif360/metrics/binary_label_dataset_metric.py | smoothed_empirical_differential_fairness | kcha/AIF360 | 3 | python | def smoothed_empirical_differential_fairness(self, concentration=1.0):
'Smoothed EDF from [#foulds18]_.\n\n Args:\n concentration (float, optional): Concentration parameter for\n Dirichlet smoothing. Must be non-negative.\n\n Examples:\n To use with non-binary protected attributes, the column must be\n converted to ordinal:\n \n >>> mapping = {\'Black\': 0, \'White\': 1, \'Asian-Pac-Islander\': 2,\n ... \'Amer-Indian-Eskimo\': 3, \'Other\': 4}\n >>> def map_race(df):\n ... df[\'race-num\'] = df.race.map(mapping)\n ... return df\n ...\n >>> adult = AdultDataset(protected_attribute_names=[\'sex\',\n ... \'race-num\'], privileged_classes=[[\'Male\'], [1]],\n ... categorical_features=[\'workclass\', \'education\',\n ... \'marital-status\', \'occupation\', \'relationship\',\n ... \'native-country\', \'race\'], custom_preprocessing=map_race)\n >>> metric = BinaryLabelDatasetMetric(adult)\n >>> metric.smoothed_empirical_differential_fairness()\n 1.7547611985549287\n\n References:\n .. [#foulds18] J. R. Foulds, R. Islam, K. N. Keya, and S. Pan,\n "An Intersectional Definition of Fairness," arXiv preprint\n arXiv:1807.08362, 2018.\n '
sbr = self._smoothed_base_rates(self.dataset.labels, concentration)
def pos_ratio(i, j):
return abs((np.log(sbr[i]) - np.log(sbr[j])))
def neg_ratio(i, j):
return abs((np.log((1 - sbr[i])) - np.log((1 - sbr[j]))))
return max((max(pos_ratio(i, j), neg_ratio(i, j)) for i in range(len(sbr)) for j in range(len(sbr)) if (i != j))) | def smoothed_empirical_differential_fairness(self, concentration=1.0):
'Smoothed EDF from [#foulds18]_.\n\n Args:\n concentration (float, optional): Concentration parameter for\n Dirichlet smoothing. Must be non-negative.\n\n Examples:\n To use with non-binary protected attributes, the column must be\n converted to ordinal:\n \n >>> mapping = {\'Black\': 0, \'White\': 1, \'Asian-Pac-Islander\': 2,\n ... \'Amer-Indian-Eskimo\': 3, \'Other\': 4}\n >>> def map_race(df):\n ... df[\'race-num\'] = df.race.map(mapping)\n ... return df\n ...\n >>> adult = AdultDataset(protected_attribute_names=[\'sex\',\n ... \'race-num\'], privileged_classes=[[\'Male\'], [1]],\n ... categorical_features=[\'workclass\', \'education\',\n ... \'marital-status\', \'occupation\', \'relationship\',\n ... \'native-country\', \'race\'], custom_preprocessing=map_race)\n >>> metric = BinaryLabelDatasetMetric(adult)\n >>> metric.smoothed_empirical_differential_fairness()\n 1.7547611985549287\n\n References:\n .. [#foulds18] J. R. Foulds, R. Islam, K. N. Keya, and S. Pan,\n "An Intersectional Definition of Fairness," arXiv preprint\n arXiv:1807.08362, 2018.\n '
sbr = self._smoothed_base_rates(self.dataset.labels, concentration)
def pos_ratio(i, j):
return abs((np.log(sbr[i]) - np.log(sbr[j])))
def neg_ratio(i, j):
return abs((np.log((1 - sbr[i])) - np.log((1 - sbr[j]))))
return max((max(pos_ratio(i, j), neg_ratio(i, j)) for i in range(len(sbr)) for j in range(len(sbr)) if (i != j)))<|docstring|>Smoothed EDF from [#foulds18]_.
Args:
concentration (float, optional): Concentration parameter for
Dirichlet smoothing. Must be non-negative.
Examples:
To use with non-binary protected attributes, the column must be
converted to ordinal:
>>> mapping = {'Black': 0, 'White': 1, 'Asian-Pac-Islander': 2,
... 'Amer-Indian-Eskimo': 3, 'Other': 4}
>>> def map_race(df):
... df['race-num'] = df.race.map(mapping)
... return df
...
>>> adult = AdultDataset(protected_attribute_names=['sex',
... 'race-num'], privileged_classes=[['Male'], [1]],
... categorical_features=['workclass', 'education',
... 'marital-status', 'occupation', 'relationship',
... 'native-country', 'race'], custom_preprocessing=map_race)
>>> metric = BinaryLabelDatasetMetric(adult)
>>> metric.smoothed_empirical_differential_fairness()
1.7547611985549287
References:
.. [#foulds18] J. R. Foulds, R. Islam, K. N. Keya, and S. Pan,
"An Intersectional Definition of Fairness," arXiv preprint
arXiv:1807.08362, 2018.<|endoftext|> |
9a394217c887d8d91e8e0f3a9756cc61238ab91efa099a064b65d913e6f4395a | def mean_difference(self):
'Alias of :meth:`statistical_parity_difference`.'
return self.statistical_parity_difference() | Alias of :meth:`statistical_parity_difference`. | aif360/metrics/binary_label_dataset_metric.py | mean_difference | kcha/AIF360 | 3 | python | def mean_difference(self):
return self.statistical_parity_difference() | def mean_difference(self):
return self.statistical_parity_difference()<|docstring|>Alias of :meth:`statistical_parity_difference`.<|endoftext|> |
d41ecfffabaf6eae042ad49fcd7fd2384df7d043c719c6c576df0bd370aa7424 | def has_object_permission(self, request, view, obj):
'Allows user to view all profiles but edit only their profile.'
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.id) | Allows user to view all profiles but edit only their profile. | dev/profiles/permissions.py | has_object_permission | saurabhrautela/django_docker_boilerplate | 0 | python | def has_object_permission(self, request, view, obj):
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.id) | def has_object_permission(self, request, view, obj):
if (request.method in permissions.SAFE_METHODS):
return True
return (obj.id == request.id)<|docstring|>Allows user to view all profiles but edit only their profile.<|endoftext|> |
2b60832bac640f8c48c92e683ceeeb5a4d009516f7ccd5093ca2e544bdf28104 | def extendOrder(self, orderModel):
'\n orderModel[]\n packageModel[]\n itemModel[]\n '
extendedOrderModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = ('itemModel' if ('itemModel' in packageModel) else 'ItemModelList')
for itemModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
item_dict = self.grabChildren(itemModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedOrderModel.append(new_dict)
return extendedOrderModel | orderModel[]
packageModel[]
itemModel[] | src/OrderList.py | extendOrder | tm-softworks/OrderGetterR | 2 | python | def extendOrder(self, orderModel):
'\n orderModel[]\n packageModel[]\n itemModel[]\n '
extendedOrderModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = ('itemModel' if ('itemModel' in packageModel) else 'ItemModelList')
for itemModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
item_dict = self.grabChildren(itemModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedOrderModel.append(new_dict)
return extendedOrderModel | def extendOrder(self, orderModel):
'\n orderModel[]\n packageModel[]\n itemModel[]\n '
extendedOrderModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = ('itemModel' if ('itemModel' in packageModel) else 'ItemModelList')
for itemModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
item_dict = self.grabChildren(itemModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedOrderModel.append(new_dict)
return extendedOrderModel<|docstring|>orderModel[]
packageModel[]
itemModel[]<|endoftext|> |
811c1464767550d8a5815d89724a16f6d179b0bfc5657f7dca3c1bd2b00c2b78 | def extendCouponDetail(self, orderModel):
'\n orderModel[]\n couponModel[]\n '
extendedCouponModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('couponModel' if ('couponModel' in orderModel) else 'CouponModelList')
if (orderModel[key1] is not None):
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
extendedCouponModel.append(new_dict)
return extendedCouponModel | orderModel[]
couponModel[] | src/OrderList.py | extendCouponDetail | tm-softworks/OrderGetterR | 2 | python | def extendCouponDetail(self, orderModel):
'\n orderModel[]\n couponModel[]\n '
extendedCouponModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('couponModel' if ('couponModel' in orderModel) else 'CouponModelList')
if (orderModel[key1] is not None):
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
extendedCouponModel.append(new_dict)
return extendedCouponModel | def extendCouponDetail(self, orderModel):
'\n orderModel[]\n couponModel[]\n '
extendedCouponModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('couponModel' if ('couponModel' in orderModel) else 'CouponModelList')
if (orderModel[key1] is not None):
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
extendedCouponModel.append(new_dict)
return extendedCouponModel<|docstring|>orderModel[]
couponModel[]<|endoftext|> |
a9a0ac324da74b562e9f7fef381bfd86fd00c0a645d00ab804c1fdf4a5efd98b | def extendShippingDetail(self, orderModel):
'\n orderModel[]\n shippingModel[]\n '
extendedShippingModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = 'ShippingModelList'
if (packageModel.get(key2) is not None):
for packageModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
logger.debug('{}'.format(packageModel))
item_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedShippingModel.append(new_dict)
return extendedShippingModel | orderModel[]
shippingModel[] | src/OrderList.py | extendShippingDetail | tm-softworks/OrderGetterR | 2 | python | def extendShippingDetail(self, orderModel):
'\n orderModel[]\n shippingModel[]\n '
extendedShippingModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = 'ShippingModelList'
if (packageModel.get(key2) is not None):
for packageModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
logger.debug('{}'.format(packageModel))
item_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedShippingModel.append(new_dict)
return extendedShippingModel | def extendShippingDetail(self, orderModel):
'\n orderModel[]\n shippingModel[]\n '
extendedShippingModel = []
order_dict = self.grabChildren(orderModel)
key1 = ('packageModel' if ('packageModel' in orderModel) else 'PackageModelList')
for packageModel in orderModel[key1]:
prefix = (key1 + '.')
logger.debug('{}'.format(packageModel))
pkg_dict = self.grabChildren(packageModel, prefix)
key2 = 'ShippingModelList'
if (packageModel.get(key2) is not None):
for packageModel in packageModel[key2]:
prefix = (((key1 + '.') + key2) + '.')
logger.debug('{}'.format(packageModel))
item_dict = self.grabChildren(packageModel, prefix)
new_dict = copy.copy(order_dict)
new_dict.update(pkg_dict)
new_dict.update(item_dict)
extendedShippingModel.append(new_dict)
return extendedShippingModel<|docstring|>orderModel[]
shippingModel[]<|endoftext|> |
c0e9729075ea26e788422a80ec3048d2de369c54d6a845f69cccb88df1cd15d0 | def publish_abc():
' publish alembiccache\n \n '
def get_cam_info(abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_cams = cmds.ls('{}*'.format(fileCode), fl=1, type='camera')
if (not _cams):
return
for _cam in _cams:
_cam_trans = cmds.listRelatives(_cam, p=1)[0]
_production_file = '{}/{}.{}'.format(_production_path, _cam_trans, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _cam_trans, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _cam_trans, *EXPORTATTR)
_job.append(_joborder)
_json.append(['', _cam_trans, '', _production_file])
_dict[_publish_file] = _production_file
def get_asset_info(renderdag, abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_assets = yeticache.get_asset_list()
fixmeshname.fix_deformed_mesh_name('_rendering', renderdag)
for _dag in renderdag:
_ns = cmds.referenceQuery(_dag, ns=1)
if _ns.startswith(':'):
_ns = _ns[1:]
if (_ns in _assets):
_sns = cmds.referenceQuery(_dag, ns=1, shn=1)
_production_file = '{}/{}.{}'.format(_production_path, _sns, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _sns, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _dag, *EXPORTATTR)
_job.append(_joborder)
_json.append([_assets[_ns], _sns, _dag.split(':')[(- 1)], _production_file])
_dict[_publish_file] = _production_file
_attr_code = 'cache/alembiccache'
_file_suffix = 'json'
_abc_suffix = 'abc'
_link = record.current_link()
_project_step_id = record.current_project_step_id()
_project_step_handle = zfused_api.step.ProjectStep(_project_step_id)
_step_code = _project_step_handle.code()
_software_code = zfused_api.software.Software(_project_step_handle.data['SoftwareId']).code()
_object_handle = zfused_api.objects.Objects(_link[0], _link[1])
_link_production_path = _object_handle.production_path()
_link_publish_path = _object_handle.publish_path()
_file_code = _object_handle.file_code()
_production_path = '{}/{}/{}/{}'.format(_link_production_path, _step_code, _software_code, _attr_code)
_publish_path = '{}/{}/{}/{}'.format(_link_publish_path, _step_code, _software_code, _attr_code)
_cover_file = ('%s/%s.%s' % (_production_path, _file_code, _file_suffix))
_publish_file = ('%s/%s.%s' % (_publish_path, _file_code, _file_suffix))
_publish_file_dir = os.path.dirname(_publish_file)
if (not os.path.isdir(_publish_file_dir)):
os.makedirs(_publish_file_dir)
_production_json_file = '{}/{}.{}'.format(_production_path, _file_code, _file_suffix)
_publish_json_file = '{}/{}.{}'.format(_publish_path, _file_code, _file_suffix)
_start_frame = (int(cmds.playbackOptions(q=True, min=True)) - PREPFRAME)
_end_frame = (int(cmds.playbackOptions(q=True, max=True)) + PREPFRAME)
renderdag = []
if (not renderdag):
renderdag = renderinggroup.nodes()
_norenders = displaylayer.norender_info(displaylayer.nodes())
if _norenders:
for i in _norenders:
_attr = '{}.v'.format(i)
if (cmds.objExists(_attr) and (cmds.getAttr(_attr) != 0)):
cmds.setAttr(_attr, 0)
_alljob = []
_json_info = []
upload_dict = {}
get_asset_info(renderdag, _abc_suffix, _file_code, _start_frame, _end_frame, _alljob, _json_info, upload_dict)
if (not os.path.isdir(_publish_path)):
logger.info('create publish dir {}'.format(_publish_path))
os.makedirs(_publish_path)
try:
with open(_publish_json_file, 'w') as info:
json.dump(_json_info, info, indent=4, separators=(',', ':'))
_result = filefunc.publish_file(_publish_json_file, _production_json_file, True)
cmds.AbcExport(j=_alljob)
for (_k, _v) in upload_dict.items():
_result = filefunc.publish_file(_k, _v, True)
except Exception as e:
logger.error(e)
return False
return True | publish alembiccache | zfused_maya/zfused_maya/node/outputattr/cfx/publishabc.py | publish_abc | zhoulh0322/zfused_outsource_old | 1 | python | def publish_abc():
' \n \n '
def get_cam_info(abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_cams = cmds.ls('{}*'.format(fileCode), fl=1, type='camera')
if (not _cams):
return
for _cam in _cams:
_cam_trans = cmds.listRelatives(_cam, p=1)[0]
_production_file = '{}/{}.{}'.format(_production_path, _cam_trans, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _cam_trans, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _cam_trans, *EXPORTATTR)
_job.append(_joborder)
_json.append([, _cam_trans, , _production_file])
_dict[_publish_file] = _production_file
def get_asset_info(renderdag, abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_assets = yeticache.get_asset_list()
fixmeshname.fix_deformed_mesh_name('_rendering', renderdag)
for _dag in renderdag:
_ns = cmds.referenceQuery(_dag, ns=1)
if _ns.startswith(':'):
_ns = _ns[1:]
if (_ns in _assets):
_sns = cmds.referenceQuery(_dag, ns=1, shn=1)
_production_file = '{}/{}.{}'.format(_production_path, _sns, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _sns, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _dag, *EXPORTATTR)
_job.append(_joborder)
_json.append([_assets[_ns], _sns, _dag.split(':')[(- 1)], _production_file])
_dict[_publish_file] = _production_file
_attr_code = 'cache/alembiccache'
_file_suffix = 'json'
_abc_suffix = 'abc'
_link = record.current_link()
_project_step_id = record.current_project_step_id()
_project_step_handle = zfused_api.step.ProjectStep(_project_step_id)
_step_code = _project_step_handle.code()
_software_code = zfused_api.software.Software(_project_step_handle.data['SoftwareId']).code()
_object_handle = zfused_api.objects.Objects(_link[0], _link[1])
_link_production_path = _object_handle.production_path()
_link_publish_path = _object_handle.publish_path()
_file_code = _object_handle.file_code()
_production_path = '{}/{}/{}/{}'.format(_link_production_path, _step_code, _software_code, _attr_code)
_publish_path = '{}/{}/{}/{}'.format(_link_publish_path, _step_code, _software_code, _attr_code)
_cover_file = ('%s/%s.%s' % (_production_path, _file_code, _file_suffix))
_publish_file = ('%s/%s.%s' % (_publish_path, _file_code, _file_suffix))
_publish_file_dir = os.path.dirname(_publish_file)
if (not os.path.isdir(_publish_file_dir)):
os.makedirs(_publish_file_dir)
_production_json_file = '{}/{}.{}'.format(_production_path, _file_code, _file_suffix)
_publish_json_file = '{}/{}.{}'.format(_publish_path, _file_code, _file_suffix)
_start_frame = (int(cmds.playbackOptions(q=True, min=True)) - PREPFRAME)
_end_frame = (int(cmds.playbackOptions(q=True, max=True)) + PREPFRAME)
renderdag = []
if (not renderdag):
renderdag = renderinggroup.nodes()
_norenders = displaylayer.norender_info(displaylayer.nodes())
if _norenders:
for i in _norenders:
_attr = '{}.v'.format(i)
if (cmds.objExists(_attr) and (cmds.getAttr(_attr) != 0)):
cmds.setAttr(_attr, 0)
_alljob = []
_json_info = []
upload_dict = {}
get_asset_info(renderdag, _abc_suffix, _file_code, _start_frame, _end_frame, _alljob, _json_info, upload_dict)
if (not os.path.isdir(_publish_path)):
logger.info('create publish dir {}'.format(_publish_path))
os.makedirs(_publish_path)
try:
with open(_publish_json_file, 'w') as info:
json.dump(_json_info, info, indent=4, separators=(',', ':'))
_result = filefunc.publish_file(_publish_json_file, _production_json_file, True)
cmds.AbcExport(j=_alljob)
for (_k, _v) in upload_dict.items():
_result = filefunc.publish_file(_k, _v, True)
except Exception as e:
logger.error(e)
return False
return True | def publish_abc():
' \n \n '
def get_cam_info(abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_cams = cmds.ls('{}*'.format(fileCode), fl=1, type='camera')
if (not _cams):
return
for _cam in _cams:
_cam_trans = cmds.listRelatives(_cam, p=1)[0]
_production_file = '{}/{}.{}'.format(_production_path, _cam_trans, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _cam_trans, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _cam_trans, *EXPORTATTR)
_job.append(_joborder)
_json.append([, _cam_trans, , _production_file])
_dict[_publish_file] = _production_file
def get_asset_info(renderdag, abcSuffix, fileCode, startFrame, endFrame, _job=[], _json=[], _dict={}):
_assets = yeticache.get_asset_list()
fixmeshname.fix_deformed_mesh_name('_rendering', renderdag)
for _dag in renderdag:
_ns = cmds.referenceQuery(_dag, ns=1)
if _ns.startswith(':'):
_ns = _ns[1:]
if (_ns in _assets):
_sns = cmds.referenceQuery(_dag, ns=1, shn=1)
_production_file = '{}/{}.{}'.format(_production_path, _sns, abcSuffix)
_publish_file = '{}/{}.{}'.format(_publish_path, _sns, abcSuffix)
_joborder = alembiccache.create_frame_cache(_publish_file, startFrame, endFrame, _dag, *EXPORTATTR)
_job.append(_joborder)
_json.append([_assets[_ns], _sns, _dag.split(':')[(- 1)], _production_file])
_dict[_publish_file] = _production_file
_attr_code = 'cache/alembiccache'
_file_suffix = 'json'
_abc_suffix = 'abc'
_link = record.current_link()
_project_step_id = record.current_project_step_id()
_project_step_handle = zfused_api.step.ProjectStep(_project_step_id)
_step_code = _project_step_handle.code()
_software_code = zfused_api.software.Software(_project_step_handle.data['SoftwareId']).code()
_object_handle = zfused_api.objects.Objects(_link[0], _link[1])
_link_production_path = _object_handle.production_path()
_link_publish_path = _object_handle.publish_path()
_file_code = _object_handle.file_code()
_production_path = '{}/{}/{}/{}'.format(_link_production_path, _step_code, _software_code, _attr_code)
_publish_path = '{}/{}/{}/{}'.format(_link_publish_path, _step_code, _software_code, _attr_code)
_cover_file = ('%s/%s.%s' % (_production_path, _file_code, _file_suffix))
_publish_file = ('%s/%s.%s' % (_publish_path, _file_code, _file_suffix))
_publish_file_dir = os.path.dirname(_publish_file)
if (not os.path.isdir(_publish_file_dir)):
os.makedirs(_publish_file_dir)
_production_json_file = '{}/{}.{}'.format(_production_path, _file_code, _file_suffix)
_publish_json_file = '{}/{}.{}'.format(_publish_path, _file_code, _file_suffix)
_start_frame = (int(cmds.playbackOptions(q=True, min=True)) - PREPFRAME)
_end_frame = (int(cmds.playbackOptions(q=True, max=True)) + PREPFRAME)
renderdag = []
if (not renderdag):
renderdag = renderinggroup.nodes()
_norenders = displaylayer.norender_info(displaylayer.nodes())
if _norenders:
for i in _norenders:
_attr = '{}.v'.format(i)
if (cmds.objExists(_attr) and (cmds.getAttr(_attr) != 0)):
cmds.setAttr(_attr, 0)
_alljob = []
_json_info = []
upload_dict = {}
get_asset_info(renderdag, _abc_suffix, _file_code, _start_frame, _end_frame, _alljob, _json_info, upload_dict)
if (not os.path.isdir(_publish_path)):
logger.info('create publish dir {}'.format(_publish_path))
os.makedirs(_publish_path)
try:
with open(_publish_json_file, 'w') as info:
json.dump(_json_info, info, indent=4, separators=(',', ':'))
_result = filefunc.publish_file(_publish_json_file, _production_json_file, True)
cmds.AbcExport(j=_alljob)
for (_k, _v) in upload_dict.items():
_result = filefunc.publish_file(_k, _v, True)
except Exception as e:
logger.error(e)
return False
return True<|docstring|>publish alembiccache<|endoftext|> |
ef0937922190585a583e70ae2b2eabe26b169f4f830938e40e35712cd71754de | def _run_op_test(self, op_fun, inputs, indices=None, latent_indicators=None, inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
'Run a single test for a single op.'
op_name = op_fun.__name__
device_name = ('/gpu:0' if on_gpu else '/cpu:0')
print2(('--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, latent_indicators=%s, inference=%s, log=%s' % (op_name, on_gpu, inputs.shape, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), ('MPE' if (inf_type == spn.InferenceType.MPE) else 'MARGINAL'), log)), self.file)
input_size = inputs.shape[1]
true_out = self._true_output(op_fun, inputs, indices, latent_indicators, inf_type)
tf.reset_default_graph()
with tf.device(device_name):
inputs_pl = spn.RawLeaf(num_vars=input_size)
if (latent_indicators is None):
latent_indicators_pl = [None for _ in range(self.num_sums)]
elif (op_fun is Ops.sum):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=1, num_vals=input_size) for _ in range(self.num_sums)]
elif ((op_fun is Ops.par_sums) or Ops.sums):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=self.num_sums, num_vals=input_size)]
start_time = time.time()
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log)
for _ in range((self.num_ops - 1)):
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log, tf.tuple([ops])[0])
setup_time = (time.time() - start_time)
graph_size = len(tf.get_default_graph().get_operations())
output_correct = True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=False, log_device_placement=self.log_devs)) as sess:
start_time = time.time()
init_ops.run()
weights_init_time = (time.time() - start_time)
run_times = []
feed = {inputs_pl: inputs}
if (latent_indicators is not None):
for iv_pl in latent_indicators_pl:
feed[iv_pl] = latent_indicators
for n in range(self.num_runs):
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append((time.time() - start_time))
try:
np.testing.assert_array_almost_equal(out, (np.log(true_out) if log else true_out))
except AssertionError:
output_correct = False
self.test_failed = True
if self.profile:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options, run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if (not os.path.exists(self.profiles_dir)):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ('_GPU' if on_gpu else '_CPU')
file_name += (('_MPE-LOG' if log else '_MPE') if (inf_type == spn.InferenceType.MPE) else ('_MARGINAL-LOG' if log else '_MARGINAL'))
if (indices is not None):
file_name += '_Indices'
if (latent_indicators is not None):
file_name += '_IVS'
with open(('%s/timeline_value_%s.json' % (self.profiles_dir, file_name)), 'w') as f:
f.write(chrome_trace)
return OpTestResult(op_name, on_gpu, graph_size, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), setup_time, weights_init_time, run_times, output_correct) | Run a single test for a single op. | libspn/tests/perf_sum_value.py | _run_op_test | pronobis/libspn | 22 | python | def _run_op_test(self, op_fun, inputs, indices=None, latent_indicators=None, inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
op_name = op_fun.__name__
device_name = ('/gpu:0' if on_gpu else '/cpu:0')
print2(('--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, latent_indicators=%s, inference=%s, log=%s' % (op_name, on_gpu, inputs.shape, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), ('MPE' if (inf_type == spn.InferenceType.MPE) else 'MARGINAL'), log)), self.file)
input_size = inputs.shape[1]
true_out = self._true_output(op_fun, inputs, indices, latent_indicators, inf_type)
tf.reset_default_graph()
with tf.device(device_name):
inputs_pl = spn.RawLeaf(num_vars=input_size)
if (latent_indicators is None):
latent_indicators_pl = [None for _ in range(self.num_sums)]
elif (op_fun is Ops.sum):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=1, num_vals=input_size) for _ in range(self.num_sums)]
elif ((op_fun is Ops.par_sums) or Ops.sums):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=self.num_sums, num_vals=input_size)]
start_time = time.time()
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log)
for _ in range((self.num_ops - 1)):
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log, tf.tuple([ops])[0])
setup_time = (time.time() - start_time)
graph_size = len(tf.get_default_graph().get_operations())
output_correct = True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=False, log_device_placement=self.log_devs)) as sess:
start_time = time.time()
init_ops.run()
weights_init_time = (time.time() - start_time)
run_times = []
feed = {inputs_pl: inputs}
if (latent_indicators is not None):
for iv_pl in latent_indicators_pl:
feed[iv_pl] = latent_indicators
for n in range(self.num_runs):
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append((time.time() - start_time))
try:
np.testing.assert_array_almost_equal(out, (np.log(true_out) if log else true_out))
except AssertionError:
output_correct = False
self.test_failed = True
if self.profile:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options, run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if (not os.path.exists(self.profiles_dir)):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ('_GPU' if on_gpu else '_CPU')
file_name += (('_MPE-LOG' if log else '_MPE') if (inf_type == spn.InferenceType.MPE) else ('_MARGINAL-LOG' if log else '_MARGINAL'))
if (indices is not None):
file_name += '_Indices'
if (latent_indicators is not None):
file_name += '_IVS'
with open(('%s/timeline_value_%s.json' % (self.profiles_dir, file_name)), 'w') as f:
f.write(chrome_trace)
return OpTestResult(op_name, on_gpu, graph_size, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), setup_time, weights_init_time, run_times, output_correct) | def _run_op_test(self, op_fun, inputs, indices=None, latent_indicators=None, inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
op_name = op_fun.__name__
device_name = ('/gpu:0' if on_gpu else '/cpu:0')
print2(('--> %s: on_gpu=%s, inputs_shape=%s, indices=%s, latent_indicators=%s, inference=%s, log=%s' % (op_name, on_gpu, inputs.shape, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), ('MPE' if (inf_type == spn.InferenceType.MPE) else 'MARGINAL'), log)), self.file)
input_size = inputs.shape[1]
true_out = self._true_output(op_fun, inputs, indices, latent_indicators, inf_type)
tf.reset_default_graph()
with tf.device(device_name):
inputs_pl = spn.RawLeaf(num_vars=input_size)
if (latent_indicators is None):
latent_indicators_pl = [None for _ in range(self.num_sums)]
elif (op_fun is Ops.sum):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=1, num_vals=input_size) for _ in range(self.num_sums)]
elif ((op_fun is Ops.par_sums) or Ops.sums):
latent_indicators_pl = [spn.IndicatorLeaf(num_vars=self.num_sums, num_vals=input_size)]
start_time = time.time()
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log)
for _ in range((self.num_ops - 1)):
(init_ops, ops) = op_fun(inputs_pl, indices, latent_indicators_pl, self.num_sums, inf_type, log, tf.tuple([ops])[0])
setup_time = (time.time() - start_time)
graph_size = len(tf.get_default_graph().get_operations())
output_correct = True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=False, log_device_placement=self.log_devs)) as sess:
start_time = time.time()
init_ops.run()
weights_init_time = (time.time() - start_time)
run_times = []
feed = {inputs_pl: inputs}
if (latent_indicators is not None):
for iv_pl in latent_indicators_pl:
feed[iv_pl] = latent_indicators
for n in range(self.num_runs):
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append((time.time() - start_time))
try:
np.testing.assert_array_almost_equal(out, (np.log(true_out) if log else true_out))
except AssertionError:
output_correct = False
self.test_failed = True
if self.profile:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options, run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if (not os.path.exists(self.profiles_dir)):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ('_GPU' if on_gpu else '_CPU')
file_name += (('_MPE-LOG' if log else '_MPE') if (inf_type == spn.InferenceType.MPE) else ('_MARGINAL-LOG' if log else '_MARGINAL'))
if (indices is not None):
file_name += '_Indices'
if (latent_indicators is not None):
file_name += '_IVS'
with open(('%s/timeline_value_%s.json' % (self.profiles_dir, file_name)), 'w') as f:
f.write(chrome_trace)
return OpTestResult(op_name, on_gpu, graph_size, ('No' if (indices is None) else 'Yes'), ('No' if (latent_indicators is None) else 'Yes'), setup_time, weights_init_time, run_times, output_correct)<|docstring|>Run a single test for a single op.<|endoftext|> |
00fc84aa320df7ff4316dddaaf46de218de93327f52e1a9d6c7a250ffb1111fe | def _run_test(self, test_name, op_funs, inputs, indices, latent_indicators, inf_type, log):
'Run a single test for multiple ops and devices.'
cpu_results = []
gpu_results = []
for (op_fun, inp, ind, iv) in zip(op_funs, inputs, indices, latent_indicators):
if (not self.without_cpu):
cpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=False))
if (not self.without_gpu):
gpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results) | Run a single test for multiple ops and devices. | libspn/tests/perf_sum_value.py | _run_test | pronobis/libspn | 22 | python | def _run_test(self, test_name, op_funs, inputs, indices, latent_indicators, inf_type, log):
cpu_results = []
gpu_results = []
for (op_fun, inp, ind, iv) in zip(op_funs, inputs, indices, latent_indicators):
if (not self.without_cpu):
cpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=False))
if (not self.without_gpu):
gpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results) | def _run_test(self, test_name, op_funs, inputs, indices, latent_indicators, inf_type, log):
cpu_results = []
gpu_results = []
for (op_fun, inp, ind, iv) in zip(op_funs, inputs, indices, latent_indicators):
if (not self.without_cpu):
cpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=False))
if (not self.without_gpu):
gpu_results.append(self._run_op_test(op_fun, inp, indices=None, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=None, inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append(self._run_op_test(op_fun, inp, indices=ind, latent_indicators=iv, inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results)<|docstring|>Run a single test for multiple ops and devices.<|endoftext|> |
3c1c1c6bc6550d072836b5e799ae30b63a95ef7df6c5acb197b39daf9f7b99af | def run(self):
'Run all tests.'
print1('Running tests:', self.file)
results = []
sum_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sum_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sum_latent_indicators = np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1)
par_sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
par_sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
par_sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
r = self._run_test('InferenceType: MARGINAL', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
for res in results:
res.print(self.file)
if self.test_failed:
print('\n ATLEAST ONE TEST FAILED!') | Run all tests. | libspn/tests/perf_sum_value.py | run | pronobis/libspn | 22 | python | def run(self):
print1('Running tests:', self.file)
results = []
sum_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sum_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sum_latent_indicators = np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1)
par_sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
par_sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
par_sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
r = self._run_test('InferenceType: MARGINAL', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
for res in results:
res.print(self.file)
if self.test_failed:
print('\n ATLEAST ONE TEST FAILED!') | def run(self):
print1('Running tests:', self.file)
results = []
sum_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sum_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sum_latent_indicators = np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1)
par_sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
par_sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
par_sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
sums_inputs = np.random.rand(self.num_input_rows, self.num_input_cols)
sums_indices = list(range((self.num_input_cols - 1), (- 1), (- 1)))
sums_latent_indicators = np.tile(np.expand_dims(np.random.randint(self.num_input_cols, size=self.num_input_rows), axis=1), (1, self.num_sums))
r = self._run_test('InferenceType: MARGINAL', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG', [Ops.sum, Ops.par_sums, Ops.sums], [sum_inputs, par_sums_inputs, sums_inputs], [sum_indices, par_sums_indices, sums_indices], [sum_latent_indicators, par_sums_latent_indicators, sums_latent_indicators], inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
for res in results:
res.print(self.file)
if self.test_failed:
print('\n ATLEAST ONE TEST FAILED!')<|docstring|>Run all tests.<|endoftext|> |
6a5ad03e2767a3b7e70821cda35d9d734ddfa910ae87cdff9929e7f7a01345d3 | def get_res(res):
'Helper function printing a single result.'
return ('%15s %5d %5s %7s %11.2f %15.2f %15.2f %14.2f %10s' % (res.op_name, res.graph_size, res.indices, res.latent_indicators, (res.setup_time * 1000), (res.weights_init_time * 1000), (res.run_times[0] * 1000), (np.mean(res.run_times[1:]) * 1000), res.output_correct)) | Helper function printing a single result. | libspn/tests/perf_sum_value.py | get_res | pronobis/libspn | 22 | python | def get_res(res):
return ('%15s %5d %5s %7s %11.2f %15.2f %15.2f %14.2f %10s' % (res.op_name, res.graph_size, res.indices, res.latent_indicators, (res.setup_time * 1000), (res.weights_init_time * 1000), (res.run_times[0] * 1000), (np.mean(res.run_times[1:]) * 1000), res.output_correct)) | def get_res(res):
return ('%15s %5d %5s %7s %11.2f %15.2f %15.2f %14.2f %10s' % (res.op_name, res.graph_size, res.indices, res.latent_indicators, (res.setup_time * 1000), (res.weights_init_time * 1000), (res.run_times[0] * 1000), (np.mean(res.run_times[1:]) * 1000), res.output_correct))<|docstring|>Helper function printing a single result.<|endoftext|> |
2285d4a9a2d31291a19b6ec3193e53d3571b487933513deb3dac472bc5f08889 | def create_logger(d):
"\n Creates logfile using Python's logging module and saves to a 'log' directory. Uses timed \n rotating file handler to archive logfile under a different name within the log directory, \n create a new empty one, and delete all archived logfiles once a maximum number of archived\n files has been reached. \n Returns logger object.\n d: Dictionary containing the following fields\n - bkup_inttype: One character designating type of interval logfile will be archived \n after (i.e., 'D' is day, 'M' is month)\n - bkup_interval: Integer, amount of specified interval logfile will be archived after \n (i.e., if bkup_inttype = 'D' and bkup_interval = 30, the file will be \n archived after 30 days)\n - bkup_count: Integer, how many backup logfiles to keep until all will be deleted \n - bkup_suffix: String, date format that will be appended to logfile when it is archived\n - homedir: String, filepath\n - config: ConfigParser object, points to config file\n "
homedir = d['homedir']
config = d['config']
bkup_inttype = d['bkup_inttype']
bkup_interval = d['bkup_interval']
bkup_count = d['bkup_count']
bkup_suffix = d['bkup_suffix']
if (not os.path.exists('log')):
os.makedirs('log')
log_dirpath = os.path.join(homedir, 'log')
logfile = os.path.join(log_dirpath, config.get('SETUP', 'logfile'))
logger = logging.getLogger(__name__)
log_format = '%(asctime)s %(levelname)s - %(message)s'
log_datefmt = '%Y-%m-%d %H:%M:%S'
main_formatter = logging.Formatter(fmt=log_format, datefmt=log_datefmt)
file_handler = logging.handlers.TimedRotatingFileHandler(filename=logfile, when=bkup_inttype, interval=bkup_interval, backupCount=bkup_count)
file_handler.suffix = bkup_suffix
file_handler.setFormatter(main_formatter)
logger.addHandler(file_handler)
level = config.get('SETUP', 'logging_level')
if (level.upper() not in ['INFO', 'DEBUG', 'WARNING', 'ERROR']):
print('Invalid value set for logger_level in config file. Accepted values \n include info, debug, warning, and error (lower or upper case). Setting \n logger_level to INFO.')
logger.setLevel('INFO')
else:
logger.setLevel(level.upper())
return logger | Creates logfile using Python's logging module and saves to a 'log' directory. Uses timed
rotating file handler to archive logfile under a different name within the log directory,
create a new empty one, and delete all archived logfiles once a maximum number of archived
files has been reached.
Returns logger object.
d: Dictionary containing the following fields
- bkup_inttype: One character designating type of interval logfile will be archived
after (i.e., 'D' is day, 'M' is month)
- bkup_interval: Integer, amount of specified interval logfile will be archived after
(i.e., if bkup_inttype = 'D' and bkup_interval = 30, the file will be
archived after 30 days)
- bkup_count: Integer, how many backup logfiles to keep until all will be deleted
- bkup_suffix: String, date format that will be appended to logfile when it is archived
- homedir: String, filepath
- config: ConfigParser object, points to config file | prototype/ted/trigger_funcs.py | create_logger | usgs/earthquake-ted | 2 | python | def create_logger(d):
"\n Creates logfile using Python's logging module and saves to a 'log' directory. Uses timed \n rotating file handler to archive logfile under a different name within the log directory, \n create a new empty one, and delete all archived logfiles once a maximum number of archived\n files has been reached. \n Returns logger object.\n d: Dictionary containing the following fields\n - bkup_inttype: One character designating type of interval logfile will be archived \n after (i.e., 'D' is day, 'M' is month)\n - bkup_interval: Integer, amount of specified interval logfile will be archived after \n (i.e., if bkup_inttype = 'D' and bkup_interval = 30, the file will be \n archived after 30 days)\n - bkup_count: Integer, how many backup logfiles to keep until all will be deleted \n - bkup_suffix: String, date format that will be appended to logfile when it is archived\n - homedir: String, filepath\n - config: ConfigParser object, points to config file\n "
homedir = d['homedir']
config = d['config']
bkup_inttype = d['bkup_inttype']
bkup_interval = d['bkup_interval']
bkup_count = d['bkup_count']
bkup_suffix = d['bkup_suffix']
if (not os.path.exists('log')):
os.makedirs('log')
log_dirpath = os.path.join(homedir, 'log')
logfile = os.path.join(log_dirpath, config.get('SETUP', 'logfile'))
logger = logging.getLogger(__name__)
log_format = '%(asctime)s %(levelname)s - %(message)s'
log_datefmt = '%Y-%m-%d %H:%M:%S'
main_formatter = logging.Formatter(fmt=log_format, datefmt=log_datefmt)
file_handler = logging.handlers.TimedRotatingFileHandler(filename=logfile, when=bkup_inttype, interval=bkup_interval, backupCount=bkup_count)
file_handler.suffix = bkup_suffix
file_handler.setFormatter(main_formatter)
logger.addHandler(file_handler)
level = config.get('SETUP', 'logging_level')
if (level.upper() not in ['INFO', 'DEBUG', 'WARNING', 'ERROR']):
print('Invalid value set for logger_level in config file. Accepted values \n include info, debug, warning, and error (lower or upper case). Setting \n logger_level to INFO.')
logger.setLevel('INFO')
else:
logger.setLevel(level.upper())
return logger | def create_logger(d):
"\n Creates logfile using Python's logging module and saves to a 'log' directory. Uses timed \n rotating file handler to archive logfile under a different name within the log directory, \n create a new empty one, and delete all archived logfiles once a maximum number of archived\n files has been reached. \n Returns logger object.\n d: Dictionary containing the following fields\n - bkup_inttype: One character designating type of interval logfile will be archived \n after (i.e., 'D' is day, 'M' is month)\n - bkup_interval: Integer, amount of specified interval logfile will be archived after \n (i.e., if bkup_inttype = 'D' and bkup_interval = 30, the file will be \n archived after 30 days)\n - bkup_count: Integer, how many backup logfiles to keep until all will be deleted \n - bkup_suffix: String, date format that will be appended to logfile when it is archived\n - homedir: String, filepath\n - config: ConfigParser object, points to config file\n "
homedir = d['homedir']
config = d['config']
bkup_inttype = d['bkup_inttype']
bkup_interval = d['bkup_interval']
bkup_count = d['bkup_count']
bkup_suffix = d['bkup_suffix']
if (not os.path.exists('log')):
os.makedirs('log')
log_dirpath = os.path.join(homedir, 'log')
logfile = os.path.join(log_dirpath, config.get('SETUP', 'logfile'))
logger = logging.getLogger(__name__)
log_format = '%(asctime)s %(levelname)s - %(message)s'
log_datefmt = '%Y-%m-%d %H:%M:%S'
main_formatter = logging.Formatter(fmt=log_format, datefmt=log_datefmt)
file_handler = logging.handlers.TimedRotatingFileHandler(filename=logfile, when=bkup_inttype, interval=bkup_interval, backupCount=bkup_count)
file_handler.suffix = bkup_suffix
file_handler.setFormatter(main_formatter)
logger.addHandler(file_handler)
level = config.get('SETUP', 'logging_level')
if (level.upper() not in ['INFO', 'DEBUG', 'WARNING', 'ERROR']):
print('Invalid value set for logger_level in config file. Accepted values \n include info, debug, warning, and error (lower or upper case). Setting \n logger_level to INFO.')
logger.setLevel('INFO')
else:
logger.setLevel(level.upper())
return logger<|docstring|>Creates logfile using Python's logging module and saves to a 'log' directory. Uses timed
rotating file handler to archive logfile under a different name within the log directory,
create a new empty one, and delete all archived logfiles once a maximum number of archived
files has been reached.
Returns logger object.
d: Dictionary containing the following fields
- bkup_inttype: One character designating type of interval logfile will be archived
after (i.e., 'D' is day, 'M' is month)
- bkup_interval: Integer, amount of specified interval logfile will be archived after
(i.e., if bkup_inttype = 'D' and bkup_interval = 30, the file will be
archived after 30 days)
- bkup_count: Integer, how many backup logfiles to keep until all will be deleted
- bkup_suffix: String, date format that will be appended to logfile when it is archived
- homedir: String, filepath
- config: ConfigParser object, points to config file<|endoftext|> |
db75dc4ab33c4058020031984b0699006c644cb2646d72e995d6a27b07bd28b9 | def get_region_name(lat, lon):
def get_region_name(lat, lon):
    """Return the short Flinn-Engdahl region name for a point.

    Queries the USGS geoserve web service; on any failure (network down,
    unexpected payload) falls back to a '<lat>, <lon>' string formatted to
    three decimal places.

    Args:
        lat: Latitude of the input point.
        lon: Longitude of the input point.

    Returns:
        str: Short form of the Flinn-Engdahl region name, or the formatted
        coordinates if the lookup fails.
    """
    url = 'http://earthquake.usgs.gov/ws/geoserve/regions.json?latitude=LAT&longitude=LON&type=fe'
    url = url.replace('LAT', str(lat))
    url = url.replace('LON', str(lon))
    locstr = ('%.3f, %.3f' % (lat, lon))
    try:
        # Context manager guarantees the socket is closed even if read()
        # raises; the timeout keeps a dead network from hanging the caller.
        with urllib.request.urlopen(url, timeout=5) as fh:
            regstr = fh.read()
        jdict = json.loads(regstr)
        locstr = jdict['fe']['features'][0]['properties']['name']
    except Exception:
        # Bug fix: the original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt. Any failure falls back to the coordinate string.
        pass
    return locstr
lat: Latitude of input point.
lon: Longitude of input point.
Returns short form of the Flinn-Engdahl region name. | prototype/ted/trigger_funcs.py | get_region_name | usgs/earthquake-ted | 2 | python | def get_region_name(lat, lon):
'\n Return the short version of the FE region name.\n lat: Latitude of input point.\n lat: Latitude of input point.\n Returns short form of the Flinn-Engdahl region name.\n '
url = 'http://earthquake.usgs.gov/ws/geoserve/regions.json?latitude=LAT&longitude=LON&type=fe'
url = url.replace('LAT', str(lat))
url = url.replace('LON', str(lon))
locstr = ('%.3f, %.3f' % (lat, lon))
try:
fh = urllib.request.urlopen(url)
regstr = fh.read()
fh.close()
jdict = json.loads(regstr)
locstr = jdict['fe']['features'][0]['properties']['name']
except:
pass
return locstr | def get_region_name(lat, lon):
'\n Return the short version of the FE region name.\n lat: Latitude of input point.\n lat: Latitude of input point.\n Returns short form of the Flinn-Engdahl region name.\n '
url = 'http://earthquake.usgs.gov/ws/geoserve/regions.json?latitude=LAT&longitude=LON&type=fe'
url = url.replace('LAT', str(lat))
url = url.replace('LON', str(lon))
locstr = ('%.3f, %.3f' % (lat, lon))
try:
fh = urllib.request.urlopen(url)
regstr = fh.read()
fh.close()
jdict = json.loads(regstr)
locstr = jdict['fe']['features'][0]['properties']['name']
except:
pass
return locstr<|docstring|>Return the short version of the FE region name.
lat: Latitude of input point.
lat: Latitude of input point.
Returns short form of the Flinn-Engdahl region name.<|endoftext|> |
89df8026358e00693ca2136addc1b3fae6f08e13f17aca804773087e3ec3891d | def __init__(self, prefix=None, suggestions=None, city_filter=None, state_filter=None, prefer=None, prefer_ratio=None, geolocate_type=None):
def __init__(self, prefix=None, suggestions=None, city_filter=None, state_filter=None, prefer=None, prefer_ratio=None, geolocate_type=None):
    """Build the input for a US Autocomplete lookup.

    Besides carrying the request fields, this object also receives the API
    result (``self.result``) once the lookup has come back.

    See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-request-input-fields"

    :param prefix: The beginning of an address (required)
    :param suggestions: Maximum number of suggestions to return
    :param city_filter: Cities from which suggestions may be drawn
    :param state_filter: States from which suggestions may be drawn
    :param prefer: Cities/states whose suggestions are listed first
    :param prefer_ratio: Fraction (decimal between 0 and 1) of suggestions
        taken from the preferred cities/states
    :param geolocate_type: Maps to the geolocate/geolocate_precision request
        fields; use the constants in geolocation_type.py
    """
    self.result = []
    self.prefix = prefix
    self.max_suggestions = suggestions
    self.prefer_ratio = prefer_ratio
    self.geolocate_type = geolocate_type
    # The optional list fields default to a fresh empty list when omitted.
    self.city_filter = city_filter or []
    self.state_filter = state_filter or []
    self.prefer = prefer or []
of the lookup after it comes back from the API.
See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-request-input-fields"
:param prefix: The beginning of an address (required)
:param suggestions: Maximum number of suggestions
:param city_filter: List of cities from which to include suggestions
:param state_filter: List of states from which to include suggestions
:param prefer: List of cities/states. Suggestions from the members of this list will appear first
:param prefer_ratio: Percentage of suggestions that will be from preferred cities/states.
(Decimal value between 0 and 1)
:param geolocate_type: This field corresponds to the geolocate and geolocate_precision fields in the
US Autocomplete API. Use the constants in geolocation_type.py to set this field | smartystreets_python_sdk/us_autocomplete/lookup.py | __init__ | jasonrfarkas/smartystreets-python-sdk | 19 | python | def __init__(self, prefix=None, suggestions=None, city_filter=None, state_filter=None, prefer=None, prefer_ratio=None, geolocate_type=None):
'\n In addition to holding all of the input data for this lookup, this class also will contain the result \n of the lookup after it comes back from the API.\n \n See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-request-input-fields"\n \n :param prefix: The beginning of an address (required)\n :param suggestions: Maximum number of suggestions\n :param city_filter: List of cities from which to include suggestions\n :param state_filter: List of states from which to include suggestions\n :param prefer: List of cities/states. Suggestions from the members of this list will appear first\n :param prefer_ratio: Percentage of suggestions that will be from preferred cities/states.\n (Decimal value between 0 and 1)\n :param geolocate_type: This field corresponds to the geolocate and geolocate_precision fields in the \n US Autocomplete API. Use the constants in geolocation_type.py to set this field\n '
self.result = []
self.prefix = prefix
self.max_suggestions = suggestions
self.city_filter = (city_filter or [])
self.state_filter = (state_filter or [])
self.prefer = (prefer or [])
self.prefer_ratio = prefer_ratio
self.geolocate_type = geolocate_type | def __init__(self, prefix=None, suggestions=None, city_filter=None, state_filter=None, prefer=None, prefer_ratio=None, geolocate_type=None):
'\n In addition to holding all of the input data for this lookup, this class also will contain the result \n of the lookup after it comes back from the API.\n \n See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-request-input-fields"\n \n :param prefix: The beginning of an address (required)\n :param suggestions: Maximum number of suggestions\n :param city_filter: List of cities from which to include suggestions\n :param state_filter: List of states from which to include suggestions\n :param prefer: List of cities/states. Suggestions from the members of this list will appear first\n :param prefer_ratio: Percentage of suggestions that will be from preferred cities/states.\n (Decimal value between 0 and 1)\n :param geolocate_type: This field corresponds to the geolocate and geolocate_precision fields in the \n US Autocomplete API. Use the constants in geolocation_type.py to set this field\n '
self.result = []
self.prefix = prefix
self.max_suggestions = suggestions
self.city_filter = (city_filter or [])
self.state_filter = (state_filter or [])
self.prefer = (prefer or [])
self.prefer_ratio = prefer_ratio
self.geolocate_type = geolocate_type<|docstring|>In addition to holding all of the input data for this lookup, this class also will contain the result
of the lookup after it comes back from the API.
See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-request-input-fields"
:param prefix: The beginning of an address (required)
:param suggestions: Maximum number of suggestions
:param city_filter: List of cities from which to include suggestions
:param state_filter: List of states from which to include suggestions
:param prefer: List of cities/states. Suggestions from the members of this list will appear first
:param prefer_ratio: Percentage of suggestions that will be from preferred cities/states.
(Decimal value between 0 and 1)
:param geolocate_type: This field corresponds to the geolocate and geolocate_precision fields in the
US Autocomplete API. Use the constants in geolocation_type.py to set this field<|endoftext|> |
2fe55cd548dfe7f40dff3ff2cb4c55a40a41aa4f53d7f4947d0f38275c4ee120 | def H(N, Nlegs, S, J=[1, 1], gamma=[0, 0], U=0, delx=[1, 1], dely=[1, 1], delz=[1, 1], beta=[1, 1], phi=[1, 1], mode=['open', 'open']):
"\n Args: 'N' Number of sites\n 'S' maximum spin for a single particle\n 'J1' nearest neigbor interaction strength\n 'U' same site interaction engergy\n 'delx,dely,delz' anisotropy along the X,Y,Z directions\n \n The quasi field is given by the term: cos(2*pi*beta*k + phi)\n 'gamma' quasi field interaction strength\n 'beta' adjust frequency of quasi field\n 'phi' adjust phase of quasi field\n \n 'mode' sets boundary condition as 'open' or 'periodic'\n "
sx = so.sx(S)
sy = so.sy(S)
sz = so.sz(S)
mstates = int(((2 * S) + 1))
dim = (mstates ** N)
H = []
phi1 = phi[0]
phi2 = phi[1]
gamma1 = gamma[0]
gamma2 = gamma[1]
J1 = J[0]
J2 = J[1]
delx1 = delx[0]
delx2 = delx[1]
dely1 = dely[0]
dely2 = dely[1]
delz1 = delz[0]
delz2 = delz[1]
beta1 = beta[0]
beta2 = beta[1]
modex = mode[0]
modey = mode[1]
Nint = int((N / Nlegs))
for k in range(1, (N + 1)):
Sxp = st.spintensor(k, N, sx, S)
Syp = st.spintensor(k, N, sy, S)
Szp = st.spintensor(k, N, sz, S)
if ((k % Nint) == 0):
if (modex == 'periodic'):
couple1x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
elif (modex == 'open'):
Sxp1 = sp.sparse.csr_matrix((dim, dim))
Syp1 = sp.sparse.csr_matrix((dim, dim))
Szp1 = sp.sparse.csr_matrix((dim, dim))
else:
couple1x = st.spintensor((k + 1), N, sx, S)
couple1y = st.spintensor((k + 1), N, sy, S)
couple1z = st.spintensor((k + 1), N, sz, S)
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
if ((N - k) < Nint):
if (modey == 'periodic'):
couple2x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
elif (modey == 'open'):
Sxp2 = sp.sparse.csr_matrix((dim, dim))
Syp2 = sp.sparse.csr_matrix((dim, dim))
Szp2 = sp.sparse.csr_matrix((dim, dim))
else:
couple2x = st.spintensor((k + Nint), N, sx, S)
couple2y = st.spintensor((k + Nint), N, sy, S)
couple2z = st.spintensor((k + Nint), N, sz, S)
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
Sk1 = (Szp * np.cos(((((2 * np.pi) * beta1) * k) + phi1)))
Sk2 = (Szp * np.cos(((((2 * np.pi) * beta2) * k) + phi2)))
Sk = Szp
def interaction(J, xint, yint, zint, siteint, A, B, C, G):
return ((J * (((A * xint) + (B * yint)) + (C * zint))) + (G * siteint))
H1 = interaction(J1, Sxp1.real, Syp1.real, Szp1.real, Sk1, delx1, dely1, delz1, gamma1)
H2 = ((J2 * (((delx2 * Sxp2.real) + (dely2 * Syp2.real)) + (delz2 * Szp2.real))) + (gamma2 * Sk2))
H.append((((U * Sk) + H1) + H2))
return H | Args: 'N' Number of sites
'S' maximum spin for a single particle
'J1' nearest neigbor interaction strength
'U' same site interaction engergy
'delx,dely,delz' anisotropy along the X,Y,Z directions
The quasi field is given by the term: cos(2*pi*beta*k + phi)
'gamma' quasi field interaction strength
'beta' adjust frequency of quasi field
'phi' adjust phase of quasi field
'mode' sets boundary condition as 'open' or 'periodic' | multispinsys/Hamiltonians/TwoDHeisenberg.py | H | Marcupio/SpinProgram | 0 | python | def H(N, Nlegs, S, J=[1, 1], gamma=[0, 0], U=0, delx=[1, 1], dely=[1, 1], delz=[1, 1], beta=[1, 1], phi=[1, 1], mode=['open', 'open']):
"\n Args: 'N' Number of sites\n 'S' maximum spin for a single particle\n 'J1' nearest neigbor interaction strength\n 'U' same site interaction engergy\n 'delx,dely,delz' anisotropy along the X,Y,Z directions\n \n The quasi field is given by the term: cos(2*pi*beta*k + phi)\n 'gamma' quasi field interaction strength\n 'beta' adjust frequency of quasi field\n 'phi' adjust phase of quasi field\n \n 'mode' sets boundary condition as 'open' or 'periodic'\n "
sx = so.sx(S)
sy = so.sy(S)
sz = so.sz(S)
mstates = int(((2 * S) + 1))
dim = (mstates ** N)
H = []
phi1 = phi[0]
phi2 = phi[1]
gamma1 = gamma[0]
gamma2 = gamma[1]
J1 = J[0]
J2 = J[1]
delx1 = delx[0]
delx2 = delx[1]
dely1 = dely[0]
dely2 = dely[1]
delz1 = delz[0]
delz2 = delz[1]
beta1 = beta[0]
beta2 = beta[1]
modex = mode[0]
modey = mode[1]
Nint = int((N / Nlegs))
for k in range(1, (N + 1)):
Sxp = st.spintensor(k, N, sx, S)
Syp = st.spintensor(k, N, sy, S)
Szp = st.spintensor(k, N, sz, S)
if ((k % Nint) == 0):
if (modex == 'periodic'):
couple1x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
elif (modex == 'open'):
Sxp1 = sp.sparse.csr_matrix((dim, dim))
Syp1 = sp.sparse.csr_matrix((dim, dim))
Szp1 = sp.sparse.csr_matrix((dim, dim))
else:
couple1x = st.spintensor((k + 1), N, sx, S)
couple1y = st.spintensor((k + 1), N, sy, S)
couple1z = st.spintensor((k + 1), N, sz, S)
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
if ((N - k) < Nint):
if (modey == 'periodic'):
couple2x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
elif (modey == 'open'):
Sxp2 = sp.sparse.csr_matrix((dim, dim))
Syp2 = sp.sparse.csr_matrix((dim, dim))
Szp2 = sp.sparse.csr_matrix((dim, dim))
else:
couple2x = st.spintensor((k + Nint), N, sx, S)
couple2y = st.spintensor((k + Nint), N, sy, S)
couple2z = st.spintensor((k + Nint), N, sz, S)
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
Sk1 = (Szp * np.cos(((((2 * np.pi) * beta1) * k) + phi1)))
Sk2 = (Szp * np.cos(((((2 * np.pi) * beta2) * k) + phi2)))
Sk = Szp
def interaction(J, xint, yint, zint, siteint, A, B, C, G):
return ((J * (((A * xint) + (B * yint)) + (C * zint))) + (G * siteint))
H1 = interaction(J1, Sxp1.real, Syp1.real, Szp1.real, Sk1, delx1, dely1, delz1, gamma1)
H2 = ((J2 * (((delx2 * Sxp2.real) + (dely2 * Syp2.real)) + (delz2 * Szp2.real))) + (gamma2 * Sk2))
H.append((((U * Sk) + H1) + H2))
return H | def H(N, Nlegs, S, J=[1, 1], gamma=[0, 0], U=0, delx=[1, 1], dely=[1, 1], delz=[1, 1], beta=[1, 1], phi=[1, 1], mode=['open', 'open']):
"\n Args: 'N' Number of sites\n 'S' maximum spin for a single particle\n 'J1' nearest neigbor interaction strength\n 'U' same site interaction engergy\n 'delx,dely,delz' anisotropy along the X,Y,Z directions\n \n The quasi field is given by the term: cos(2*pi*beta*k + phi)\n 'gamma' quasi field interaction strength\n 'beta' adjust frequency of quasi field\n 'phi' adjust phase of quasi field\n \n 'mode' sets boundary condition as 'open' or 'periodic'\n "
sx = so.sx(S)
sy = so.sy(S)
sz = so.sz(S)
mstates = int(((2 * S) + 1))
dim = (mstates ** N)
H = []
phi1 = phi[0]
phi2 = phi[1]
gamma1 = gamma[0]
gamma2 = gamma[1]
J1 = J[0]
J2 = J[1]
delx1 = delx[0]
delx2 = delx[1]
dely1 = dely[0]
dely2 = dely[1]
delz1 = delz[0]
delz2 = delz[1]
beta1 = beta[0]
beta2 = beta[1]
modex = mode[0]
modey = mode[1]
Nint = int((N / Nlegs))
for k in range(1, (N + 1)):
Sxp = st.spintensor(k, N, sx, S)
Syp = st.spintensor(k, N, sy, S)
Szp = st.spintensor(k, N, sz, S)
if ((k % Nint) == 0):
if (modex == 'periodic'):
couple1x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
couple1z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - Nlegs) - 1))), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** (((N - k) + Nlegs) + 1)))))
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
elif (modex == 'open'):
Sxp1 = sp.sparse.csr_matrix((dim, dim))
Syp1 = sp.sparse.csr_matrix((dim, dim))
Szp1 = sp.sparse.csr_matrix((dim, dim))
else:
couple1x = st.spintensor((k + 1), N, sx, S)
couple1y = st.spintensor((k + 1), N, sy, S)
couple1z = st.spintensor((k + 1), N, sz, S)
Sxp1 = (Sxp @ couple1x)
Syp1 = (Syp @ couple1y)
Szp1 = (Szp @ couple1z)
if ((N - k) < Nint):
if (modey == 'periodic'):
couple2x = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sx, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2y = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sy, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
couple2z = sp.sparse.kron(sp.sparse.identity((((2 * S) + 1) ** ((k - (Nint * (Nlegs - 1))) - 1)), sp.sparse.kron(sz, sp.sparse.identity((((2 * S) + 1) ** ((N - k) + (Nint * (Nlegs - 1))))))))
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
elif (modey == 'open'):
Sxp2 = sp.sparse.csr_matrix((dim, dim))
Syp2 = sp.sparse.csr_matrix((dim, dim))
Szp2 = sp.sparse.csr_matrix((dim, dim))
else:
couple2x = st.spintensor((k + Nint), N, sx, S)
couple2y = st.spintensor((k + Nint), N, sy, S)
couple2z = st.spintensor((k + Nint), N, sz, S)
Sxp2 = (Sxp @ couple2x)
Syp2 = (Syp @ couple2y)
Szp2 = (Szp @ couple2z)
Sk1 = (Szp * np.cos(((((2 * np.pi) * beta1) * k) + phi1)))
Sk2 = (Szp * np.cos(((((2 * np.pi) * beta2) * k) + phi2)))
Sk = Szp
def interaction(J, xint, yint, zint, siteint, A, B, C, G):
return ((J * (((A * xint) + (B * yint)) + (C * zint))) + (G * siteint))
H1 = interaction(J1, Sxp1.real, Syp1.real, Szp1.real, Sk1, delx1, dely1, delz1, gamma1)
H2 = ((J2 * (((delx2 * Sxp2.real) + (dely2 * Syp2.real)) + (delz2 * Szp2.real))) + (gamma2 * Sk2))
H.append((((U * Sk) + H1) + H2))
return H<|docstring|>Args: 'N' Number of sites
'S' maximum spin for a single particle
'J1' nearest neigbor interaction strength
'U' same site interaction engergy
'delx,dely,delz' anisotropy along the X,Y,Z directions
The quasi field is given by the term: cos(2*pi*beta*k + phi)
'gamma' quasi field interaction strength
'beta' adjust frequency of quasi field
'phi' adjust phase of quasi field
'mode' sets boundary condition as 'open' or 'periodic'<|endoftext|> |
62002b17d93f5ac28b2b4d6fb6d47c99cdebb042d888b5e3f24fc0ebb0582ba5 | def get_version(poetry='pyproject.toml') -> str:
def get_version(poetry='pyproject.toml') -> str:
    """Return the package version string from a pyproject file.

    Args:
        poetry: Path of the pyproject file to read. Bug fix: this parameter
            was previously ignored and 'pyproject.toml' was always opened.

    Returns:
        The ``tool.poetry.version`` value, stripped of whitespace.
    """
    with open(poetry, 'r', encoding='utf-8') as f:
        data = toml.loads(f.read())
    return data['tool']['poetry']['version'].strip()
with open('pyproject.toml', 'r', encoding='utf-8') as f:
data = toml.loads(f.read())
return data['tool']['poetry']['version'].strip() | def get_version(poetry='pyproject.toml') -> str:
with open('pyproject.toml', 'r', encoding='utf-8') as f:
data = toml.loads(f.read())
return data['tool']['poetry']['version'].strip()<|docstring|>Get the version of the package from pyproject file<|endoftext|> |
d62fa8b096aeb64947cb7e75783a72399daed4b01211e3cb3f821b3bed0803a3 | def find_duplicate_index(candidate, data):
def find_duplicate_index(candidate, data):
    """Return the index of the first stop in ``data`` matching ``candidate``.

    A stop matches when its 'name' equals the candidate feature's
    'nomlong' property.

    Args:
        candidate: GeoJSON-like feature exposing
            ``candidate['properties']['nomlong']``.
        data: Mapping with a 'stops' list of dicts, each holding a 'name' key.

    Returns:
        int: Index of the first match in ``data['stops']``, or -1 if absent.
    """
    # Hoist the constant lookup out of the loop; enumerate() replaces the
    # original manually-maintained counter.
    target = candidate['properties']['nomlong']
    for i, arret in enumerate(data['stops']):
        if target == arret['name']:
            return i
    return -1
i = 0
for arret in data['stops']:
if (candidate['properties']['nomlong'] == arret['name']):
return i
i += 1
return (- 1) | def find_duplicate_index(candidate, data):
i = 0
for arret in data['stops']:
if (candidate['properties']['nomlong'] == arret['name']):
return i
i += 1
return (- 1)<|docstring|>Returns the index of the occurrence of candidate in data. If no duplicate, returns -1<|endoftext|> |
c44e74b81e13a2e118db3c40b0066f4cd2d739daa68e558950ee8a14fdd6b76d | def run(self):
    """Main tracking loop.

    Processes the detections frame by frame and, within each frame, bbox by
    bbox: each bbox is matched against the particles in ``self.have_tracked``
    by centroid distance (see ``search``); a close match extends that track,
    otherwise a new track is started.  After each frame the age counter of
    every active track is incremented, and tracks exceeding the frame limit
    are marked 'OVER'.  Finally one Pascal-VOC XML file per frame is written
    with the updated box labels.
    """
    flag = 0
    for i in range(len(self.bboxs)):
        # Match every complete (6-field) box of frame i against the tracks.
        for j in range(len(self.bboxs[i])):
            bbox = self.bboxs[i][j]
            if (len(bbox) == 6):
                self.search(i, j, bbox)
        if (len(self.have_tracked) > 0):
            # Maximum number of frames a track may age before being closed.
            win_no_nano = self.Frame_limit
            if (self.Method == 0):
                for k in range(len(self.have_tracked)):
                    # have_tracked[k][0] appears to be [cx, cy, age], with
                    # 'OVER' in the age slot marking a finished track --
                    # TODO confirm against search()/callers.
                    if (self.have_tracked[k][0][2] != 'OVER'):
                        if (self.have_tracked[k][0][2] < win_no_nano):
                            self.have_tracked[k][0][2] += 1
                        elif ((self.have_tracked[k][0][2] >= win_no_nano) and ((self.have_tracked[k][(- 1)][0] % self.SubImg_T) != 0)):
                            self.have_tracked[k][0][2] = 'OVER'
            elif (self.Method == 1):
                for k in range(len(self.have_tracked)):
                    if (self.have_tracked[k][0][2] != 'OVER'):
                        if (self.have_tracked[k][0][2] < win_no_nano):
                            self.have_tracked[k][0][2] += 1
                        if ((len(self.have_tracked[k]) >= 2) and (self.have_tracked[k][0][2] >= 5)):
                            # A sufficiently old track whose last observation
                            # is labelled 'debinding' is closed.
                            if (self.have_tracked[k][(- 1)][2] == 'debinding'):
                                self.have_tracked[k][0][2] = 'OVER'
        flag += 1
        if (self.quick_flag == 1):
            # Quick mode: stop after the first frame.
            break
        # Report progress (percent of frames processed) to the GUI.
        prograssbar_value = (round((flag / len(self.bboxs)), 2) * 100)
        self.progressBarValue[int].emit(int(prograssbar_value))
    # Write one Pascal-VOC XML per frame with the (possibly re-labelled) boxes.
    for i in range(len(self.bboxs)):
        newbbox = self.bboxs[i]
        name = int(self.dir[i].split('.')[0])
        filename = ('%d.tif' % name)
        xmlwriter = PascalVocWriter('VOC2007', filename, [523, 525, 1])
        for box in newbbox:
            xmlwriter.addBndBox(box[1], box[2], box[3], box[4], box[0], '0', box[5])
        xmlwriter.save((((self.track_xml_dir + '/') + str(name)) + '.xml'))
    # Signal the GUI that tracking has finished.
    self.after_track[int].emit(1)
每个bbox:对self.have_tracked中的各个particle进行比较,求算质心距离,符合的则归为一类,不符合的则创建一类
对一帧处理结束要给所有的正在追踪Particle + 1,而之前每次放入新的Particle轨迹会初始化其数值 | libs/nanotrack.py | run | bevarb/AutoDetect | 1 | python | def run(self):
'主循环,逐帧进行处理,每帧逐bbox进行处理,\n 每个bbox:对self.have_tracked中的各个particle进行比较,求算质心距离,符合的则归为一类,不符合的则创建一类\n 对一帧处理结束要给所有的正在追踪Particle + 1,而之前每次放入新的Particle轨迹会初始化其数值\n '
flag = 0
for i in range(len(self.bboxs)):
for j in range(len(self.bboxs[i])):
bbox = self.bboxs[i][j]
if (len(bbox) == 6):
self.search(i, j, bbox)
if (len(self.have_tracked) > 0):
win_no_nano = self.Frame_limit
if (self.Method == 0):
for k in range(len(self.have_tracked)):
if (self.have_tracked[k][0][2] != 'OVER'):
if (self.have_tracked[k][0][2] < win_no_nano):
self.have_tracked[k][0][2] += 1
elif ((self.have_tracked[k][0][2] >= win_no_nano) and ((self.have_tracked[k][(- 1)][0] % self.SubImg_T) != 0)):
self.have_tracked[k][0][2] = 'OVER'
elif (self.Method == 1):
for k in range(len(self.have_tracked)):
if (self.have_tracked[k][0][2] != 'OVER'):
if (self.have_tracked[k][0][2] < win_no_nano):
self.have_tracked[k][0][2] += 1
if ((len(self.have_tracked[k]) >= 2) and (self.have_tracked[k][0][2] >= 5)):
if (self.have_tracked[k][(- 1)][2] == 'debinding'):
self.have_tracked[k][0][2] = 'OVER'
flag += 1
if (self.quick_flag == 1):
break
prograssbar_value = (round((flag / len(self.bboxs)), 2) * 100)
self.progressBarValue[int].emit(int(prograssbar_value))
for i in range(len(self.bboxs)):
newbbox = self.bboxs[i]
name = int(self.dir[i].split('.')[0])
filename = ('%d.tif' % name)
xmlwriter = PascalVocWriter('VOC2007', filename, [523, 525, 1])
for box in newbbox:
xmlwriter.addBndBox(box[1], box[2], box[3], box[4], box[0], '0', box[5])
xmlwriter.save((((self.track_xml_dir + '/') + str(name)) + '.xml'))
self.after_track[int].emit(1) | def run(self):
'主循环,逐帧进行处理,每帧逐bbox进行处理,\n 每个bbox:对self.have_tracked中的各个particle进行比较,求算质心距离,符合的则归为一类,不符合的则创建一类\n 对一帧处理结束要给所有的正在追踪Particle + 1,而之前每次放入新的Particle轨迹会初始化其数值\n '
flag = 0
for i in range(len(self.bboxs)):
for j in range(len(self.bboxs[i])):
bbox = self.bboxs[i][j]
if (len(bbox) == 6):
self.search(i, j, bbox)
if (len(self.have_tracked) > 0):
win_no_nano = self.Frame_limit
if (self.Method == 0):
for k in range(len(self.have_tracked)):
if (self.have_tracked[k][0][2] != 'OVER'):
if (self.have_tracked[k][0][2] < win_no_nano):
self.have_tracked[k][0][2] += 1
elif ((self.have_tracked[k][0][2] >= win_no_nano) and ((self.have_tracked[k][(- 1)][0] % self.SubImg_T) != 0)):
self.have_tracked[k][0][2] = 'OVER'
elif (self.Method == 1):
for k in range(len(self.have_tracked)):
if (self.have_tracked[k][0][2] != 'OVER'):
if (self.have_tracked[k][0][2] < win_no_nano):
self.have_tracked[k][0][2] += 1
if ((len(self.have_tracked[k]) >= 2) and (self.have_tracked[k][0][2] >= 5)):
if (self.have_tracked[k][(- 1)][2] == 'debinding'):
self.have_tracked[k][0][2] = 'OVER'
flag += 1
if (self.quick_flag == 1):
break
prograssbar_value = (round((flag / len(self.bboxs)), 2) * 100)
self.progressBarValue[int].emit(int(prograssbar_value))
for i in range(len(self.bboxs)):
newbbox = self.bboxs[i]
name = int(self.dir[i].split('.')[0])
filename = ('%d.tif' % name)
xmlwriter = PascalVocWriter('VOC2007', filename, [523, 525, 1])
for box in newbbox:
xmlwriter.addBndBox(box[1], box[2], box[3], box[4], box[0], '0', box[5])
xmlwriter.save((((self.track_xml_dir + '/') + str(name)) + '.xml'))
self.after_track[int].emit(1)<|docstring|>主循环,逐帧进行处理,每帧逐bbox进行处理,
每个bbox:对self.have_tracked中的各个particle进行比较,求算质心距离,符合的则归为一类,不符合的则创建一类
对一帧处理结束要给所有的正在追踪Particle + 1,而之前每次放入新的Particle轨迹会初始化其数值<|endoftext|> |
2d4b8b4831c5824d9627841bb81ae3b8626aa1c85547e712388964e6d8a08f1e | def search(self, frame, box_id, bbox):
    """Assign the bbox at (frame, box_id) to an existing track or start a new one.

    TODO (from the original author): the box centre is currently used as the
    particle position; switching to the brightest pixel of the patch would
    improve the localisation precision considerably.
    """
    # Track head record: [cx, cy, age]; the age slot starts at 0 and is
    # advanced / set to 'OVER' by run().
    central = [((bbox[3] + bbox[1]) // 2), ((bbox[4] + bbox[2]) // 2), 0]
    flag = (- 1)
    if (len(self.have_tracked) == 0):
        # First detection ever: open track 0.
        self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
        self.xml_modify(frame, box_id, 0)
    else:
        for i in range(len(self.have_tracked)):
            if (self.have_tracked[i][0][2] != 'OVER'):
                temp = self.have_tracked[i][0]
                dist = self.distEclud(central, temp)
                if (dist < self.L_limit):
                    name = self.bboxs[frame][box_id][0]
                    # In Method 0, a labelled box whose previous observation
                    # falls on a sub-image boundary frame does not extend the
                    # track -- NOTE(review): confirm intended behavior.
                    if (((self.have_tracked[i][(- 1)][0] % self.SubImg_T) == 0) and (name not in ['NONE', 'None', 'none']) and (self.Method == 0)):
                        pass
                    else:
                        # Append observation [frame number, box index,
                        # label, bbox[5]] and refresh the track centroid.
                        flag = 1
                        self.have_tracked[i].append([int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]])
                        self.update_mass_central(i)
                        self.xml_modify(frame, box_id, i)
                        break
        if (flag == (- 1)):
            # No active track was close enough: start a new one.
            self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
            self.xml_modify(frame, box_id, (len(self.have_tracked) - 1))
central = [((bbox[3] + bbox[1]) // 2), ((bbox[4] + bbox[2]) // 2), 0]
flag = (- 1)
if (len(self.have_tracked) == 0):
self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
self.xml_modify(frame, box_id, 0)
else:
for i in range(len(self.have_tracked)):
if (self.have_tracked[i][0][2] != 'OVER'):
temp = self.have_tracked[i][0]
dist = self.distEclud(central, temp)
if (dist < self.L_limit):
name = self.bboxs[frame][box_id][0]
if (((self.have_tracked[i][(- 1)][0] % self.SubImg_T) == 0) and (name not in ['NONE', 'None', 'none']) and (self.Method == 0)):
pass
else:
flag = 1
self.have_tracked[i].append([int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]])
self.update_mass_central(i)
self.xml_modify(frame, box_id, i)
break
if (flag == (- 1)):
self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
self.xml_modify(frame, box_id, (len(self.have_tracked) - 1)) | def search(self, frame, box_id, bbox):
central = [((bbox[3] + bbox[1]) // 2), ((bbox[4] + bbox[2]) // 2), 0]
flag = (- 1)
if (len(self.have_tracked) == 0):
self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
self.xml_modify(frame, box_id, 0)
else:
for i in range(len(self.have_tracked)):
if (self.have_tracked[i][0][2] != 'OVER'):
temp = self.have_tracked[i][0]
dist = self.distEclud(central, temp)
if (dist < self.L_limit):
name = self.bboxs[frame][box_id][0]
if (((self.have_tracked[i][(- 1)][0] % self.SubImg_T) == 0) and (name not in ['NONE', 'None', 'none']) and (self.Method == 0)):
pass
else:
flag = 1
self.have_tracked[i].append([int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]])
self.update_mass_central(i)
self.xml_modify(frame, box_id, i)
break
if (flag == (- 1)):
self.have_tracked.append([central, [int(self.dir[frame].split('.')[0]), box_id, bbox[0], bbox[5]]])
self.xml_modify(frame, box_id, (len(self.have_tracked) - 1))<|docstring|>TODO:现在用的是中心位置,后面可以换成图片最亮点的位置,这样精度会提高很多<|endoftext|> |
a28355540a4650c75b303e24c28d43ce103071e3065f85f28f8a3fabcf897ea7 | def update_mass_central(self, ID):
'更新质心位置,每次放入新的粒子都需要更新'
ID_info = self.have_tracked[ID]
x = 0
y = 0
for i in range((len(ID_info) - 1)):
frame_name = ID_info[(i + 1)][0]
temp_box_id = ID_info[(i + 1)][1]
bboxs = self.bboxs[((frame_name - 1) // self.SubImg_Step)][temp_box_id]
(temp_x, temp_y) = self.get_central_point(bboxs)
x += temp_x
y += temp_y
x_mean = (x // (len(ID_info) - 1))
y_mean = (y // (len(ID_info) - 1))
self.have_tracked[ID][0][0:3] = [x_mean, y_mean, 0] | 更新质心位置,每次放入新的粒子都需要更新 | libs/nanotrack.py | update_mass_central | bevarb/AutoDetect | 1 | python | def update_mass_central(self, ID):
ID_info = self.have_tracked[ID]
x = 0
y = 0
for i in range((len(ID_info) - 1)):
frame_name = ID_info[(i + 1)][0]
temp_box_id = ID_info[(i + 1)][1]
bboxs = self.bboxs[((frame_name - 1) // self.SubImg_Step)][temp_box_id]
(temp_x, temp_y) = self.get_central_point(bboxs)
x += temp_x
y += temp_y
x_mean = (x // (len(ID_info) - 1))
y_mean = (y // (len(ID_info) - 1))
self.have_tracked[ID][0][0:3] = [x_mean, y_mean, 0] | def update_mass_central(self, ID):
ID_info = self.have_tracked[ID]
x = 0
y = 0
for i in range((len(ID_info) - 1)):
frame_name = ID_info[(i + 1)][0]
temp_box_id = ID_info[(i + 1)][1]
bboxs = self.bboxs[((frame_name - 1) // self.SubImg_Step)][temp_box_id]
(temp_x, temp_y) = self.get_central_point(bboxs)
x += temp_x
y += temp_y
x_mean = (x // (len(ID_info) - 1))
y_mean = (y // (len(ID_info) - 1))
self.have_tracked[ID][0][0:3] = [x_mean, y_mean, 0]<|docstring|>更新质心位置,每次放入新的粒子都需要更新<|endoftext|> |
0ed482f2e2091aca4d29c2c6ffbf68431209df784748592bba59953f222c2b17 | def distEclud(self, vecA, vecB):
'欧式距离\n 输入:向量A, 向量B\n 输出:两个向量的欧式距离\n '
a = ((vecA[0] - vecB[0]) ** 2)
b = ((vecA[1] - vecB[1]) ** 2)
return math.sqrt((a + b)) | 欧式距离
输入:向量A, 向量B
输出:两个向量的欧式距离 | libs/nanotrack.py | distEclud | bevarb/AutoDetect | 1 | python | def distEclud(self, vecA, vecB):
'欧式距离\n 输入:向量A, 向量B\n 输出:两个向量的欧式距离\n '
a = ((vecA[0] - vecB[0]) ** 2)
b = ((vecA[1] - vecB[1]) ** 2)
return math.sqrt((a + b)) | def distEclud(self, vecA, vecB):
'欧式距离\n 输入:向量A, 向量B\n 输出:两个向量的欧式距离\n '
a = ((vecA[0] - vecB[0]) ** 2)
b = ((vecA[1] - vecB[1]) ** 2)
return math.sqrt((a + b))<|docstring|>欧式距离
输入:向量A, 向量B
输出:两个向量的欧式距离<|endoftext|> |
b7f9088efd02a740a3874fdb7afb5f3f11d8d56dfd3fe7ca9935b9aa0133f45c | def xml_modify(self, frame, box_id, ID, find_debinding=0):
'用于修改每个Particle的分类'
name = self.bboxs[frame][box_id][0]
if (name in ['NONE', 'None', 'none']):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
for i in range(1, len(self.have_tracked[ID])):
if ([int(self.dir[frame].split('.')[0]), box_id] == self.have_tracked[ID][i]):
self.have_tracked[ID][i].append((('NONE' + '__ID:') + str(ID)))
self.have_tracked[ID][0].append('Have None')
elif (len(self.have_tracked[ID][0]) == 4):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
else:
self.bboxs[frame][box_id][0] = ((('ID:' + str(ID)) + ('|%s' % name)) + ('|%s' % self.bboxs[frame][box_id][5])) | 用于修改每个Particle的分类 | libs/nanotrack.py | xml_modify | bevarb/AutoDetect | 1 | python | def xml_modify(self, frame, box_id, ID, find_debinding=0):
name = self.bboxs[frame][box_id][0]
if (name in ['NONE', 'None', 'none']):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
for i in range(1, len(self.have_tracked[ID])):
if ([int(self.dir[frame].split('.')[0]), box_id] == self.have_tracked[ID][i]):
self.have_tracked[ID][i].append((('NONE' + '__ID:') + str(ID)))
self.have_tracked[ID][0].append('Have None')
elif (len(self.have_tracked[ID][0]) == 4):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
else:
self.bboxs[frame][box_id][0] = ((('ID:' + str(ID)) + ('|%s' % name)) + ('|%s' % self.bboxs[frame][box_id][5])) | def xml_modify(self, frame, box_id, ID, find_debinding=0):
name = self.bboxs[frame][box_id][0]
if (name in ['NONE', 'None', 'none']):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
for i in range(1, len(self.have_tracked[ID])):
if ([int(self.dir[frame].split('.')[0]), box_id] == self.have_tracked[ID][i]):
self.have_tracked[ID][i].append((('NONE' + '__ID:') + str(ID)))
self.have_tracked[ID][0].append('Have None')
elif (len(self.have_tracked[ID][0]) == 4):
self.bboxs[frame][box_id][0] = (('NONE' + '__ID:') + str(ID))
else:
self.bboxs[frame][box_id][0] = ((('ID:' + str(ID)) + ('|%s' % name)) + ('|%s' % self.bboxs[frame][box_id][5]))<|docstring|>用于修改每个Particle的分类<|endoftext|> |
7c6924fe51630ae70f8e0a51ef988398109fd7b8f2633064a34cba8103e234ee | def cli_to_args():
'\n converts the command line interface to a series of args\n '
cli = argparse.ArgumentParser(description='')
cli.add_argument('-input_dir', type=str, required=True, help='Location where the input dir is.')
cli.add_argument('-output_dir', type=str, required=True, help='Top level output dir')
cli.add_argument('-png_size', type=int, default=200, help='Side of a png square for the output')
return cli.parse_args() | converts the command line interface to a series of args | scripts/cowboyfy.py | cli_to_args | timsee/cowboymoji | 17 | python | def cli_to_args():
'\n \n '
cli = argparse.ArgumentParser(description=)
cli.add_argument('-input_dir', type=str, required=True, help='Location where the input dir is.')
cli.add_argument('-output_dir', type=str, required=True, help='Top level output dir')
cli.add_argument('-png_size', type=int, default=200, help='Side of a png square for the output')
return cli.parse_args() | def cli_to_args():
'\n \n '
cli = argparse.ArgumentParser(description=)
cli.add_argument('-input_dir', type=str, required=True, help='Location where the input dir is.')
cli.add_argument('-output_dir', type=str, required=True, help='Top level output dir')
cli.add_argument('-png_size', type=int, default=200, help='Side of a png square for the output')
return cli.parse_args()<|docstring|>converts the command line interface to a series of args<|endoftext|> |
8db859227ee810d1fc3eb6a9c5eedfc20edb5bd7dab9af07dfed092297e2f872 | def test_year_2025(self):
" In 2025 King's Day is on 26 April "
holidays = self.cal.holidays_set(2025)
self.assertIn(date(2025, 4, 26), holidays) | In 2025 King's Day is on 26 April | workalendar/tests/test_europe.py | test_year_2025 | JanMalte/workalendar | 0 | python | def test_year_2025(self):
" "
holidays = self.cal.holidays_set(2025)
self.assertIn(date(2025, 4, 26), holidays) | def test_year_2025(self):
" "
holidays = self.cal.holidays_set(2025)
self.assertIn(date(2025, 4, 26), holidays)<|docstring|>In 2025 King's Day is on 26 April<|endoftext|> |
feff2da6608810e11eced6f2190df933250cccdd4ee9599cba2d15000cae34fd | def test_year_1990(self):
" In 1990 Queen's day was on 30 April "
holidays = self.cal.holidays_set(1990)
self.assertIn(date(1990, 4, 30), holidays) | In 1990 Queen's day was on 30 April | workalendar/tests/test_europe.py | test_year_1990 | JanMalte/workalendar | 0 | python | def test_year_1990(self):
" "
holidays = self.cal.holidays_set(1990)
self.assertIn(date(1990, 4, 30), holidays) | def test_year_1990(self):
" "
holidays = self.cal.holidays_set(1990)
self.assertIn(date(1990, 4, 30), holidays)<|docstring|>In 1990 Queen's day was on 30 April<|endoftext|> |
bfccb871598c8913e3a80bb20299540d3268d152c1f83edb707027354baba0e8 | @staticmethod
def get_connector_properties(root_helper, *args, **kwargs):
'The HGST connector properties.'
return {} | The HGST connector properties. | os_brick/initiator/connectors/hgst.py | get_connector_properties | hemna/os-brick-1 | 1 | python | @staticmethod
def get_connector_properties(root_helper, *args, **kwargs):
return {} | @staticmethod
def get_connector_properties(root_helper, *args, **kwargs):
return {}<|docstring|>The HGST connector properties.<|endoftext|> |
07e35064a11ea4e276b23bb66fcfb122729bd46fbf90d239af8683ff575d162e | def _log_cli_err(self, err):
'Dumps the full command output to a logfile in error cases.'
LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\nerr: %(stderr)s", {'cmd': err.cmd, 'code': err.exit_code, 'stdout': err.stdout, 'stderr': err.stderr}) | Dumps the full command output to a logfile in error cases. | os_brick/initiator/connectors/hgst.py | _log_cli_err | hemna/os-brick-1 | 1 | python | def _log_cli_err(self, err):
LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\nerr: %(stderr)s", {'cmd': err.cmd, 'code': err.exit_code, 'stdout': err.stdout, 'stderr': err.stderr}) | def _log_cli_err(self, err):
LOG.error("CLI fail: '%(cmd)s' = %(code)s\nout: %(stdout)s\nerr: %(stderr)s", {'cmd': err.cmd, 'code': err.exit_code, 'stdout': err.stdout, 'stderr': err.stderr})<|docstring|>Dumps the full command output to a logfile in error cases.<|endoftext|> |
826bd24777eaabbd8d5dc9b718b9655f4e9bb74bd2fece0cf12c3b1f177b07bb | def _find_vgc_host(self):
'Finds vgc-cluster hostname for this box.'
params = [self.VGCCLUSTER, 'domain-list', '-1']
try:
(out, unused) = self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of domain members, check that the cluster is running.')
raise exception.BrickException(message=msg)
domain = out.splitlines()
params = ['ip', 'addr', 'list']
try:
(out, unused) = self._execute(*params, run_as_root=False)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of IP addresses on this host, check permissions and networking.')
raise exception.BrickException(message=msg)
nets = out.splitlines()
for host in domain:
try:
ip = socket.gethostbyname(host)
for l in nets:
x = l.strip()
if x.startswith(('inet %s/' % ip)):
return host
except socket.error:
pass
msg = _("Current host isn't part of HGST domain.")
raise exception.BrickException(message=msg) | Finds vgc-cluster hostname for this box. | os_brick/initiator/connectors/hgst.py | _find_vgc_host | hemna/os-brick-1 | 1 | python | def _find_vgc_host(self):
params = [self.VGCCLUSTER, 'domain-list', '-1']
try:
(out, unused) = self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of domain members, check that the cluster is running.')
raise exception.BrickException(message=msg)
domain = out.splitlines()
params = ['ip', 'addr', 'list']
try:
(out, unused) = self._execute(*params, run_as_root=False)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of IP addresses on this host, check permissions and networking.')
raise exception.BrickException(message=msg)
nets = out.splitlines()
for host in domain:
try:
ip = socket.gethostbyname(host)
for l in nets:
x = l.strip()
if x.startswith(('inet %s/' % ip)):
return host
except socket.error:
pass
msg = _("Current host isn't part of HGST domain.")
raise exception.BrickException(message=msg) | def _find_vgc_host(self):
params = [self.VGCCLUSTER, 'domain-list', '-1']
try:
(out, unused) = self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of domain members, check that the cluster is running.')
raise exception.BrickException(message=msg)
domain = out.splitlines()
params = ['ip', 'addr', 'list']
try:
(out, unused) = self._execute(*params, run_as_root=False)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = _('Unable to get list of IP addresses on this host, check permissions and networking.')
raise exception.BrickException(message=msg)
nets = out.splitlines()
for host in domain:
try:
ip = socket.gethostbyname(host)
for l in nets:
x = l.strip()
if x.startswith(('inet %s/' % ip)):
return host
except socket.error:
pass
msg = _("Current host isn't part of HGST domain.")
raise exception.BrickException(message=msg)<|docstring|>Finds vgc-cluster hostname for this box.<|endoftext|> |
1d99430cfdcade25b88cda8eeb225b02ee4366fa21a7aedf0b2ffbbb19f5a313 | def _hostname(self):
'Returns hostname to use for cluster operations on this box.'
if (self._vgc_host is None):
self._vgc_host = self._find_vgc_host()
return self._vgc_host | Returns hostname to use for cluster operations on this box. | os_brick/initiator/connectors/hgst.py | _hostname | hemna/os-brick-1 | 1 | python | def _hostname(self):
if (self._vgc_host is None):
self._vgc_host = self._find_vgc_host()
return self._vgc_host | def _hostname(self):
if (self._vgc_host is None):
self._vgc_host = self._find_vgc_host()
return self._vgc_host<|docstring|>Returns hostname to use for cluster operations on this box.<|endoftext|> |
0cbddac031f99d2698ffd47672d2bf411f687bc2cc11929b90db5be83bacb576 | @utils.trace
def connect_volume(self, connection_properties):
'Attach a Space volume to running host.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n connection_properties for HGST must include:\n name - Name of space to attach\n :type connection_properties: dict\n :returns: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
device_info = {'type': 'block', 'device': connection_properties['name'], 'path': ('/dev/' + connection_properties['name'])}
volname = device_info['device']
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', volname]
params += ['-A', self._hostname()]
params += ['--action', 'ADD']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % volname)
raise exception.BrickException(message=msg)
return device_info | Attach a Space volume to running host.
:param connection_properties: The dictionary that describes all
of the target volume attributes.
connection_properties for HGST must include:
name - Name of space to attach
:type connection_properties: dict
:returns: dict | os_brick/initiator/connectors/hgst.py | connect_volume | hemna/os-brick-1 | 1 | python | @utils.trace
def connect_volume(self, connection_properties):
'Attach a Space volume to running host.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n connection_properties for HGST must include:\n name - Name of space to attach\n :type connection_properties: dict\n :returns: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
device_info = {'type': 'block', 'device': connection_properties['name'], 'path': ('/dev/' + connection_properties['name'])}
volname = device_info['device']
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', volname]
params += ['-A', self._hostname()]
params += ['--action', 'ADD']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % volname)
raise exception.BrickException(message=msg)
return device_info | @utils.trace
def connect_volume(self, connection_properties):
'Attach a Space volume to running host.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n connection_properties for HGST must include:\n name - Name of space to attach\n :type connection_properties: dict\n :returns: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
device_info = {'type': 'block', 'device': connection_properties['name'], 'path': ('/dev/' + connection_properties['name'])}
volname = device_info['device']
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', volname]
params += ['-A', self._hostname()]
params += ['--action', 'ADD']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % volname)
raise exception.BrickException(message=msg)
return device_info<|docstring|>Attach a Space volume to running host.
:param connection_properties: The dictionary that describes all
of the target volume attributes.
connection_properties for HGST must include:
name - Name of space to attach
:type connection_properties: dict
:returns: dict<|endoftext|> |
0370d3832ebdb202eaec108f1df12cf1cefc936b83fad5b87f0cebb1a6216506 | @utils.trace
def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False):
'Detach and flush the volume.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n For HGST must include:\n name - Name of space to detach\n noremovehost - Host which should never be removed\n :type connection_properties: dict\n :param device_info: historical difference, but same as connection_props\n :type device_info: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
if ('noremovehost' not in connection_properties):
msg = _("Connection properties missing 'noremovehost' field.")
raise exception.BrickException(message=msg)
if (connection_properties['noremovehost'] != self._hostname()):
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', connection_properties['name']]
params += ['-A', self._hostname()]
params += ['--action', 'DELETE']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % connection_properties['name'])
raise exception.BrickException(message=msg) | Detach and flush the volume.
:param connection_properties: The dictionary that describes all
of the target volume attributes.
For HGST must include:
name - Name of space to detach
noremovehost - Host which should never be removed
:type connection_properties: dict
:param device_info: historical difference, but same as connection_props
:type device_info: dict | os_brick/initiator/connectors/hgst.py | disconnect_volume | hemna/os-brick-1 | 1 | python | @utils.trace
def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False):
'Detach and flush the volume.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n For HGST must include:\n name - Name of space to detach\n noremovehost - Host which should never be removed\n :type connection_properties: dict\n :param device_info: historical difference, but same as connection_props\n :type device_info: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
if ('noremovehost' not in connection_properties):
msg = _("Connection properties missing 'noremovehost' field.")
raise exception.BrickException(message=msg)
if (connection_properties['noremovehost'] != self._hostname()):
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', connection_properties['name']]
params += ['-A', self._hostname()]
params += ['--action', 'DELETE']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % connection_properties['name'])
raise exception.BrickException(message=msg) | @utils.trace
def disconnect_volume(self, connection_properties, device_info, force=False, ignore_errors=False):
'Detach and flush the volume.\n\n :param connection_properties: The dictionary that describes all\n of the target volume attributes.\n For HGST must include:\n name - Name of space to detach\n noremovehost - Host which should never be removed\n :type connection_properties: dict\n :param device_info: historical difference, but same as connection_props\n :type device_info: dict\n '
if (connection_properties is None):
msg = _('Connection properties passed in as None.')
raise exception.BrickException(message=msg)
if ('name' not in connection_properties):
msg = _("Connection properties missing 'name' field.")
raise exception.BrickException(message=msg)
if ('noremovehost' not in connection_properties):
msg = _("Connection properties missing 'noremovehost' field.")
raise exception.BrickException(message=msg)
if (connection_properties['noremovehost'] != self._hostname()):
params = [self.VGCCLUSTER, 'space-set-apphosts']
params += ['-n', connection_properties['name']]
params += ['-A', self._hostname()]
params += ['--action', 'DELETE']
try:
self._execute(*params, run_as_root=True, root_helper=self._root_helper)
except putils.ProcessExecutionError as err:
self._log_cli_err(err)
msg = (_('Unable to set apphost for space %s') % connection_properties['name'])
raise exception.BrickException(message=msg)<|docstring|>Detach and flush the volume.
:param connection_properties: The dictionary that describes all
of the target volume attributes.
For HGST must include:
name - Name of space to detach
noremovehost - Host which should never be removed
:type connection_properties: dict
:param device_info: historical difference, but same as connection_props
:type device_info: dict<|endoftext|> |
a47415db6ff6dc52bf519ead81702800c8c5351489ffa3669e4890c9c68c9404 | def apply(self, Weight):
'\n Map the weights to arrays.\n require args:\n Weight: The weights of a layer (float), include weight & bias\n '
WeightMax = max(Weight.max(), abs(Weight.min()))
Weight = np.round(((Weight / WeightMax) * self.RangeMax))
Weight = np.where((Weight > (self.RangeMax - 1)), (self.RangeMax - 1), Weight)
Weight += self.RangeMax
Weight = np.abs(Weight)
self.numLayerOutput = Weight.shape[1]
if (self.numCellperWeight > 1):
Weight = Weight.T
WeightSp = []
Base = np.asarray([((2 ** self.CellBits) ** x) for x in range((self.numCellperWeight - 1), (- 1), (- 1))])
for i in range(Weight.shape[0]):
for j in range(self.numCellperWeight):
SpiltTmp = (Weight[i] // Base[j])
Weight[i] = (Weight[i] % Base[j])
WeightSp.append(SpiltTmp)
WeightSp = np.asarray(WeightSp)
Weight = WeightSp.T
numCoreV = int(np.ceil((Weight.shape[0] / self.numRow)))
numCoreH = int(np.ceil((Weight.shape[1] / (self.numCol - self.numCellperWeight))))
CoresInfo = (numCoreV, numCoreH)
numInput = (numCoreV * self.numRow)
numOutput = (numCoreH * (self.numCol - self.numCellperWeight))
WeightMap = np.concatenate((np.zeros(((numInput - Weight.shape[0]), Weight.shape[1])), Weight), axis=0)
WeightMap = np.concatenate((WeightMap, np.zeros((numInput, (numOutput - Weight.shape[1])))), axis=1)
WeightVsp = np.vsplit(WeightMap, numCoreV)
for i in range(numCoreV):
WeightHsp = np.hsplit(WeightVsp[i], numCoreH)
for j in range(numCoreH):
WeightHsp[j] = np.concatenate((WeightHsp[j], ((2 ** (self.CellBits - 1)) * np.ones((self.numRow, 1))), np.zeros((self.numRow, (self.numCellperWeight - 1)))), axis=1)
WeightVsp[i] = WeightHsp
Weight = WeightVsp
Gstep = ((self.Gmax - self.Gmin) / ((2 ** self.CellBits) - 1))
for i in range(numCoreV):
for j in range(numCoreH):
Weight[i][j] = (self.Gmin + (Gstep * Weight[i][j]))
Weight2 = Weight[i][j]
coeff = [(- 0.0006), 0.062, 0.72]
WeightSD = ((((coeff[0] * (Weight[i][j] * 1000000.0)) * (Weight[i][j] * 1000000.0)) + (coeff[1] * (Weight[i][j] * 1000000.0))) + coeff[2])
WeightSD = (WeightSD * 1e-06)
Weight[i][j] = ((np.random.randn(self.numRow, self.numCol) * WeightSD) + Weight[i][j])
return (Weight, CoresInfo) | Map the weights to arrays.
require args:
Weight: The weights of a layer (float), include weight & bias | simulator/simulator/nnsim/module/nncompile.py | apply | thuime/XPEsim | 27 | python | def apply(self, Weight):
'\n Map the weights to arrays.\n require args:\n Weight: The weights of a layer (float), include weight & bias\n '
WeightMax = max(Weight.max(), abs(Weight.min()))
Weight = np.round(((Weight / WeightMax) * self.RangeMax))
Weight = np.where((Weight > (self.RangeMax - 1)), (self.RangeMax - 1), Weight)
Weight += self.RangeMax
Weight = np.abs(Weight)
self.numLayerOutput = Weight.shape[1]
if (self.numCellperWeight > 1):
Weight = Weight.T
WeightSp = []
Base = np.asarray([((2 ** self.CellBits) ** x) for x in range((self.numCellperWeight - 1), (- 1), (- 1))])
for i in range(Weight.shape[0]):
for j in range(self.numCellperWeight):
SpiltTmp = (Weight[i] // Base[j])
Weight[i] = (Weight[i] % Base[j])
WeightSp.append(SpiltTmp)
WeightSp = np.asarray(WeightSp)
Weight = WeightSp.T
numCoreV = int(np.ceil((Weight.shape[0] / self.numRow)))
numCoreH = int(np.ceil((Weight.shape[1] / (self.numCol - self.numCellperWeight))))
CoresInfo = (numCoreV, numCoreH)
numInput = (numCoreV * self.numRow)
numOutput = (numCoreH * (self.numCol - self.numCellperWeight))
WeightMap = np.concatenate((np.zeros(((numInput - Weight.shape[0]), Weight.shape[1])), Weight), axis=0)
WeightMap = np.concatenate((WeightMap, np.zeros((numInput, (numOutput - Weight.shape[1])))), axis=1)
WeightVsp = np.vsplit(WeightMap, numCoreV)
for i in range(numCoreV):
WeightHsp = np.hsplit(WeightVsp[i], numCoreH)
for j in range(numCoreH):
WeightHsp[j] = np.concatenate((WeightHsp[j], ((2 ** (self.CellBits - 1)) * np.ones((self.numRow, 1))), np.zeros((self.numRow, (self.numCellperWeight - 1)))), axis=1)
WeightVsp[i] = WeightHsp
Weight = WeightVsp
Gstep = ((self.Gmax - self.Gmin) / ((2 ** self.CellBits) - 1))
for i in range(numCoreV):
for j in range(numCoreH):
Weight[i][j] = (self.Gmin + (Gstep * Weight[i][j]))
Weight2 = Weight[i][j]
coeff = [(- 0.0006), 0.062, 0.72]
WeightSD = ((((coeff[0] * (Weight[i][j] * 1000000.0)) * (Weight[i][j] * 1000000.0)) + (coeff[1] * (Weight[i][j] * 1000000.0))) + coeff[2])
WeightSD = (WeightSD * 1e-06)
Weight[i][j] = ((np.random.randn(self.numRow, self.numCol) * WeightSD) + Weight[i][j])
return (Weight, CoresInfo) | def apply(self, Weight):
'\n Map the weights to arrays.\n require args:\n Weight: The weights of a layer (float), include weight & bias\n '
WeightMax = max(Weight.max(), abs(Weight.min()))
Weight = np.round(((Weight / WeightMax) * self.RangeMax))
Weight = np.where((Weight > (self.RangeMax - 1)), (self.RangeMax - 1), Weight)
Weight += self.RangeMax
Weight = np.abs(Weight)
self.numLayerOutput = Weight.shape[1]
if (self.numCellperWeight > 1):
Weight = Weight.T
WeightSp = []
Base = np.asarray([((2 ** self.CellBits) ** x) for x in range((self.numCellperWeight - 1), (- 1), (- 1))])
for i in range(Weight.shape[0]):
for j in range(self.numCellperWeight):
SpiltTmp = (Weight[i] // Base[j])
Weight[i] = (Weight[i] % Base[j])
WeightSp.append(SpiltTmp)
WeightSp = np.asarray(WeightSp)
Weight = WeightSp.T
numCoreV = int(np.ceil((Weight.shape[0] / self.numRow)))
numCoreH = int(np.ceil((Weight.shape[1] / (self.numCol - self.numCellperWeight))))
CoresInfo = (numCoreV, numCoreH)
numInput = (numCoreV * self.numRow)
numOutput = (numCoreH * (self.numCol - self.numCellperWeight))
WeightMap = np.concatenate((np.zeros(((numInput - Weight.shape[0]), Weight.shape[1])), Weight), axis=0)
WeightMap = np.concatenate((WeightMap, np.zeros((numInput, (numOutput - Weight.shape[1])))), axis=1)
WeightVsp = np.vsplit(WeightMap, numCoreV)
for i in range(numCoreV):
WeightHsp = np.hsplit(WeightVsp[i], numCoreH)
for j in range(numCoreH):
WeightHsp[j] = np.concatenate((WeightHsp[j], ((2 ** (self.CellBits - 1)) * np.ones((self.numRow, 1))), np.zeros((self.numRow, (self.numCellperWeight - 1)))), axis=1)
WeightVsp[i] = WeightHsp
Weight = WeightVsp
Gstep = ((self.Gmax - self.Gmin) / ((2 ** self.CellBits) - 1))
for i in range(numCoreV):
for j in range(numCoreH):
Weight[i][j] = (self.Gmin + (Gstep * Weight[i][j]))
Weight2 = Weight[i][j]
coeff = [(- 0.0006), 0.062, 0.72]
WeightSD = ((((coeff[0] * (Weight[i][j] * 1000000.0)) * (Weight[i][j] * 1000000.0)) + (coeff[1] * (Weight[i][j] * 1000000.0))) + coeff[2])
WeightSD = (WeightSD * 1e-06)
Weight[i][j] = ((np.random.randn(self.numRow, self.numCol) * WeightSD) + Weight[i][j])
return (Weight, CoresInfo)<|docstring|>Map the weights to arrays.
require args:
Weight: The weights of a layer (float), include weight & bias<|endoftext|> |
0653fbce92bca52aedb280a74e20650ce464562f558a6908f347ad438ddc7875 | @register.simple_tag()
def dbs_field(field, inline=False, sr_label=False, prepend=None, append=None):
'\n Basic usage:\n {% dbs_field field %}\n Advanced usage:\n {% dbs_field field inline=True sr_label=True prepend="$" append=".00" %}\n '
if ((not isinstance(field, forms.BoundField)) and settings.DEBUG):
raise Exception('dbs_field: invalid or non-existent field')
templates = getattr(settings, 'DBS_TEMPLATES', 'bootstrap4')
dbs_field = utils.render_dbs_field(templates, field, inline, sr_label, prepend, append)
return dbs_field | Basic usage:
{% dbs_field field %}
Advanced usage:
{% dbs_field field inline=True sr_label=True prepend="$" append=".00" %} | dbs_fields/templatetags/dbs_tags.py | dbs_field | mtokoly/django-bootstrap-fields | 1 | python | @register.simple_tag()
def dbs_field(field, inline=False, sr_label=False, prepend=None, append=None):
'\n Basic usage:\n {% dbs_field field %}\n Advanced usage:\n {% dbs_field field inline=True sr_label=True prepend="$" append=".00" %}\n '
if ((not isinstance(field, forms.BoundField)) and settings.DEBUG):
raise Exception('dbs_field: invalid or non-existent field')
templates = getattr(settings, 'DBS_TEMPLATES', 'bootstrap4')
dbs_field = utils.render_dbs_field(templates, field, inline, sr_label, prepend, append)
return dbs_field | @register.simple_tag()
def dbs_field(field, inline=False, sr_label=False, prepend=None, append=None):
'\n Basic usage:\n {% dbs_field field %}\n Advanced usage:\n {% dbs_field field inline=True sr_label=True prepend="$" append=".00" %}\n '
if ((not isinstance(field, forms.BoundField)) and settings.DEBUG):
raise Exception('dbs_field: invalid or non-existent field')
templates = getattr(settings, 'DBS_TEMPLATES', 'bootstrap4')
dbs_field = utils.render_dbs_field(templates, field, inline, sr_label, prepend, append)
return dbs_field<|docstring|>Basic usage:
{% dbs_field field %}
Advanced usage:
{% dbs_field field inline=True sr_label=True prepend="$" append=".00" %}<|endoftext|> |
36200c479f8deed3f0eafc353e03abc727d9d2b3ad11ca32cf3614fcffbbf739 | async def rpa(url, file, name, window=None, bar=None):
'RPA function\n Args:\n url (string): URL do formulário\n file (string): Arquivo da planilha\n name (string): Nome do candidato\n window (tkinter): window para atualizar\n bar (boolean): Barra de progresso\n\n Returns:\n int: Códido HTTP da submissão\n '
fileRel = xlsxutils.extract(file)
questsRel = (await formutils.get_questions(url))
tasks = []
with ThreadPoolExecutor(max_workers=10) as executor:
loop = asyncio.get_event_loop()
for row in fileRel['rows']:
row['Nome completo do candidato'] = name
quests = []
ans = []
for (c, v) in row.items():
if (c in questsRel):
quests.append(questsRel[c])
ans.append(v)
tasks.append(loop.run_in_executor(executor, formutils.post_answers, *(url, quests, ans)))
result = {'ok': 0, 'notOk': 0}
lenTasks = len(tasks)
if (bar and window):
style = ttk.Style()
style.theme_use('default')
style.configure('blue.Horizontal.TProgressbar', background='blue')
submitBar = Progressbar(window, style='blue.Horizontal.TProgressbar')
submitBar.grid(padx=10, pady=(0, 10), columnspan=2, sticky=(W + E))
submitBar['value'] = 0
submitBar['maximum'] = lenTasks
while len(tasks):
(done, tasks) = (await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED))
for task in done:
if (window and bar):
submitBar['value'] += 1
if window:
window.update()
result[('ok' if (task.result() == 200) else 'notOk')] += 1
if (window and bar):
submitBar.destroy()
return result | RPA function
Args:
url (string): URL do formulário
file (string): Arquivo da planilha
name (string): Nome do candidato
window (tkinter): window para atualizar
bar (boolean): Barra de progresso
Returns:
int: Códido HTTP da submissão | src/another_python_rpa/rpa.py | rpa | aretw0/another-python-rpa | 0 | python | async def rpa(url, file, name, window=None, bar=None):
'RPA function\n Args:\n url (string): URL do formulário\n file (string): Arquivo da planilha\n name (string): Nome do candidato\n window (tkinter): window para atualizar\n bar (boolean): Barra de progresso\n\n Returns:\n int: Códido HTTP da submissão\n '
fileRel = xlsxutils.extract(file)
questsRel = (await formutils.get_questions(url))
tasks = []
with ThreadPoolExecutor(max_workers=10) as executor:
loop = asyncio.get_event_loop()
for row in fileRel['rows']:
row['Nome completo do candidato'] = name
quests = []
ans = []
for (c, v) in row.items():
if (c in questsRel):
quests.append(questsRel[c])
ans.append(v)
tasks.append(loop.run_in_executor(executor, formutils.post_answers, *(url, quests, ans)))
result = {'ok': 0, 'notOk': 0}
lenTasks = len(tasks)
if (bar and window):
style = ttk.Style()
style.theme_use('default')
style.configure('blue.Horizontal.TProgressbar', background='blue')
submitBar = Progressbar(window, style='blue.Horizontal.TProgressbar')
submitBar.grid(padx=10, pady=(0, 10), columnspan=2, sticky=(W + E))
submitBar['value'] = 0
submitBar['maximum'] = lenTasks
while len(tasks):
(done, tasks) = (await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED))
for task in done:
if (window and bar):
submitBar['value'] += 1
if window:
window.update()
result[('ok' if (task.result() == 200) else 'notOk')] += 1
if (window and bar):
submitBar.destroy()
return result | async def rpa(url, file, name, window=None, bar=None):
'RPA function\n Args:\n url (string): URL do formulário\n file (string): Arquivo da planilha\n name (string): Nome do candidato\n window (tkinter): window para atualizar\n bar (boolean): Barra de progresso\n\n Returns:\n int: Códido HTTP da submissão\n '
fileRel = xlsxutils.extract(file)
questsRel = (await formutils.get_questions(url))
tasks = []
with ThreadPoolExecutor(max_workers=10) as executor:
loop = asyncio.get_event_loop()
for row in fileRel['rows']:
row['Nome completo do candidato'] = name
quests = []
ans = []
for (c, v) in row.items():
if (c in questsRel):
quests.append(questsRel[c])
ans.append(v)
tasks.append(loop.run_in_executor(executor, formutils.post_answers, *(url, quests, ans)))
result = {'ok': 0, 'notOk': 0}
lenTasks = len(tasks)
if (bar and window):
style = ttk.Style()
style.theme_use('default')
style.configure('blue.Horizontal.TProgressbar', background='blue')
submitBar = Progressbar(window, style='blue.Horizontal.TProgressbar')
submitBar.grid(padx=10, pady=(0, 10), columnspan=2, sticky=(W + E))
submitBar['value'] = 0
submitBar['maximum'] = lenTasks
while len(tasks):
(done, tasks) = (await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED))
for task in done:
if (window and bar):
submitBar['value'] += 1
if window:
window.update()
result[('ok' if (task.result() == 200) else 'notOk')] += 1
if (window and bar):
submitBar.destroy()
return result<|docstring|>RPA function
Args:
url (string): URL do formulário
file (string): Arquivo da planilha
name (string): Nome do candidato
window (tkinter): window para atualizar
bar (boolean): Barra de progresso
Returns:
int: Códido HTTP da submissão<|endoftext|> |
2d31e077ed278fa6e6f843d55bdf317a73cf5915009ba0797c2504f414da2ae9 | def parse_args(args):
'Parse command line parameters\n\n Args:\n args ([str]): command line parameters as list of strings\n\n Returns:\n :obj:`argparse.Namespace`: command line parameters namespace\n '
parser = argparse.ArgumentParser(description='RPA demonstration')
parser.add_argument('--name', '-n', dest='name', help='Nome do candidato', type=str, metavar='NOME')
parser.add_argument('--url', dest='url', help='URL do formulário', type=str, metavar='URL')
parser.add_argument('--file', '-f', dest='file', help='Arquivo de planilha', type=str, metavar='FILE')
parser.add_argument('--version', action='version', version='another-python-rpa {ver}'.format(ver=__version__))
parser.add_argument('-v', '--verbose', dest='loglevel', help='set loglevel to INFO', action='store_const', const=logging.INFO)
parser.add_argument('-vv', '--very-verbose', dest='loglevel', help='set loglevel to DEBUG', action='store_const', const=logging.DEBUG)
return parser.parse_args(args) | Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace | src/another_python_rpa/rpa.py | parse_args | aretw0/another-python-rpa | 0 | python | def parse_args(args):
'Parse command line parameters\n\n Args:\n args ([str]): command line parameters as list of strings\n\n Returns:\n :obj:`argparse.Namespace`: command line parameters namespace\n '
parser = argparse.ArgumentParser(description='RPA demonstration')
parser.add_argument('--name', '-n', dest='name', help='Nome do candidato', type=str, metavar='NOME')
parser.add_argument('--url', dest='url', help='URL do formulário', type=str, metavar='URL')
parser.add_argument('--file', '-f', dest='file', help='Arquivo de planilha', type=str, metavar='FILE')
parser.add_argument('--version', action='version', version='another-python-rpa {ver}'.format(ver=__version__))
parser.add_argument('-v', '--verbose', dest='loglevel', help='set loglevel to INFO', action='store_const', const=logging.INFO)
parser.add_argument('-vv', '--very-verbose', dest='loglevel', help='set loglevel to DEBUG', action='store_const', const=logging.DEBUG)
return parser.parse_args(args) | def parse_args(args):
'Parse command line parameters\n\n Args:\n args ([str]): command line parameters as list of strings\n\n Returns:\n :obj:`argparse.Namespace`: command line parameters namespace\n '
parser = argparse.ArgumentParser(description='RPA demonstration')
parser.add_argument('--name', '-n', dest='name', help='Nome do candidato', type=str, metavar='NOME')
parser.add_argument('--url', dest='url', help='URL do formulário', type=str, metavar='URL')
parser.add_argument('--file', '-f', dest='file', help='Arquivo de planilha', type=str, metavar='FILE')
parser.add_argument('--version', action='version', version='another-python-rpa {ver}'.format(ver=__version__))
parser.add_argument('-v', '--verbose', dest='loglevel', help='set loglevel to INFO', action='store_const', const=logging.INFO)
parser.add_argument('-vv', '--very-verbose', dest='loglevel', help='set loglevel to DEBUG', action='store_const', const=logging.DEBUG)
return parser.parse_args(args)<|docstring|>Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace<|endoftext|> |
e8b3f770d6d59dd588f6a4e27532485976f7c5d5aaca548ad17c8415f2c93284 | def setup_logging(loglevel):
'Setup basic logging\n\n Args:\n loglevel (int): minimum loglevel for emitting messages\n '
logformat = '[%(asctime)s] %(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=loglevel, stream=sys.stdout, format=logformat, datefmt='%Y-%m-%d %H:%M:%S') | Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages | src/another_python_rpa/rpa.py | setup_logging | aretw0/another-python-rpa | 0 | python | def setup_logging(loglevel):
'Setup basic logging\n\n Args:\n loglevel (int): minimum loglevel for emitting messages\n '
logformat = '[%(asctime)s] %(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=loglevel, stream=sys.stdout, format=logformat, datefmt='%Y-%m-%d %H:%M:%S') | def setup_logging(loglevel):
'Setup basic logging\n\n Args:\n loglevel (int): minimum loglevel for emitting messages\n '
logformat = '[%(asctime)s] %(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=loglevel, stream=sys.stdout, format=logformat, datefmt='%Y-%m-%d %H:%M:%S')<|docstring|>Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages<|endoftext|> |
499cf0381c68a383bad78a23aa7e35141c6f16885f1ed6a184dc58e11595ab4b | def main(args):
'Main entry point allowing external calls\n\n Args:\n args ([str]): command line parameter list\n '
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug('Starting view and controllers...')
rpa_window(args.url, args.file, args.name)
' \n file = args.file\n if not file:\n print("Informe o arquivo (caminho completo): ")\n file = input()\n '
'\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(rpa(args.url,file, args.name))\n finally:\n loop.close() '
_logger.info('Script ends here') | Main entry point allowing external calls
Args:
args ([str]): command line parameter list | src/another_python_rpa/rpa.py | main | aretw0/another-python-rpa | 0 | python | def main(args):
'Main entry point allowing external calls\n\n Args:\n args ([str]): command line parameter list\n '
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug('Starting view and controllers...')
rpa_window(args.url, args.file, args.name)
' \n file = args.file\n if not file:\n print("Informe o arquivo (caminho completo): ")\n file = input()\n '
'\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(rpa(args.url,file, args.name))\n finally:\n loop.close() '
_logger.info('Script ends here') | def main(args):
'Main entry point allowing external calls\n\n Args:\n args ([str]): command line parameter list\n '
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug('Starting view and controllers...')
rpa_window(args.url, args.file, args.name)
' \n file = args.file\n if not file:\n print("Informe o arquivo (caminho completo): ")\n file = input()\n '
'\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(rpa(args.url,file, args.name))\n finally:\n loop.close() '
_logger.info('Script ends here')<|docstring|>Main entry point allowing external calls
Args:
args ([str]): command line parameter list<|endoftext|> |
a8232617d58ef3e2a10aafd75108de78a93c9ef4281fba1badb9f5a9a4c32feb | def run():
'Entry point for console_scripts\n '
main(sys.argv[1:]) | Entry point for console_scripts | src/another_python_rpa/rpa.py | run | aretw0/another-python-rpa | 0 | python | def run():
'\n '
main(sys.argv[1:]) | def run():
'\n '
main(sys.argv[1:])<|docstring|>Entry point for console_scripts<|endoftext|> |
525585ae0b78782202a182ca0775ec779d70f00fa2e5b5a927b8985110ef7380 | def reporter(name):
'\n Decorator to register function as a callback when the config sets the\n `"reporter"` config value to `name`.\n\n The wrapped function will be called with each `LintIssue` and the\n contents of the file being linted.\n\n >>> @reporter(\'json\')\n ... def reporter(issue, file_contents):\n ... print(json.dumps(issue._asdict()))\n '
def wrapper(fn):
_REPORTERS[name] = fn
@functools.wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
return wrapped
return wrapper | Decorator to register function as a callback when the config sets the
`"reporter"` config value to `name`.
The wrapped function will be called with each `LintIssue` and the
contents of the file being linted.
>>> @reporter('json')
... def reporter(issue, file_contents):
... print(json.dumps(issue._asdict())) | projects/squabble/squabble/reporter.py | reporter | erik/sketches | 1 | python | def reporter(name):
'\n Decorator to register function as a callback when the config sets the\n `"reporter"` config value to `name`.\n\n The wrapped function will be called with each `LintIssue` and the\n contents of the file being linted.\n\n >>> @reporter(\'json\')\n ... def reporter(issue, file_contents):\n ... print(json.dumps(issue._asdict()))\n '
def wrapper(fn):
_REPORTERS[name] = fn
@functools.wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
return wrapped
return wrapper | def reporter(name):
'\n Decorator to register function as a callback when the config sets the\n `"reporter"` config value to `name`.\n\n The wrapped function will be called with each `LintIssue` and the\n contents of the file being linted.\n\n >>> @reporter(\'json\')\n ... def reporter(issue, file_contents):\n ... print(json.dumps(issue._asdict()))\n '
def wrapper(fn):
_REPORTERS[name] = fn
@functools.wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
return wrapped
return wrapper<|docstring|>Decorator to register function as a callback when the config sets the
`"reporter"` config value to `name`.
The wrapped function will be called with each `LintIssue` and the
contents of the file being linted.
>>> @reporter('json')
... def reporter(issue, file_contents):
... print(json.dumps(issue._asdict()))<|endoftext|> |
6f11f8a8bf4a9f8ae55b9fbd1bc17deab4d83e147d4d2cac195e4708d18a4923 | def report(reporter_name, issues):
'\n Call the named reporter function for every issue in the list of issues.\n '
if (reporter_name not in _REPORTERS):
raise UnknownReporterException(reporter_name)
fn = _REPORTERS[reporter_name]
files = {}
for i in issues:
if ((i.file is not None) and (i.file not in files)):
with open(i.file, 'r') as fp:
files[i.file] = fp.read()
file_contents = files.get(i.file, '')
fn(i, file_contents) | Call the named reporter function for every issue in the list of issues. | projects/squabble/squabble/reporter.py | report | erik/sketches | 1 | python | def report(reporter_name, issues):
'\n \n '
if (reporter_name not in _REPORTERS):
raise UnknownReporterException(reporter_name)
fn = _REPORTERS[reporter_name]
files = {}
for i in issues:
if ((i.file is not None) and (i.file not in files)):
with open(i.file, 'r') as fp:
files[i.file] = fp.read()
file_contents = files.get(i.file, )
fn(i, file_contents) | def report(reporter_name, issues):
'\n \n '
if (reporter_name not in _REPORTERS):
raise UnknownReporterException(reporter_name)
fn = _REPORTERS[reporter_name]
files = {}
for i in issues:
if ((i.file is not None) and (i.file not in files)):
with open(i.file, 'r') as fp:
files[i.file] = fp.read()
file_contents = files.get(i.file, )
fn(i, file_contents)<|docstring|>Call the named reporter function for every issue in the list of issues.<|endoftext|> |
f816afb2f450b748c43934eef8ea46c5213f23a311a8bff2859ea955c81b9ed1 | def location_for_issue(issue):
'\n Correctly return the offset into the file for this issue, or None if it\n cannot be determined.\n '
if (issue.node and (issue.node.location != pglast.Missing)):
return issue.node.location.value
return issue.location | Correctly return the offset into the file for this issue, or None if it
cannot be determined. | projects/squabble/squabble/reporter.py | location_for_issue | erik/sketches | 1 | python | def location_for_issue(issue):
'\n Correctly return the offset into the file for this issue, or None if it\n cannot be determined.\n '
if (issue.node and (issue.node.location != pglast.Missing)):
return issue.node.location.value
return issue.location | def location_for_issue(issue):
'\n Correctly return the offset into the file for this issue, or None if it\n cannot be determined.\n '
if (issue.node and (issue.node.location != pglast.Missing)):
return issue.node.location.value
return issue.location<|docstring|>Correctly return the offset into the file for this issue, or None if it
cannot be determined.<|endoftext|> |
be6a2f7ed9e413a232b7fd8a7dfc58000f65d89a3747ad51fbeb78aa9d780c9b | def issue_to_file_location(issue, contents):
"\n Given a `LintIssue` (which may or may not have a `pglast.Node` with a\n `location` field) and the contents of the file containing that node, return\n the (line_str, line, column) that node is located at, or `('', 1, 0)`.\n "
loc = location_for_issue(issue)
if (loc is None):
return ('', 1, 0)
lines = contents.splitlines()
for (i, line) in enumerate(lines, start=1):
if (loc <= len(line)):
return (line, i, loc)
loc -= (len(line) + 1)
return ('', 1, 0) | Given a `LintIssue` (which may or may not have a `pglast.Node` with a
`location` field) and the contents of the file containing that node, return
the (line_str, line, column) that node is located at, or `('', 1, 0)`. | projects/squabble/squabble/reporter.py | issue_to_file_location | erik/sketches | 1 | python | def issue_to_file_location(issue, contents):
"\n Given a `LintIssue` (which may or may not have a `pglast.Node` with a\n `location` field) and the contents of the file containing that node, return\n the (line_str, line, column) that node is located at, or `(, 1, 0)`.\n "
loc = location_for_issue(issue)
if (loc is None):
return (, 1, 0)
lines = contents.splitlines()
for (i, line) in enumerate(lines, start=1):
if (loc <= len(line)):
return (line, i, loc)
loc -= (len(line) + 1)
return (, 1, 0) | def issue_to_file_location(issue, contents):
"\n Given a `LintIssue` (which may or may not have a `pglast.Node` with a\n `location` field) and the contents of the file containing that node, return\n the (line_str, line, column) that node is located at, or `(, 1, 0)`.\n "
loc = location_for_issue(issue)
if (loc is None):
return (, 1, 0)
lines = contents.splitlines()
for (i, line) in enumerate(lines, start=1):
if (loc <= len(line)):
return (line, i, loc)
loc -= (len(line) + 1)
return (, 1, 0)<|docstring|>Given a `LintIssue` (which may or may not have a `pglast.Node` with a
`location` field) and the contents of the file containing that node, return
the (line_str, line, column) that node is located at, or `('', 1, 0)`.<|endoftext|> |
ca28ccfb519a23ac2bcbce174d32fbe782738efab5f00ae1469bce63f772f4d1 | @parameterize
def export(obj, all_list=None):
'Add a function or class to the __all__.\n\n When exporting something with out using as a decorator do it like so:\n `func = exporter(func)`\n '
all_list.append(obj.__name__)
return obj | Add a function or class to the __all__.
When exporting something with out using as a decorator do it like so:
`func = exporter(func)` | python/baseline/utils.py | export | amyhemmeter/baseline | 0 | python | @parameterize
def export(obj, all_list=None):
'Add a function or class to the __all__.\n\n When exporting something with out using as a decorator do it like so:\n `func = exporter(func)`\n '
all_list.append(obj.__name__)
return obj | @parameterize
def export(obj, all_list=None):
'Add a function or class to the __all__.\n\n When exporting something with out using as a decorator do it like so:\n `func = exporter(func)`\n '
all_list.append(obj.__name__)
return obj<|docstring|>Add a function or class to the __all__.
When exporting something with out using as a decorator do it like so:
`func = exporter(func)`<|endoftext|> |
517f4947430c3ef416ae81684d17ef5258fd9e24f0bce047aa50d5a24206e6a7 | @exporter
def get_logging_level(level):
'Get the logging level as a logging module constant.\n\n :param level: `str` The log level to get.\n\n :returns: The log level, defaults to `INFO`\n '
return getattr(logging, level.upper(), logging.INFO) | Get the logging level as a logging module constant.
:param level: `str` The log level to get.
:returns: The log level, defaults to `INFO` | python/baseline/utils.py | get_logging_level | amyhemmeter/baseline | 0 | python | @exporter
def get_logging_level(level):
'Get the logging level as a logging module constant.\n\n :param level: `str` The log level to get.\n\n :returns: The log level, defaults to `INFO`\n '
return getattr(logging, level.upper(), logging.INFO) | @exporter
def get_logging_level(level):
'Get the logging level as a logging module constant.\n\n :param level: `str` The log level to get.\n\n :returns: The log level, defaults to `INFO`\n '
return getattr(logging, level.upper(), logging.INFO)<|docstring|>Get the logging level as a logging module constant.
:param level: `str` The log level to get.
:returns: The log level, defaults to `INFO`<|endoftext|> |
77aefcc6e9ea02db1398d6849935e5a50f472a2e6d3b20dafa7e81aaa7ff8a48 | @exporter
def get_console_logger(name, level=None, env_key='LOG_LEVEL'):
'A small default logging setup.\n\n This is a default logging setup to print json formatted logging to\n the console. This is used as a default for when baseline/mead is used\n as an API. This can be overridden with the logging config.\n\n The level defaults to `INFO` but can also be read from an env var\n of you choice with a back off to `LOG_LEVEL`\n\n :param name: `str` The logger to create.\n :param level: `str` The level to look for.\n :param env_key: `str` The env var to look in.\n\n :returns: logging.Logger\n '
if (level is None):
level = os.getenv(env_key, os.getenv('LOG_LEVEL', 'INFO'))
level = get_logging_level(level)
logger = logging.getLogger(name)
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = JSONFormatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
return logger | A small default logging setup.
This is a default logging setup to print json formatted logging to
the console. This is used as a default for when baseline/mead is used
as an API. This can be overridden with the logging config.
The level defaults to `INFO` but can also be read from an env var
of you choice with a back off to `LOG_LEVEL`
:param name: `str` The logger to create.
:param level: `str` The level to look for.
:param env_key: `str` The env var to look in.
:returns: logging.Logger | python/baseline/utils.py | get_console_logger | amyhemmeter/baseline | 0 | python | @exporter
def get_console_logger(name, level=None, env_key='LOG_LEVEL'):
'A small default logging setup.\n\n This is a default logging setup to print json formatted logging to\n the console. This is used as a default for when baseline/mead is used\n as an API. This can be overridden with the logging config.\n\n The level defaults to `INFO` but can also be read from an env var\n of you choice with a back off to `LOG_LEVEL`\n\n :param name: `str` The logger to create.\n :param level: `str` The level to look for.\n :param env_key: `str` The env var to look in.\n\n :returns: logging.Logger\n '
if (level is None):
level = os.getenv(env_key, os.getenv('LOG_LEVEL', 'INFO'))
level = get_logging_level(level)
logger = logging.getLogger(name)
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = JSONFormatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
return logger | @exporter
def get_console_logger(name, level=None, env_key='LOG_LEVEL'):
'A small default logging setup.\n\n This is a default logging setup to print json formatted logging to\n the console. This is used as a default for when baseline/mead is used\n as an API. This can be overridden with the logging config.\n\n The level defaults to `INFO` but can also be read from an env var\n of you choice with a back off to `LOG_LEVEL`\n\n :param name: `str` The logger to create.\n :param level: `str` The level to look for.\n :param env_key: `str` The env var to look in.\n\n :returns: logging.Logger\n '
if (level is None):
level = os.getenv(env_key, os.getenv('LOG_LEVEL', 'INFO'))
level = get_logging_level(level)
logger = logging.getLogger(name)
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = JSONFormatter()
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
return logger<|docstring|>A small default logging setup.
This is a default logging setup to print json formatted logging to
the console. This is used as a default for when baseline/mead is used
as an API. This can be overridden with the logging config.
The level defaults to `INFO` but can also be read from an env var
of you choice with a back off to `LOG_LEVEL`
:param name: `str` The logger to create.
:param level: `str` The level to look for.
:param env_key: `str` The env var to look in.
:returns: logging.Logger<|endoftext|> |
960a3cf9516349d74779e3a3f1fbcd0bed9c108212d4377a495c7fc12dce5a00 | @exporter
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
'Create a CRF mask.\n\n Returns a mask with invalid moves as 0 and valid as 1.\n\n :param vocab: dict, Label vocabulary mapping name to index.\n :param span_type: str, The sequence labeling formalism {IOB, IOB2, BIO, or IOBES}\n :param s_idx: int, What is the index of the GO symbol?\n :param e_idx: int, What is the index of the EOS symbol?\n :param pad_idx: int, What is the index of the PAD symbol?\n\n Note:\n In this mask the PAD symbol is between the last symbol and EOS, PADS can\n only move to pad and the EOS. Any symbol that can move to an EOS can also\n move to a pad.\n '
rev_lut = {v: k for (k, v) in vocab.items()}
start = rev_lut[s_idx]
end = rev_lut[e_idx]
pad = (None if (pad_idx is None) else rev_lut[pad_idx])
if (span_type.upper() == 'IOB'):
mask = iob_mask(vocab, start, end, pad)
if ((span_type.upper() == 'IOB2') or (span_type.upper() == 'BIO')):
mask = iob2_mask(vocab, start, end, pad)
if (span_type.upper() == 'IOBES'):
mask = iobes_mask(vocab, start, end, pad)
return mask | Create a CRF mask.
Returns a mask with invalid moves as 0 and valid as 1.
:param vocab: dict, Label vocabulary mapping name to index.
:param span_type: str, The sequence labeling formalism {IOB, IOB2, BIO, or IOBES}
:param s_idx: int, What is the index of the GO symbol?
:param e_idx: int, What is the index of the EOS symbol?
:param pad_idx: int, What is the index of the PAD symbol?
Note:
In this mask the PAD symbol is between the last symbol and EOS, PADS can
only move to pad and the EOS. Any symbol that can move to an EOS can also
move to a pad. | python/baseline/utils.py | transition_mask | amyhemmeter/baseline | 0 | python | @exporter
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
'Create a CRF mask.\n\n Returns a mask with invalid moves as 0 and valid as 1.\n\n :param vocab: dict, Label vocabulary mapping name to index.\n :param span_type: str, The sequence labeling formalism {IOB, IOB2, BIO, or IOBES}\n :param s_idx: int, What is the index of the GO symbol?\n :param e_idx: int, What is the index of the EOS symbol?\n :param pad_idx: int, What is the index of the PAD symbol?\n\n Note:\n In this mask the PAD symbol is between the last symbol and EOS, PADS can\n only move to pad and the EOS. Any symbol that can move to an EOS can also\n move to a pad.\n '
rev_lut = {v: k for (k, v) in vocab.items()}
start = rev_lut[s_idx]
end = rev_lut[e_idx]
pad = (None if (pad_idx is None) else rev_lut[pad_idx])
if (span_type.upper() == 'IOB'):
mask = iob_mask(vocab, start, end, pad)
if ((span_type.upper() == 'IOB2') or (span_type.upper() == 'BIO')):
mask = iob2_mask(vocab, start, end, pad)
if (span_type.upper() == 'IOBES'):
mask = iobes_mask(vocab, start, end, pad)
return mask | @exporter
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
'Create a CRF mask.\n\n Returns a mask with invalid moves as 0 and valid as 1.\n\n :param vocab: dict, Label vocabulary mapping name to index.\n :param span_type: str, The sequence labeling formalism {IOB, IOB2, BIO, or IOBES}\n :param s_idx: int, What is the index of the GO symbol?\n :param e_idx: int, What is the index of the EOS symbol?\n :param pad_idx: int, What is the index of the PAD symbol?\n\n Note:\n In this mask the PAD symbol is between the last symbol and EOS, PADS can\n only move to pad and the EOS. Any symbol that can move to an EOS can also\n move to a pad.\n '
rev_lut = {v: k for (k, v) in vocab.items()}
start = rev_lut[s_idx]
end = rev_lut[e_idx]
pad = (None if (pad_idx is None) else rev_lut[pad_idx])
if (span_type.upper() == 'IOB'):
mask = iob_mask(vocab, start, end, pad)
if ((span_type.upper() == 'IOB2') or (span_type.upper() == 'BIO')):
mask = iob2_mask(vocab, start, end, pad)
if (span_type.upper() == 'IOBES'):
mask = iobes_mask(vocab, start, end, pad)
return mask<|docstring|>Create a CRF mask.
Returns a mask with invalid moves as 0 and valid as 1.
:param vocab: dict, Label vocabulary mapping name to index.
:param span_type: str, The sequence labeling formalism {IOB, IOB2, BIO, or IOBES}
:param s_idx: int, What is the index of the GO symbol?
:param e_idx: int, What is the index of the EOS symbol?
:param pad_idx: int, What is the index of the PAD symbol?
Note:
In this mask the PAD symbol is between the last symbol and EOS, PADS can
only move to pad and the EOS. Any symbol that can move to an EOS can also
move to a pad.<|endoftext|> |
21ae66f10eaf54409bd49c52e97ee1b2045ff36c99fa5d197938ffd7a2817f85 | @exporter
def listify(x):
'Take a scalar or list and make it a list iff not already a sequence or numpy array\n\n :param x: The input to convert\n :return: A list\n '
if (is_sequence(x) or isinstance(x, np.ndarray)):
return x
return ([x] if (x is not None) else []) | Take a scalar or list and make it a list iff not already a sequence or numpy array
:param x: The input to convert
:return: A list | python/baseline/utils.py | listify | amyhemmeter/baseline | 0 | python | @exporter
def listify(x):
'Take a scalar or list and make it a list iff not already a sequence or numpy array\n\n :param x: The input to convert\n :return: A list\n '
if (is_sequence(x) or isinstance(x, np.ndarray)):
return x
return ([x] if (x is not None) else []) | @exporter
def listify(x):
'Take a scalar or list and make it a list iff not already a sequence or numpy array\n\n :param x: The input to convert\n :return: A list\n '
if (is_sequence(x) or isinstance(x, np.ndarray)):
return x
return ([x] if (x is not None) else [])<|docstring|>Take a scalar or list and make it a list iff not already a sequence or numpy array
:param x: The input to convert
:return: A list<|endoftext|> |
bfb356fe22ee5df99e008b44f7198430fc2949c61f8528dca6edf4800ba32419 | @exporter
def read_json(filepath, default_value=None, strict=False):
"Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read JSON object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
return json.load(f) | Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error
:param filepath: str, A file to load
:param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.
:param strict: bool, If true raise an error on file not found.
:return: dict, The read JSON object | python/baseline/utils.py | read_json | amyhemmeter/baseline | 0 | python | @exporter
def read_json(filepath, default_value=None, strict=False):
"Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read JSON object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
return json.load(f) | @exporter
def read_json(filepath, default_value=None, strict=False):
"Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read JSON object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
return json.load(f)<|docstring|>Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error
:param filepath: str, A file to load
:param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.
:param strict: bool, If true raise an error on file not found.
:return: dict, The read JSON object<|endoftext|> |
8fc8dd3aad842ebc0b60b9f429dd8ef35b61e979d606385f5c5dae9950a2818c | @exporter
def read_yaml(filepath, default_value=None, strict=False):
"Read a YAML file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read yaml object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
import yaml
from distutils.version import LooseVersion
if (LooseVersion(yaml.__version__) >= LooseVersion('5.1')):
return yaml.load(f, Loader=yaml.FullLoader)
return yaml.load(f) | Read a YAML file in. If no file is found and default value is set, return that instead. Otherwise error
:param filepath: str, A file to load
:param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.
:param strict: bool, If true raise an error on file not found.
:return: dict, The read yaml object | python/baseline/utils.py | read_yaml | amyhemmeter/baseline | 0 | python | @exporter
def read_yaml(filepath, default_value=None, strict=False):
"Read a YAML file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read yaml object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
import yaml
from distutils.version import LooseVersion
if (LooseVersion(yaml.__version__) >= LooseVersion('5.1')):
return yaml.load(f, Loader=yaml.FullLoader)
return yaml.load(f) | @exporter
def read_yaml(filepath, default_value=None, strict=False):
"Read a YAML file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: str, A file to load\n :param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.\n :param strict: bool, If true raise an error on file not found.\n\n :return: dict, The read yaml object\n "
if (not os.path.exists(filepath)):
if strict:
raise IOError('No file {} found'.format(filepath))
return (default_value if (default_value is not None) else {})
with open(filepath) as f:
import yaml
from distutils.version import LooseVersion
if (LooseVersion(yaml.__version__) >= LooseVersion('5.1')):
return yaml.load(f, Loader=yaml.FullLoader)
return yaml.load(f)<|docstring|>Read a YAML file in. If no file is found and default value is set, return that instead. Otherwise error
:param filepath: str, A file to load
:param default_value: If the file doesn't exist, return return this. Defaults to an empty dict.
:param strict: bool, If true raise an error on file not found.
:return: dict, The read yaml object<|endoftext|> |
19c250158bd2412a835729a43bc6b3802a13a95ac2877121911653acd7cd0b0f | @exporter
def read_config_file(config_file):
'Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. JSON plz\n\n :param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed\n :return: (``dict``) An object\n '
if (config_file.endswith('.yml') or config_file.endswith('.yaml')):
return read_yaml(config_file, strict=True)
return read_json(config_file, strict=True) | Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. JSON plz
:param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed
:return: (``dict``) An object | python/baseline/utils.py | read_config_file | amyhemmeter/baseline | 0 | python | @exporter
def read_config_file(config_file):
'Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. JSON plz\n\n :param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed\n :return: (``dict``) An object\n '
if (config_file.endswith('.yml') or config_file.endswith('.yaml')):
return read_yaml(config_file, strict=True)
return read_json(config_file, strict=True) | @exporter
def read_config_file(config_file):
'Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. JSON plz\n\n :param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed\n :return: (``dict``) An object\n '
if (config_file.endswith('.yml') or config_file.endswith('.yaml')):
return read_yaml(config_file, strict=True)
return read_json(config_file, strict=True)<|docstring|>Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. JSON plz
:param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed
:return: (``dict``) An object<|endoftext|> |
6ae3069000627b8ffa7ee32b9c0eeddd7f74c1fe27cf7831b7c146de57f05055 | @exporter
def read_config_stream(config_stream):
'Read a config stream. This may be a path to a YAML or JSON file, or it may be a str containing JSON or the name\n of an env variable, or even a JSON object directly\n\n :param config_stream:\n :return:\n '
if (isinstance(config_stream, (dict, list)) or (config_stream is None)):
return config_stream
if (os.path.exists(config_stream) and os.path.isfile(config_stream)):
logger.info("Reading config file '{}'".format(config_stream))
return read_config_file(config_stream)
config = config_stream
if config_stream.startswith('$'):
logger.info("Reading config from '{}'".format(config_stream))
config = os.getenv(config_stream[1:])
elif validate_url(config_stream):
(path_to_save, _) = urlretrieve(config_stream)
return read_config_stream(path_to_save)
else:
logger.info("No file found '{}...', loading as string".format(config_stream[:12]))
return json.loads(config) | Read a config stream. This may be a path to a YAML or JSON file, or it may be a str containing JSON or the name
of an env variable, or even a JSON object directly
:param config_stream:
:return: | python/baseline/utils.py | read_config_stream | amyhemmeter/baseline | 0 | python | @exporter
def read_config_stream(config_stream):
'Read a config stream. This may be a path to a YAML or JSON file, or it may be a str containing JSON or the name\n of an env variable, or even a JSON object directly\n\n :param config_stream:\n :return:\n '
if (isinstance(config_stream, (dict, list)) or (config_stream is None)):
return config_stream
if (os.path.exists(config_stream) and os.path.isfile(config_stream)):
logger.info("Reading config file '{}'".format(config_stream))
return read_config_file(config_stream)
config = config_stream
if config_stream.startswith('$'):
logger.info("Reading config from '{}'".format(config_stream))
config = os.getenv(config_stream[1:])
elif validate_url(config_stream):
(path_to_save, _) = urlretrieve(config_stream)
return read_config_stream(path_to_save)
else:
logger.info("No file found '{}...', loading as string".format(config_stream[:12]))
return json.loads(config) | @exporter
def read_config_stream(config_stream):
'Read a config stream. This may be a path to a YAML or JSON file, or it may be a str containing JSON or the name\n of an env variable, or even a JSON object directly\n\n :param config_stream:\n :return:\n '
if (isinstance(config_stream, (dict, list)) or (config_stream is None)):
return config_stream
if (os.path.exists(config_stream) and os.path.isfile(config_stream)):
logger.info("Reading config file '{}'".format(config_stream))
return read_config_file(config_stream)
config = config_stream
if config_stream.startswith('$'):
logger.info("Reading config from '{}'".format(config_stream))
config = os.getenv(config_stream[1:])
elif validate_url(config_stream):
(path_to_save, _) = urlretrieve(config_stream)
return read_config_stream(path_to_save)
else:
logger.info("No file found '{}...', loading as string".format(config_stream[:12]))
return json.loads(config)<|docstring|>Read a config stream. This may be a path to a YAML or JSON file, or it may be a str containing JSON or the name
of an env variable, or even a JSON object directly
:param config_stream:
:return:<|endoftext|> |
482c4526a80454302c5fd5c2ba813d56e151af7b8c56df873c613b1a26ccda6c | @exporter
def ls_props(thing):
'List all of the properties on some object\n\n :param thing: Some object\n :return: The list of properties\n '
return [x for x in dir(thing) if isinstance(getattr(type(thing), x, None), property)] | List all of the properties on some object
:param thing: Some object
:return: The list of properties | python/baseline/utils.py | ls_props | amyhemmeter/baseline | 0 | python | @exporter
def ls_props(thing):
'List all of the properties on some object\n\n :param thing: Some object\n :return: The list of properties\n '
return [x for x in dir(thing) if isinstance(getattr(type(thing), x, None), property)] | @exporter
def ls_props(thing):
'List all of the properties on some object\n\n :param thing: Some object\n :return: The list of properties\n '
return [x for x in dir(thing) if isinstance(getattr(type(thing), x, None), property)]<|docstring|>List all of the properties on some object
:param thing: Some object
:return: The list of properties<|endoftext|> |
d2b93ef72a9b23a948d1ff4dfc1952bf2ba44c7516a161a1a8d031cdbc6ac953 | def _idempotent_append(element, data):
'Append to a list if that element is not already in the list.\n\n :param element: The element to add to the list.\n :param data: `List` the list to add to.\n :returns: `List` the list with the element in it.\n '
if (element not in data):
data.append(element)
return data | Append to a list if that element is not already in the list.
:param element: The element to add to the list.
:param data: `List` the list to add to.
:returns: `List` the list with the element in it. | python/baseline/utils.py | _idempotent_append | amyhemmeter/baseline | 0 | python | def _idempotent_append(element, data):
'Append to a list if that element is not already in the list.\n\n :param element: The element to add to the list.\n :param data: `List` the list to add to.\n :returns: `List` the list with the element in it.\n '
if (element not in data):
data.append(element)
return data | def _idempotent_append(element, data):
'Append to a list if that element is not already in the list.\n\n :param element: The element to add to the list.\n :param data: `List` the list to add to.\n :returns: `List` the list with the element in it.\n '
if (element not in data):
data.append(element)
return data<|docstring|>Append to a list if that element is not already in the list.
:param element: The element to add to the list.
:param data: `List` the list to add to.
:returns: `List` the list with the element in it.<|endoftext|> |
4ea4d25028d74b10ed8be248b7a98e8cb13977b57f1799e9c98c4f2e3debe8d0 | def _parse_module_as_path(module_name):
"Convert a path to a file to a format that it can be imported.\n\n :param module_name: The module as a path.\n :returns: `Tuple[str, str]` the module name (without a file ext) and the\n absolute path of the dir the file lives in (or '' if the module_name\n is just a filename).\n "
(module_dir, module_name) = os.path.split(module_name)
module_dir = (os.path.realpath(os.path.expanduser(module_dir)) if module_dir else module_dir)
(module_name, _) = os.path.splitext(module_name)
return (module_name, module_dir) | Convert a path to a file to a format that it can be imported.
:param module_name: The module as a path.
:returns: `Tuple[str, str]` the module name (without a file ext) and the
absolute path of the dir the file lives in (or '' if the module_name
is just a filename). | python/baseline/utils.py | _parse_module_as_path | amyhemmeter/baseline | 0 | python | def _parse_module_as_path(module_name):
"Convert a path to a file to a format that it can be imported.\n\n :param module_name: The module as a path.\n :returns: `Tuple[str, str]` the module name (without a file ext) and the\n absolute path of the dir the file lives in (or if the module_name\n is just a filename).\n "
(module_dir, module_name) = os.path.split(module_name)
module_dir = (os.path.realpath(os.path.expanduser(module_dir)) if module_dir else module_dir)
(module_name, _) = os.path.splitext(module_name)
return (module_name, module_dir) | def _parse_module_as_path(module_name):
"Convert a path to a file to a format that it can be imported.\n\n :param module_name: The module as a path.\n :returns: `Tuple[str, str]` the module name (without a file ext) and the\n absolute path of the dir the file lives in (or if the module_name\n is just a filename).\n "
(module_dir, module_name) = os.path.split(module_name)
module_dir = (os.path.realpath(os.path.expanduser(module_dir)) if module_dir else module_dir)
(module_name, _) = os.path.splitext(module_name)
return (module_name, module_dir)<|docstring|>Convert a path to a file to a format that it can be imported.
:param module_name: The module as a path.
:returns: `Tuple[str, str]` the module name (without a file ext) and the
absolute path of the dir the file lives in (or '' if the module_name
is just a filename).<|endoftext|> |
b7c29ae47f9842f6c697a4b281a55081a36354a0739c38ed34fb0a604e297070 | @exporter
def import_user_module(module_name):
'Load a module that is in the python path\n\n :param model_name: (``str``) - the name of the module\n :return:\n '
addon_path = os.path.dirname(os.path.realpath(addons.__file__))
_idempotent_append(addon_path, sys.path)
if any((module_name.endswith(suffix) for suffix in importlib.machinery.SOURCE_SUFFIXES)):
module_path = module_name
(module_name, _) = _parse_module_as_path(module_path)
spec = importlib.util.spec_from_file_location(module_name, module_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
sys.modules[module_name] = mod
return mod
mod = importlib.import_module(module_name)
return mod | Load a module that is in the python path
:param model_name: (``str``) - the name of the module
:return: | python/baseline/utils.py | import_user_module | amyhemmeter/baseline | 0 | python | @exporter
def import_user_module(module_name):
'Load a module that is in the python path\n\n :param model_name: (``str``) - the name of the module\n :return:\n '
addon_path = os.path.dirname(os.path.realpath(addons.__file__))
_idempotent_append(addon_path, sys.path)
if any((module_name.endswith(suffix) for suffix in importlib.machinery.SOURCE_SUFFIXES)):
module_path = module_name
(module_name, _) = _parse_module_as_path(module_path)
spec = importlib.util.spec_from_file_location(module_name, module_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
sys.modules[module_name] = mod
return mod
mod = importlib.import_module(module_name)
return mod | @exporter
def import_user_module(module_name):
'Load a module that is in the python path\n\n :param model_name: (``str``) - the name of the module\n :return:\n '
addon_path = os.path.dirname(os.path.realpath(addons.__file__))
_idempotent_append(addon_path, sys.path)
if any((module_name.endswith(suffix) for suffix in importlib.machinery.SOURCE_SUFFIXES)):
module_path = module_name
(module_name, _) = _parse_module_as_path(module_path)
spec = importlib.util.spec_from_file_location(module_name, module_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
sys.modules[module_name] = mod
return mod
mod = importlib.import_module(module_name)
return mod<|docstring|>Load a module that is in the python path
:param model_name: (``str``) - the name of the module
:return:<|endoftext|> |
51203f0b36f36173d839dc50fa0de0c22f0b7a0aff2fb280ff527c90aadb3f2f | @exporter
def get_model_file(task, platform, basedir=None):
'Model name file helper to abstract different DL platforms (FWs)\n\n :param dictionary:\n :param task:\n :param platform:\n :return:\n '
basedir = ('./' if (basedir is None) else basedir)
base = '{}/{}-model'.format(basedir, task)
rid = os.getpid()
if platform.startswith('pyt'):
name = ('%s-%d.pyt' % (base, rid))
else:
name = ('%s-%s-%d' % (base, platform, rid))
logger.info(('model file [%s]' % name))
return name | Model name file helper to abstract different DL platforms (FWs)
:param dictionary:
:param task:
:param platform:
:return: | python/baseline/utils.py | get_model_file | amyhemmeter/baseline | 0 | python | @exporter
def get_model_file(task, platform, basedir=None):
'Model name file helper to abstract different DL platforms (FWs)\n\n :param dictionary:\n :param task:\n :param platform:\n :return:\n '
basedir = ('./' if (basedir is None) else basedir)
base = '{}/{}-model'.format(basedir, task)
rid = os.getpid()
if platform.startswith('pyt'):
name = ('%s-%d.pyt' % (base, rid))
else:
name = ('%s-%s-%d' % (base, platform, rid))
logger.info(('model file [%s]' % name))
return name | @exporter
def get_model_file(task, platform, basedir=None):
'Model name file helper to abstract different DL platforms (FWs)\n\n :param dictionary:\n :param task:\n :param platform:\n :return:\n '
basedir = ('./' if (basedir is None) else basedir)
base = '{}/{}-model'.format(basedir, task)
rid = os.getpid()
if platform.startswith('pyt'):
name = ('%s-%d.pyt' % (base, rid))
else:
name = ('%s-%s-%d' % (base, platform, rid))
logger.info(('model file [%s]' % name))
return name<|docstring|>Model name file helper to abstract different DL platforms (FWs)
:param dictionary:
:param task:
:param platform:
:return:<|endoftext|> |
9eda99a84e1f479d4f83def0dd63083cf2a8521b3405beb09342cceffd074a7a | @exporter
def lookup_sentence(rlut, seq, reverse=False, padchar=''):
'Lookup a sentence by id and return words\n\n :param rlut: an index -> word lookup table\n :param seq: A temporal sequence\n :param reverse: (``bool``) Should reverse?\n :param padchar: What padding character to use when replacing with words\n :return:\n '
s = (seq[::(- 1)] if reverse else seq)
res = []
for idx in s:
idx = int(idx)
char = padchar
if (idx == Offsets.EOS):
break
if ((idx != Offsets.PAD) and (idx != Offsets.GO)):
char = rlut[idx]
res.append(char)
return ' '.join(res).strip() | Lookup a sentence by id and return words
:param rlut: an index -> word lookup table
:param seq: A temporal sequence
:param reverse: (``bool``) Should reverse?
:param padchar: What padding character to use when replacing with words
:return: | python/baseline/utils.py | lookup_sentence | amyhemmeter/baseline | 0 | python | @exporter
def lookup_sentence(rlut, seq, reverse=False, padchar=):
'Lookup a sentence by id and return words\n\n :param rlut: an index -> word lookup table\n :param seq: A temporal sequence\n :param reverse: (``bool``) Should reverse?\n :param padchar: What padding character to use when replacing with words\n :return:\n '
s = (seq[::(- 1)] if reverse else seq)
res = []
for idx in s:
idx = int(idx)
char = padchar
if (idx == Offsets.EOS):
break
if ((idx != Offsets.PAD) and (idx != Offsets.GO)):
char = rlut[idx]
res.append(char)
return ' '.join(res).strip() | @exporter
def lookup_sentence(rlut, seq, reverse=False, padchar=):
'Lookup a sentence by id and return words\n\n :param rlut: an index -> word lookup table\n :param seq: A temporal sequence\n :param reverse: (``bool``) Should reverse?\n :param padchar: What padding character to use when replacing with words\n :return:\n '
s = (seq[::(- 1)] if reverse else seq)
res = []
for idx in s:
idx = int(idx)
char = padchar
if (idx == Offsets.EOS):
break
if ((idx != Offsets.PAD) and (idx != Offsets.GO)):
char = rlut[idx]
res.append(char)
return ' '.join(res).strip()<|docstring|>Lookup a sentence by id and return words
:param rlut: an index -> word lookup table
:param seq: A temporal sequence
:param reverse: (``bool``) Should reverse?
:param padchar: What padding character to use when replacing with words
:return:<|endoftext|> |
f3be8ad8d93f05761792f8fa12a49c74d9d54240b2b69392d5b453e67a109330 | @exporter
def topk(k, probs):
'Get a sparse index (dictionary of top values).'
idx = np.argpartition(probs, (probs.size - k))[(- k):]
sort = idx[np.argsort(probs[idx])][::(- 1)]
return dict(zip(sort, probs[sort])) | Get a sparse index (dictionary of top values). | python/baseline/utils.py | topk | amyhemmeter/baseline | 0 | python | @exporter
def topk(k, probs):
idx = np.argpartition(probs, (probs.size - k))[(- k):]
sort = idx[np.argsort(probs[idx])][::(- 1)]
return dict(zip(sort, probs[sort])) | @exporter
def topk(k, probs):
idx = np.argpartition(probs, (probs.size - k))[(- k):]
sort = idx[np.argsort(probs[idx])][::(- 1)]
return dict(zip(sort, probs[sort]))<|docstring|>Get a sparse index (dictionary of top values).<|endoftext|> |
18f20c0b9a7b83ffd84fe041231fafe82603c183716c63264ac2296cf45bd3c6 | @exporter
def beam_multinomial(k, probs):
'Prune all elements in a large probability distribution below the top K.\n\n Renormalize the distribution with only top K, and then sample n times out of that.\n '
tops = topk(k, probs)
i = 0
n = len(tops.keys())
ary = np.zeros(n)
idx = []
for (abs_idx, v) in tops.items():
ary[i] = v
idx.append(abs_idx)
i += 1
ary /= np.sum(ary)
sample_idx = np.argmax(np.random.multinomial(1, ary))
return idx[sample_idx] | Prune all elements in a large probability distribution below the top K.
Renormalize the distribution with only top K, and then sample n times out of that. | python/baseline/utils.py | beam_multinomial | amyhemmeter/baseline | 0 | python | @exporter
def beam_multinomial(k, probs):
'Prune all elements in a large probability distribution below the top K.\n\n Renormalize the distribution with only top K, and then sample n times out of that.\n '
tops = topk(k, probs)
i = 0
n = len(tops.keys())
ary = np.zeros(n)
idx = []
for (abs_idx, v) in tops.items():
ary[i] = v
idx.append(abs_idx)
i += 1
ary /= np.sum(ary)
sample_idx = np.argmax(np.random.multinomial(1, ary))
return idx[sample_idx] | @exporter
def beam_multinomial(k, probs):
'Prune all elements in a large probability distribution below the top K.\n\n Renormalize the distribution with only top K, and then sample n times out of that.\n '
tops = topk(k, probs)
i = 0
n = len(tops.keys())
ary = np.zeros(n)
idx = []
for (abs_idx, v) in tops.items():
ary[i] = v
idx.append(abs_idx)
i += 1
ary /= np.sum(ary)
sample_idx = np.argmax(np.random.multinomial(1, ary))
return idx[sample_idx]<|docstring|>Prune all elements in a large probability distribution below the top K.
Renormalize the distribution with only top K, and then sample n times out of that.<|endoftext|> |
ee5fd9443e0e658628be9cd437dd80a3bab1a424efad48c31db22ab035341a71 | @exporter
def fill_y(nc, yidx):
'Convert a `B` sparse array to a dense one, to expand labels\n\n :param nc: (``int``) The number of labels\n :param yidx: The sparse array of the labels\n :return: A dense array\n '
xidx = np.arange(0, yidx.shape[0], 1)
dense = np.zeros((yidx.shape[0], nc), dtype=int)
dense[(xidx, yidx)] = 1
return dense | Convert a `B` sparse array to a dense one, to expand labels
:param nc: (``int``) The number of labels
:param yidx: The sparse array of the labels
:return: A dense array | python/baseline/utils.py | fill_y | amyhemmeter/baseline | 0 | python | @exporter
def fill_y(nc, yidx):
'Convert a `B` sparse array to a dense one, to expand labels\n\n :param nc: (``int``) The number of labels\n :param yidx: The sparse array of the labels\n :return: A dense array\n '
xidx = np.arange(0, yidx.shape[0], 1)
dense = np.zeros((yidx.shape[0], nc), dtype=int)
dense[(xidx, yidx)] = 1
return dense | @exporter
def fill_y(nc, yidx):
'Convert a `B` sparse array to a dense one, to expand labels\n\n :param nc: (``int``) The number of labels\n :param yidx: The sparse array of the labels\n :return: A dense array\n '
xidx = np.arange(0, yidx.shape[0], 1)
dense = np.zeros((yidx.shape[0], nc), dtype=int)
dense[(xidx, yidx)] = 1
return dense<|docstring|>Convert a `B` sparse array to a dense one, to expand labels
:param nc: (``int``) The number of labels
:param yidx: The sparse array of the labels
:return: A dense array<|endoftext|> |
4f3c61b67894f1f428ac74126f8ab3697e1fabb7be077d52c2cb276fa5963791 | @exporter
@optional_params
def str_file(func, **kwargs):
"A decorator to automatically open arguments that are files.\n\n If there are kwargs then they are name=mode. When the function is\n called if the argument name is a string then the file is opened with\n mode.\n\n If there are no kwargs then it is assumed the first argument is a\n file that should be opened as 'r'\n "
possible_files = kwargs
if inspect.isgeneratorfunction(func):
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
for x in func(f, *args[1:], **kwargs):
(yield x)
else:
for x in func(*args, **kwargs):
(yield x)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
for x in func(**arg):
(yield x)
finally:
for f in to_close:
arg[f].close()
else:
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
return func(f, *args[1:], **kwargs)
else:
return func(*args, **kwargs)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
return func(**arg)
finally:
for f in to_close:
arg[f].close()
return open_files | A decorator to automatically open arguments that are files.
If there are kwargs then they are name=mode. When the function is
called if the argument name is a string then the file is opened with
mode.
If there are no kwargs then it is assumed the first argument is a
file that should be opened as 'r' | python/baseline/utils.py | str_file | amyhemmeter/baseline | 0 | python | @exporter
@optional_params
def str_file(func, **kwargs):
"A decorator to automatically open arguments that are files.\n\n If there are kwargs then they are name=mode. When the function is\n called if the argument name is a string then the file is opened with\n mode.\n\n If there are no kwargs then it is assumed the first argument is a\n file that should be opened as 'r'\n "
possible_files = kwargs
if inspect.isgeneratorfunction(func):
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
for x in func(f, *args[1:], **kwargs):
(yield x)
else:
for x in func(*args, **kwargs):
(yield x)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
for x in func(**arg):
(yield x)
finally:
for f in to_close:
arg[f].close()
else:
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
return func(f, *args[1:], **kwargs)
else:
return func(*args, **kwargs)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
return func(**arg)
finally:
for f in to_close:
arg[f].close()
return open_files | @exporter
@optional_params
def str_file(func, **kwargs):
"A decorator to automatically open arguments that are files.\n\n If there are kwargs then they are name=mode. When the function is\n called if the argument name is a string then the file is opened with\n mode.\n\n If there are no kwargs then it is assumed the first argument is a\n file that should be opened as 'r'\n "
possible_files = kwargs
if inspect.isgeneratorfunction(func):
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
for x in func(f, *args[1:], **kwargs):
(yield x)
else:
for x in func(*args, **kwargs):
(yield x)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
for x in func(**arg):
(yield x)
finally:
for f in to_close:
arg[f].close()
else:
@six.wraps(func)
def open_files(*args, **kwargs):
if (not possible_files):
if isinstance(args[0], six.string_types):
with io.open(args[0], mode='r', encoding='utf-8') as f:
return func(f, *args[1:], **kwargs)
else:
return func(*args, **kwargs)
else:
to_close = []
arg = inspect.getcallargs(func, *args, **kwargs)
try:
for (f, mode) in possible_files.items():
if isinstance(arg[f], six.string_types):
arg[f] = io.open(arg[f], mode=mode, encoding=(None if ('b' in mode) else 'utf-8'))
to_close.append(f)
return func(**arg)
finally:
for f in to_close:
arg[f].close()
return open_files<|docstring|>A decorator to automatically open arguments that are files.
If there are kwargs then they are name=mode. When the function is
called if the argument name is a string then the file is opened with
mode.
If there are no kwargs then it is assumed the first argument is a
file that should be opened as 'r'<|endoftext|> |
d63356f63ee53df64cdeb72ad4ed433378df9428e327eac8dff83d1276b5ec0a | @exporter
def normalize_indices(xs, length):
'Normalize negative indices into positive.\n\n :param xs: `List[int]` The indices.\n :param length: `int` The length of the thing to be indexed\n\n :returns: `List[int]` The indices converted to positive only.\n '
return list(map((lambda x: ((length + x) if (x < 0) else x)), xs)) | Normalize negative indices into positive.
:param xs: `List[int]` The indices.
:param length: `int` The length of the thing to be indexed
:returns: `List[int]` The indices converted to positive only. | python/baseline/utils.py | normalize_indices | amyhemmeter/baseline | 0 | python | @exporter
def normalize_indices(xs, length):
'Normalize negative indices into positive.\n\n :param xs: `List[int]` The indices.\n :param length: `int` The length of the thing to be indexed\n\n :returns: `List[int]` The indices converted to positive only.\n '
return list(map((lambda x: ((length + x) if (x < 0) else x)), xs)) | @exporter
def normalize_indices(xs, length):
'Normalize negative indices into positive.\n\n :param xs: `List[int]` The indices.\n :param length: `int` The length of the thing to be indexed\n\n :returns: `List[int]` The indices converted to positive only.\n '
return list(map((lambda x: ((length + x) if (x < 0) else x)), xs))<|docstring|>Normalize negative indices into positive.
:param xs: `List[int]` The indices.
:param length: `int` The length of the thing to be indexed
:returns: `List[int]` The indices converted to positive only.<|endoftext|> |
092c81b8e24a9ee76b0d135e49cfb039478137668fd015a04065a047dadc17a1 | @exporter
def convert_iob_to_bio(seq):
'Convert a sequence of IOB tags to BIO tags.\n\n The difference between IOB and BIO (also called IOB2) is that in IOB\n the B- prefix is only used to separate two chunks of the same type\n while in BIO the B- prefix is used to start every chunk.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str] The list of BIO tags.\n '
new = []
prev = 'O'
for token in seq:
if token.startswith('I-'):
if ((prev == 'O') or (token[2:] != prev[2:])):
token = ('B-' + token[2:])
new.append(token)
prev = token
return new | Convert a sequence of IOB tags to BIO tags.
The difference between IOB and BIO (also called IOB2) is that in IOB
the B- prefix is only used to separate two chunks of the same type
while in BIO the B- prefix is used to start every chunk.
:param seq: `List[str]` The list of IOB tags.
:returns: `List[str] The list of BIO tags. | python/baseline/utils.py | convert_iob_to_bio | amyhemmeter/baseline | 0 | python | @exporter
def convert_iob_to_bio(seq):
'Convert a sequence of IOB tags to BIO tags.\n\n The difference between IOB and BIO (also called IOB2) is that in IOB\n the B- prefix is only used to separate two chunks of the same type\n while in BIO the B- prefix is used to start every chunk.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str] The list of BIO tags.\n '
new = []
prev = 'O'
for token in seq:
if token.startswith('I-'):
if ((prev == 'O') or (token[2:] != prev[2:])):
token = ('B-' + token[2:])
new.append(token)
prev = token
return new | @exporter
def convert_iob_to_bio(seq):
'Convert a sequence of IOB tags to BIO tags.\n\n The difference between IOB and BIO (also called IOB2) is that in IOB\n the B- prefix is only used to separate two chunks of the same type\n while in BIO the B- prefix is used to start every chunk.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str] The list of BIO tags.\n '
new = []
prev = 'O'
for token in seq:
if token.startswith('I-'):
if ((prev == 'O') or (token[2:] != prev[2:])):
token = ('B-' + token[2:])
new.append(token)
prev = token
return new<|docstring|>Convert a sequence of IOB tags to BIO tags.
The difference between IOB and BIO (also called IOB2) is that in IOB
the B- prefix is only used to separate two chunks of the same type
while in BIO the B- prefix is used to start every chunk.
:param seq: `List[str]` The list of IOB tags.
:returns: `List[str] The list of BIO tags.<|endoftext|> |
722518f033f4168717a64bf84ead71e9798965d1b385ad9ac181f8d3666b80df | @exporter
def convert_bio_to_iob(seq):
'Convert a sequence of BIO tags to IOB tags.\n\n The difference between BIO and IOB is that in IOB the B- prefix is only\n used to separate two chunks of the same type while in BIO the B- prefix\n starts every chunk. To convert we only need to look at the B- tokens.\n If they are following a chunk of the same type we leave it as a B-\n otherwise it converts it back to an I-\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
new = []
prev_ty = 'O'
for token in seq:
ty = ('O' if (token == 'O') else token[2:])
if token.startswith('B-'):
if (prev_ty != ty):
token = ('I-' + ty)
new.append(token)
prev_ty = ty
return new | Convert a sequence of BIO tags to IOB tags.
The difference between BIO and IOB is that in IOB the B- prefix is only
used to separate two chunks of the same type while in BIO the B- prefix
starts every chunk. To convert we only need to look at the B- tokens.
If they are following a chunk of the same type we leave it as a B-
otherwise it converts it back to an I-
:param seq: `List[str]` The list of BIO tags.
:returns: `List[str]` The list of IOB tags. | python/baseline/utils.py | convert_bio_to_iob | amyhemmeter/baseline | 0 | python | @exporter
def convert_bio_to_iob(seq):
'Convert a sequence of BIO tags to IOB tags.\n\n The difference between BIO and IOB is that in IOB the B- prefix is only\n used to separate two chunks of the same type while in BIO the B- prefix\n starts every chunk. To convert we only need to look at the B- tokens.\n If they are following a chunk of the same type we leave it as a B-\n otherwise it converts it back to an I-\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
new = []
prev_ty = 'O'
for token in seq:
ty = ('O' if (token == 'O') else token[2:])
if token.startswith('B-'):
if (prev_ty != ty):
token = ('I-' + ty)
new.append(token)
prev_ty = ty
return new | @exporter
def convert_bio_to_iob(seq):
'Convert a sequence of BIO tags to IOB tags.\n\n The difference between BIO and IOB is that in IOB the B- prefix is only\n used to separate two chunks of the same type while in BIO the B- prefix\n starts every chunk. To convert we only need to look at the B- tokens.\n If they are following a chunk of the same type we leave it as a B-\n otherwise it converts it back to an I-\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
new = []
prev_ty = 'O'
for token in seq:
ty = ('O' if (token == 'O') else token[2:])
if token.startswith('B-'):
if (prev_ty != ty):
token = ('I-' + ty)
new.append(token)
prev_ty = ty
return new<|docstring|>Convert a sequence of BIO tags to IOB tags.
The difference between BIO and IOB is that in IOB the B- prefix is only
used to separate two chunks of the same type while in BIO the B- prefix
starts every chunk. To convert we only need to look at the B- tokens.
If they are following a chunk of the same type we leave it as a B-
otherwise it converts it back to an I-
:param seq: `List[str]` The list of BIO tags.
:returns: `List[str]` The list of IOB tags.<|endoftext|> |
9083845835a9b51834e2d84dfc40369092579dda53838119a52e1df812adfc27 | @exporter
def convert_bio_to_iobes(seq):
'Convert a sequence of BIO tags to IOBES tags.\n\n The difference between BIO and IOBES tags is that in IOBES the end\n of a multi-token entity is marked with the E- prefix while in BIO\n it would end with an I- prefix.\n\n The other difference is that a single token entity in BIO is a\n just a B- whereas in IOBES it uses the special S- prefix.\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
new = []
for (c, n) in zip(seq, chain(seq[1:], ['O'])):
if c.startswith('B-'):
if (n == c.replace('B-', 'I-')):
new.append(c)
else:
new.append(c.replace('B-', 'S-'))
elif c.startswith('I-'):
if (n == c):
new.append(c)
else:
new.append(c.replace('I-', 'E-'))
else:
new.append(c)
return new | Convert a sequence of BIO tags to IOBES tags.
The difference between BIO and IOBES tags is that in IOBES the end
of a multi-token entity is marked with the E- prefix while in BIO
it would end with an I- prefix.
The other difference is that a single token entity in BIO is a
just a B- whereas in IOBES it uses the special S- prefix.
:param seq: `List[str]` The list of BIO tags.
:returns: `List[str]` The list of IOBES tags. | python/baseline/utils.py | convert_bio_to_iobes | amyhemmeter/baseline | 0 | python | @exporter
def convert_bio_to_iobes(seq):
'Convert a sequence of BIO tags to IOBES tags.\n\n The difference between BIO and IOBES tags is that in IOBES the end\n of a multi-token entity is marked with the E- prefix while in BIO\n it would end with an I- prefix.\n\n The other difference is that a single token entity in BIO is a\n just a B- whereas in IOBES it uses the special S- prefix.\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
new = []
for (c, n) in zip(seq, chain(seq[1:], ['O'])):
if c.startswith('B-'):
if (n == c.replace('B-', 'I-')):
new.append(c)
else:
new.append(c.replace('B-', 'S-'))
elif c.startswith('I-'):
if (n == c):
new.append(c)
else:
new.append(c.replace('I-', 'E-'))
else:
new.append(c)
return new | @exporter
def convert_bio_to_iobes(seq):
'Convert a sequence of BIO tags to IOBES tags.\n\n The difference between BIO and IOBES tags is that in IOBES the end\n of a multi-token entity is marked with the E- prefix while in BIO\n it would end with an I- prefix.\n\n The other difference is that a single token entity in BIO is a\n just a B- whereas in IOBES it uses the special S- prefix.\n\n :param seq: `List[str]` The list of BIO tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
new = []
for (c, n) in zip(seq, chain(seq[1:], ['O'])):
if c.startswith('B-'):
if (n == c.replace('B-', 'I-')):
new.append(c)
else:
new.append(c.replace('B-', 'S-'))
elif c.startswith('I-'):
if (n == c):
new.append(c)
else:
new.append(c.replace('I-', 'E-'))
else:
new.append(c)
return new<|docstring|>Convert a sequence of BIO tags to IOBES tags.
The difference between BIO and IOBES tags is that in IOBES the end
of a multi-token entity is marked with the E- prefix while in BIO
it would end with an I- prefix.
The other difference is that a single token entity in BIO is a
just a B- whereas in IOBES it uses the special S- prefix.
:param seq: `List[str]` The list of BIO tags.
:returns: `List[str]` The list of IOBES tags.<|endoftext|> |
56b1175125f7f377fd851540d11ba91a20110602d8e8266bb71a4c7c14d27532 | @exporter
def convert_iobes_to_bio(seq):
'Convert a sequence of IOBES tags to BIO tags\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of BIO tags.\n '
return list(map((lambda x: re.sub('^S-', 'B-', re.sub('^E-', 'I-', x))), seq)) | Convert a sequence of IOBES tags to BIO tags
:param seq: `List[str]` The list of IOBES tags.
:returns: `List[str]` The list of BIO tags. | python/baseline/utils.py | convert_iobes_to_bio | amyhemmeter/baseline | 0 | python | @exporter
def convert_iobes_to_bio(seq):
'Convert a sequence of IOBES tags to BIO tags\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of BIO tags.\n '
return list(map((lambda x: re.sub('^S-', 'B-', re.sub('^E-', 'I-', x))), seq)) | @exporter
def convert_iobes_to_bio(seq):
'Convert a sequence of IOBES tags to BIO tags\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of BIO tags.\n '
return list(map((lambda x: re.sub('^S-', 'B-', re.sub('^E-', 'I-', x))), seq))<|docstring|>Convert a sequence of IOBES tags to BIO tags
:param seq: `List[str]` The list of IOBES tags.
:returns: `List[str]` The list of BIO tags.<|endoftext|> |
f846081975ce7f69651158025feab98dad3e2c32b4afe60d1ec68fc08bb7b800 | @exporter
def convert_iob_to_iobes(seq):
'Convert a sequence of IOB tags to IOBES tags.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
return convert_bio_to_iobes(convert_iob_to_bio(seq)) | Convert a sequence of IOB tags to IOBES tags.
:param seq: `List[str]` The list of IOB tags.
:returns: `List[str]` The list of IOBES tags. | python/baseline/utils.py | convert_iob_to_iobes | amyhemmeter/baseline | 0 | python | @exporter
def convert_iob_to_iobes(seq):
'Convert a sequence of IOB tags to IOBES tags.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
return convert_bio_to_iobes(convert_iob_to_bio(seq)) | @exporter
def convert_iob_to_iobes(seq):
'Convert a sequence of IOB tags to IOBES tags.\n\n :param seq: `List[str]` The list of IOB tags.\n\n :returns: `List[str]` The list of IOBES tags.\n '
return convert_bio_to_iobes(convert_iob_to_bio(seq))<|docstring|>Convert a sequence of IOB tags to IOBES tags.
:param seq: `List[str]` The list of IOB tags.
:returns: `List[str]` The list of IOBES tags.<|endoftext|> |
eb3f195042596ef0326dda3ebf83b578aa01fae347d4aca1431dfe9a56bf7e9d | @exporter
def convert_iobes_to_iob(seq):
'Convert a sequence of IOBES tags to IOB tags.\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
return convert_bio_to_iob(convert_iobes_to_bio(seq)) | Convert a sequence of IOBES tags to IOB tags.
:param seq: `List[str]` The list of IOBES tags.
:returns: `List[str]` The list of IOB tags. | python/baseline/utils.py | convert_iobes_to_iob | amyhemmeter/baseline | 0 | python | @exporter
def convert_iobes_to_iob(seq):
'Convert a sequence of IOBES tags to IOB tags.\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
return convert_bio_to_iob(convert_iobes_to_bio(seq)) | @exporter
def convert_iobes_to_iob(seq):
'Convert a sequence of IOBES tags to IOB tags.\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of IOB tags.\n '
return convert_bio_to_iob(convert_iobes_to_bio(seq))<|docstring|>Convert a sequence of IOBES tags to IOB tags.
:param seq: `List[str]` The list of IOBES tags.
:returns: `List[str]` The list of IOB tags.<|endoftext|> |
a23b5adae8e142fbac0484bcc914476d81ea60ef57d1d76fb32c44d0395ec90f | @str_file
def _sniff_conll_file(f, delim=None):
'Figure out how many columns are in a conll file.\n\n :param file_name: `str` The name of the file.\n :param delim: `str` The token between columns in the file.\n\n :returns: `int` The number of columns in the file.\n '
start = f.tell()
for line in f:
line = line.rstrip('\n')
if line.startswith('#'):
continue
parts = line.split(delim)
if (len(parts) > 1):
f.seek(start)
return len(parts) | Figure out how many columns are in a conll file.
:param file_name: `str` The name of the file.
:param delim: `str` The token between columns in the file.
:returns: `int` The number of columns in the file. | python/baseline/utils.py | _sniff_conll_file | amyhemmeter/baseline | 0 | python | @str_file
def _sniff_conll_file(f, delim=None):
'Figure out how many columns are in a conll file.\n\n :param file_name: `str` The name of the file.\n :param delim: `str` The token between columns in the file.\n\n :returns: `int` The number of columns in the file.\n '
start = f.tell()
for line in f:
line = line.rstrip('\n')
if line.startswith('#'):
continue
parts = line.split(delim)
if (len(parts) > 1):
f.seek(start)
return len(parts) | @str_file
def _sniff_conll_file(f, delim=None):
'Figure out how many columns are in a conll file.\n\n :param file_name: `str` The name of the file.\n :param delim: `str` The token between columns in the file.\n\n :returns: `int` The number of columns in the file.\n '
start = f.tell()
for line in f:
line = line.rstrip('\n')
if line.startswith('#'):
continue
parts = line.split(delim)
if (len(parts) > 1):
f.seek(start)
return len(parts)<|docstring|>Figure out how many columns are in a conll file.
:param file_name: `str` The name of the file.
:param delim: `str` The token between columns in the file.
:returns: `int` The number of columns in the file.<|endoftext|> |
e7987d8f67eae8dd0f99d08af443146972c0ddc640726438ba49220b0ccbed0d | @exporter
@str_file
def read_conll(f, doc_pattern=None, delim=None, metadata=False, allow_comments=False, comment_pattern='#'):
'Read from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` A pattern that matches the line that signals the\n beginning of a new document in the conll file. When None just the\n conll sentences are returned.\n :param delim: `str` The token between columns in the file\n :param metadata: `bool` Should meta data (lines starting with `#` before a\n sentence) be returned with our sentences.\n :param allow_comments: `bool` Are comments (lines starting with `#`) allowed in the file.\n\n :returns: `Generator` The sentences or documents from the file.\n '
if (metadata and (not allow_comments)):
raise ValueError("You have metadata set to `True` but allow_comments set to `False` you can't extract metadata from a file that doesn't allow comments.")
if (doc_pattern is not None):
if metadata:
for x in read_conll_docs_md(f, doc_pattern, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_docs(f, doc_pattern, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x)
elif metadata:
for x in read_conll_sentences_md(f, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_sentences(f, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x) | Read from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` A pattern that matches the line that signals the
beginning of a new document in the conll file. When None just the
conll sentences are returned.
:param delim: `str` The token between columns in the file
:param metadata: `bool` Should meta data (lines starting with `#` before a
sentence) be returned with our sentences.
:param allow_comments: `bool` Are comments (lines starting with `#`) allowed in the file.
:returns: `Generator` The sentences or documents from the file. | python/baseline/utils.py | read_conll | amyhemmeter/baseline | 0 | python | @exporter
@str_file
def read_conll(f, doc_pattern=None, delim=None, metadata=False, allow_comments=False, comment_pattern='#'):
'Read from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` A pattern that matches the line that signals the\n beginning of a new document in the conll file. When None just the\n conll sentences are returned.\n :param delim: `str` The token between columns in the file\n :param metadata: `bool` Should meta data (lines starting with `#` before a\n sentence) be returned with our sentences.\n :param allow_comments: `bool` Are comments (lines starting with `#`) allowed in the file.\n\n :returns: `Generator` The sentences or documents from the file.\n '
if (metadata and (not allow_comments)):
raise ValueError("You have metadata set to `True` but allow_comments set to `False` you can't extract metadata from a file that doesn't allow comments.")
if (doc_pattern is not None):
if metadata:
for x in read_conll_docs_md(f, doc_pattern, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_docs(f, doc_pattern, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x)
elif metadata:
for x in read_conll_sentences_md(f, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_sentences(f, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x) | @exporter
@str_file
def read_conll(f, doc_pattern=None, delim=None, metadata=False, allow_comments=False, comment_pattern='#'):
'Read from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` A pattern that matches the line that signals the\n beginning of a new document in the conll file. When None just the\n conll sentences are returned.\n :param delim: `str` The token between columns in the file\n :param metadata: `bool` Should meta data (lines starting with `#` before a\n sentence) be returned with our sentences.\n :param allow_comments: `bool` Are comments (lines starting with `#`) allowed in the file.\n\n :returns: `Generator` The sentences or documents from the file.\n '
if (metadata and (not allow_comments)):
raise ValueError("You have metadata set to `True` but allow_comments set to `False` you can't extract metadata from a file that doesn't allow comments.")
if (doc_pattern is not None):
if metadata:
for x in read_conll_docs_md(f, doc_pattern, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_docs(f, doc_pattern, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x)
elif metadata:
for x in read_conll_sentences_md(f, delim=delim, comment_pattern=comment_pattern):
(yield x)
else:
for x in read_conll_sentences(f, delim=delim, allow_comments=allow_comments, comment_pattern=comment_pattern):
(yield x)<|docstring|>Read from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` A pattern that matches the line that signals the
beginning of a new document in the conll file. When None just the
conll sentences are returned.
:param delim: `str` The token between columns in the file
:param metadata: `bool` Should meta data (lines starting with `#` before a
sentence) be returned with our sentences.
:param allow_comments: `bool` Are comments (lines starting with `#`) allowed in the file.
:returns: `Generator` The sentences or documents from the file.<|endoftext|> |
80f56dfd3909cb37f9fad99566ccda7204674f9f594c5e3bf97f4fb0e0b70df7 | @str_file
def read_conll_sentences(f, delim=None, allow_comments=True, comment_pattern='#'):
'Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will get eaten by the\n metadata. If this happens you need to set `allow_comments=True` and not have\n comments in the file. If you have comments in the file and set this then\n they will show up in the sentences\n\n :returns: `Generator[List[List[str]]]` A list of rows representing a sentence.\n '
sentence = []
for line in f:
line = line.rstrip()
if (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
(yield sentence)
sentence = []
continue
sentence.append(line.split(delim))
if sentence:
(yield sentence) | Read sentences from a conll file.
:param f: `str` The file to read from.
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will get eaten by the
metadata. If this happens you need to set `allow_comments=True` and not have
comments in the file. If you have comments in the file and set this then
they will show up in the sentences
:returns: `Generator[List[List[str]]]` A list of rows representing a sentence. | python/baseline/utils.py | read_conll_sentences | amyhemmeter/baseline | 0 | python | @str_file
def read_conll_sentences(f, delim=None, allow_comments=True, comment_pattern='#'):
'Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will get eaten by the\n metadata. If this happens you need to set `allow_comments=True` and not have\n comments in the file. If you have comments in the file and set this then\n they will show up in the sentences\n\n :returns: `Generator[List[List[str]]]` A list of rows representing a sentence.\n '
sentence = []
for line in f:
line = line.rstrip()
if (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
(yield sentence)
sentence = []
continue
sentence.append(line.split(delim))
if sentence:
(yield sentence) | @str_file
def read_conll_sentences(f, delim=None, allow_comments=True, comment_pattern='#'):
'Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will get eaten by the\n metadata. If this happens you need to set `allow_comments=True` and not have\n comments in the file. If you have comments in the file and set this then\n they will show up in the sentences\n\n :returns: `Generator[List[List[str]]]` A list of rows representing a sentence.\n '
sentence = []
for line in f:
line = line.rstrip()
if (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
(yield sentence)
sentence = []
continue
sentence.append(line.split(delim))
if sentence:
(yield sentence)<|docstring|>Read sentences from a conll file.
:param f: `str` The file to read from.
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will get eaten by the
metadata. If this happens you need to set `allow_comments=True` and not have
comments in the file. If you have comments in the file and set this then
they will show up in the sentences
:returns: `Generator[List[List[str]]]` A list of rows representing a sentence.<|endoftext|> |
2945fd56b6c30e9db35f4aecec50e89630c99744d11740fcfc3848f9ea5d6bf9 | @str_file
def read_conll_sentences_md(f, delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If there are document annotations in the conll file then they will show\n up in the meta data for what would be the first sentence of that doc\n\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[str]], List[List[str]]]`\n The first element is the list or rows, the second is a list of comment\n lines that preceded that sentence in the file.\n "
(sentence, meta) = ([], [])
for line in f:
line = line.rstrip()
if ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
(yield (sentence, meta))
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if sentence:
(yield (sentence, meta)) | Read sentences from a conll file.
:param f: `str` The file to read from.
:param delim: `str` The token between columns in the file.
Note:
If there are document annotations in the conll file then they will show
up in the meta data for what would be the first sentence of that doc
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[Tuple[List[List[str]], List[List[str]]]`
The first element is the list or rows, the second is a list of comment
lines that preceded that sentence in the file. | python/baseline/utils.py | read_conll_sentences_md | amyhemmeter/baseline | 0 | python | @str_file
def read_conll_sentences_md(f, delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If there are document annotations in the conll file then they will show\n up in the meta data for what would be the first sentence of that doc\n\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[str]], List[List[str]]]`\n The first element is the list or rows, the second is a list of comment\n lines that preceded that sentence in the file.\n "
(sentence, meta) = ([], [])
for line in f:
line = line.rstrip()
if ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
(yield (sentence, meta))
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if sentence:
(yield (sentence, meta)) | @str_file
def read_conll_sentences_md(f, delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param delim: `str` The token between columns in the file.\n\n Note:\n If there are document annotations in the conll file then they will show\n up in the meta data for what would be the first sentence of that doc\n\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[str]], List[List[str]]]`\n The first element is the list or rows, the second is a list of comment\n lines that preceded that sentence in the file.\n "
(sentence, meta) = ([], [])
for line in f:
line = line.rstrip()
if ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
(yield (sentence, meta))
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if sentence:
(yield (sentence, meta))<|docstring|>Read sentences from a conll file.
:param f: `str` The file to read from.
:param delim: `str` The token between columns in the file.
Note:
If there are document annotations in the conll file then they will show
up in the meta data for what would be the first sentence of that doc
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[Tuple[List[List[str]], List[List[str]]]`
The first element is the list or rows, the second is a list of comment
lines that preceded that sentence in the file.<|endoftext|> |
3b6e9c8f920b9c3abd2e0d35aa46c24953a79f1df291e3b1b86f269c4bcabcd1 | @str_file
def read_conll_docs(f, doc_pattern='# begin doc', delim=None, allow_comments=True, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[List[List[List[str]]]]`\n A document which is a list of sentences.\n "
(doc, sentence) = ([], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
if doc:
if sentence:
doc.append(sentence)
(yield doc)
(doc, sentence) = ([], [])
continue
elif (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sentence = []
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
(yield doc) | Read sentences from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` The beginning of lines that represent new documents
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[List[List[List[str]]]]`
A document which is a list of sentences. | python/baseline/utils.py | read_conll_docs | amyhemmeter/baseline | 0 | python | @str_file
def read_conll_docs(f, doc_pattern='# begin doc', delim=None, allow_comments=True, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[List[List[List[str]]]]`\n A document which is a list of sentences.\n "
(doc, sentence) = ([], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
if doc:
if sentence:
doc.append(sentence)
(yield doc)
(doc, sentence) = ([], [])
continue
elif (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sentence = []
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
(yield doc) | @str_file
def read_conll_docs(f, doc_pattern='# begin doc', delim=None, allow_comments=True, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[List[List[List[str]]]]`\n A document which is a list of sentences.\n "
(doc, sentence) = ([], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
if doc:
if sentence:
doc.append(sentence)
(yield doc)
(doc, sentence) = ([], [])
continue
elif (allow_comments and (not sentence) and line.startswith(comment_pattern)):
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sentence = []
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
(yield doc)<|docstring|>Read sentences from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` The beginning of lines that represent new documents
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[List[List[List[str]]]]`
A document which is a list of sentences.<|endoftext|> |
02e02bbea3a0bf7271806e71df16782b6499e9ec6de2f963d52b7ef02a2e3cff | @str_file
def read_conll_docs_md(f, doc_pattern='# begin doc', delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[List[str]]], List[str] List[List[str]]]`\n The first element is a document, the second is a list of comments\n lines that preceded the document break (includes the document line)\n since the last sentence. The last is a list of comments for each\n list in the document.\n "
(doc, sentence, doc_meta, sent_meta, meta) = ([], [], [], [], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
new_doc_meta = meta
meta = []
new_doc_meta.append(line)
if doc:
if sentence:
doc.append(sentence)
sentence = []
(yield (doc, doc_meta, sent_meta))
(doc, sentence, sent_meta) = ([], [], [])
doc_meta = new_doc_meta
continue
elif ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
meta = []
(yield (doc, doc_meta, sent_meta)) | Read sentences from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` The beginning of lines that represent new documents
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[Tuple[List[List[List[str]]], List[str] List[List[str]]]`
The first element is a document, the second is a list of comments
lines that preceded the document break (includes the document line)
since the last sentence. The last is a list of comments for each
list in the document. | python/baseline/utils.py | read_conll_docs_md | amyhemmeter/baseline | 0 | python | @str_file
def read_conll_docs_md(f, doc_pattern='# begin doc', delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[List[str]]], List[str] List[List[str]]]`\n The first element is a document, the second is a list of comments\n lines that preceded the document break (includes the document line)\n since the last sentence. The last is a list of comments for each\n list in the document.\n "
(doc, sentence, doc_meta, sent_meta, meta) = ([], [], [], [], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
new_doc_meta = meta
meta = []
new_doc_meta.append(line)
if doc:
if sentence:
doc.append(sentence)
sentence = []
(yield (doc, doc_meta, sent_meta))
(doc, sentence, sent_meta) = ([], [], [])
doc_meta = new_doc_meta
continue
elif ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
meta = []
(yield (doc, doc_meta, sent_meta)) | @str_file
def read_conll_docs_md(f, doc_pattern='# begin doc', delim=None, comment_pattern='#'):
"Read sentences from a conll file.\n\n :param f: `str` The file to read from.\n :param doc_pattern: `str` The beginning of lines that represent new documents\n :param delim: `str` The token between columns in the file.\n\n Note:\n If you have a sentence where the first token is `#` it will show up in the\n metadata. If this happens you'll need to update you comments to use a different\n comment pattern, something like `# comment:` I recommend having a space in\n you patten so it can't show up as a conll token\n\n :returns: `Generator[Tuple[List[List[List[str]]], List[str] List[List[str]]]`\n The first element is a document, the second is a list of comments\n lines that preceded the document break (includes the document line)\n since the last sentence. The last is a list of comments for each\n list in the document.\n "
(doc, sentence, doc_meta, sent_meta, meta) = ([], [], [], [], [])
for line in f:
line = line.rstrip()
if line.startswith(doc_pattern):
new_doc_meta = meta
meta = []
new_doc_meta.append(line)
if doc:
if sentence:
doc.append(sentence)
sentence = []
(yield (doc, doc_meta, sent_meta))
(doc, sentence, sent_meta) = ([], [], [])
doc_meta = new_doc_meta
continue
elif ((not sentence) and line.startswith(comment_pattern)):
meta.append(line)
continue
if (len(line) == 0):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
(sentence, meta) = ([], [])
continue
sentence.append(line.split(delim))
if (doc or sentence):
if sentence:
doc.append(sentence)
sent_meta.append(meta)
meta = []
(yield (doc, doc_meta, sent_meta))<|docstring|>Read sentences from a conll file.
:param f: `str` The file to read from.
:param doc_pattern: `str` The beginning of lines that represent new documents
:param delim: `str` The token between columns in the file.
Note:
If you have a sentence where the first token is `#` it will show up in the
metadata. If this happens you'll need to update you comments to use a different
comment pattern, something like `# comment:` I recommend having a space in
you patten so it can't show up as a conll token
:returns: `Generator[Tuple[List[List[List[str]]], List[str] List[List[str]]]`
The first element is a document, the second is a list of comments
lines that preceded the document break (includes the document line)
since the last sentence. The last is a list of comments for each
list in the document.<|endoftext|> |
184f0be68a30d03ea92c0e8610759a2285acd3add1f5b2ff0d4a78484ff1d597 | @exporter
@str_file(ifile='r', ofile='w')
def convert_conll_file(ifile, ofile, convert, fields=[(- 1)], delim=None):
'Convert the tagging scheme in a conll file.\n\n This function assumes the that columns that one wishes to convert are\n the right model columns.\n\n :param ifile: `str` The input file name.\n :param ofile: `str` The output file name.\n :param convert: `Callable(List[str]) -> List[str]` The function that\n transforms a sequence in one tag scheme to another scheme.\n :param fields: `List[int]` The columns to convert.\n :param delim: `str` The symbol that separates the columns.\n '
output_delim = (' ' if (delim is None) else delim)
conll_length = _sniff_conll_file(ifile, delim)
fields = set(normalize_indices(fields, conll_length))
for (lines, md) in read_conll_sentences_md(ifile, delim=delim):
lines = zip(*((convert(l) if (i in fields) else l) for (i, l) in enumerate(zip(*lines))))
if md:
ofile.write(('\n'.join(md) + '\n'))
ofile.write(('\n'.join((output_delim.join(l).rstrip() for l in lines)) + '\n\n')) | Convert the tagging scheme in a conll file.
This function assumes the that columns that one wishes to convert are
the right model columns.
:param ifile: `str` The input file name.
:param ofile: `str` The output file name.
:param convert: `Callable(List[str]) -> List[str]` The function that
transforms a sequence in one tag scheme to another scheme.
:param fields: `List[int]` The columns to convert.
:param delim: `str` The symbol that separates the columns. | python/baseline/utils.py | convert_conll_file | amyhemmeter/baseline | 0 | python | @exporter
@str_file(ifile='r', ofile='w')
def convert_conll_file(ifile, ofile, convert, fields=[(- 1)], delim=None):
'Convert the tagging scheme in a conll file.\n\n This function assumes the that columns that one wishes to convert are\n the right model columns.\n\n :param ifile: `str` The input file name.\n :param ofile: `str` The output file name.\n :param convert: `Callable(List[str]) -> List[str]` The function that\n transforms a sequence in one tag scheme to another scheme.\n :param fields: `List[int]` The columns to convert.\n :param delim: `str` The symbol that separates the columns.\n '
output_delim = (' ' if (delim is None) else delim)
conll_length = _sniff_conll_file(ifile, delim)
fields = set(normalize_indices(fields, conll_length))
for (lines, md) in read_conll_sentences_md(ifile, delim=delim):
lines = zip(*((convert(l) if (i in fields) else l) for (i, l) in enumerate(zip(*lines))))
if md:
ofile.write(('\n'.join(md) + '\n'))
ofile.write(('\n'.join((output_delim.join(l).rstrip() for l in lines)) + '\n\n')) | @exporter
@str_file(ifile='r', ofile='w')
def convert_conll_file(ifile, ofile, convert, fields=[(- 1)], delim=None):
'Convert the tagging scheme in a conll file.\n\n This function assumes the that columns that one wishes to convert are\n the right model columns.\n\n :param ifile: `str` The input file name.\n :param ofile: `str` The output file name.\n :param convert: `Callable(List[str]) -> List[str]` The function that\n transforms a sequence in one tag scheme to another scheme.\n :param fields: `List[int]` The columns to convert.\n :param delim: `str` The symbol that separates the columns.\n '
output_delim = (' ' if (delim is None) else delim)
conll_length = _sniff_conll_file(ifile, delim)
fields = set(normalize_indices(fields, conll_length))
for (lines, md) in read_conll_sentences_md(ifile, delim=delim):
lines = zip(*((convert(l) if (i in fields) else l) for (i, l) in enumerate(zip(*lines))))
if md:
ofile.write(('\n'.join(md) + '\n'))
ofile.write(('\n'.join((output_delim.join(l).rstrip() for l in lines)) + '\n\n'))<|docstring|>Convert the tagging scheme in a conll file.
This function assumes the that columns that one wishes to convert are
the right model columns.
:param ifile: `str` The input file name.
:param ofile: `str` The output file name.
:param convert: `Callable(List[str]) -> List[str]` The function that
transforms a sequence in one tag scheme to another scheme.
:param fields: `List[int]` The columns to convert.
:param delim: `str` The symbol that separates the columns.<|endoftext|> |
f434fb0e6c0f782c57eb2ba1b70a880642bea34481af2c988f4956fc2cd404df | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
'Convert a conll file from iob to bio.'
convert_conll_file(ifile, ofile, convert_iob_to_bio, fields, delim) | Convert a conll file from iob to bio. | python/baseline/utils.py | convert_iob_conll_to_bio | amyhemmeter/baseline | 0 | python | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iob_to_bio, fields, delim) | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iob_to_bio, fields, delim)<|docstring|>Convert a conll file from iob to bio.<|endoftext|> |
8aae71598dda3abe7c55ecb3ffff9dd9a2223ccc7ee771efd8a5b4335f230d48 | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
'Convert a conll file from iob to iobes.'
convert_conll_file(ifile, ofile, convert_iob_to_iobes, fields, delim) | Convert a conll file from iob to iobes. | python/baseline/utils.py | convert_iob_conll_to_iobes | amyhemmeter/baseline | 0 | python | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iob_to_iobes, fields, delim) | @exporter
@str_file(ifile='r', ofile='w')
def convert_iob_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iob_to_iobes, fields, delim)<|docstring|>Convert a conll file from iob to iobes.<|endoftext|> |
ab235bf3305b85e500780c7c065987f7c3fe4fefb801e73a4fa8828e2e03032d | @exporter
@str_file(ifile='r', ofile='w')
def convert_bio_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
'Convert a conll file from bio to iobes.'
convert_conll_file(ifile, ofile, convert_bio_to_iobes, fields, delim) | Convert a conll file from bio to iobes. | python/baseline/utils.py | convert_bio_conll_to_iobes | amyhemmeter/baseline | 0 | python | @exporter
@str_file(ifile='r', ofile='w')
def convert_bio_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_bio_to_iobes, fields, delim) | @exporter
@str_file(ifile='r', ofile='w')
def convert_bio_conll_to_iobes(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_bio_to_iobes, fields, delim)<|docstring|>Convert a conll file from bio to iobes.<|endoftext|> |
2ad05d84e86a172f0ef966c6d8a256e1928be2922f91356cab19b47f0b86bded | @exporter
@str_file(ifile='r', ofile='w')
def convert_iobes_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
'Convert a conll file from iobes to bio. Useful for formatting output to use `conlleval.pl`.'
convert_conll_file(ifile, ofile, convert_iobes_to_bio, fields, delim) | Convert a conll file from iobes to bio. Useful for formatting output to use `conlleval.pl`. | python/baseline/utils.py | convert_iobes_conll_to_bio | amyhemmeter/baseline | 0 | python | @exporter
@str_file(ifile='r', ofile='w')
def convert_iobes_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iobes_to_bio, fields, delim) | @exporter
@str_file(ifile='r', ofile='w')
def convert_iobes_conll_to_bio(ifile, ofile, fields=[(- 1)], delim=None):
convert_conll_file(ifile, ofile, convert_iobes_to_bio, fields, delim)<|docstring|>Convert a conll file from iobes to bio. Useful for formatting output to use `conlleval.pl`.<|endoftext|> |
23a8564729a366a25cdd64da1be822209b521ae3dd4c31eafa492a8afb63ed55 | @exporter
def to_spans(sequence, lut, span_type, verbose=False, delim='@'):
'Turn a sequence into a list of chunks.\n\n :param sequence: `List[int]` The tag sequence.\n :param lut: `Dict[int] -> str` A mapping for integers to tag names.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
sequence = [lut[y] for y in sequence]
return to_chunks(sequence, span_type, verbose, delim) | Turn a sequence into a list of chunks.
:param sequence: `List[int]` The tag sequence.
:param lut: `Dict[int] -> str` A mapping for integers to tag names.
:param span_type: `str` The tagging scheme.
:param verbose: `bool` Should we output warning on illegal transitions.
:param delim: `str` The symbol the separates output chunks from their indices.
:returns: `List[str]` The list of entities in the order they appear. The
entities are in the form {chunk_type}{delim}{index}{delim}{index}...
for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5
in the original sequence. | python/baseline/utils.py | to_spans | amyhemmeter/baseline | 0 | python | @exporter
def to_spans(sequence, lut, span_type, verbose=False, delim='@'):
'Turn a sequence into a list of chunks.\n\n :param sequence: `List[int]` The tag sequence.\n :param lut: `Dict[int] -> str` A mapping for integers to tag names.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
sequence = [lut[y] for y in sequence]
return to_chunks(sequence, span_type, verbose, delim) | @exporter
def to_spans(sequence, lut, span_type, verbose=False, delim='@'):
'Turn a sequence into a list of chunks.\n\n :param sequence: `List[int]` The tag sequence.\n :param lut: `Dict[int] -> str` A mapping for integers to tag names.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
sequence = [lut[y] for y in sequence]
return to_chunks(sequence, span_type, verbose, delim)<|docstring|>Turn a sequence into a list of chunks.
:param sequence: `List[int]` The tag sequence.
:param lut: `Dict[int] -> str` A mapping for integers to tag names.
:param span_type: `str` The tagging scheme.
:param verbose: `bool` Should we output warning on illegal transitions.
:param delim: `str` The symbol the separates output chunks from their indices.
:returns: `List[str]` The list of entities in the order they appear. The
entities are in the form {chunk_type}{delim}{index}{delim}{index}...
for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5
in the original sequence.<|endoftext|> |
25ce5a5a762f67c97e85d0fb9f5519ac566a909160a0218698d1ce526cd907af | @exporter
def to_chunks(sequence, span_type, verbose=False, delim='@'):
'Turn a sequence of tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
if (span_type == 'iobes'):
return to_chunks_iobes(sequence, verbose, delim)
strict_iob2 = ((span_type == 'iob2') or (span_type == 'bio'))
iobtype = (2 if strict_iob2 else 1)
chunks = []
current = None
for (i, label) in enumerate(sequence):
if ((not label.startswith('I-')) and (not (label == 'O'))):
if (current is not None):
chunks.append(delim.join(current))
current = [label.replace('B-', ''), ('%d' % i)]
elif label.startswith('I-'):
if (current is not None):
base = label.replace('I-', '')
if (base == current[0]):
current.append(('%d' % i))
else:
chunks.append(delim.join(current))
if ((iobtype == 2) and verbose):
logger.warning(('Warning: type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (label, current[0], i)))
current = [base, ('%d' % i)]
else:
current = [label.replace('I-', ''), ('%d' % i)]
if ((iobtype == 2) and verbose):
logger.warning(('Warning: unexpected format (I before B @ %d) %s' % (i, label)))
else:
if (current is not None):
chunks.append(delim.join(current))
current = None
if (current is not None):
chunks.append(delim.join(current))
return chunks | Turn a sequence of tags into a list of chunks.
:param sequence: `List[str]` The tag sequence.
:param span_type: `str` The tagging scheme.
:param verbose: `bool` Should we output warning on illegal transitions.
:param delim: `str` The symbol the separates output chunks from their indices.
:returns: `List[str]` The list of entities in the order they appear. The
entities are in the form {chunk_type}{delim}{index}{delim}{index}...
for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5
in the original sequence. | python/baseline/utils.py | to_chunks | amyhemmeter/baseline | 0 | python | @exporter
def to_chunks(sequence, span_type, verbose=False, delim='@'):
'Turn a sequence of tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
if (span_type == 'iobes'):
return to_chunks_iobes(sequence, verbose, delim)
strict_iob2 = ((span_type == 'iob2') or (span_type == 'bio'))
iobtype = (2 if strict_iob2 else 1)
chunks = []
current = None
for (i, label) in enumerate(sequence):
if ((not label.startswith('I-')) and (not (label == 'O'))):
if (current is not None):
chunks.append(delim.join(current))
current = [label.replace('B-', ), ('%d' % i)]
elif label.startswith('I-'):
if (current is not None):
base = label.replace('I-', )
if (base == current[0]):
current.append(('%d' % i))
else:
chunks.append(delim.join(current))
if ((iobtype == 2) and verbose):
logger.warning(('Warning: type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (label, current[0], i)))
current = [base, ('%d' % i)]
else:
current = [label.replace('I-', ), ('%d' % i)]
if ((iobtype == 2) and verbose):
logger.warning(('Warning: unexpected format (I before B @ %d) %s' % (i, label)))
else:
if (current is not None):
chunks.append(delim.join(current))
current = None
if (current is not None):
chunks.append(delim.join(current))
return chunks | @exporter
def to_chunks(sequence, span_type, verbose=False, delim='@'):
'Turn a sequence of tags into a list of chunks.\n\n :param sequence: `List[str]` The tag sequence.\n :param span_type: `str` The tagging scheme.\n :param verbose: `bool` Should we output warning on illegal transitions.\n :param delim: `str` The symbol the separates output chunks from their indices.\n\n :returns: `List[str]` The list of entities in the order they appear. The\n entities are in the form {chunk_type}{delim}{index}{delim}{index}...\n for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5\n in the original sequence.\n '
if (span_type == 'iobes'):
return to_chunks_iobes(sequence, verbose, delim)
strict_iob2 = ((span_type == 'iob2') or (span_type == 'bio'))
iobtype = (2 if strict_iob2 else 1)
chunks = []
current = None
for (i, label) in enumerate(sequence):
if ((not label.startswith('I-')) and (not (label == 'O'))):
if (current is not None):
chunks.append(delim.join(current))
current = [label.replace('B-', ), ('%d' % i)]
elif label.startswith('I-'):
if (current is not None):
base = label.replace('I-', )
if (base == current[0]):
current.append(('%d' % i))
else:
chunks.append(delim.join(current))
if ((iobtype == 2) and verbose):
logger.warning(('Warning: type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (label, current[0], i)))
current = [base, ('%d' % i)]
else:
current = [label.replace('I-', ), ('%d' % i)]
if ((iobtype == 2) and verbose):
logger.warning(('Warning: unexpected format (I before B @ %d) %s' % (i, label)))
else:
if (current is not None):
chunks.append(delim.join(current))
current = None
if (current is not None):
chunks.append(delim.join(current))
return chunks<|docstring|>Turn a sequence of tags into a list of chunks.
:param sequence: `List[str]` The tag sequence.
:param span_type: `str` The tagging scheme.
:param verbose: `bool` Should we output warning on illegal transitions.
:param delim: `str` The symbol the separates output chunks from their indices.
:returns: `List[str]` The list of entities in the order they appear. The
entities are in the form {chunk_type}{delim}{index}{delim}{index}...
for example LOC@3@4@5 means a Location chunk was at indices 3, 4, and 5
in the original sequence.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.