body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
fe6afcff6e9d9fd6d3fd442d37ff9417dae173f1a7375a7a64ebeab20fef0b03 | def coerce_dtypes(col, orig_dtype, final_dtype):
'\n Converts to destination dtype and runs some sanity checks\n to make sure no harm has been done in the conversion\n '
try:
new_col = col.astype(final_dtype)
except ValueError:
if (final_dtype == 'string'):
new_col = col.astype('str').astype('string').apply((lambda x: (None if (x == 'nan') else x))).astype('string')
else:
raise ValueError
assert (new_col.isna().sum() == col.isna().sum())
if final_dtype.lower().startswith('int'):
assert new_col.astype(final_dtype).astype(float).equals(new_col.astype(float))
assert all((new_col.index == col.index))
return new_col | Converts to destination dtype and runs some sanity checks
to make sure no harm has been done in the conversion | src/npi/utils/utils.py | coerce_dtypes | akilby/npi | 0 | python | def coerce_dtypes(col, orig_dtype, final_dtype):
'\n Converts to destination dtype and runs some sanity checks\n to make sure no harm has been done in the conversion\n '
try:
new_col = col.astype(final_dtype)
except ValueError:
if (final_dtype == 'string'):
new_col = col.astype('str').astype('string').apply((lambda x: (None if (x == 'nan') else x))).astype('string')
else:
raise ValueError
assert (new_col.isna().sum() == col.isna().sum())
if final_dtype.lower().startswith('int'):
assert new_col.astype(final_dtype).astype(float).equals(new_col.astype(float))
assert all((new_col.index == col.index))
return new_col | def coerce_dtypes(col, orig_dtype, final_dtype):
'\n Converts to destination dtype and runs some sanity checks\n to make sure no harm has been done in the conversion\n '
try:
new_col = col.astype(final_dtype)
except ValueError:
if (final_dtype == 'string'):
new_col = col.astype('str').astype('string').apply((lambda x: (None if (x == 'nan') else x))).astype('string')
else:
raise ValueError
assert (new_col.isna().sum() == col.isna().sum())
if final_dtype.lower().startswith('int'):
assert new_col.astype(final_dtype).astype(float).equals(new_col.astype(float))
assert all((new_col.index == col.index))
return new_col<|docstring|>Converts to destination dtype and runs some sanity checks
to make sure no harm has been done in the conversion<|endoftext|> |
32148bc0a100ae9b3d7310119fdd545f013e119ae06ca6198a47c815342c8e7f | def stata_elapsed_date_to_datetime(date, fmt):
'\n Original source for this code:\n https://www.statsmodels.org/0.8.0/_modules/statsmodels/iolib/foreign.html\n\n Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime\n\n Parameters\n ----------\n date : int\n The Stata Internal Format date to convert to datetime according to fmt\n fmt : str\n The format to convert to. Can be, tc, td, tw, tm, tq, th, ty\n\n Examples\n --------\n >>> _stata_elapsed_date_to_datetime(52, "%tw")\n datetime.datetime(1961, 1, 1, 0, 0)\n\n Notes\n -----\n datetime/c - tc\n milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day\n datetime/C - tC - NOT IMPLEMENTED\n milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds\n date - td\n days since 01jan1960 (01jan1960 = 0)\n weekly date - tw\n weeks since 1960w1\n This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.\n The datetime value is the start of the week in terms of days in the\n year, not ISO calendar weeks.\n monthly date - tm\n months since 1960m1\n quarterly date - tq\n quarters since 1960q1\n half-yearly date - th\n half-years since 1960h1 yearly\n date - ty\n years since 0000\n\n If you don\'t have pandas with datetime support, then you can\'t do\n milliseconds accurately.\n '
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if (fmt in ['%tc', 'tc']):
from dateutil.relativedelta import relativedelta
return (stata_epoch + relativedelta(microseconds=(date * 1000)))
elif (fmt in ['%tC', 'tC']):
from warnings import warn
warn('Encountered %tC format. Leaving in Stata Internal Format.', UserWarning)
return date
elif (fmt in ['%td', 'td']):
return (stata_epoch + datetime.timedelta(int(date)))
elif (fmt in ['%tw', 'tw']):
year = datetime.datetime((stata_epoch.year + (date // 52)), 1, 1)
day_delta = ((date % 52) * 7)
return (year + datetime.timedelta(int(day_delta)))
elif (fmt in ['%tm', 'tm']):
year = (stata_epoch.year + (date // 12))
month_delta = ((date % 12) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%tq', 'tq']):
year = (stata_epoch.year + (date // 4))
month_delta = (((date % 4) * 3) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%th', 'th']):
year = (stata_epoch.year + (date // 2))
month_delta = (((date % 2) * 6) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%ty', 'ty']):
if (date > 0):
return datetime.datetime(date, 1, 1)
else:
raise ValueError('Year 0 and before not implemented')
else:
raise ValueError(('Date fmt %s not understood' % fmt)) | Original source for this code:
https://www.statsmodels.org/0.8.0/_modules/statsmodels/iolib/foreign.html
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
>>> _stata_elapsed_date_to_datetime(52, "%tw")
datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately. | src/npi/utils/utils.py | stata_elapsed_date_to_datetime | akilby/npi | 0 | python | def stata_elapsed_date_to_datetime(date, fmt):
'\n Original source for this code:\n https://www.statsmodels.org/0.8.0/_modules/statsmodels/iolib/foreign.html\n\n Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime\n\n Parameters\n ----------\n date : int\n The Stata Internal Format date to convert to datetime according to fmt\n fmt : str\n The format to convert to. Can be, tc, td, tw, tm, tq, th, ty\n\n Examples\n --------\n >>> _stata_elapsed_date_to_datetime(52, "%tw")\n datetime.datetime(1961, 1, 1, 0, 0)\n\n Notes\n -----\n datetime/c - tc\n milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day\n datetime/C - tC - NOT IMPLEMENTED\n milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds\n date - td\n days since 01jan1960 (01jan1960 = 0)\n weekly date - tw\n weeks since 1960w1\n This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.\n The datetime value is the start of the week in terms of days in the\n year, not ISO calendar weeks.\n monthly date - tm\n months since 1960m1\n quarterly date - tq\n quarters since 1960q1\n half-yearly date - th\n half-years since 1960h1 yearly\n date - ty\n years since 0000\n\n If you don\'t have pandas with datetime support, then you can\'t do\n milliseconds accurately.\n '
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if (fmt in ['%tc', 'tc']):
from dateutil.relativedelta import relativedelta
return (stata_epoch + relativedelta(microseconds=(date * 1000)))
elif (fmt in ['%tC', 'tC']):
from warnings import warn
warn('Encountered %tC format. Leaving in Stata Internal Format.', UserWarning)
return date
elif (fmt in ['%td', 'td']):
return (stata_epoch + datetime.timedelta(int(date)))
elif (fmt in ['%tw', 'tw']):
year = datetime.datetime((stata_epoch.year + (date // 52)), 1, 1)
day_delta = ((date % 52) * 7)
return (year + datetime.timedelta(int(day_delta)))
elif (fmt in ['%tm', 'tm']):
year = (stata_epoch.year + (date // 12))
month_delta = ((date % 12) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%tq', 'tq']):
year = (stata_epoch.year + (date // 4))
month_delta = (((date % 4) * 3) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%th', 'th']):
year = (stata_epoch.year + (date // 2))
month_delta = (((date % 2) * 6) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%ty', 'ty']):
if (date > 0):
return datetime.datetime(date, 1, 1)
else:
raise ValueError('Year 0 and before not implemented')
else:
raise ValueError(('Date fmt %s not understood' % fmt)) | def stata_elapsed_date_to_datetime(date, fmt):
'\n Original source for this code:\n https://www.statsmodels.org/0.8.0/_modules/statsmodels/iolib/foreign.html\n\n Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime\n\n Parameters\n ----------\n date : int\n The Stata Internal Format date to convert to datetime according to fmt\n fmt : str\n The format to convert to. Can be, tc, td, tw, tm, tq, th, ty\n\n Examples\n --------\n >>> _stata_elapsed_date_to_datetime(52, "%tw")\n datetime.datetime(1961, 1, 1, 0, 0)\n\n Notes\n -----\n datetime/c - tc\n milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day\n datetime/C - tC - NOT IMPLEMENTED\n milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds\n date - td\n days since 01jan1960 (01jan1960 = 0)\n weekly date - tw\n weeks since 1960w1\n This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.\n The datetime value is the start of the week in terms of days in the\n year, not ISO calendar weeks.\n monthly date - tm\n months since 1960m1\n quarterly date - tq\n quarters since 1960q1\n half-yearly date - th\n half-years since 1960h1 yearly\n date - ty\n years since 0000\n\n If you don\'t have pandas with datetime support, then you can\'t do\n milliseconds accurately.\n '
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if (fmt in ['%tc', 'tc']):
from dateutil.relativedelta import relativedelta
return (stata_epoch + relativedelta(microseconds=(date * 1000)))
elif (fmt in ['%tC', 'tC']):
from warnings import warn
warn('Encountered %tC format. Leaving in Stata Internal Format.', UserWarning)
return date
elif (fmt in ['%td', 'td']):
return (stata_epoch + datetime.timedelta(int(date)))
elif (fmt in ['%tw', 'tw']):
year = datetime.datetime((stata_epoch.year + (date // 52)), 1, 1)
day_delta = ((date % 52) * 7)
return (year + datetime.timedelta(int(day_delta)))
elif (fmt in ['%tm', 'tm']):
year = (stata_epoch.year + (date // 12))
month_delta = ((date % 12) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%tq', 'tq']):
year = (stata_epoch.year + (date // 4))
month_delta = (((date % 4) * 3) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%th', 'th']):
year = (stata_epoch.year + (date // 2))
month_delta = (((date % 2) * 6) + 1)
return datetime.datetime(year, month_delta, 1)
elif (fmt in ['%ty', 'ty']):
if (date > 0):
return datetime.datetime(date, 1, 1)
else:
raise ValueError('Year 0 and before not implemented')
else:
raise ValueError(('Date fmt %s not understood' % fmt))<|docstring|>Original source for this code:
https://www.statsmodels.org/0.8.0/_modules/statsmodels/iolib/foreign.html
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
>>> _stata_elapsed_date_to_datetime(52, "%tw")
datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.<|endoftext|> |
225ea242a2188202d511a404594ed408afcdcfb5d0589b5da8658ca4989aa46a | def fit(self, X, y=None):
'A reference implementation of a fitting function for a transformer.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n The training input samples, where n_samples is the number of samples\n and n_features is the number of features.\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n '
X = check_array(X, accept_sparse=True, ensure_min_samples=3, estimator=self)
self.embedding_ = diffusion_mapping(X, n_components=self.n_components, n_neighbors=self.n_neighbors, alpha=self.alpha, t=self.t, gamma=self.gamma, metric=self.metric, p=self.p, metric_params=self.metric_params, n_jobs=self.n_jobs)
return self | A reference implementation of a fitting function for a transformer.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The training input samples, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself. | diffmap/diffmap_.py | fit | ckingdev/sklearn-diffmap | 0 | python | def fit(self, X, y=None):
'A reference implementation of a fitting function for a transformer.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n The training input samples, where n_samples is the number of samples\n and n_features is the number of features.\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n '
X = check_array(X, accept_sparse=True, ensure_min_samples=3, estimator=self)
self.embedding_ = diffusion_mapping(X, n_components=self.n_components, n_neighbors=self.n_neighbors, alpha=self.alpha, t=self.t, gamma=self.gamma, metric=self.metric, p=self.p, metric_params=self.metric_params, n_jobs=self.n_jobs)
return self | def fit(self, X, y=None):
'A reference implementation of a fitting function for a transformer.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n The training input samples, where n_samples is the number of samples\n and n_features is the number of features.\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n '
X = check_array(X, accept_sparse=True, ensure_min_samples=3, estimator=self)
self.embedding_ = diffusion_mapping(X, n_components=self.n_components, n_neighbors=self.n_neighbors, alpha=self.alpha, t=self.t, gamma=self.gamma, metric=self.metric, p=self.p, metric_params=self.metric_params, n_jobs=self.n_jobs)
return self<|docstring|>A reference implementation of a fitting function for a transformer.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The training input samples, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self : object
Returns the instance itself.<|endoftext|> |
8ac5090bb1f342fdb7a489b44a3f449d54c6d1de8f7150e4a6f0f0cc8f5109bc | def fit_transform(self, X, y=None, **fit_params):
'Fit the model from data in X and transform X.\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples\n and n_features is the number of features.\n\n Y: Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n '
self.fit(X)
return self.embedding_ | Fit the model from data in X and transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Y: Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components) | diffmap/diffmap_.py | fit_transform | ckingdev/sklearn-diffmap | 0 | python | def fit_transform(self, X, y=None, **fit_params):
'Fit the model from data in X and transform X.\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples\n and n_features is the number of features.\n\n Y: Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n '
self.fit(X)
return self.embedding_ | def fit_transform(self, X, y=None, **fit_params):
'Fit the model from data in X and transform X.\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples\n and n_features is the number of features.\n\n Y: Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n '
self.fit(X)
return self.embedding_<|docstring|>Fit the model from data in X and transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Y: Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)<|endoftext|> |
b970988dbb5396892f4ebefe9f36a32607892f73e93fd42463b5dfc1acab4385 | def __init__(self, secret_bytes, degree, crc_length, gf_exp):
'\n :param secret_bytes: secret in bytes format\n :param degree: polynomial degree as int\n :param crc_length: CRC length as int\n :param gf_exp: exponential in GF(2**gf_exp)\n '
self.degree = degree
self.crc_length = crc_length
self.secret_bit = BitArray(bytes=secret_bytes, length=(len(secret_bytes) * 8))
self.gf_exp = gf_exp
self.checksum_bit = BitArray(uint=binascii.crc32(self.secret_bit.bytes), length=self.crc_length)
self.total_bit = self.secret_bit.copy()
self.total_bit.append(self.checksum_bit)
self.coefficients = self.extract_coefficients()
self.poly_gf_32 = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, 32)
self.K = GF(2, gf_exp) | :param secret_bytes: secret in bytes format
:param degree: polynomial degree as int
:param crc_length: CRC length as int
:param gf_exp: exponential in GF(2**gf_exp) | Polynomial_Generator.py | __init__ | abb-iss/distributed-fuzzy-vault | 4 | python | def __init__(self, secret_bytes, degree, crc_length, gf_exp):
'\n :param secret_bytes: secret in bytes format\n :param degree: polynomial degree as int\n :param crc_length: CRC length as int\n :param gf_exp: exponential in GF(2**gf_exp)\n '
self.degree = degree
self.crc_length = crc_length
self.secret_bit = BitArray(bytes=secret_bytes, length=(len(secret_bytes) * 8))
self.gf_exp = gf_exp
self.checksum_bit = BitArray(uint=binascii.crc32(self.secret_bit.bytes), length=self.crc_length)
self.total_bit = self.secret_bit.copy()
self.total_bit.append(self.checksum_bit)
self.coefficients = self.extract_coefficients()
self.poly_gf_32 = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, 32)
self.K = GF(2, gf_exp) | def __init__(self, secret_bytes, degree, crc_length, gf_exp):
'\n :param secret_bytes: secret in bytes format\n :param degree: polynomial degree as int\n :param crc_length: CRC length as int\n :param gf_exp: exponential in GF(2**gf_exp)\n '
self.degree = degree
self.crc_length = crc_length
self.secret_bit = BitArray(bytes=secret_bytes, length=(len(secret_bytes) * 8))
self.gf_exp = gf_exp
self.checksum_bit = BitArray(uint=binascii.crc32(self.secret_bit.bytes), length=self.crc_length)
self.total_bit = self.secret_bit.copy()
self.total_bit.append(self.checksum_bit)
self.coefficients = self.extract_coefficients()
self.poly_gf_32 = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, 32)
self.K = GF(2, gf_exp)<|docstring|>:param secret_bytes: secret in bytes format
:param degree: polynomial degree as int
:param crc_length: CRC length as int
:param gf_exp: exponential in GF(2**gf_exp)<|endoftext|> |
71a5148eca069e3830aa158ff9ad6252e328816024bd98f6de5049bc04a93688 | def prune_secret(self, secret_bit):
' Prunes secret if secret length + CRC length is not multiple of\n polynomial degree + 1. Takes secret as BitArray\n :returns pruned secret as Bitarray '
remainder = ((len(secret_bit) + self.crc_length) % (self.degree + 1))
secret_len = (len(secret_bit) - remainder)
if (remainder == 0):
return secret_bit
else:
return secret_bit[0:secret_len] | Prunes secret if secret length + CRC length is not multiple of
polynomial degree + 1. Takes secret as BitArray
:returns pruned secret as Bitarray | Polynomial_Generator.py | prune_secret | abb-iss/distributed-fuzzy-vault | 4 | python | def prune_secret(self, secret_bit):
' Prunes secret if secret length + CRC length is not multiple of\n polynomial degree + 1. Takes secret as BitArray\n :returns pruned secret as Bitarray '
remainder = ((len(secret_bit) + self.crc_length) % (self.degree + 1))
secret_len = (len(secret_bit) - remainder)
if (remainder == 0):
return secret_bit
else:
return secret_bit[0:secret_len] | def prune_secret(self, secret_bit):
' Prunes secret if secret length + CRC length is not multiple of\n polynomial degree + 1. Takes secret as BitArray\n :returns pruned secret as Bitarray '
remainder = ((len(secret_bit) + self.crc_length) % (self.degree + 1))
secret_len = (len(secret_bit) - remainder)
if (remainder == 0):
return secret_bit
else:
return secret_bit[0:secret_len]<|docstring|>Prunes secret if secret length + CRC length is not multiple of
polynomial degree + 1. Takes secret as BitArray
:returns pruned secret as Bitarray<|endoftext|> |
76aaf3d8c89fed484b1f001d0cb23eeedc9bdf7e15c4e109ddaa13c025cf61e9 | def extract_coefficients(self):
' extracts coefficients of polynomial from bitstring\n :returns coefficients as list '
coefficients = []
assert ((len(self.total_bit) % (self.degree + 1)) == 0)
step = int((len(self.total_bit) / (self.degree + 1)))
for i in range(0, len(self.total_bit), step):
coefficients.append(self.total_bit[i:(i + step)].uint)
return coefficients | extracts coefficients of polynomial from bitstring
:returns coefficients as list | Polynomial_Generator.py | extract_coefficients | abb-iss/distributed-fuzzy-vault | 4 | python | def extract_coefficients(self):
' extracts coefficients of polynomial from bitstring\n :returns coefficients as list '
coefficients = []
assert ((len(self.total_bit) % (self.degree + 1)) == 0)
step = int((len(self.total_bit) / (self.degree + 1)))
for i in range(0, len(self.total_bit), step):
coefficients.append(self.total_bit[i:(i + step)].uint)
return coefficients | def extract_coefficients(self):
' extracts coefficients of polynomial from bitstring\n :returns coefficients as list '
coefficients = []
assert ((len(self.total_bit) % (self.degree + 1)) == 0)
step = int((len(self.total_bit) / (self.degree + 1)))
for i in range(0, len(self.total_bit), step):
coefficients.append(self.total_bit[i:(i + step)].uint)
return coefficients<|docstring|>extracts coefficients of polynomial from bitstring
:returns coefficients as list<|endoftext|> |
81914aeb056be6313c2f967016e11dde7dc660b9de286aa66dbf0a04e19a1b10 | def evaluate_polynomial_gf_2(self, x):
' Evaluate polynomial of this polynomial generator at x in GF(2**m)\n :param x: int\n :param m: exponential in GF(2**m)\n :returns function result as int '
m = self.gf_exp
if (m == 32):
poly_gf = self.poly_gf_32
else:
poly_gf = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, m)
x_gf = GaloisConverter.convert_int_to_element_in_gf_2(x, m)
y_gf = self.K.eval_poly(poly_gf, x_gf)
result = GaloisConverter.convert_gf_2_element_to_int(y_gf, m)
if (result > ((2 ** m) * 2)):
raise ValueError('Too large number generated in polynomial GF(2**{}):{}'.format(m, result))
return result | Evaluate polynomial of this polynomial generator at x in GF(2**m)
:param x: int
:param m: exponential in GF(2**m)
:returns function result as int | Polynomial_Generator.py | evaluate_polynomial_gf_2 | abb-iss/distributed-fuzzy-vault | 4 | python | def evaluate_polynomial_gf_2(self, x):
' Evaluate polynomial of this polynomial generator at x in GF(2**m)\n :param x: int\n :param m: exponential in GF(2**m)\n :returns function result as int '
m = self.gf_exp
if (m == 32):
poly_gf = self.poly_gf_32
else:
poly_gf = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, m)
x_gf = GaloisConverter.convert_int_to_element_in_gf_2(x, m)
y_gf = self.K.eval_poly(poly_gf, x_gf)
result = GaloisConverter.convert_gf_2_element_to_int(y_gf, m)
if (result > ((2 ** m) * 2)):
raise ValueError('Too large number generated in polynomial GF(2**{}):{}'.format(m, result))
return result | def evaluate_polynomial_gf_2(self, x):
' Evaluate polynomial of this polynomial generator at x in GF(2**m)\n :param x: int\n :param m: exponential in GF(2**m)\n :returns function result as int '
m = self.gf_exp
if (m == 32):
poly_gf = self.poly_gf_32
else:
poly_gf = GaloisConverter.convert_int_list_to_gf_2_list(self.coefficients, m)
x_gf = GaloisConverter.convert_int_to_element_in_gf_2(x, m)
y_gf = self.K.eval_poly(poly_gf, x_gf)
result = GaloisConverter.convert_gf_2_element_to_int(y_gf, m)
if (result > ((2 ** m) * 2)):
raise ValueError('Too large number generated in polynomial GF(2**{}):{}'.format(m, result))
return result<|docstring|>Evaluate polynomial of this polynomial generator at x in GF(2**m)
:param x: int
:param m: exponential in GF(2**m)
:returns function result as int<|endoftext|> |
360e9fa7e591849ab535d2930242aaeb6208a174e14ca6b17d4bebac025aaf7f | def evaluate_polynomial_gf_2_array(self, array):
' Evaluate polynomial on list of integers\n :param array: list of integers\n :param m: exponential in GF(2**m) '
result = []
for x in array:
result.append(self.evaluate_polynomial_gf_2(x))
return result | Evaluate polynomial on list of integers
:param array: list of integers
:param m: exponential in GF(2**m) | Polynomial_Generator.py | evaluate_polynomial_gf_2_array | abb-iss/distributed-fuzzy-vault | 4 | python | def evaluate_polynomial_gf_2_array(self, array):
' Evaluate polynomial on list of integers\n :param array: list of integers\n :param m: exponential in GF(2**m) '
result = []
for x in array:
result.append(self.evaluate_polynomial_gf_2(x))
return result | def evaluate_polynomial_gf_2_array(self, array):
' Evaluate polynomial on list of integers\n :param array: list of integers\n :param m: exponential in GF(2**m) '
result = []
for x in array:
result.append(self.evaluate_polynomial_gf_2(x))
return result<|docstring|>Evaluate polynomial on list of integers
:param array: list of integers
:param m: exponential in GF(2**m)<|endoftext|> |
487ec47bbcead22abc9ad79cf9d4367f62814843447ee59eeebbe98dcabb2415 | def main__legacy_report(argv=sys.argv[1:]):
'\n Entry point to convert cedar csv metrics format into legacy perf.json\n :return: None\n '
parser = build_parser()
run(parser.parse_args(argv)) | Entry point to convert cedar csv metrics format into legacy perf.json
:return: None | src/python/genny/legacy_report.py | main__legacy_report | zachyam/genny | 0 | python | def main__legacy_report(argv=sys.argv[1:]):
'\n Entry point to convert cedar csv metrics format into legacy perf.json\n :return: None\n '
parser = build_parser()
run(parser.parse_args(argv)) | def main__legacy_report(argv=sys.argv[1:]):
'\n Entry point to convert cedar csv metrics format into legacy perf.json\n :return: None\n '
parser = build_parser()
run(parser.parse_args(argv))<|docstring|>Entry point to convert cedar csv metrics format into legacy perf.json
:return: None<|endoftext|> |
7073729c523c1649abaf498a2af115db23d198f5878cac0691d11012b893eb6c | def get_project_by_name(projects_path, project_name, project_class=None):
'\n Returns a project located in the given path and with the given name (if exists)\n :param projects_path: str\n :param project_name: str\n :param project_class: cls\n :return: Project or None\n '
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path "{}" does not exist!'.format(projects_path))
return None
all_projects = get_projects(projects_path, project_class=project_class)
for project in all_projects:
if (project.name == project_name):
return project
return None | Returns a project located in the given path and with the given name (if exists)
:param projects_path: str
:param project_name: str
:param project_class: cls
:return: Project or None | tpDcc/libs/qt/widgets/project.py | get_project_by_name | tpDcc/tpQtLib | 3 | python | def get_project_by_name(projects_path, project_name, project_class=None):
'\n Returns a project located in the given path and with the given name (if exists)\n :param projects_path: str\n :param project_name: str\n :param project_class: cls\n :return: Project or None\n '
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path "{}" does not exist!'.format(projects_path))
return None
all_projects = get_projects(projects_path, project_class=project_class)
for project in all_projects:
if (project.name == project_name):
return project
return None | def get_project_by_name(projects_path, project_name, project_class=None):
'\n Returns a project located in the given path and with the given name (if exists)\n :param projects_path: str\n :param project_name: str\n :param project_class: cls\n :return: Project or None\n '
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path "{}" does not exist!'.format(projects_path))
return None
all_projects = get_projects(projects_path, project_class=project_class)
for project in all_projects:
if (project.name == project_name):
return project
return None<|docstring|>Returns a project located in the given path and with the given name (if exists)
:param projects_path: str
:param project_name: str
:param project_class: cls
:return: Project or None<|endoftext|> |
f70c29f6c27ea0e23e88f3aa8ce665d427b7f0e241c840e9a944776dc3022e89 | def get_projects(projects_path, project_class=None):
'\n Returns all projects located in given path\n :param projects_path: str\n :param project_class: cls\n :return: list(Project)\n '
if (not project_class):
project_class = Project
projects_found = list()
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path {} is not valid!'.format(projects_path))
return projects_found
for (root, dirs, files) in os.walk(projects_path):
if (consts.PROJECTS_NAME in files):
new_project = project_class.create_project_from_data(path.join_path(root, consts.PROJECTS_NAME))
if (new_project is not None):
projects_found.append(new_project)
return projects_found | Returns all projects located in given path
:param projects_path: str
:param project_class: cls
:return: list(Project) | tpDcc/libs/qt/widgets/project.py | get_projects | tpDcc/tpQtLib | 3 | python | def get_projects(projects_path, project_class=None):
'\n Returns all projects located in given path\n :param projects_path: str\n :param project_class: cls\n :return: list(Project)\n '
if (not project_class):
project_class = Project
projects_found = list()
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path {} is not valid!'.format(projects_path))
return projects_found
for (root, dirs, files) in os.walk(projects_path):
if (consts.PROJECTS_NAME in files):
new_project = project_class.create_project_from_data(path.join_path(root, consts.PROJECTS_NAME))
if (new_project is not None):
projects_found.append(new_project)
return projects_found | def get_projects(projects_path, project_class=None):
'\n Returns all projects located in given path\n :param projects_path: str\n :param project_class: cls\n :return: list(Project)\n '
if (not project_class):
project_class = Project
projects_found = list()
if ((not projects_path) or (not os.path.isdir(projects_path))):
LOGGER.warning('Projects Path {} is not valid!'.format(projects_path))
return projects_found
for (root, dirs, files) in os.walk(projects_path):
if (consts.PROJECTS_NAME in files):
new_project = project_class.create_project_from_data(path.join_path(root, consts.PROJECTS_NAME))
if (new_project is not None):
projects_found.append(new_project)
return projects_found<|docstring|>Returns all projects located in given path
:param projects_path: str
:param project_class: cls
:return: list(Project)<|endoftext|> |
99f1f3649d7c0172483d30b975384bc0677d66c75640936667579d8351e63db7 | @classmethod
def create_project_from_data(cls, project_data_path):
'\n Creates a new project using a project data JSON file\n :param project_data_path: str, path where project JSON data file is located\n :return: Project\n '
if ((project_data_path is None) or (not path.is_file(project_data_path))):
LOGGER.warning('Project Data Path {} is not valid!'.format(project_data_path))
return None
project_data = settings.JSONSettings()
project_options = settings.JSONSettings()
project_dir = path.get_dirname(project_data_path)
project_name = path.get_basename(project_data_path)
project_data.set_directory(project_dir, project_name)
project_options.set_directory(project_dir, 'options.json')
project_name = project_data.get('name')
project_path = path.get_dirname(path.get_dirname(project_data_path))
project_image = project_data.get('image')
LOGGER.debug('New Project found [{}]: {}'.format(project_name, project_path))
project_data = core_project.ProjectData(name=project_name, project_path=project_path, settings=project_data, options=project_options)
new_project = cls(project_data=project_data)
if project_image:
new_project.set_image(project_image)
return new_project | Creates a new project using a project data JSON file
:param project_data_path: str, path where project JSON data file is located
:return: Project | tpDcc/libs/qt/widgets/project.py | create_project_from_data | tpDcc/tpQtLib | 3 | python | @classmethod
def create_project_from_data(cls, project_data_path):
'\n Creates a new project using a project data JSON file\n :param project_data_path: str, path where project JSON data file is located\n :return: Project\n '
if ((project_data_path is None) or (not path.is_file(project_data_path))):
LOGGER.warning('Project Data Path {} is not valid!'.format(project_data_path))
return None
project_data = settings.JSONSettings()
project_options = settings.JSONSettings()
project_dir = path.get_dirname(project_data_path)
project_name = path.get_basename(project_data_path)
project_data.set_directory(project_dir, project_name)
project_options.set_directory(project_dir, 'options.json')
project_name = project_data.get('name')
project_path = path.get_dirname(path.get_dirname(project_data_path))
project_image = project_data.get('image')
LOGGER.debug('New Project found [{}]: {}'.format(project_name, project_path))
project_data = core_project.ProjectData(name=project_name, project_path=project_path, settings=project_data, options=project_options)
new_project = cls(project_data=project_data)
if project_image:
new_project.set_image(project_image)
return new_project | @classmethod
def create_project_from_data(cls, project_data_path):
'\n Creates a new project using a project data JSON file\n :param project_data_path: str, path where project JSON data file is located\n :return: Project\n '
if ((project_data_path is None) or (not path.is_file(project_data_path))):
LOGGER.warning('Project Data Path {} is not valid!'.format(project_data_path))
return None
project_data = settings.JSONSettings()
project_options = settings.JSONSettings()
project_dir = path.get_dirname(project_data_path)
project_name = path.get_basename(project_data_path)
project_data.set_directory(project_dir, project_name)
project_options.set_directory(project_dir, 'options.json')
project_name = project_data.get('name')
project_path = path.get_dirname(path.get_dirname(project_data_path))
project_image = project_data.get('image')
LOGGER.debug('New Project found [{}]: {}'.format(project_name, project_path))
project_data = core_project.ProjectData(name=project_name, project_path=project_path, settings=project_data, options=project_options)
new_project = cls(project_data=project_data)
if project_image:
new_project.set_image(project_image)
return new_project<|docstring|>Creates a new project using a project data JSON file
:param project_data_path: str, path where project JSON data file is located
:return: Project<|endoftext|> |
2551ee228056b1c3b35aee9e497db9a64eebc8f8ee97236b34dc58715590e898 | def open(self):
'\n Opens project\n '
self._on_open_project() | Opens project | tpDcc/libs/qt/widgets/project.py | open | tpDcc/tpQtLib | 3 | python | def open(self):
'\n \n '
self._on_open_project() | def open(self):
'\n \n '
self._on_open_project()<|docstring|>Opens project<|endoftext|> |
730c4155bd94f40d37e290ea6de1a667a2d58b821f3f17ce050a02cdfe42b02a | def has_option(self, name, group=None):
'\n Returns whether the current object has given option or not\n :param name: str, name of the option\n :param group: variant, str || None, group of the option (optional)\n :return: bool\n '
if (not self._project_data):
return False
return self._project_data.has_option(name=name, group=group) | Returns whether the current object has given option or not
:param name: str, name of the option
:param group: variant, str || None, group of the option (optional)
:return: bool | tpDcc/libs/qt/widgets/project.py | has_option | tpDcc/tpQtLib | 3 | python | def has_option(self, name, group=None):
'\n Returns whether the current object has given option or not\n :param name: str, name of the option\n :param group: variant, str || None, group of the option (optional)\n :return: bool\n '
if (not self._project_data):
return False
return self._project_data.has_option(name=name, group=group) | def has_option(self, name, group=None):
'\n Returns whether the current object has given option or not\n :param name: str, name of the option\n :param group: variant, str || None, group of the option (optional)\n :return: bool\n '
if (not self._project_data):
return False
return self._project_data.has_option(name=name, group=group)<|docstring|>Returns whether the current object has given option or not
:param name: str, name of the option
:param group: variant, str || None, group of the option (optional)
:return: bool<|endoftext|> |
e14aae8db0ff666cc76e1203cb56477b89890e887170e8848e88c7fdddd70da4 | def add_option(self, name, value, group=None, option_type=None):
'\n Adds a new option to the options file\n :param name: str, name of the option\n :param value: variant, value of the option\n :param group: variant, str || None, group of the option (optional)\n :param option_type: variant, str || None, option type (optional)\n '
if (not self._project_data):
return
self._project_data.add_option(name, value, group=group, option_type=option_type) | Adds a new option to the options file
:param name: str, name of the option
:param value: variant, value of the option
:param group: variant, str || None, group of the option (optional)
:param option_type: variant, str || None, option type (optional) | tpDcc/libs/qt/widgets/project.py | add_option | tpDcc/tpQtLib | 3 | python | def add_option(self, name, value, group=None, option_type=None):
'\n Adds a new option to the options file\n :param name: str, name of the option\n :param value: variant, value of the option\n :param group: variant, str || None, group of the option (optional)\n :param option_type: variant, str || None, option type (optional)\n '
if (not self._project_data):
return
self._project_data.add_option(name, value, group=group, option_type=option_type) | def add_option(self, name, value, group=None, option_type=None):
'\n Adds a new option to the options file\n :param name: str, name of the option\n :param value: variant, value of the option\n :param group: variant, str || None, group of the option (optional)\n :param option_type: variant, str || None, option type (optional)\n '
if (not self._project_data):
return
self._project_data.add_option(name, value, group=group, option_type=option_type)<|docstring|>Adds a new option to the options file
:param name: str, name of the option
:param value: variant, value of the option
:param group: variant, str || None, group of the option (optional)
:param option_type: variant, str || None, option type (optional)<|endoftext|> |
6ad067757df649973c5d0bb20de37b4a8aed031589b4ef1503ae187952a835ca | def get_option(self, name, group=None, default=None):
'\n Returns option by name and group\n :param name: str, name of the option we want to retrieve\n :param group: variant, str || None, group of the option (optional)\n :return: variant\n '
if (not self._project_data):
return
return self._project_data.get_option(name, group=group, default=default) | Returns option by name and group
:param name: str, name of the option we want to retrieve
:param group: variant, str || None, group of the option (optional)
:return: variant | tpDcc/libs/qt/widgets/project.py | get_option | tpDcc/tpQtLib | 3 | python | def get_option(self, name, group=None, default=None):
'\n Returns option by name and group\n :param name: str, name of the option we want to retrieve\n :param group: variant, str || None, group of the option (optional)\n :return: variant\n '
if (not self._project_data):
return
return self._project_data.get_option(name, group=group, default=default) | def get_option(self, name, group=None, default=None):
'\n Returns option by name and group\n :param name: str, name of the option we want to retrieve\n :param group: variant, str || None, group of the option (optional)\n :return: variant\n '
if (not self._project_data):
return
return self._project_data.get_option(name, group=group, default=default)<|docstring|>Returns option by name and group
:param name: str, name of the option we want to retrieve
:param group: variant, str || None, group of the option (optional)
:return: variant<|endoftext|> |
5d66f6c0aac4370027b5526b6bd6474062c45eae0cb5dfef03cdc88e5200bc67 | def reload_options(self):
'\n Reload settings\n '
if (not self._project_data):
return
self._project_data.reload_options() | Reload settings | tpDcc/libs/qt/widgets/project.py | reload_options | tpDcc/tpQtLib | 3 | python | def reload_options(self):
'\n \n '
if (not self._project_data):
return
self._project_data.reload_options() | def reload_options(self):
'\n \n '
if (not self._project_data):
return
self._project_data.reload_options()<|docstring|>Reload settings<|endoftext|> |
df5f467a2085b2078f7bf5b96700c86c16e24f92e37a122fcba41615155c6383 | def clear_options(self):
'\n Clears all the options\n '
if (not self._project_data):
return
self._project_data.clear_options() | Clears all the options | tpDcc/libs/qt/widgets/project.py | clear_options | tpDcc/tpQtLib | 3 | python | def clear_options(self):
'\n \n '
if (not self._project_data):
return
self._project_data.clear_options() | def clear_options(self):
'\n \n '
if (not self._project_data):
return
self._project_data.clear_options()<|docstring|>Clears all the options<|endoftext|> |
3ddf8042df310693f579eaa11f2ef0e5ebf699b4549a3c02d779ecc0415dc64d | def load_project_data(self):
'\n Return dictionary data contained in the project\n :return: dict\n '
if (not self.settings):
return
return self.settings.data() | Return dictionary data contained in the project
:return: dict | tpDcc/libs/qt/widgets/project.py | load_project_data | tpDcc/tpQtLib | 3 | python | def load_project_data(self):
'\n Return dictionary data contained in the project\n :return: dict\n '
if (not self.settings):
return
return self.settings.data() | def load_project_data(self):
'\n Return dictionary data contained in the project\n :return: dict\n '
if (not self.settings):
return
return self.settings.data()<|docstring|>Return dictionary data contained in the project
:return: dict<|endoftext|> |
ea6ddf62ebf21e2bac944aa7212a49d8f5d73e454be6545e84a73fe2f332503c | def get_project_nodes(self):
'\n Returns path where nodes should be stored\n :return: str\n '
return [os.path.join(self.full_path, 'nodes'), os.path.join(self.full_path, 'components')] | Returns path where nodes should be stored
:return: str | tpDcc/libs/qt/widgets/project.py | get_project_nodes | tpDcc/tpQtLib | 3 | python | def get_project_nodes(self):
'\n Returns path where nodes should be stored\n :return: str\n '
return [os.path.join(self.full_path, 'nodes'), os.path.join(self.full_path, 'components')] | def get_project_nodes(self):
'\n Returns path where nodes should be stored\n :return: str\n '
return [os.path.join(self.full_path, 'nodes'), os.path.join(self.full_path, 'components')]<|docstring|>Returns path where nodes should be stored
:return: str<|endoftext|> |
c84dd4a76c497bfdeda475f47205d1da9d5420065f785fb900e244030b872543 | def get_options(self):
'\n Returns all options contained in the project\n :return: str\n '
return self._project_data.get_options() | Returns all options contained in the project
:return: str | tpDcc/libs/qt/widgets/project.py | get_options | tpDcc/tpQtLib | 3 | python | def get_options(self):
'\n Returns all options contained in the project\n :return: str\n '
return self._project_data.get_options() | def get_options(self):
'\n Returns all options contained in the project\n :return: str\n '
return self._project_data.get_options()<|docstring|>Returns all options contained in the project
:return: str<|endoftext|> |
c20c48dbcbf747976c042929c50f74db354bc0bf3360885ad34440424eac35f6 | def get_project_image(self):
'\n Returns the image used by the project\n :return: QPixmap\n '
return self._project_data.get_project_image() | Returns the image used by the project
:return: QPixmap | tpDcc/libs/qt/widgets/project.py | get_project_image | tpDcc/tpQtLib | 3 | python | def get_project_image(self):
'\n Returns the image used by the project\n :return: QPixmap\n '
return self._project_data.get_project_image() | def get_project_image(self):
'\n Returns the image used by the project\n :return: QPixmap\n '
return self._project_data.get_project_image()<|docstring|>Returns the image used by the project
:return: QPixmap<|endoftext|> |
ba6610c63e6bdae7764e1d6228026b2091716af9350e3053f1fd2501335040da | def _on_open_project(self):
'\n Internal callback function that is called when a project is opened\n '
LOGGER.info('Loading project "{}" ...'.format(self.full_path))
self.projectOpened.emit(self) | Internal callback function that is called when a project is opened | tpDcc/libs/qt/widgets/project.py | _on_open_project | tpDcc/tpQtLib | 3 | python | def _on_open_project(self):
'\n \n '
LOGGER.info('Loading project "{}" ...'.format(self.full_path))
self.projectOpened.emit(self) | def _on_open_project(self):
'\n \n '
LOGGER.info('Loading project "{}" ...'.format(self.full_path))
self.projectOpened.emit(self)<|docstring|>Internal callback function that is called when a project is opened<|endoftext|> |
9a8504b5c3f3e48f00fb1498ffbef0518fd69e1ac8dfa5ec4c6dadb2c179983a | def _on_remove_project(self):
'\n Internal callback function that is called when a project is removed\n '
valid_remove = self.remove()
if valid_remove:
self.projectRemoved.emit(self.name) | Internal callback function that is called when a project is removed | tpDcc/libs/qt/widgets/project.py | _on_remove_project | tpDcc/tpQtLib | 3 | python | def _on_remove_project(self):
'\n \n '
valid_remove = self.remove()
if valid_remove:
self.projectRemoved.emit(self.name) | def _on_remove_project(self):
'\n \n '
valid_remove = self.remove()
if valid_remove:
self.projectRemoved.emit(self.name)<|docstring|>Internal callback function that is called when a project is removed<|endoftext|> |
9a4d75a3411f35a7d9c362c044d79019508002467f1c22a40bab4bee65ea99e8 | def _on_open_in_browser(self):
'\n Internal callback function that is called when a project is browsed\n '
fileio.open_browser(self.full_path) | Internal callback function that is called when a project is browsed | tpDcc/libs/qt/widgets/project.py | _on_open_in_browser | tpDcc/tpQtLib | 3 | python | def _on_open_in_browser(self):
'\n \n '
fileio.open_browser(self.full_path) | def _on_open_in_browser(self):
'\n \n '
fileio.open_browser(self.full_path)<|docstring|>Internal callback function that is called when a project is browsed<|endoftext|> |
5f809e65980d48374abd6d0cc3e3a9bfbbb232d4fc5c74ef4ccd2cf9bba3c6dc | def _on_set_project_image(self):
'\n Internal callback function that is called when project image is set\n '
image_file = dcc.select_file_dialog(title='Select Project Image File', pattern='PNG Files (*.png)')
if ((image_file is None) or (not path.is_file(image_file))):
LOGGER.warning('Selected Image "{}" is not valid!'.format(image_file))
return
valid_change = self._project_data.set_project_image(image_file)
if valid_change:
project_image = self._project_data.settings.get('image')
if project_image:
self.set_image(project_image)
self.projectImageChanged.emit(image_file) | Internal callback function that is called when project image is set | tpDcc/libs/qt/widgets/project.py | _on_set_project_image | tpDcc/tpQtLib | 3 | python | def _on_set_project_image(self):
'\n \n '
image_file = dcc.select_file_dialog(title='Select Project Image File', pattern='PNG Files (*.png)')
if ((image_file is None) or (not path.is_file(image_file))):
LOGGER.warning('Selected Image "{}" is not valid!'.format(image_file))
return
valid_change = self._project_data.set_project_image(image_file)
if valid_change:
project_image = self._project_data.settings.get('image')
if project_image:
self.set_image(project_image)
self.projectImageChanged.emit(image_file) | def _on_set_project_image(self):
'\n \n '
image_file = dcc.select_file_dialog(title='Select Project Image File', pattern='PNG Files (*.png)')
if ((image_file is None) or (not path.is_file(image_file))):
LOGGER.warning('Selected Image "{}" is not valid!'.format(image_file))
return
valid_change = self._project_data.set_project_image(image_file)
if valid_change:
project_image = self._project_data.settings.get('image')
if project_image:
self.set_image(project_image)
self.projectImageChanged.emit(image_file)<|docstring|>Internal callback function that is called when project image is set<|endoftext|> |
a3957d838be99a80a8fd28ae68c2d077bcc07edefc61573d80d20bc7e6e97cc6 | def get_projects_list(self):
'\n Returns projects list widget\n :return: ProjectViewer\n '
return self._projects_list | Returns projects list widget
:return: ProjectViewer | tpDcc/libs/qt/widgets/project.py | get_projects_list | tpDcc/tpQtLib | 3 | python | def get_projects_list(self):
'\n Returns projects list widget\n :return: ProjectViewer\n '
return self._projects_list | def get_projects_list(self):
'\n Returns projects list widget\n :return: ProjectViewer\n '
return self._projects_list<|docstring|>Returns projects list widget
:return: ProjectViewer<|endoftext|> |
40bf891bcb22feebc141277a6c617aa6e015667ca7a52c3767c4d9899b1174be | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._projects_list.set_projects_path(self._projects_path)
self._update_ui()
self.projectsPathChanged.emit(self._projects_path) | Sets the path where we want to search for projects
:param projects_path: str | tpDcc/libs/qt/widgets/project.py | set_projects_path | tpDcc/tpQtLib | 3 | python | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._projects_list.set_projects_path(self._projects_path)
self._update_ui()
self.projectsPathChanged.emit(self._projects_path) | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._projects_list.set_projects_path(self._projects_path)
self._update_ui()
self.projectsPathChanged.emit(self._projects_path)<|docstring|>Sets the path where we want to search for projects
:param projects_path: str<|endoftext|> |
3dddd5ab646b6a50fb570b6e8c03563eefb6cd4749c49cd053c35cc2d7cbdc84 | def update_projects(self):
'\n Updates all available projects\n '
self._projects_list.update_projects() | Updates all available projects | tpDcc/libs/qt/widgets/project.py | update_projects | tpDcc/tpQtLib | 3 | python | def update_projects(self):
'\n \n '
self._projects_list.update_projects() | def update_projects(self):
'\n \n '
self._projects_list.update_projects()<|docstring|>Updates all available projects<|endoftext|> |
9c8e4a8ec91630ab120e8fd63b62d9f3560dceae954fb891df6f5051720cc2a9 | def _update_ui(self):
'\n Internal function that updates UI\n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._browse_widget.set_directory(directory=self._projects_path)
self.update_projects() | Internal function that updates UI | tpDcc/libs/qt/widgets/project.py | _update_ui | tpDcc/tpQtLib | 3 | python | def _update_ui(self):
'\n \n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._browse_widget.set_directory(directory=self._projects_path)
self.update_projects() | def _update_ui(self):
'\n \n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._browse_widget.set_directory(directory=self._projects_path)
self.update_projects()<|docstring|>Internal function that updates UI<|endoftext|> |
865ddd0e7f11288dbcefa601054de148fd61500910c85fc92a00f4ddd4ca1520 | def _on_search_project(self, project_text):
'\n Internal callback function that is called when the user types in the search projects filter\n :param project_text: str\n '
for project in self._projects_list.get_widgets():
project.setVisible((project_text.lower() in project.name.lower())) | Internal callback function that is called when the user types in the search projects filter
:param project_text: str | tpDcc/libs/qt/widgets/project.py | _on_search_project | tpDcc/tpQtLib | 3 | python | def _on_search_project(self, project_text):
'\n Internal callback function that is called when the user types in the search projects filter\n :param project_text: str\n '
for project in self._projects_list.get_widgets():
project.setVisible((project_text.lower() in project.name.lower())) | def _on_search_project(self, project_text):
'\n Internal callback function that is called when the user types in the search projects filter\n :param project_text: str\n '
for project in self._projects_list.get_widgets():
project.setVisible((project_text.lower() in project.name.lower()))<|docstring|>Internal callback function that is called when the user types in the search projects filter
:param project_text: str<|endoftext|> |
ca35c573b3fff9aed4fc42c5351f0f8ecae112b7590c9b211d5e76c306b5ed74 | def _on_directory_browsed(self, projects_path):
'\n Internal callback function that is triggered when the user browses a new projects path\n :param projects_path: str\n '
if ((not projects_path) or (not path.is_dir(projects_path))):
return
self.set_projects_path(projects_path)
self._update_ui() | Internal callback function that is triggered when the user browses a new projects path
:param projects_path: str | tpDcc/libs/qt/widgets/project.py | _on_directory_browsed | tpDcc/tpQtLib | 3 | python | def _on_directory_browsed(self, projects_path):
'\n Internal callback function that is triggered when the user browses a new projects path\n :param projects_path: str\n '
if ((not projects_path) or (not path.is_dir(projects_path))):
return
self.set_projects_path(projects_path)
self._update_ui() | def _on_directory_browsed(self, projects_path):
'\n Internal callback function that is triggered when the user browses a new projects path\n :param projects_path: str\n '
if ((not projects_path) or (not path.is_dir(projects_path))):
return
self.set_projects_path(projects_path)
self._update_ui()<|docstring|>Internal callback function that is triggered when the user browses a new projects path
:param projects_path: str<|endoftext|> |
344c847f77279c27e04f14ef5c90c05e0e24d5e020267f5986d2aa167379591c | def set_projects_path(self, projects_path):
'\n Set the path where projects are located\n '
self._projects_path = projects_path
self._update_ui() | Set the path where projects are located | tpDcc/libs/qt/widgets/project.py | set_projects_path | tpDcc/tpQtLib | 3 | python | def set_projects_path(self, projects_path):
'\n \n '
self._projects_path = projects_path
self._update_ui() | def set_projects_path(self, projects_path):
'\n \n '
self._projects_path = projects_path
self._update_ui()<|docstring|>Set the path where projects are located<|endoftext|> |
41ce9c6d95a35b82c9887486f8e1ed5047b8aaf7005a52d9fe9d466dcf71720e | def _update_ui(self):
'\n Update UI based on the stored settings if exists\n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._project_line.setText(self._projects_path)
self._project_btn.init_directory = self._projects_path | Update UI based on the stored settings if exists | tpDcc/libs/qt/widgets/project.py | _update_ui | tpDcc/tpQtLib | 3 | python | def _update_ui(self):
'\n \n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._project_line.setText(self._projects_path)
self._project_btn.init_directory = self._projects_path | def _update_ui(self):
'\n \n '
if ((not self._projects_path) or (not os.path.isdir(self._projects_path))):
return
self._project_line.setText(self._projects_path)
self._project_btn.init_directory = self._projects_path<|docstring|>Update UI based on the stored settings if exists<|endoftext|> |
d435e9368bcd60c2d9c8621c98d703dfa3faa537265a42b26d54271a29071449 | def set_projects_path(self, projects_path):
'\n Set the path where projects are located\n :param projects_path: str\n '
self._projects_path = projects_path
self.update_projects() | Set the path where projects are located
:param projects_path: str | tpDcc/libs/qt/widgets/project.py | set_projects_path | tpDcc/tpQtLib | 3 | python | def set_projects_path(self, projects_path):
'\n Set the path where projects are located\n :param projects_path: str\n '
self._projects_path = projects_path
self.update_projects() | def set_projects_path(self, projects_path):
'\n Set the path where projects are located\n :param projects_path: str\n '
self._projects_path = projects_path
self.update_projects()<|docstring|>Set the path where projects are located
:param projects_path: str<|endoftext|> |
bcb2108f2fee9265a1eeed9719637826e08f5ef59d2a00464273c46a20a3b62e | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._new_project.set_projects_path(projects_path)
self._open_project.set_projects_path(projects_path) | Sets the path where we want to search for projects
:param projects_path: str | tpDcc/libs/qt/widgets/project.py | set_projects_path | tpDcc/tpQtLib | 3 | python | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._new_project.set_projects_path(projects_path)
self._open_project.set_projects_path(projects_path) | def set_projects_path(self, projects_path):
'\n Sets the path where we want to search for projects\n :param projects_path: str\n '
self._projects_path = projects_path
self._new_project.set_projects_path(projects_path)
self._open_project.set_projects_path(projects_path)<|docstring|>Sets the path where we want to search for projects
:param projects_path: str<|endoftext|> |
29e3dfc3f97d5140d73873f13f5ee79122cfe722bb2ff1ce349e8d228157011a | def get_project_by_name(self, project_name, force_update=True):
'\n Returns project by its name\n :param project_name: str\n :param force_update: bool\n :return: Project\n '
if force_update:
self._open_project.get_projects_list().update_projects()
projects_list = self._open_project.get_projects_list()
return projects_list.get_project_by_name(project_name) | Returns project by its name
:param project_name: str
:param force_update: bool
:return: Project | tpDcc/libs/qt/widgets/project.py | get_project_by_name | tpDcc/tpQtLib | 3 | python | def get_project_by_name(self, project_name, force_update=True):
'\n Returns project by its name\n :param project_name: str\n :param force_update: bool\n :return: Project\n '
if force_update:
self._open_project.get_projects_list().update_projects()
projects_list = self._open_project.get_projects_list()
return projects_list.get_project_by_name(project_name) | def get_project_by_name(self, project_name, force_update=True):
'\n Returns project by its name\n :param project_name: str\n :param force_update: bool\n :return: Project\n '
if force_update:
self._open_project.get_projects_list().update_projects()
projects_list = self._open_project.get_projects_list()
return projects_list.get_project_by_name(project_name)<|docstring|>Returns project by its name
:param project_name: str
:param force_update: bool
:return: Project<|endoftext|> |
3dc848747574958f35bcdc56ee7154daf18fae193d1d505c9e557892d66bace2 | def open_project(self, project_name):
'\n Opens project with given name\n :param project_name: str\n :return: Project\n '
project_found = self.get_project_by_name(project_name)
if project_found:
project_found.open()
return project_found | Opens project with given name
:param project_name: str
:return: Project | tpDcc/libs/qt/widgets/project.py | open_project | tpDcc/tpQtLib | 3 | python | def open_project(self, project_name):
'\n Opens project with given name\n :param project_name: str\n :return: Project\n '
project_found = self.get_project_by_name(project_name)
if project_found:
project_found.open()
return project_found | def open_project(self, project_name):
'\n Opens project with given name\n :param project_name: str\n :return: Project\n '
project_found = self.get_project_by_name(project_name)
if project_found:
project_found.open()
return project_found<|docstring|>Opens project with given name
:param project_name: str
:return: Project<|endoftext|> |
c410f47d93421ecb9fb3059b6de33528f529b392c80abe75bf73227ebbc0001e | def create_app(config_name):
'Application factory, see docs_.\n\n .. _docs: http://flask.pocoo.org/docs/0.10/patterns/appfactories/\n '
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
migrate.init_app(app, db)
moment.init_app(app)
sentry.init_app(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app | Application factory, see docs_.
.. _docs: http://flask.pocoo.org/docs/0.10/patterns/appfactories/ | web/app/__init__.py | create_app | Limpan/bytardag | 0 | python | def create_app(config_name):
'Application factory, see docs_.\n\n .. _docs: http://flask.pocoo.org/docs/0.10/patterns/appfactories/\n '
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
migrate.init_app(app, db)
moment.init_app(app)
sentry.init_app(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app | def create_app(config_name):
'Application factory, see docs_.\n\n .. _docs: http://flask.pocoo.org/docs/0.10/patterns/appfactories/\n '
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
migrate.init_app(app, db)
moment.init_app(app)
sentry.init_app(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app<|docstring|>Application factory, see docs_.
.. _docs: http://flask.pocoo.org/docs/0.10/patterns/appfactories/<|endoftext|> |
6663c1fd9323df3158cef290d231c38e411ae05b298bd91e79d03b9250d0f74b | def p2():
"with open('logfile1.csv','w',newline='') as csvfile:\n\t\tfieldnames = ['frame_number', 'look_away', 'look_screen']\n\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\twhile True:\n\t\t\twriter.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})\n\t\t\ttime.sleep()\n\t"
while True:
log(glob[0], glob[1], glob[2])
time.sleep(0.05) | with open('logfile1.csv','w',newline='') as csvfile:
fieldnames = ['frame_number', 'look_away', 'look_screen']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
while True:
writer.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})
time.sleep() | runexam.py | p2 | AadityaDeshpande/ExamSurveillance | 0 | python | def p2():
"with open('logfile1.csv','w',newline=) as csvfile:\n\t\tfieldnames = ['frame_number', 'look_away', 'look_screen']\n\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\twhile True:\n\t\t\twriter.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})\n\t\t\ttime.sleep()\n\t"
while True:
log(glob[0], glob[1], glob[2])
time.sleep(0.05) | def p2():
"with open('logfile1.csv','w',newline=) as csvfile:\n\t\tfieldnames = ['frame_number', 'look_away', 'look_screen']\n\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\twhile True:\n\t\t\twriter.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})\n\t\t\ttime.sleep()\n\t"
while True:
log(glob[0], glob[1], glob[2])
time.sleep(0.05)<|docstring|>with open('logfile1.csv','w',newline='') as csvfile:
fieldnames = ['frame_number', 'look_away', 'look_screen']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
while True:
writer.writerow({'frame_number': int(glob[0]), 'look_away': int(glob[1]), 'look_screen': int(glob[2])})
time.sleep()<|endoftext|> |
e4882fdd94385ab51267e55d2a0ac00e1234379e80f54bc4e6952afb52b9759c | @click.group('source', short_help='Manage font sources')
def cli_source():
'Manage font sources.'
pass | Manage font sources. | fonty/commands/source.py | cli_source | jamesssooi/font-cli | 12 | python | @click.group('source', short_help='Manage font sources')
def cli_source():
pass | @click.group('source', short_help='Manage font sources')
def cli_source():
pass<|docstring|>Manage font sources.<|endoftext|> |
788d5bf7be3adc84ad2b976f1b1134ec89a65483819510121e65e0ba9d1ef5b7 | @cli_source.command(short_help='Add a new source')
@click.argument('url')
def add(url):
'Add a new source.'
start_time = timeit.default_timer()
task = Task("Loading '{}'...".format(colored(url, COLOR_INPUT)))
sub = Subscription.load_from_url(url).subscribe()
repo = sub.get_local_repository()
task.complete("Loaded '{}'".format(colored(repo.name, COLOR_INPUT)))
task = Task("Indexing {count} font families in '{repo}'".format(count=len(repo.families), repo=colored(repo.name, COLOR_INPUT)))
search.index_fonts(repo, sub.local_path)
task.complete('Indexed {count} new font families'.format(count=colored(len(repo.families), COLOR_INPUT)))
print('')
sub.pprint(output=True)
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, event_type=TelemetryEventTypes.SOURCE_ADD, execution_time=total_time).send() | Add a new source. | fonty/commands/source.py | add | jamesssooi/font-cli | 12 | python | @cli_source.command(short_help='Add a new source')
@click.argument('url')
def add(url):
start_time = timeit.default_timer()
task = Task("Loading '{}'...".format(colored(url, COLOR_INPUT)))
sub = Subscription.load_from_url(url).subscribe()
repo = sub.get_local_repository()
task.complete("Loaded '{}'".format(colored(repo.name, COLOR_INPUT)))
task = Task("Indexing {count} font families in '{repo}'".format(count=len(repo.families), repo=colored(repo.name, COLOR_INPUT)))
search.index_fonts(repo, sub.local_path)
task.complete('Indexed {count} new font families'.format(count=colored(len(repo.families), COLOR_INPUT)))
print()
sub.pprint(output=True)
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, event_type=TelemetryEventTypes.SOURCE_ADD, execution_time=total_time).send() | @cli_source.command(short_help='Add a new source')
@click.argument('url')
def add(url):
start_time = timeit.default_timer()
task = Task("Loading '{}'...".format(colored(url, COLOR_INPUT)))
sub = Subscription.load_from_url(url).subscribe()
repo = sub.get_local_repository()
task.complete("Loaded '{}'".format(colored(repo.name, COLOR_INPUT)))
task = Task("Indexing {count} font families in '{repo}'".format(count=len(repo.families), repo=colored(repo.name, COLOR_INPUT)))
search.index_fonts(repo, sub.local_path)
task.complete('Indexed {count} new font families'.format(count=colored(len(repo.families), COLOR_INPUT)))
print()
sub.pprint(output=True)
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, event_type=TelemetryEventTypes.SOURCE_ADD, execution_time=total_time).send()<|docstring|>Add a new source.<|endoftext|> |
fc61c01f239578c5f164c0b2253ee1b601f27ddd688718625d9d8931a744696a | @cli_source.command(short_help='Remove a source')
@click.argument('identifier', nargs=(- 1))
@click.pass_context
def remove(ctx, identifier: str):
'Remove a source.'
start_time = timeit.default_timer()
identifier = ' '.join((str(x) for x in identifier))
if (not identifier):
click.echo(ctx.get_help())
sys.exit(1)
task = Task("Looking for '{}'".format(colored(identifier, COLOR_INPUT)))
sub = Subscription.get(identifier)
if (sub is None):
time.sleep(0.3)
task.error("No subscriptions found with '{}'".format(colored(identifier, COLOR_INPUT)))
sys.exit(1)
task.message = "Unsubscribing '{}'".format(colored(sub.name, COLOR_INPUT))
sub.unsubscribe()
task.complete("Unsubscribed from '{}'".format(colored(sub.name, COLOR_INPUT)))
task = Task('Reindexing fonts...')
count = search.unindex_fonts(sub.local_path)
task.complete('Removed {} font families from index'.format(colored(str(count), 'cyan')))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_REMOVE).send() | Remove a source. | fonty/commands/source.py | remove | jamesssooi/font-cli | 12 | python | @cli_source.command(short_help='Remove a source')
@click.argument('identifier', nargs=(- 1))
@click.pass_context
def remove(ctx, identifier: str):
start_time = timeit.default_timer()
identifier = ' '.join((str(x) for x in identifier))
if (not identifier):
click.echo(ctx.get_help())
sys.exit(1)
task = Task("Looking for '{}'".format(colored(identifier, COLOR_INPUT)))
sub = Subscription.get(identifier)
if (sub is None):
time.sleep(0.3)
task.error("No subscriptions found with '{}'".format(colored(identifier, COLOR_INPUT)))
sys.exit(1)
task.message = "Unsubscribing '{}'".format(colored(sub.name, COLOR_INPUT))
sub.unsubscribe()
task.complete("Unsubscribed from '{}'".format(colored(sub.name, COLOR_INPUT)))
task = Task('Reindexing fonts...')
count = search.unindex_fonts(sub.local_path)
task.complete('Removed {} font families from index'.format(colored(str(count), 'cyan')))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_REMOVE).send() | @cli_source.command(short_help='Remove a source')
@click.argument('identifier', nargs=(- 1))
@click.pass_context
def remove(ctx, identifier: str):
start_time = timeit.default_timer()
identifier = ' '.join((str(x) for x in identifier))
if (not identifier):
click.echo(ctx.get_help())
sys.exit(1)
task = Task("Looking for '{}'".format(colored(identifier, COLOR_INPUT)))
sub = Subscription.get(identifier)
if (sub is None):
time.sleep(0.3)
task.error("No subscriptions found with '{}'".format(colored(identifier, COLOR_INPUT)))
sys.exit(1)
task.message = "Unsubscribing '{}'".format(colored(sub.name, COLOR_INPUT))
sub.unsubscribe()
task.complete("Unsubscribed from '{}'".format(colored(sub.name, COLOR_INPUT)))
task = Task('Reindexing fonts...')
count = search.unindex_fonts(sub.local_path)
task.complete('Removed {} font families from index'.format(colored(str(count), 'cyan')))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_REMOVE).send()<|docstring|>Remove a source.<|endoftext|> |
e06b1774f776d1e5fef49bf45d29904557e918d7db4e5661b62a9ad5281e446a | @cli_source.command(name='list', short_help='List subscribed sources')
def list_():
'List all subscribed sources.'
start_time = timeit.default_timer()
subscriptions = Subscription.load_entries()
count = 1
for sub in subscriptions:
s = sub.pprint(join=False)
count_str = '[{}] '.format(count)
s[0] = (count_str + s[0])
INDENT_COUNT = len(count_str)
for i in range(1, len(s)):
s[i] = ((' ' * INDENT_COUNT) + s[i])
click.echo('\n'.join(s))
click.echo('')
count += 1
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_LIST).send() | List all subscribed sources. | fonty/commands/source.py | list_ | jamesssooi/font-cli | 12 | python | @cli_source.command(name='list', short_help='List subscribed sources')
def list_():
start_time = timeit.default_timer()
subscriptions = Subscription.load_entries()
count = 1
for sub in subscriptions:
s = sub.pprint(join=False)
count_str = '[{}] '.format(count)
s[0] = (count_str + s[0])
INDENT_COUNT = len(count_str)
for i in range(1, len(s)):
s[i] = ((' ' * INDENT_COUNT) + s[i])
click.echo('\n'.join(s))
click.echo()
count += 1
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_LIST).send() | @cli_source.command(name='list', short_help='List subscribed sources')
def list_():
start_time = timeit.default_timer()
subscriptions = Subscription.load_entries()
count = 1
for sub in subscriptions:
s = sub.pprint(join=False)
count_str = '[{}] '.format(count)
s[0] = (count_str + s[0])
INDENT_COUNT = len(count_str)
for i in range(1, len(s)):
s[i] = ((' ' * INDENT_COUNT) + s[i])
click.echo('\n'.join(s))
click.echo()
count += 1
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_LIST).send()<|docstring|>List all subscribed sources.<|endoftext|> |
38171cb4047bb0bb15aa428d22a8b43bf66465aa036763f222132c59a80b1cbe | @cli_source.command(short_help='Check sources for updates')
@click.option('--force', '-f', is_flag=True, help='Force all sources to update.')
def update(force: bool):
'Check sources for updates.'
start_time = timeit.default_timer()
if force:
shutil.rmtree(SEARCH_INDEX_PATH)
subscriptions = Subscription.load_entries()
if (not subscriptions):
click.echo('No sources to update')
for sub in subscriptions:
name = colored(sub.get_local_repository().name, COLOR_INPUT)
task = Task("Updating '{}'".format(name))
(sub, has_changes) = sub.fetch()
if ((not has_changes) and (not force)):
task.complete("No updates available for '{}'".format(name))
continue
task.message = "Indexing '{}'".format(name)
updated_repo = sub.get_local_repository()
search.index_fonts(updated_repo, sub.local_path)
task.complete("Updated '{}'".format(name))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_UPDATE).send() | Check sources for updates. | fonty/commands/source.py | update | jamesssooi/font-cli | 12 | python | @cli_source.command(short_help='Check sources for updates')
@click.option('--force', '-f', is_flag=True, help='Force all sources to update.')
def update(force: bool):
start_time = timeit.default_timer()
if force:
shutil.rmtree(SEARCH_INDEX_PATH)
subscriptions = Subscription.load_entries()
if (not subscriptions):
click.echo('No sources to update')
for sub in subscriptions:
name = colored(sub.get_local_repository().name, COLOR_INPUT)
task = Task("Updating '{}'".format(name))
(sub, has_changes) = sub.fetch()
if ((not has_changes) and (not force)):
task.complete("No updates available for '{}'".format(name))
continue
task.message = "Indexing '{}'".format(name)
updated_repo = sub.get_local_repository()
search.index_fonts(updated_repo, sub.local_path)
task.complete("Updated '{}'".format(name))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_UPDATE).send() | @cli_source.command(short_help='Check sources for updates')
@click.option('--force', '-f', is_flag=True, help='Force all sources to update.')
def update(force: bool):
start_time = timeit.default_timer()
if force:
shutil.rmtree(SEARCH_INDEX_PATH)
subscriptions = Subscription.load_entries()
if (not subscriptions):
click.echo('No sources to update')
for sub in subscriptions:
name = colored(sub.get_local_repository().name, COLOR_INPUT)
task = Task("Updating '{}'".format(name))
(sub, has_changes) = sub.fetch()
if ((not has_changes) and (not force)):
task.complete("No updates available for '{}'".format(name))
continue
task.message = "Indexing '{}'".format(name)
updated_repo = sub.get_local_repository()
search.index_fonts(updated_repo, sub.local_path)
task.complete("Updated '{}'".format(name))
end_time = timeit.default_timer()
total_time = round((end_time - start_time), 2)
TelemetryEvent(status_code=0, execution_time=total_time, event_type=TelemetryEventTypes.SOURCE_UPDATE).send()<|docstring|>Check sources for updates.<|endoftext|> |
04d65b69de5ffb1d19fa452e69b523b89f1efc54103b6c8dd16560286eaad55f | def mergeTwoLists(self, l1, l2):
'\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n '
head = dummy = ListNode((- 1))
while (l1 and l2):
if (l1.val < l2.val):
head.next = l1
l1 = l1.next
else:
head.next = l2
l2 = l2.next
head = head.next
if l1:
head.next = l1
if l2:
head.next = l2
return dummy.next | :type l1: ListNode
:type l2: ListNode
:rtype: ListNode | linkedlist/merge-two-sorted-lists.py | mergeTwoLists | Neulana/leetcode | 2 | python | def mergeTwoLists(self, l1, l2):
'\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n '
head = dummy = ListNode((- 1))
while (l1 and l2):
if (l1.val < l2.val):
head.next = l1
l1 = l1.next
else:
head.next = l2
l2 = l2.next
head = head.next
if l1:
head.next = l1
if l2:
head.next = l2
return dummy.next | def mergeTwoLists(self, l1, l2):
'\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n '
head = dummy = ListNode((- 1))
while (l1 and l2):
if (l1.val < l2.val):
head.next = l1
l1 = l1.next
else:
head.next = l2
l2 = l2.next
head = head.next
if l1:
head.next = l1
if l2:
head.next = l2
return dummy.next<|docstring|>:type l1: ListNode
:type l2: ListNode
:rtype: ListNode<|endoftext|> |
b88ef89bdfdf47d218683a2d6bbbd161dd8dff1c91c9048a0d7f581520dc6383 | def get_cpu_temp():
'\n CPU temperature should be below 80C for normal operation.\n '
celcius = None
temp = '/sys/devices/virtual/thermal/thermal_zone0/temp'
if os.path.exists(temp):
celcius = (int(open(temp).read().strip()) / 1000)
return celcius | CPU temperature should be below 80C for normal operation. | lcd.py | get_cpu_temp | KoenVingerhoets/bitnodes-hardware | 45 | python | def get_cpu_temp():
'\n \n '
celcius = None
temp = '/sys/devices/virtual/thermal/thermal_zone0/temp'
if os.path.exists(temp):
celcius = (int(open(temp).read().strip()) / 1000)
return celcius | def get_cpu_temp():
'\n \n '
celcius = None
temp = '/sys/devices/virtual/thermal/thermal_zone0/temp'
if os.path.exists(temp):
celcius = (int(open(temp).read().strip()) / 1000)
return celcius<|docstring|>CPU temperature should be below 80C for normal operation.<|endoftext|> |
d498fb891c776ecfcce59cc9798a95da85df2127dbbb3bf25f56c79a061df3f3 | def test_0_registration_success(self):
'\n Registration success\n '
data = {'username': 'john', 'email': 'example@example.com', 'password1': 'password123!', 'password2': 'password123!', 'password': 'password123!'}
response = self.client.get(reverse('registration_register'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('registration_register'), data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email=data['email'])
self.assertEqual(user.email, data['email']) | Registration success | bedjango_tailor/users/tests/test_user.py | test_0_registration_success | vapordecachoeira/tailor | 58 | python | def test_0_registration_success(self):
'\n \n '
data = {'username': 'john', 'email': 'example@example.com', 'password1': 'password123!', 'password2': 'password123!', 'password': 'password123!'}
response = self.client.get(reverse('registration_register'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('registration_register'), data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email=data['email'])
self.assertEqual(user.email, data['email']) | def test_0_registration_success(self):
'\n \n '
data = {'username': 'john', 'email': 'example@example.com', 'password1': 'password123!', 'password2': 'password123!', 'password': 'password123!'}
response = self.client.get(reverse('registration_register'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('registration_register'), data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email=data['email'])
self.assertEqual(user.email, data['email'])<|docstring|>Registration success<|endoftext|> |
d954001dd7031fac4f5ac018d9ff8f721291698c2f1c8aa3e62d1676b7118725 | def test_1_login_success(self):
'\n Login success\n '
user = {'email': 'example@example.com', 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['user'].is_authenticated())
user = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['user'].is_authenticated()) | Login success | bedjango_tailor/users/tests/test_user.py | test_1_login_success | vapordecachoeira/tailor | 58 | python | def test_1_login_success(self):
'\n \n '
user = {'email': 'example@example.com', 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['user'].is_authenticated())
user = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['user'].is_authenticated()) | def test_1_login_success(self):
'\n \n '
user = {'email': 'example@example.com', 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['user'].is_authenticated())
user = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), user, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['user'].is_authenticated())<|docstring|>Login success<|endoftext|> |
04636f48bded0f6a09bdf9c17571722942b55a8ab553ff38f7bec2990f447bb0 | def test_2_logout_success(self):
'\n Logout success\n '
self.test_1_login_success()
response = self.client.post(reverse('logout'), self.test_user, follow=True)
self.assertFalse(response.context['user'].is_authenticated()) | Logout success | bedjango_tailor/users/tests/test_user.py | test_2_logout_success | vapordecachoeira/tailor | 58 | python | def test_2_logout_success(self):
'\n \n '
self.test_1_login_success()
response = self.client.post(reverse('logout'), self.test_user, follow=True)
self.assertFalse(response.context['user'].is_authenticated()) | def test_2_logout_success(self):
'\n \n '
self.test_1_login_success()
response = self.client.post(reverse('logout'), self.test_user, follow=True)
self.assertFalse(response.context['user'].is_authenticated())<|docstring|>Logout success<|endoftext|> |
e5d4eb5fc8c8bf4c156e6abb98073e315060efbba2243324e5f8f7aad4accf78 | def test_3_home(self):
'\n Test for home view\n '
self.test_1_login_success()
response = self.client.get(reverse('home'), self.test_user, follow=True)
self.assertEqual(response.status_code, 200) | Test for home view | bedjango_tailor/users/tests/test_user.py | test_3_home | vapordecachoeira/tailor | 58 | python | def test_3_home(self):
'\n \n '
self.test_1_login_success()
response = self.client.get(reverse('home'), self.test_user, follow=True)
self.assertEqual(response.status_code, 200) | def test_3_home(self):
'\n \n '
self.test_1_login_success()
response = self.client.get(reverse('home'), self.test_user, follow=True)
self.assertEqual(response.status_code, 200)<|docstring|>Test for home view<|endoftext|> |
3ca1ea36a6079f994213a29e37a2a5abb7ddef169bab7e2861c290ba1bf85cfc | def test_4_login_failed(self):
'\n Test login failed\n '
data = {'email': self.test_user['email'], 'password': 'failed'}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
data['email'] = 'example@example.com'
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
user = User.objects.get(email=data_user['email'])
user.is_active = False
user.save()
data = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated()) | Test login failed | bedjango_tailor/users/tests/test_user.py | test_4_login_failed | vapordecachoeira/tailor | 58 | python | def test_4_login_failed(self):
'\n \n '
data = {'email': self.test_user['email'], 'password': 'failed'}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
data['email'] = 'example@example.com'
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
user = User.objects.get(email=data_user['email'])
user.is_active = False
user.save()
data = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated()) | def test_4_login_failed(self):
'\n \n '
data = {'email': self.test_user['email'], 'password': 'failed'}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
data['email'] = 'example@example.com'
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())
user = User.objects.get(email=data_user['email'])
user.is_active = False
user.save()
data = {'email': self.test_user['email'], 'password': self.test_user['password']}
response = self.client.post(reverse('index'), data, follow=True)
self.assertFalse(response.context['user'].is_authenticated())<|docstring|>Test login failed<|endoftext|> |
2c72e4b5bcbcb78e15d71a328928c74a980d8b32bc9dd41a650f5c3feb51cb8a | def cycloid(loops=4, ofst=(0, 0), a=0.06, b=0.19, norm=True, vertical=False, flip=False):
' Generate a prolate cycloid (inductor spiral) that\n will always start and end at y=0.\n\n Parameters\n ----------\n loops : int\n Number of loops\n a, b : float\n Parameters. b>a for prolate (loopy) cycloid\n norm : bool\n Normalize the length to 1\n vertical, flip : bool\n Control the orientation of cycloid\n\n Returns\n -------\n path : array\n List of [x, y] coordinates defining the cycloid\n '
yint = np.arccos((a / b))
t = np.linspace(yint, (((2 * (loops + 1)) * np.pi) - yint), num=(loops * 50))
x = ((a * t) - (b * np.sin(t)))
y = (a - (b * np.cos(t)))
x = (x - x[0])
if norm:
x = (x / (x[(- 1)] - x[0]))
if flip:
y = (- y)
y = ((y * (max(y) - min(y))) / resheight)
if vertical:
(x, y) = (y, x)
x = (x + ofst[0])
y = (y + ofst[1])
path = np.transpose(np.vstack((x, y)))
return path | Generate a prolate cycloid (inductor spiral) that
will always start and end at y=0.
Parameters
----------
loops : int
Number of loops
a, b : float
Parameters. b>a for prolate (loopy) cycloid
norm : bool
Normalize the length to 1
vertical, flip : bool
Control the orientation of cycloid
Returns
-------
path : array
List of [x, y] coordinates defining the cycloid | schemdraw/elements/twoterm.py | cycloid | RonSheely/SchemDraw | 17 | python | def cycloid(loops=4, ofst=(0, 0), a=0.06, b=0.19, norm=True, vertical=False, flip=False):
' Generate a prolate cycloid (inductor spiral) that\n will always start and end at y=0.\n\n Parameters\n ----------\n loops : int\n Number of loops\n a, b : float\n Parameters. b>a for prolate (loopy) cycloid\n norm : bool\n Normalize the length to 1\n vertical, flip : bool\n Control the orientation of cycloid\n\n Returns\n -------\n path : array\n List of [x, y] coordinates defining the cycloid\n '
yint = np.arccos((a / b))
t = np.linspace(yint, (((2 * (loops + 1)) * np.pi) - yint), num=(loops * 50))
x = ((a * t) - (b * np.sin(t)))
y = (a - (b * np.cos(t)))
x = (x - x[0])
if norm:
x = (x / (x[(- 1)] - x[0]))
if flip:
y = (- y)
y = ((y * (max(y) - min(y))) / resheight)
if vertical:
(x, y) = (y, x)
x = (x + ofst[0])
y = (y + ofst[1])
path = np.transpose(np.vstack((x, y)))
return path | def cycloid(loops=4, ofst=(0, 0), a=0.06, b=0.19, norm=True, vertical=False, flip=False):
' Generate a prolate cycloid (inductor spiral) that\n will always start and end at y=0.\n\n Parameters\n ----------\n loops : int\n Number of loops\n a, b : float\n Parameters. b>a for prolate (loopy) cycloid\n norm : bool\n Normalize the length to 1\n vertical, flip : bool\n Control the orientation of cycloid\n\n Returns\n -------\n path : array\n List of [x, y] coordinates defining the cycloid\n '
yint = np.arccos((a / b))
t = np.linspace(yint, (((2 * (loops + 1)) * np.pi) - yint), num=(loops * 50))
x = ((a * t) - (b * np.sin(t)))
y = (a - (b * np.cos(t)))
x = (x - x[0])
if norm:
x = (x / (x[(- 1)] - x[0]))
if flip:
y = (- y)
y = ((y * (max(y) - min(y))) / resheight)
if vertical:
(x, y) = (y, x)
x = (x + ofst[0])
y = (y + ofst[1])
path = np.transpose(np.vstack((x, y)))
return path<|docstring|>Generate a prolate cycloid (inductor spiral) that
will always start and end at y=0.
Parameters
----------
loops : int
Number of loops
a, b : float
Parameters. b>a for prolate (loopy) cycloid
norm : bool
Normalize the length to 1
vertical, flip : bool
Control the orientation of cycloid
Returns
-------
path : array
List of [x, y] coordinates defining the cycloid<|endoftext|> |
397be5353f24a2e04d85437f18a6070de82b6b2c8cab6714986caef008fadb3c | def timestamp(datefmt='%Y-%m-%d, %H:%M:%S'):
'\n Create timestamp as a formatted string.\n '
return time.strftime(datefmt, time.localtime()) | Create timestamp as a formatted string. | sciluigi/util.py | timestamp | samuell/luigipp | 285 | python | def timestamp(datefmt='%Y-%m-%d, %H:%M:%S'):
'\n \n '
return time.strftime(datefmt, time.localtime()) | def timestamp(datefmt='%Y-%m-%d, %H:%M:%S'):
'\n \n '
return time.strftime(datefmt, time.localtime())<|docstring|>Create timestamp as a formatted string.<|endoftext|> |
0c68091ae24962a737e5637c8ef7c4561d5807861246908bb3b91e03d8834bf7 | def timepath(sep='_'):
'\n Create timestmap, formatted for use in file names.\n '
return timestamp('%Y%m%d{sep}%H%M%S'.format(sep=sep)) | Create timestmap, formatted for use in file names. | sciluigi/util.py | timepath | samuell/luigipp | 285 | python | def timepath(sep='_'):
'\n \n '
return timestamp('%Y%m%d{sep}%H%M%S'.format(sep=sep)) | def timepath(sep='_'):
'\n \n '
return timestamp('%Y%m%d{sep}%H%M%S'.format(sep=sep))<|docstring|>Create timestmap, formatted for use in file names.<|endoftext|> |
a959ad1e40be66ab65c308dd42253344e1ddf60b58306deafd36d04033d36a07 | def timelog():
'\n Create time stamp for use in log files.\n '
return timestamp('[%Y-%m-%d %H:%M:%S]') | Create time stamp for use in log files. | sciluigi/util.py | timelog | samuell/luigipp | 285 | python | def timelog():
'\n \n '
return timestamp('[%Y-%m-%d %H:%M:%S]') | def timelog():
'\n \n '
return timestamp('[%Y-%m-%d %H:%M:%S]')<|docstring|>Create time stamp for use in log files.<|endoftext|> |
252c01aa2487484a25a08be742b1f4b5db332ba01c11ca012a4bfd3527e07f86 | def ensuredir(dirpath):
'\n Ensure directory exists.\n '
if (not os.path.exists(dirpath)):
os.makedirs(dirpath) | Ensure directory exists. | sciluigi/util.py | ensuredir | samuell/luigipp | 285 | python | def ensuredir(dirpath):
'\n \n '
if (not os.path.exists(dirpath)):
os.makedirs(dirpath) | def ensuredir(dirpath):
'\n \n '
if (not os.path.exists(dirpath)):
os.makedirs(dirpath)<|docstring|>Ensure directory exists.<|endoftext|> |
303ad18ff332aec98f6cb84a5feb53e9c36ddf55ca6b820b8504172b2abfb794 | def recordfile_to_dict(filehandle):
'\n Convert a record file to a dictionary.\n '
csvrd = csv.reader(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
records = {}
for row in csvrd:
records[row[0]] = row[1]
return records | Convert a record file to a dictionary. | sciluigi/util.py | recordfile_to_dict | samuell/luigipp | 285 | python | def recordfile_to_dict(filehandle):
'\n \n '
csvrd = csv.reader(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
records = {}
for row in csvrd:
records[row[0]] = row[1]
return records | def recordfile_to_dict(filehandle):
'\n \n '
csvrd = csv.reader(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
records = {}
for row in csvrd:
records[row[0]] = row[1]
return records<|docstring|>Convert a record file to a dictionary.<|endoftext|> |
4b1fd94fb0ac6e06be706818a0827fdc7c6a1d84818ec9e670a00c12afaeeea8 | def dict_to_recordfile(filehandle, records):
'\n Convert a dictionary to a recordfile.\n '
csvwt = csv.writer(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
rows = []
for (key, val) in iteritems(records):
rows.append([key, val])
csvwt.writerows(rows) | Convert a dictionary to a recordfile. | sciluigi/util.py | dict_to_recordfile | samuell/luigipp | 285 | python | def dict_to_recordfile(filehandle, records):
'\n \n '
csvwt = csv.writer(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
rows = []
for (key, val) in iteritems(records):
rows.append([key, val])
csvwt.writerows(rows) | def dict_to_recordfile(filehandle, records):
'\n \n '
csvwt = csv.writer(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True)
rows = []
for (key, val) in iteritems(records):
rows.append([key, val])
csvwt.writerows(rows)<|docstring|>Convert a dictionary to a recordfile.<|endoftext|> |
4a7e0d682c0fd303eab6eab248825454dc257dc33e2d24556813f7f54fa0376c | def __init__(self, session: tf.Session(), data='mnist', input_size=28, lr=0.01, batch_size=64, epochs=10):
'\n :param session: tf session\n :param data: name of dataset\n :param input_size: input size of image\n :param lr: learning rate\n :param batch_size: batch size\n :param epochs: number of epochs to train on\n '
self.session = session
self.data = data
self.input_size = input_size
self.image_size = (input_size * input_size)
self.lr = lr
self.batch_size = batch_size
self.epochs = epochs | :param session: tf session
:param data: name of dataset
:param input_size: input size of image
:param lr: learning rate
:param batch_size: batch size
:param epochs: number of epochs to train on | 12.tf_dense.py | __init__ | mkumar73/neural-nets | 2 | python | def __init__(self, session: tf.Session(), data='mnist', input_size=28, lr=0.01, batch_size=64, epochs=10):
'\n :param session: tf session\n :param data: name of dataset\n :param input_size: input size of image\n :param lr: learning rate\n :param batch_size: batch size\n :param epochs: number of epochs to train on\n '
self.session = session
self.data = data
self.input_size = input_size
self.image_size = (input_size * input_size)
self.lr = lr
self.batch_size = batch_size
self.epochs = epochs | def __init__(self, session: tf.Session(), data='mnist', input_size=28, lr=0.01, batch_size=64, epochs=10):
'\n :param session: tf session\n :param data: name of dataset\n :param input_size: input size of image\n :param lr: learning rate\n :param batch_size: batch size\n :param epochs: number of epochs to train on\n '
self.session = session
self.data = data
self.input_size = input_size
self.image_size = (input_size * input_size)
self.lr = lr
self.batch_size = batch_size
self.epochs = epochs<|docstring|>:param session: tf session
:param data: name of dataset
:param input_size: input size of image
:param lr: learning rate
:param batch_size: batch size
:param epochs: number of epochs to train on<|endoftext|> |
3dd46c59e095c6706eecbfec2f1cbbdaa5267c4ce151347262089b327280b82c | def _load_data(self):
'\n :return: load the data from dataset library\n '
self.data.lower()
if (self.data == 'mnist'):
((self.x_train, self.y_train), (self.x_test, self.y_test)) = tf.keras.datasets.mnist.load_data()
else:
logging.error('Dataset error: Only implmented for MNIST as of now.!!')
return | :return: load the data from dataset library | 12.tf_dense.py | _load_data | mkumar73/neural-nets | 2 | python | def _load_data(self):
'\n \n '
self.data.lower()
if (self.data == 'mnist'):
((self.x_train, self.y_train), (self.x_test, self.y_test)) = tf.keras.datasets.mnist.load_data()
else:
logging.error('Dataset error: Only implmented for MNIST as of now.!!')
return | def _load_data(self):
'\n \n '
self.data.lower()
if (self.data == 'mnist'):
((self.x_train, self.y_train), (self.x_test, self.y_test)) = tf.keras.datasets.mnist.load_data()
else:
logging.error('Dataset error: Only implmented for MNIST as of now.!!')
return<|docstring|>:return: load the data from dataset library<|endoftext|> |
c07228124db94a93be14172a9676ceb91df47304c379935f203b65bdd75509f8 | def _data_preprocessing(self):
'\n private function\n :return: processed data\n '
self._load_data()
x_train = (self.x_train.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
x_test = (self.x_test.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
y_train = self.y_train.astype(np.int64)
y_test = self.y_test.astype(np.int64)
return (x_train, x_test, y_train, y_test) | private function
:return: processed data | 12.tf_dense.py | _data_preprocessing | mkumar73/neural-nets | 2 | python | def _data_preprocessing(self):
'\n private function\n :return: processed data\n '
self._load_data()
x_train = (self.x_train.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
x_test = (self.x_test.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
y_train = self.y_train.astype(np.int64)
y_test = self.y_test.astype(np.int64)
return (x_train, x_test, y_train, y_test) | def _data_preprocessing(self):
'\n private function\n :return: processed data\n '
self._load_data()
x_train = (self.x_train.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
x_test = (self.x_test.astype(np.float32).reshape((- 1), self.image_size) / 255.0)
y_train = self.y_train.astype(np.int64)
y_test = self.y_test.astype(np.int64)
return (x_train, x_test, y_train, y_test)<|docstring|>private function
:return: processed data<|endoftext|> |
436bf7b4400dadf6a56f07d633cbfc3344fbbaacc2bc86a3f989bbc8dacb714e | def _train_test_split(self, _index=5000):
'\n\n :param _index: range of trainig and validation data\n :return: train, validation and test set\n '
(x_train, x_test, y_train, y_test) = self._data_preprocessing()
(x_train, x_validation) = (x_train[5000:], x_train[:5000])
(y_train, y_validation) = (y_train[5000:], y_train[:5000])
return (x_train, x_validation, x_test, y_train, y_validation, y_test) | :param _index: range of trainig and validation data
:return: train, validation and test set | 12.tf_dense.py | _train_test_split | mkumar73/neural-nets | 2 | python | def _train_test_split(self, _index=5000):
'\n\n :param _index: range of trainig and validation data\n :return: train, validation and test set\n '
(x_train, x_test, y_train, y_test) = self._data_preprocessing()
(x_train, x_validation) = (x_train[5000:], x_train[:5000])
(y_train, y_validation) = (y_train[5000:], y_train[:5000])
return (x_train, x_validation, x_test, y_train, y_validation, y_test) | def _train_test_split(self, _index=5000):
'\n\n :param _index: range of trainig and validation data\n :return: train, validation and test set\n '
(x_train, x_test, y_train, y_test) = self._data_preprocessing()
(x_train, x_validation) = (x_train[5000:], x_train[:5000])
(y_train, y_validation) = (y_train[5000:], y_train[:5000])
return (x_train, x_validation, x_test, y_train, y_validation, y_test)<|docstring|>:param _index: range of trainig and validation data
:return: train, validation and test set<|endoftext|> |
fce68a12f1fc56907c8336b0c4abfdfbcaa68297b067c5f38d8412d0a0e1e619 | def shuffle_batch(self, x, y, batch_size):
'\n :param x: image\n :param y: labels\n :param batch_size: #samples in a batch\n :return: shuffeld samples, images and labels\n '
rnd_idx = np.random.permutation(len(x))
n_batches = (len(x) // batch_size)
for batch_idx in np.array_split(rnd_idx, n_batches):
(x_batch, y_batch) = (x[batch_idx], y[batch_idx])
(yield (x_batch, y_batch)) | :param x: image
:param y: labels
:param batch_size: #samples in a batch
:return: shuffeld samples, images and labels | 12.tf_dense.py | shuffle_batch | mkumar73/neural-nets | 2 | python | def shuffle_batch(self, x, y, batch_size):
'\n :param x: image\n :param y: labels\n :param batch_size: #samples in a batch\n :return: shuffeld samples, images and labels\n '
rnd_idx = np.random.permutation(len(x))
n_batches = (len(x) // batch_size)
for batch_idx in np.array_split(rnd_idx, n_batches):
(x_batch, y_batch) = (x[batch_idx], y[batch_idx])
(yield (x_batch, y_batch)) | def shuffle_batch(self, x, y, batch_size):
'\n :param x: image\n :param y: labels\n :param batch_size: #samples in a batch\n :return: shuffeld samples, images and labels\n '
rnd_idx = np.random.permutation(len(x))
n_batches = (len(x) // batch_size)
for batch_idx in np.array_split(rnd_idx, n_batches):
(x_batch, y_batch) = (x[batch_idx], y[batch_idx])
(yield (x_batch, y_batch))<|docstring|>:param x: image
:param y: labels
:param batch_size: #samples in a batch
:return: shuffeld samples, images and labels<|endoftext|> |
9ee5c08b967a6f2f699f164c443ab2f794ab9931d0396cf0303d27801ebb9652 | def check_sample_data(self):
'\n :return: print something\n '
(train, val, test, y_train, _, _) = self._train_test_split(_index=5000)
print('Size of train, validation and test set:\n')
print(train.shape)
print(val.shape)
print(test.shape)
print('Sample data:\n')
print(y_train.shape)
return | :return: print something | 12.tf_dense.py | check_sample_data | mkumar73/neural-nets | 2 | python | def check_sample_data(self):
'\n \n '
(train, val, test, y_train, _, _) = self._train_test_split(_index=5000)
print('Size of train, validation and test set:\n')
print(train.shape)
print(val.shape)
print(test.shape)
print('Sample data:\n')
print(y_train.shape)
return | def check_sample_data(self):
'\n \n '
(train, val, test, y_train, _, _) = self._train_test_split(_index=5000)
print('Size of train, validation and test set:\n')
print(train.shape)
print(val.shape)
print(test.shape)
print('Sample data:\n')
print(y_train.shape)
return<|docstring|>:return: print something<|endoftext|> |
fd12d4dc7722ca31eaa45aa82a58d408f1ca28e610f5dbd46cf44775fb408c8a | def build_network(self, session, n_h1, n_h2, n_output):
'\n\n :param session: tensorflow session\n :param n_h1: #neurons for h1\n :param n_h2: #neurons for h2\n :param n_output: #neurons for output layer\n :return: build and train the network\n '
X = tf.placeholder(tf.float32, shape=(None, self.image_size), name='X')
y = tf.placeholder(tf.int64, shape=None, name='y')
with tf.name_scope('fully_connected'):
h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='hidden_1')
h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='hidden_2')
logits = tf.layers.dense(h2, n_output, name='output')
with tf.name_scope('loss'):
entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(entropy, name='loss')
with tf.name_scope('optimize'):
threshold = 1.0
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
grad_var = optimizer.compute_gradients(loss)
clipped_grads = [(tf.clip_by_value(grad, (- threshold), threshold), var) for (grad, var) in grad_var]
training_op = optimizer.apply_gradients(clipped_grads)
with tf.name_scope('accuracy'):
correct = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
(x_train, x_validation, x_test, y_train, y_validation, y_test) = self._train_test_split()
init = tf.global_variables_initializer()
session.run(init)
for epoch in range(self.epochs):
for (x_batch, y_batch) in self.shuffle_batch(x_train, y_train, self.batch_size):
session.run(training_op, feed_dict={X: x_batch, y: y_batch})
acc_batch = session.run(accuracy, feed_dict={X: x_batch, y: y_batch})
acc_val = session.run(accuracy, feed_dict={X: x_validation, y: y_validation})
print('Epoch:', epoch, 'Batch accuracy:', acc_batch, 'Validation accuracy:', acc_val)
return | :param session: tensorflow session
:param n_h1: #neurons for h1
:param n_h2: #neurons for h2
:param n_output: #neurons for output layer
:return: build and train the network | 12.tf_dense.py | build_network | mkumar73/neural-nets | 2 | python | def build_network(self, session, n_h1, n_h2, n_output):
'\n\n :param session: tensorflow session\n :param n_h1: #neurons for h1\n :param n_h2: #neurons for h2\n :param n_output: #neurons for output layer\n :return: build and train the network\n '
X = tf.placeholder(tf.float32, shape=(None, self.image_size), name='X')
y = tf.placeholder(tf.int64, shape=None, name='y')
with tf.name_scope('fully_connected'):
h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='hidden_1')
h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='hidden_2')
logits = tf.layers.dense(h2, n_output, name='output')
with tf.name_scope('loss'):
entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(entropy, name='loss')
with tf.name_scope('optimize'):
threshold = 1.0
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
grad_var = optimizer.compute_gradients(loss)
clipped_grads = [(tf.clip_by_value(grad, (- threshold), threshold), var) for (grad, var) in grad_var]
training_op = optimizer.apply_gradients(clipped_grads)
with tf.name_scope('accuracy'):
correct = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
(x_train, x_validation, x_test, y_train, y_validation, y_test) = self._train_test_split()
init = tf.global_variables_initializer()
session.run(init)
for epoch in range(self.epochs):
for (x_batch, y_batch) in self.shuffle_batch(x_train, y_train, self.batch_size):
session.run(training_op, feed_dict={X: x_batch, y: y_batch})
acc_batch = session.run(accuracy, feed_dict={X: x_batch, y: y_batch})
acc_val = session.run(accuracy, feed_dict={X: x_validation, y: y_validation})
print('Epoch:', epoch, 'Batch accuracy:', acc_batch, 'Validation accuracy:', acc_val)
return | def build_network(self, session, n_h1, n_h2, n_output):
'\n\n :param session: tensorflow session\n :param n_h1: #neurons for h1\n :param n_h2: #neurons for h2\n :param n_output: #neurons for output layer\n :return: build and train the network\n '
X = tf.placeholder(tf.float32, shape=(None, self.image_size), name='X')
y = tf.placeholder(tf.int64, shape=None, name='y')
with tf.name_scope('fully_connected'):
h1 = tf.layers.dense(X, n_h1, activation=tf.nn.relu, name='hidden_1')
h2 = tf.layers.dense(h1, n_h2, activation=tf.nn.relu, name='hidden_2')
logits = tf.layers.dense(h2, n_output, name='output')
with tf.name_scope('loss'):
entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(entropy, name='loss')
with tf.name_scope('optimize'):
threshold = 1.0
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
grad_var = optimizer.compute_gradients(loss)
clipped_grads = [(tf.clip_by_value(grad, (- threshold), threshold), var) for (grad, var) in grad_var]
training_op = optimizer.apply_gradients(clipped_grads)
with tf.name_scope('accuracy'):
correct = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
(x_train, x_validation, x_test, y_train, y_validation, y_test) = self._train_test_split()
init = tf.global_variables_initializer()
session.run(init)
for epoch in range(self.epochs):
for (x_batch, y_batch) in self.shuffle_batch(x_train, y_train, self.batch_size):
session.run(training_op, feed_dict={X: x_batch, y: y_batch})
acc_batch = session.run(accuracy, feed_dict={X: x_batch, y: y_batch})
acc_val = session.run(accuracy, feed_dict={X: x_validation, y: y_validation})
print('Epoch:', epoch, 'Batch accuracy:', acc_batch, 'Validation accuracy:', acc_val)
return<|docstring|>:param session: tensorflow session
:param n_h1: #neurons for h1
:param n_h2: #neurons for h2
:param n_output: #neurons for output layer
:return: build and train the network<|endoftext|> |
c6a99c70cc34032ad06b36e25d01f4b78c621cbe0cb40dc3191c3b85b178a20f | def format_queries(self, db_ids):
'\n Formats the neo4j queries to use the correct cluster/user id on insertion\n :param db_ids:\n :return:\n '
cluster_id = db_ids['cluster_id']
user_id = db_ids['user_id']
for query in self.queries_dict.keys():
q = self.queries_dict[query]
q = q.replace('{CLUSTER_ID}', '"{0}"'.format(cluster_id))
q = q.replace('{USER_ID}', '"{0}"'.format(user_id))
self.queries_dict[query] = q | Formats the neo4j queries to use the correct cluster/user id on insertion
:param db_ids:
:return: | article_api/graph_fulfilment.py | format_queries | mrkarezina/graph-recommendation-api | 0 | python | def format_queries(self, db_ids):
'\n Formats the neo4j queries to use the correct cluster/user id on insertion\n :param db_ids:\n :return:\n '
cluster_id = db_ids['cluster_id']
user_id = db_ids['user_id']
for query in self.queries_dict.keys():
q = self.queries_dict[query]
q = q.replace('{CLUSTER_ID}', '"{0}"'.format(cluster_id))
q = q.replace('{USER_ID}', '"{0}"'.format(user_id))
self.queries_dict[query] = q | def format_queries(self, db_ids):
'\n Formats the neo4j queries to use the correct cluster/user id on insertion\n :param db_ids:\n :return:\n '
cluster_id = db_ids['cluster_id']
user_id = db_ids['user_id']
for query in self.queries_dict.keys():
q = self.queries_dict[query]
q = q.replace('{CLUSTER_ID}', '"{0}"'.format(cluster_id))
q = q.replace('{USER_ID}', '"{0}"'.format(user_id))
self.queries_dict[query] = q<|docstring|>Formats the neo4j queries to use the correct cluster/user id on insertion
:param db_ids:
:return:<|endoftext|> |
8f47f1e621ebbc568e8eaacc5a3c391bd5dc186d4d0b84d7fb72cc73fec3fd93 | def get_article_data(self, title):
'\n Gets data on title\n :param title:\n :return:\n '
article = list(self.graph.run(self.queries_dict['GET_ARTICLE_DATA'], TITLE=title))[0][0]
article_data = {'title': article['title'], 'summary': article['summary'], 'url': article['url'], 'img_url': article['img_url'], 'date': article['date'], 'entities': [], 'concepts': []}
entities = list(self.graph.run(self.queries_dict['GET_ENTITIES'], TITLE=title))
for entity in entities:
entity = list(entity)[0]
article_data['entities'].append({'label': entity['label']})
concepts = list(self.graph.run(self.queries_dict['GET_CONCEPTS'], TITLE=title))
for concept in concepts:
concept = list(concept)[0]
article_data['concepts'].append({'label': concept['label']})
return article_data | Gets data on title
:param title:
:return: | article_api/graph_fulfilment.py | get_article_data | mrkarezina/graph-recommendation-api | 0 | python | def get_article_data(self, title):
'\n Gets data on title\n :param title:\n :return:\n '
article = list(self.graph.run(self.queries_dict['GET_ARTICLE_DATA'], TITLE=title))[0][0]
article_data = {'title': article['title'], 'summary': article['summary'], 'url': article['url'], 'img_url': article['img_url'], 'date': article['date'], 'entities': [], 'concepts': []}
entities = list(self.graph.run(self.queries_dict['GET_ENTITIES'], TITLE=title))
for entity in entities:
entity = list(entity)[0]
article_data['entities'].append({'label': entity['label']})
concepts = list(self.graph.run(self.queries_dict['GET_CONCEPTS'], TITLE=title))
for concept in concepts:
concept = list(concept)[0]
article_data['concepts'].append({'label': concept['label']})
return article_data | def get_article_data(self, title):
'\n Gets data on title\n :param title:\n :return:\n '
article = list(self.graph.run(self.queries_dict['GET_ARTICLE_DATA'], TITLE=title))[0][0]
article_data = {'title': article['title'], 'summary': article['summary'], 'url': article['url'], 'img_url': article['img_url'], 'date': article['date'], 'entities': [], 'concepts': []}
entities = list(self.graph.run(self.queries_dict['GET_ENTITIES'], TITLE=title))
for entity in entities:
entity = list(entity)[0]
article_data['entities'].append({'label': entity['label']})
concepts = list(self.graph.run(self.queries_dict['GET_CONCEPTS'], TITLE=title))
for concept in concepts:
concept = list(concept)[0]
article_data['concepts'].append({'label': concept['label']})
return article_data<|docstring|>Gets data on title
:param title:
:return:<|endoftext|> |
5fcdfc20334628553ebc1a0606f5a86fada6c632875ff38f85d38a04c9c0cd9d | def get_most_related_articles(self, title):
'\n Gets the most related titles to a title\n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED'], TITLE=title))
articles = []
for related in most_related:
articles.append(related['title'])
return articles | Gets the most related titles to a title | article_api/graph_fulfilment.py | get_most_related_articles | mrkarezina/graph-recommendation-api | 0 | python | def get_most_related_articles(self, title):
'\n \n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED'], TITLE=title))
articles = []
for related in most_related:
articles.append(related['title'])
return articles | def get_most_related_articles(self, title):
'\n \n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED'], TITLE=title))
articles = []
for related in most_related:
articles.append(related['title'])
return articles<|docstring|>Gets the most related titles to a title<|endoftext|> |
cbb378f9feb22ec5d20b299ff065412a4653d2b4bcaf8e27577c5d5205435cfa | def get_most_related_by_embedding(self, embedding):
'\n Gets most related titles to embedding\n :param embedding:\n :return:\n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED_BY_EMBEDDING'], TARGET_EMBEDDING=embedding))
articles = []
for related in most_related:
articles.append(related['title'])
return articles | Gets most related titles to embedding
:param embedding:
:return: | article_api/graph_fulfilment.py | get_most_related_by_embedding | mrkarezina/graph-recommendation-api | 0 | python | def get_most_related_by_embedding(self, embedding):
'\n Gets most related titles to embedding\n :param embedding:\n :return:\n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED_BY_EMBEDDING'], TARGET_EMBEDDING=embedding))
articles = []
for related in most_related:
articles.append(related['title'])
return articles | def get_most_related_by_embedding(self, embedding):
'\n Gets most related titles to embedding\n :param embedding:\n :return:\n '
most_related = list(self.graph.run(self.queries_dict['GET_MOST_RELATED_BY_EMBEDDING'], TARGET_EMBEDDING=embedding))
articles = []
for related in most_related:
articles.append(related['title'])
return articles<|docstring|>Gets most related titles to embedding
:param embedding:
:return:<|endoftext|> |
1f7700d56029d05ab7498aa8109d15b03e38d25fbf1b69c89b728a8ca6998d80 | def get_title_from_url(self, url):
'\n Gets title from url\n :param url:\n :return:\n '
title = list(self.graph.run(self.queries_dict['GET_TITLE_FROM_URL'], URL=url))[0]['title']
return title | Gets title from url
:param url:
:return: | article_api/graph_fulfilment.py | get_title_from_url | mrkarezina/graph-recommendation-api | 0 | python | def get_title_from_url(self, url):
'\n Gets title from url\n :param url:\n :return:\n '
title = list(self.graph.run(self.queries_dict['GET_TITLE_FROM_URL'], URL=url))[0]['title']
return title | def get_title_from_url(self, url):
'\n Gets title from url\n :param url:\n :return:\n '
title = list(self.graph.run(self.queries_dict['GET_TITLE_FROM_URL'], URL=url))[0]['title']
return title<|docstring|>Gets title from url
:param url:
:return:<|endoftext|> |
11c11f2c6f6196b8e32a41a79f8e0ea93dd400ba5a270e0e721e2d7f07936b4a | def read_configuration_from_file(file: str) -> dict:
'\n\tReads configuration from YAML file\n\t'
with open(file, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
logging.exception('Unable to parse configurations')
return {} | Reads configuration from YAML file | yew/core/settings.py | read_configuration_from_file | Claudjos/yew | 0 | python | def read_configuration_from_file(file: str) -> dict:
'\n\t\n\t'
with open(file, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
logging.exception('Unable to parse configurations')
return {} | def read_configuration_from_file(file: str) -> dict:
'\n\t\n\t'
with open(file, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
logging.exception('Unable to parse configurations')
return {}<|docstring|>Reads configuration from YAML file<|endoftext|> |
504a648cd0ad780d2d8c530d37fe8e23778f9c1c9de23e46cabc016cc9d95918 | def build_server_sockets(looper: Looper, configurations: dict) -> List[Tuple[('socket.socket', 'data')]]:
'\n\tBuilds the server sockets.\n\t'
server_sockets = []
for server in Component.get_by_category('servers'):
server.set_looper(looper)
server_sockets.append(server.create_server_socket())
return server_sockets | Builds the server sockets. | yew/core/settings.py | build_server_sockets | Claudjos/yew | 0 | python | def build_server_sockets(looper: Looper, configurations: dict) -> List[Tuple[('socket.socket', 'data')]]:
'\n\t\n\t'
server_sockets = []
for server in Component.get_by_category('servers'):
server.set_looper(looper)
server_sockets.append(server.create_server_socket())
return server_sockets | def build_server_sockets(looper: Looper, configurations: dict) -> List[Tuple[('socket.socket', 'data')]]:
'\n\t\n\t'
server_sockets = []
for server in Component.get_by_category('servers'):
server.set_looper(looper)
server_sockets.append(server.create_server_socket())
return server_sockets<|docstring|>Builds the server sockets.<|endoftext|> |
db010ac3cc470fe9a6f19790c179609f0553c44b2dccc3ab2acc85ea0a72daed | def import_class(value: str) -> Type:
'\n\tPARAMS\n\t\t- value: a string in the format module:class (e.g., yew.modules.foo:BarClass)\n\t'
(module, klass) = value.split(':')
return getattr(importlib.import_module(module), klass) | PARAMS
- value: a string in the format module:class (e.g., yew.modules.foo:BarClass) | yew/core/settings.py | import_class | Claudjos/yew | 0 | python | def import_class(value: str) -> Type:
'\n\tPARAMS\n\t\t- value: a string in the format module:class (e.g., yew.modules.foo:BarClass)\n\t'
(module, klass) = value.split(':')
return getattr(importlib.import_module(module), klass) | def import_class(value: str) -> Type:
'\n\tPARAMS\n\t\t- value: a string in the format module:class (e.g., yew.modules.foo:BarClass)\n\t'
(module, klass) = value.split(':')
return getattr(importlib.import_module(module), klass)<|docstring|>PARAMS
- value: a string in the format module:class (e.g., yew.modules.foo:BarClass)<|endoftext|> |
defbbdf72c4a34ce17c52e468ca511163839cf0f233710b68d7d35eacf355baa | def load_component(category, component):
'\n\t- name: MyComponent\n\t class: mymodule:MyClass\n\t params: {}\n\t'
name = component.get('name')
klass = import_class(component.get('class'))
instance = klass.build(name, component.get('params'))
Component.install(category, instance)
logging.debug('Component {}:{} loaded'.format(name, str(klass))) | - name: MyComponent
class: mymodule:MyClass
params: {} | yew/core/settings.py | load_component | Claudjos/yew | 0 | python | def load_component(category, component):
'\n\t- name: MyComponent\n\t class: mymodule:MyClass\n\t params: {}\n\t'
name = component.get('name')
klass = import_class(component.get('class'))
instance = klass.build(name, component.get('params'))
Component.install(category, instance)
logging.debug('Component {}:{} loaded'.format(name, str(klass))) | def load_component(category, component):
'\n\t- name: MyComponent\n\t class: mymodule:MyClass\n\t params: {}\n\t'
name = component.get('name')
klass = import_class(component.get('class'))
instance = klass.build(name, component.get('params'))
Component.install(category, instance)
logging.debug('Component {}:{} loaded'.format(name, str(klass)))<|docstring|>- name: MyComponent
class: mymodule:MyClass
params: {}<|endoftext|> |
6fd68aee249db0891f83d9803d97ea5013130d76932e9ce89dd339f766395a76 | def generate_dataframe_thesaurus():
'generate dataframe and populate with data from LDES'
df_thes = pd.DataFrame(generate_dataframe('THES'))
for i in range(0, len(columns_thes)):
df_thes.insert(i, columns_thes[i], '')
for i in range(0, len(df_thes)):
x = df_thes.loc[i]
j = json.loads(x[0])
uri = j['http://purl.org/dc/terms/isVersionOf']
df_thes.at[(i, 'URI')] = uri
fetch_timestamp(df_thes, i, j)
fetch_thesaurus_term(df_thes, i, j)
fetch_thesaurus_external_uri(df_thes, i, j)
return df_thes | generate dataframe and populate with data from LDES | src/parser/parser_thes.py | generate_dataframe_thesaurus | sofiedroid/LDES_TO_PG | 1 | python | def generate_dataframe_thesaurus():
df_thes = pd.DataFrame(generate_dataframe('THES'))
for i in range(0, len(columns_thes)):
df_thes.insert(i, columns_thes[i], )
for i in range(0, len(df_thes)):
x = df_thes.loc[i]
j = json.loads(x[0])
uri = j['http://purl.org/dc/terms/isVersionOf']
df_thes.at[(i, 'URI')] = uri
fetch_timestamp(df_thes, i, j)
fetch_thesaurus_term(df_thes, i, j)
fetch_thesaurus_external_uri(df_thes, i, j)
return df_thes | def generate_dataframe_thesaurus():
df_thes = pd.DataFrame(generate_dataframe('THES'))
for i in range(0, len(columns_thes)):
df_thes.insert(i, columns_thes[i], )
for i in range(0, len(df_thes)):
x = df_thes.loc[i]
j = json.loads(x[0])
uri = j['http://purl.org/dc/terms/isVersionOf']
df_thes.at[(i, 'URI')] = uri
fetch_timestamp(df_thes, i, j)
fetch_thesaurus_term(df_thes, i, j)
fetch_thesaurus_external_uri(df_thes, i, j)
return df_thes<|docstring|>generate dataframe and populate with data from LDES<|endoftext|> |
e33d6f4bece56362b22c7e741e5d732a514b9e0081db2a0a3eab0270ef2208df | def process_raw_grid(procstatus, dscfg, radar_list=None):
'\n Dummy function that returns the initial input data set\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, _, _, _) = get_datatype_fields(datatypedescr)
break
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}
return (new_dataset, ind_rad) | Dummy function that returns the initial input data set
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_raw_grid | jfigui/pyrad | 41 | python | def process_raw_grid(procstatus, dscfg, radar_list=None):
'\n Dummy function that returns the initial input data set\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, _, _, _) = get_datatype_fields(datatypedescr)
break
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}
return (new_dataset, ind_rad) | def process_raw_grid(procstatus, dscfg, radar_list=None):
'\n Dummy function that returns the initial input data set\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, _, _, _) = get_datatype_fields(datatypedescr)
break
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}
return (new_dataset, ind_rad)<|docstring|>Dummy function that returns the initial input data set
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
04bce3a6449d8183bcf03e3a30009ba7c53f53b12e30d5d00fb26def8d507649 | def process_grid(procstatus, dscfg, radar_list=None):
'\n Puts the radar data in a regular grid\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n gridconfig : dictionary. Dataset keyword\n Dictionary containing some or all of this keywords:\n xmin, xmax, ymin, ymax, zmin, zmax : floats\n minimum and maximum horizontal distance from grid origin [km]\n and minimum and maximum vertical distance from grid origin [m]\n Defaults -40, 40, -40, 40, 0., 10000.\n hres, vres : floats\n horizontal and vertical grid resolution [m]\n Defaults 1000., 500.\n latorig, lonorig, altorig : floats\n latitude and longitude of grid origin [deg] and altitude of\n grid origin [m MSL]\n Defaults the latitude, longitude and altitude of the radar\n wfunc : str. Dataset keyword\n the weighting function used to combine the radar gates close to a\n grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST\n Default NEAREST\n roif_func : str. Dataset keyword\n the function used to compute the region of interest.\n Possible values: dist_beam, constant\n roi : float. Dataset keyword\n the (minimum) radius of the region of interest in m. Default half\n the largest resolution\n beamwidth : float. Dataset keyword\n the radar antenna beamwidth [deg]. If None that of the key\n radar_beam_width_h in attribute instrument_parameters of the radar\n object will be used. If the key or the attribute are not present\n a default 1 deg value will be used\n beam_spacing : float. Dataset keyword\n the beam spacing, i.e. the ray angle resolution [deg]. If None,\n that of the attribute ray_angle_res of the radar object will be\n used. If the attribute is None a default 1 deg value will be used\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the gridded data\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
field_names_aux = []
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_names_aux.append(get_fieldname_pyart(datatype))
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
radar = radar_list[ind_rad]
field_names = []
nfields_available = 0
for field_name in field_names_aux:
if (field_name not in radar.fields):
warn((('Field name ' + field_name) + ' not available in radar object'))
continue
field_names.append(field_name)
nfields_available += 1
if (nfields_available == 0):
warn('Fields not available in radar data')
return (None, None)
xmin = (- 40.0)
xmax = 40.0
ymin = (- 40.0)
ymax = 40.0
zmin = 0.0
zmax = 10000.0
hres = 1000.0
vres = 500.0
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = float(radar.altitude['data'])
if ('gridConfig' in dscfg):
if ('xmin' in dscfg['gridConfig']):
xmin = dscfg['gridConfig']['xmin']
if ('xmax' in dscfg['gridConfig']):
xmax = dscfg['gridConfig']['xmax']
if ('ymin' in dscfg['gridConfig']):
ymin = dscfg['gridConfig']['ymin']
if ('ymax' in dscfg['gridConfig']):
ymax = dscfg['gridConfig']['ymax']
if ('zmin' in dscfg['gridConfig']):
zmin = dscfg['gridConfig']['zmin']
if ('zmax' in dscfg['gridConfig']):
zmax = dscfg['gridConfig']['zmax']
if ('hres' in dscfg['gridConfig']):
hres = dscfg['gridConfig']['hres']
if ('vres' in dscfg['gridConfig']):
vres = dscfg['gridConfig']['vres']
if ('latorig' in dscfg['gridConfig']):
lat = dscfg['gridConfig']['latorig']
if ('lonorig' in dscfg['gridConfig']):
lon = dscfg['gridConfig']['lonorig']
if ('altorig' in dscfg['gridConfig']):
alt = dscfg['gridConfig']['altorig']
wfunc = dscfg.get('wfunc', 'NEAREST')
roi_func = dscfg.get('roi_func', 'dist_beam')
nz = (int(((zmax - zmin) / vres)) + 1)
ny = (int((((ymax - ymin) * 1000.0) / hres)) + 1)
nx = (int((((xmax - xmin) * 1000.0) / hres)) + 1)
min_radius = dscfg.get('roi', (np.max([vres, hres]) / 2.0))
beamwidth = dscfg.get('beamwidth', None)
beam_spacing = dscfg.get('beam_spacing', None)
if (beamwidth is None):
if ((radar.instrument_parameters is not None) and ('radar_beam_width_h' in radar.instrument_parameters)):
beamwidth = radar.instrument_parameters['radar_beam_width_h']['data'][0]
else:
warn('Unknown radar beamwidth. Default 1 deg will be used')
beamwidth = 1
if (beam_spacing is None):
if (radar.ray_angle_res is not None):
beam_spacing = radar.ray_angle_res['data'][0]
else:
warn('Unknown beam spacing. Default 1 deg will be used')
beam_spacing = 1
grid = pyart.map.grid_from_radars((radar,), gridding_algo='map_to_grid', weighting_function=wfunc, roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing, min_radius=min_radius, constant_roi=min_radius, grid_shape=(nz, ny, nx), grid_limits=((zmin, zmax), ((ymin * 1000.0), (ymax * 1000.0)), ((xmin * 1000.0), (xmax * 1000.0))), grid_origin=(lat, lon), grid_origin_alt=alt, fields=field_names)
new_dataset = {'radar_out': grid}
return (new_dataset, ind_rad) | Puts the radar data in a regular grid
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
gridconfig : dictionary. Dataset keyword
Dictionary containing some or all of this keywords:
xmin, xmax, ymin, ymax, zmin, zmax : floats
minimum and maximum horizontal distance from grid origin [km]
and minimum and maximum vertical distance from grid origin [m]
Defaults -40, 40, -40, 40, 0., 10000.
hres, vres : floats
horizontal and vertical grid resolution [m]
Defaults 1000., 500.
latorig, lonorig, altorig : floats
latitude and longitude of grid origin [deg] and altitude of
grid origin [m MSL]
Defaults the latitude, longitude and altitude of the radar
wfunc : str. Dataset keyword
the weighting function used to combine the radar gates close to a
grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST
Default NEAREST
roif_func : str. Dataset keyword
the function used to compute the region of interest.
Possible values: dist_beam, constant
roi : float. Dataset keyword
the (minimum) radius of the region of interest in m. Default half
the largest resolution
beamwidth : float. Dataset keyword
the radar antenna beamwidth [deg]. If None that of the key
radar_beam_width_h in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
a default 1 deg value will be used
beam_spacing : float. Dataset keyword
the beam spacing, i.e. the ray angle resolution [deg]. If None,
that of the attribute ray_angle_res of the radar object will be
used. If the attribute is None a default 1 deg value will be used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the gridded data
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid | jfigui/pyrad | 41 | python | def process_grid(procstatus, dscfg, radar_list=None):
'\n Puts the radar data in a regular grid\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n gridconfig : dictionary. Dataset keyword\n Dictionary containing some or all of this keywords:\n xmin, xmax, ymin, ymax, zmin, zmax : floats\n minimum and maximum horizontal distance from grid origin [km]\n and minimum and maximum vertical distance from grid origin [m]\n Defaults -40, 40, -40, 40, 0., 10000.\n hres, vres : floats\n horizontal and vertical grid resolution [m]\n Defaults 1000., 500.\n latorig, lonorig, altorig : floats\n latitude and longitude of grid origin [deg] and altitude of\n grid origin [m MSL]\n Defaults the latitude, longitude and altitude of the radar\n wfunc : str. Dataset keyword\n the weighting function used to combine the radar gates close to a\n grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST\n Default NEAREST\n roif_func : str. Dataset keyword\n the function used to compute the region of interest.\n Possible values: dist_beam, constant\n roi : float. Dataset keyword\n the (minimum) radius of the region of interest in m. Default half\n the largest resolution\n beamwidth : float. Dataset keyword\n the radar antenna beamwidth [deg]. If None that of the key\n radar_beam_width_h in attribute instrument_parameters of the radar\n object will be used. If the key or the attribute are not present\n a default 1 deg value will be used\n beam_spacing : float. Dataset keyword\n the beam spacing, i.e. the ray angle resolution [deg]. If None,\n that of the attribute ray_angle_res of the radar object will be\n used. If the attribute is None a default 1 deg value will be used\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the gridded data\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
field_names_aux = []
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_names_aux.append(get_fieldname_pyart(datatype))
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
radar = radar_list[ind_rad]
field_names = []
nfields_available = 0
for field_name in field_names_aux:
if (field_name not in radar.fields):
warn((('Field name ' + field_name) + ' not available in radar object'))
continue
field_names.append(field_name)
nfields_available += 1
if (nfields_available == 0):
warn('Fields not available in radar data')
return (None, None)
xmin = (- 40.0)
xmax = 40.0
ymin = (- 40.0)
ymax = 40.0
zmin = 0.0
zmax = 10000.0
hres = 1000.0
vres = 500.0
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = float(radar.altitude['data'])
if ('gridConfig' in dscfg):
if ('xmin' in dscfg['gridConfig']):
xmin = dscfg['gridConfig']['xmin']
if ('xmax' in dscfg['gridConfig']):
xmax = dscfg['gridConfig']['xmax']
if ('ymin' in dscfg['gridConfig']):
ymin = dscfg['gridConfig']['ymin']
if ('ymax' in dscfg['gridConfig']):
ymax = dscfg['gridConfig']['ymax']
if ('zmin' in dscfg['gridConfig']):
zmin = dscfg['gridConfig']['zmin']
if ('zmax' in dscfg['gridConfig']):
zmax = dscfg['gridConfig']['zmax']
if ('hres' in dscfg['gridConfig']):
hres = dscfg['gridConfig']['hres']
if ('vres' in dscfg['gridConfig']):
vres = dscfg['gridConfig']['vres']
if ('latorig' in dscfg['gridConfig']):
lat = dscfg['gridConfig']['latorig']
if ('lonorig' in dscfg['gridConfig']):
lon = dscfg['gridConfig']['lonorig']
if ('altorig' in dscfg['gridConfig']):
alt = dscfg['gridConfig']['altorig']
wfunc = dscfg.get('wfunc', 'NEAREST')
roi_func = dscfg.get('roi_func', 'dist_beam')
nz = (int(((zmax - zmin) / vres)) + 1)
ny = (int((((ymax - ymin) * 1000.0) / hres)) + 1)
nx = (int((((xmax - xmin) * 1000.0) / hres)) + 1)
min_radius = dscfg.get('roi', (np.max([vres, hres]) / 2.0))
beamwidth = dscfg.get('beamwidth', None)
beam_spacing = dscfg.get('beam_spacing', None)
if (beamwidth is None):
if ((radar.instrument_parameters is not None) and ('radar_beam_width_h' in radar.instrument_parameters)):
beamwidth = radar.instrument_parameters['radar_beam_width_h']['data'][0]
else:
warn('Unknown radar beamwidth. Default 1 deg will be used')
beamwidth = 1
if (beam_spacing is None):
if (radar.ray_angle_res is not None):
beam_spacing = radar.ray_angle_res['data'][0]
else:
warn('Unknown beam spacing. Default 1 deg will be used')
beam_spacing = 1
grid = pyart.map.grid_from_radars((radar,), gridding_algo='map_to_grid', weighting_function=wfunc, roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing, min_radius=min_radius, constant_roi=min_radius, grid_shape=(nz, ny, nx), grid_limits=((zmin, zmax), ((ymin * 1000.0), (ymax * 1000.0)), ((xmin * 1000.0), (xmax * 1000.0))), grid_origin=(lat, lon), grid_origin_alt=alt, fields=field_names)
new_dataset = {'radar_out': grid}
return (new_dataset, ind_rad) | def process_grid(procstatus, dscfg, radar_list=None):
'\n Puts the radar data in a regular grid\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n gridconfig : dictionary. Dataset keyword\n Dictionary containing some or all of this keywords:\n xmin, xmax, ymin, ymax, zmin, zmax : floats\n minimum and maximum horizontal distance from grid origin [km]\n and minimum and maximum vertical distance from grid origin [m]\n Defaults -40, 40, -40, 40, 0., 10000.\n hres, vres : floats\n horizontal and vertical grid resolution [m]\n Defaults 1000., 500.\n latorig, lonorig, altorig : floats\n latitude and longitude of grid origin [deg] and altitude of\n grid origin [m MSL]\n Defaults the latitude, longitude and altitude of the radar\n wfunc : str. Dataset keyword\n the weighting function used to combine the radar gates close to a\n grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST\n Default NEAREST\n roif_func : str. Dataset keyword\n the function used to compute the region of interest.\n Possible values: dist_beam, constant\n roi : float. Dataset keyword\n the (minimum) radius of the region of interest in m. Default half\n the largest resolution\n beamwidth : float. Dataset keyword\n the radar antenna beamwidth [deg]. If None that of the key\n radar_beam_width_h in attribute instrument_parameters of the radar\n object will be used. If the key or the attribute are not present\n a default 1 deg value will be used\n beam_spacing : float. Dataset keyword\n the beam spacing, i.e. the ray angle resolution [deg]. If None,\n that of the attribute ray_angle_res of the radar object will be\n used. If the attribute is None a default 1 deg value will be used\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the gridded data\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
field_names_aux = []
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_names_aux.append(get_fieldname_pyart(datatype))
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
radar = radar_list[ind_rad]
field_names = []
nfields_available = 0
for field_name in field_names_aux:
if (field_name not in radar.fields):
warn((('Field name ' + field_name) + ' not available in radar object'))
continue
field_names.append(field_name)
nfields_available += 1
if (nfields_available == 0):
warn('Fields not available in radar data')
return (None, None)
xmin = (- 40.0)
xmax = 40.0
ymin = (- 40.0)
ymax = 40.0
zmin = 0.0
zmax = 10000.0
hres = 1000.0
vres = 500.0
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = float(radar.altitude['data'])
if ('gridConfig' in dscfg):
if ('xmin' in dscfg['gridConfig']):
xmin = dscfg['gridConfig']['xmin']
if ('xmax' in dscfg['gridConfig']):
xmax = dscfg['gridConfig']['xmax']
if ('ymin' in dscfg['gridConfig']):
ymin = dscfg['gridConfig']['ymin']
if ('ymax' in dscfg['gridConfig']):
ymax = dscfg['gridConfig']['ymax']
if ('zmin' in dscfg['gridConfig']):
zmin = dscfg['gridConfig']['zmin']
if ('zmax' in dscfg['gridConfig']):
zmax = dscfg['gridConfig']['zmax']
if ('hres' in dscfg['gridConfig']):
hres = dscfg['gridConfig']['hres']
if ('vres' in dscfg['gridConfig']):
vres = dscfg['gridConfig']['vres']
if ('latorig' in dscfg['gridConfig']):
lat = dscfg['gridConfig']['latorig']
if ('lonorig' in dscfg['gridConfig']):
lon = dscfg['gridConfig']['lonorig']
if ('altorig' in dscfg['gridConfig']):
alt = dscfg['gridConfig']['altorig']
wfunc = dscfg.get('wfunc', 'NEAREST')
roi_func = dscfg.get('roi_func', 'dist_beam')
nz = (int(((zmax - zmin) / vres)) + 1)
ny = (int((((ymax - ymin) * 1000.0) / hres)) + 1)
nx = (int((((xmax - xmin) * 1000.0) / hres)) + 1)
min_radius = dscfg.get('roi', (np.max([vres, hres]) / 2.0))
beamwidth = dscfg.get('beamwidth', None)
beam_spacing = dscfg.get('beam_spacing', None)
if (beamwidth is None):
if ((radar.instrument_parameters is not None) and ('radar_beam_width_h' in radar.instrument_parameters)):
beamwidth = radar.instrument_parameters['radar_beam_width_h']['data'][0]
else:
warn('Unknown radar beamwidth. Default 1 deg will be used')
beamwidth = 1
if (beam_spacing is None):
if (radar.ray_angle_res is not None):
beam_spacing = radar.ray_angle_res['data'][0]
else:
warn('Unknown beam spacing. Default 1 deg will be used')
beam_spacing = 1
grid = pyart.map.grid_from_radars((radar,), gridding_algo='map_to_grid', weighting_function=wfunc, roi_func=roi_func, h_factor=1.0, nb=beamwidth, bsp=beam_spacing, min_radius=min_radius, constant_roi=min_radius, grid_shape=(nz, ny, nx), grid_limits=((zmin, zmax), ((ymin * 1000.0), (ymax * 1000.0)), ((xmin * 1000.0), (xmax * 1000.0))), grid_origin=(lat, lon), grid_origin_alt=alt, fields=field_names)
new_dataset = {'radar_out': grid}
return (new_dataset, ind_rad)<|docstring|>Puts the radar data in a regular grid
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
gridconfig : dictionary. Dataset keyword
Dictionary containing some or all of this keywords:
xmin, xmax, ymin, ymax, zmin, zmax : floats
minimum and maximum horizontal distance from grid origin [km]
and minimum and maximum vertical distance from grid origin [m]
Defaults -40, 40, -40, 40, 0., 10000.
hres, vres : floats
horizontal and vertical grid resolution [m]
Defaults 1000., 500.
latorig, lonorig, altorig : floats
latitude and longitude of grid origin [deg] and altitude of
grid origin [m MSL]
Defaults the latitude, longitude and altitude of the radar
wfunc : str. Dataset keyword
the weighting function used to combine the radar gates close to a
grid point. Possible values BARNES, BARNES2, CRESSMAN, NEAREST
Default NEAREST
roif_func : str. Dataset keyword
the function used to compute the region of interest.
Possible values: dist_beam, constant
roi : float. Dataset keyword
the (minimum) radius of the region of interest in m. Default half
the largest resolution
beamwidth : float. Dataset keyword
the radar antenna beamwidth [deg]. If None that of the key
radar_beam_width_h in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
a default 1 deg value will be used
beam_spacing : float. Dataset keyword
the beam spacing, i.e. the ray angle resolution [deg]. If None,
that of the attribute ray_angle_res of the radar object will be
used. If the attribute is None a default 1 deg value will be used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the gridded data
ind_rad : int
radar index<|endoftext|> |
7eda05eb891d9900c0d3f3519dd773e8f6a8f33511e5dd47a0cc0e37f7185d1c | def process_grid_point(procstatus, dscfg, radar_list=None):
'\n Obtains the grid data at a point location.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n latlon : boolean. Dataset keyword\n if True position is obtained from latitude, longitude information,\n otherwise position is obtained from grid index (iz, iy, ix).\n lon : float. Dataset keyword\n the longitude [deg]. Use when latlon is True.\n lat : float. Dataset keyword\n the latitude [deg]. Use when latlon is True.\n alt : float. Dataset keyword\n altitude [m MSL]. Use when latlon is True.\n iz, iy, ix : int. Dataset keyword\n The grid indices. Use when latlon is False\n latlonTol : float. Dataset keyword\n latitude-longitude tolerance to determine which grid point to use\n [deg]\n altTol : float. Dataset keyword\n Altitude tolerance to determine which grid point to use [deg]\n\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n '
if (procstatus == 0):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
break
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
new_dataset = {'time': dscfg['global_data']['time'], 'ref_time': dscfg['global_data']['ref_time'], 'datatype': datatype, 'point_coordinates_WGS84_lon_lat_alt': dscfg['global_data']['point_coordinates_WGS84_lon_lat_alt'], 'grid_points_iz_iy_ix': dscfg['global_data']['grid_points_iz_iy_ix'], 'final': True}
return (new_dataset, ind_rad)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn(('Unable to extract point measurement information. ' + 'Field not available'))
return (None, None)
if dscfg['latlon']:
lon = dscfg['lon']
lat = dscfg['lat']
alt = dscfg.get('alt', 0.0)
latlon_tol = dscfg.get('latlonTol', 1.0)
alt_tol = dscfg.get('altTol', 100.0)
d_lon = np.min(np.abs((grid.point_longitude['data'] - lon)))
if (d_lon > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to longitude ') + str(d_lon)) + ' larger than tolerance'))
return (None, None)
d_lat = np.min(np.abs((grid.point_latitude['data'] - lat)))
if (d_lat > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to latitude ') + str(d_lat)) + ' larger than tolerance'))
return (None, None)
d_alt = np.min(np.abs((grid.point_altitude['data'] - alt)))
if (d_alt > alt_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to altitude ') + str(d_alt)) + ' larger than tolerance'))
return (None, None)
(iz, iy, ix) = np.unravel_index(np.argmin((np.abs((grid.point_longitude['data'] - lon)) + np.abs((grid.point_latitude['data'] - lat)))), grid.point_longitude['data'].shape)
iz = np.argmin(np.abs((grid.point_altitude['data'][(:, iy, ix)] - alt)))
else:
ix = dscfg['ix']
iy = dscfg['iy']
iz = dscfg['iz']
lon = grid.point_longitude['data'][(iz, iy, ix)]
lat = grid.point_latitude['data'][(iz, iy, ix)]
alt = grid.point_altitude['data'][(iz, iy, ix)]
val = grid.fields[field_name]['data'][(iz, iy, ix)]
time = num2date(grid.time['data'][0], grid.time['units'], grid.time['calendar'])
if (dscfg['initialized'] == 0):
poi = {'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt], 'grid_points_iz_iy_ix': [iz, iy, ix], 'time': time, 'ref_time': dscfg['timeinfo']}
dscfg['global_data'] = poi
dscfg['initialized'] = 1
dscfg['global_data']['ref_time'] = dscfg['timeinfo']
new_dataset = dict()
new_dataset.update({'value': val})
new_dataset.update({'datatype': datatype})
new_dataset.update({'time': time})
new_dataset.update({'ref_time': dscfg['timeinfo']})
new_dataset.update({'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt]})
new_dataset.update({'grid_points_iz_iy_ix': [iz, iy, ix]})
new_dataset.update({'used_coordinates_WGS84_lon_lat_alt': [grid.point_longitude['data'][(iz, iy, ix)], grid.point_latitude['data'][(iz, iy, ix)], grid.point_altitude['data'][(iz, iy, ix)]]})
new_dataset.update({'final': False})
return (new_dataset, ind_rad) | Obtains the grid data at a point location.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
latlon : boolean. Dataset keyword
if True position is obtained from latitude, longitude information,
otherwise position is obtained from grid index (iz, iy, ix).
lon : float. Dataset keyword
the longitude [deg]. Use when latlon is True.
lat : float. Dataset keyword
the latitude [deg]. Use when latlon is True.
alt : float. Dataset keyword
altitude [m MSL]. Use when latlon is True.
iz, iy, ix : int. Dataset keyword
The grid indices. Use when latlon is False
latlonTol : float. Dataset keyword
latitude-longitude tolerance to determine which grid point to use
[deg]
altTol : float. Dataset keyword
Altitude tolerance to determine which grid point to use [deg]
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the data and metadata at the point of interest
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_point | jfigui/pyrad | 41 | python | def process_grid_point(procstatus, dscfg, radar_list=None):
'\n Obtains the grid data at a point location.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n latlon : boolean. Dataset keyword\n if True position is obtained from latitude, longitude information,\n otherwise position is obtained from grid index (iz, iy, ix).\n lon : float. Dataset keyword\n the longitude [deg]. Use when latlon is True.\n lat : float. Dataset keyword\n the latitude [deg]. Use when latlon is True.\n alt : float. Dataset keyword\n altitude [m MSL]. Use when latlon is True.\n iz, iy, ix : int. Dataset keyword\n The grid indices. Use when latlon is False\n latlonTol : float. Dataset keyword\n latitude-longitude tolerance to determine which grid point to use\n [deg]\n altTol : float. Dataset keyword\n Altitude tolerance to determine which grid point to use [deg]\n\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n '
if (procstatus == 0):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
break
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
new_dataset = {'time': dscfg['global_data']['time'], 'ref_time': dscfg['global_data']['ref_time'], 'datatype': datatype, 'point_coordinates_WGS84_lon_lat_alt': dscfg['global_data']['point_coordinates_WGS84_lon_lat_alt'], 'grid_points_iz_iy_ix': dscfg['global_data']['grid_points_iz_iy_ix'], 'final': True}
return (new_dataset, ind_rad)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn(('Unable to extract point measurement information. ' + 'Field not available'))
return (None, None)
if dscfg['latlon']:
lon = dscfg['lon']
lat = dscfg['lat']
alt = dscfg.get('alt', 0.0)
latlon_tol = dscfg.get('latlonTol', 1.0)
alt_tol = dscfg.get('altTol', 100.0)
d_lon = np.min(np.abs((grid.point_longitude['data'] - lon)))
if (d_lon > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to longitude ') + str(d_lon)) + ' larger than tolerance'))
return (None, None)
d_lat = np.min(np.abs((grid.point_latitude['data'] - lat)))
if (d_lat > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to latitude ') + str(d_lat)) + ' larger than tolerance'))
return (None, None)
d_alt = np.min(np.abs((grid.point_altitude['data'] - alt)))
if (d_alt > alt_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to altitude ') + str(d_alt)) + ' larger than tolerance'))
return (None, None)
(iz, iy, ix) = np.unravel_index(np.argmin((np.abs((grid.point_longitude['data'] - lon)) + np.abs((grid.point_latitude['data'] - lat)))), grid.point_longitude['data'].shape)
iz = np.argmin(np.abs((grid.point_altitude['data'][(:, iy, ix)] - alt)))
else:
ix = dscfg['ix']
iy = dscfg['iy']
iz = dscfg['iz']
lon = grid.point_longitude['data'][(iz, iy, ix)]
lat = grid.point_latitude['data'][(iz, iy, ix)]
alt = grid.point_altitude['data'][(iz, iy, ix)]
val = grid.fields[field_name]['data'][(iz, iy, ix)]
time = num2date(grid.time['data'][0], grid.time['units'], grid.time['calendar'])
if (dscfg['initialized'] == 0):
poi = {'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt], 'grid_points_iz_iy_ix': [iz, iy, ix], 'time': time, 'ref_time': dscfg['timeinfo']}
dscfg['global_data'] = poi
dscfg['initialized'] = 1
dscfg['global_data']['ref_time'] = dscfg['timeinfo']
new_dataset = dict()
new_dataset.update({'value': val})
new_dataset.update({'datatype': datatype})
new_dataset.update({'time': time})
new_dataset.update({'ref_time': dscfg['timeinfo']})
new_dataset.update({'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt]})
new_dataset.update({'grid_points_iz_iy_ix': [iz, iy, ix]})
new_dataset.update({'used_coordinates_WGS84_lon_lat_alt': [grid.point_longitude['data'][(iz, iy, ix)], grid.point_latitude['data'][(iz, iy, ix)], grid.point_altitude['data'][(iz, iy, ix)]]})
new_dataset.update({'final': False})
return (new_dataset, ind_rad) | def process_grid_point(procstatus, dscfg, radar_list=None):
'\n Obtains the grid data at a point location.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n latlon : boolean. Dataset keyword\n if True position is obtained from latitude, longitude information,\n otherwise position is obtained from grid index (iz, iy, ix).\n lon : float. Dataset keyword\n the longitude [deg]. Use when latlon is True.\n lat : float. Dataset keyword\n the latitude [deg]. Use when latlon is True.\n alt : float. Dataset keyword\n altitude [m MSL]. Use when latlon is True.\n iz, iy, ix : int. Dataset keyword\n The grid indices. Use when latlon is False\n latlonTol : float. Dataset keyword\n latitude-longitude tolerance to determine which grid point to use\n [deg]\n altTol : float. Dataset keyword\n Altitude tolerance to determine which grid point to use [deg]\n\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n '
if (procstatus == 0):
return (None, None)
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
break
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
new_dataset = {'time': dscfg['global_data']['time'], 'ref_time': dscfg['global_data']['ref_time'], 'datatype': datatype, 'point_coordinates_WGS84_lon_lat_alt': dscfg['global_data']['point_coordinates_WGS84_lon_lat_alt'], 'grid_points_iz_iy_ix': dscfg['global_data']['grid_points_iz_iy_ix'], 'final': True}
return (new_dataset, ind_rad)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn(('Unable to extract point measurement information. ' + 'Field not available'))
return (None, None)
if dscfg['latlon']:
lon = dscfg['lon']
lat = dscfg['lat']
alt = dscfg.get('alt', 0.0)
latlon_tol = dscfg.get('latlonTol', 1.0)
alt_tol = dscfg.get('altTol', 100.0)
d_lon = np.min(np.abs((grid.point_longitude['data'] - lon)))
if (d_lon > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to longitude ') + str(d_lon)) + ' larger than tolerance'))
return (None, None)
d_lat = np.min(np.abs((grid.point_latitude['data'] - lat)))
if (d_lat > latlon_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to latitude ') + str(d_lat)) + ' larger than tolerance'))
return (None, None)
d_alt = np.min(np.abs((grid.point_altitude['data'] - alt)))
if (d_alt > alt_tol):
warn(((((((((' No grid point found for point (lat, lon, alt):(' + str(lat)) + ', ') + str(lon)) + ', ') + str(alt)) + '). Minimum distance to altitude ') + str(d_alt)) + ' larger than tolerance'))
return (None, None)
(iz, iy, ix) = np.unravel_index(np.argmin((np.abs((grid.point_longitude['data'] - lon)) + np.abs((grid.point_latitude['data'] - lat)))), grid.point_longitude['data'].shape)
iz = np.argmin(np.abs((grid.point_altitude['data'][(:, iy, ix)] - alt)))
else:
ix = dscfg['ix']
iy = dscfg['iy']
iz = dscfg['iz']
lon = grid.point_longitude['data'][(iz, iy, ix)]
lat = grid.point_latitude['data'][(iz, iy, ix)]
alt = grid.point_altitude['data'][(iz, iy, ix)]
val = grid.fields[field_name]['data'][(iz, iy, ix)]
time = num2date(grid.time['data'][0], grid.time['units'], grid.time['calendar'])
if (dscfg['initialized'] == 0):
poi = {'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt], 'grid_points_iz_iy_ix': [iz, iy, ix], 'time': time, 'ref_time': dscfg['timeinfo']}
dscfg['global_data'] = poi
dscfg['initialized'] = 1
dscfg['global_data']['ref_time'] = dscfg['timeinfo']
new_dataset = dict()
new_dataset.update({'value': val})
new_dataset.update({'datatype': datatype})
new_dataset.update({'time': time})
new_dataset.update({'ref_time': dscfg['timeinfo']})
new_dataset.update({'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt]})
new_dataset.update({'grid_points_iz_iy_ix': [iz, iy, ix]})
new_dataset.update({'used_coordinates_WGS84_lon_lat_alt': [grid.point_longitude['data'][(iz, iy, ix)], grid.point_latitude['data'][(iz, iy, ix)], grid.point_altitude['data'][(iz, iy, ix)]]})
new_dataset.update({'final': False})
return (new_dataset, ind_rad)<|docstring|>Obtains the grid data at a point location.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
latlon : boolean. Dataset keyword
if True position is obtained from latitude, longitude information,
otherwise position is obtained from grid index (iz, iy, ix).
lon : float. Dataset keyword
the longitude [deg]. Use when latlon is True.
lat : float. Dataset keyword
the latitude [deg]. Use when latlon is True.
alt : float. Dataset keyword
altitude [m MSL]. Use when latlon is True.
iz, iy, ix : int. Dataset keyword
The grid indices. Use when latlon is False
latlonTol : float. Dataset keyword
latitude-longitude tolerance to determine which grid point to use
[deg]
altTol : float. Dataset keyword
Altitude tolerance to determine which grid point to use [deg]
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the data and metadata at the point of interest
ind_rad : int
radar index<|endoftext|> |
eaa86739e688369ee04428299f50bb2e745089dcefd4961efe2101618a367222 | def process_grid_time_stats(procstatus, dscfg, radar_list=None):
'\n computes the temporal statistics of a field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n period : float. Dataset keyword\n the period to average [s]. If -1 the statistics are going to be\n performed over the entire data. Default 3600.\n start_average : float. Dataset keyword\n when to start the average [s from midnight UTC]. Default 0.\n lin_trans: int. Dataset keyword\n If 1 apply linear transformation before averaging\n use_nan : bool. Dataset keyword\n If true non valid data will be used\n nan_value : float. Dataset keyword\n The value of the non valid data. Default 0\n stat: string. Dataset keyword\n Statistic to compute: Can be mean, std, cov, min, max. Default\n mean\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = (int(radarnr[5:8]) - 1)
start_average = dscfg.get('start_average', 0.0)
period = dscfg.get('period', 3600.0)
lin_trans = dscfg.get('lin_trans', 0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.0)
stat = dscfg.get('stat', 'mean')
if (procstatus == 0):
return (None, None)
if (procstatus == 1):
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((field_name + ' not available.'))
return (None, None)
field_dict = deepcopy(grid.fields[field_name])
if (stat in ('mean', 'std', 'cov')):
if lin_trans:
field_dict['data'] = np.ma.power(10.0, (0.1 * field_dict['data']))
if use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
if (stat in ('std', 'cov')):
sum2_dict = pyart.config.get_metadata('sum_squared')
sum2_dict['data'] = (field_dict['data'] * field_dict['data'])
elif use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(np.logical_not(np.ma.getmaskarray(field_dict['data'])), dtype=int)
grid_aux = deepcopy(grid)
grid_aux.fields = dict()
grid_aux.add_field(field_name, field_dict)
grid_aux.add_field('number_of_samples', npoints_dict)
if (stat in ('std', 'cov')):
grid_aux.add_field('sum_squared', sum2_dict)
if (dscfg['initialized'] == 0):
avg_par = dict()
if (period != (- 1)):
date_00 = dscfg['timeinfo'].replace(hour=0, minute=0, second=0, microsecond=0)
avg_par.update({'starttime': (date_00 + datetime.timedelta(seconds=start_average))})
avg_par.update({'endtime': (avg_par['starttime'] + datetime.timedelta(seconds=period))})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if (dscfg['initialized'] == 0):
return (None, None)
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
if ('grid_out' not in dscfg['global_data']):
if (period != (- 1)):
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
else:
dscfg['global_data'].update({'grid_out': grid_aux})
return (None, None)
if ((period == (- 1)) or (dscfg['timeinfo'] < dscfg['global_data']['endtime'])):
if (period == (- 1)):
dscfg['global_data']['endtime'] = dscfg['timeinfo']
dscfg['global_data']['grid_out'].fields['number_of_samples']['data'] += npoints_dict['data']
if (stat in ('mean', 'std', 'cov')):
masked_sum = np.ma.getmaskarray(dscfg['global_data']['grid_out'].fields[field_name]['data'])
valid_sum = np.logical_and(np.logical_not(masked_sum), np.logical_not(np.ma.getmaskarray(field_dict['data'])))
dscfg['global_data']['grid_out'].fields[field_name]['data'][masked_sum] = field_dict['data'][masked_sum]
dscfg['global_data']['grid_out'].fields[field_name]['data'][valid_sum] += field_dict['data'][valid_sum]
if (stat in ('cov', 'std')):
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][masked_sum] = (field_dict['data'][masked_sum] * field_dict['data'][masked_sum])
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][valid_sum] += (field_dict['data'][valid_sum] * field_dict['data'][valid_sum])
elif (stat == 'max'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.maximum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=(- 1e+300)), field_dict['data'].filled(fill_value=(- 1e+300)))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], (- 1e+300))
elif (stat == 'min'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.minimum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=1e+300), field_dict['data'].filled(fill_value=1e+300))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], 1e+300)
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
elif lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10((field_std / field_mean)))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
dscfg['global_data'].pop('grid_out', None)
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
return (new_dataset, ind_rad)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
if ('grid_out' not in dscfg['global_data']):
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
return (new_dataset, ind_rad) | computes the temporal statistics of a field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
lin_trans: int. Dataset keyword
If 1 apply linear transformation before averaging
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
stat: string. Dataset keyword
Statistic to compute: Can be mean, std, cov, min, max. Default
mean
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_time_stats | jfigui/pyrad | 41 | python | def process_grid_time_stats(procstatus, dscfg, radar_list=None):
'\n computes the temporal statistics of a field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n period : float. Dataset keyword\n the period to average [s]. If -1 the statistics are going to be\n performed over the entire data. Default 3600.\n start_average : float. Dataset keyword\n when to start the average [s from midnight UTC]. Default 0.\n lin_trans: int. Dataset keyword\n If 1 apply linear transformation before averaging\n use_nan : bool. Dataset keyword\n If true non valid data will be used\n nan_value : float. Dataset keyword\n The value of the non valid data. Default 0\n stat: string. Dataset keyword\n Statistic to compute: Can be mean, std, cov, min, max. Default\n mean\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = (int(radarnr[5:8]) - 1)
start_average = dscfg.get('start_average', 0.0)
period = dscfg.get('period', 3600.0)
lin_trans = dscfg.get('lin_trans', 0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.0)
stat = dscfg.get('stat', 'mean')
if (procstatus == 0):
return (None, None)
if (procstatus == 1):
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((field_name + ' not available.'))
return (None, None)
field_dict = deepcopy(grid.fields[field_name])
if (stat in ('mean', 'std', 'cov')):
if lin_trans:
field_dict['data'] = np.ma.power(10.0, (0.1 * field_dict['data']))
if use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
if (stat in ('std', 'cov')):
sum2_dict = pyart.config.get_metadata('sum_squared')
sum2_dict['data'] = (field_dict['data'] * field_dict['data'])
elif use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(np.logical_not(np.ma.getmaskarray(field_dict['data'])), dtype=int)
grid_aux = deepcopy(grid)
grid_aux.fields = dict()
grid_aux.add_field(field_name, field_dict)
grid_aux.add_field('number_of_samples', npoints_dict)
if (stat in ('std', 'cov')):
grid_aux.add_field('sum_squared', sum2_dict)
if (dscfg['initialized'] == 0):
avg_par = dict()
if (period != (- 1)):
date_00 = dscfg['timeinfo'].replace(hour=0, minute=0, second=0, microsecond=0)
avg_par.update({'starttime': (date_00 + datetime.timedelta(seconds=start_average))})
avg_par.update({'endtime': (avg_par['starttime'] + datetime.timedelta(seconds=period))})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if (dscfg['initialized'] == 0):
return (None, None)
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
if ('grid_out' not in dscfg['global_data']):
if (period != (- 1)):
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
else:
dscfg['global_data'].update({'grid_out': grid_aux})
return (None, None)
if ((period == (- 1)) or (dscfg['timeinfo'] < dscfg['global_data']['endtime'])):
if (period == (- 1)):
dscfg['global_data']['endtime'] = dscfg['timeinfo']
dscfg['global_data']['grid_out'].fields['number_of_samples']['data'] += npoints_dict['data']
if (stat in ('mean', 'std', 'cov')):
masked_sum = np.ma.getmaskarray(dscfg['global_data']['grid_out'].fields[field_name]['data'])
valid_sum = np.logical_and(np.logical_not(masked_sum), np.logical_not(np.ma.getmaskarray(field_dict['data'])))
dscfg['global_data']['grid_out'].fields[field_name]['data'][masked_sum] = field_dict['data'][masked_sum]
dscfg['global_data']['grid_out'].fields[field_name]['data'][valid_sum] += field_dict['data'][valid_sum]
if (stat in ('cov', 'std')):
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][masked_sum] = (field_dict['data'][masked_sum] * field_dict['data'][masked_sum])
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][valid_sum] += (field_dict['data'][valid_sum] * field_dict['data'][valid_sum])
elif (stat == 'max'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.maximum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=(- 1e+300)), field_dict['data'].filled(fill_value=(- 1e+300)))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], (- 1e+300))
elif (stat == 'min'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.minimum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=1e+300), field_dict['data'].filled(fill_value=1e+300))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], 1e+300)
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
elif lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10((field_std / field_mean)))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
dscfg['global_data'].pop('grid_out', None)
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
return (new_dataset, ind_rad)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
if ('grid_out' not in dscfg['global_data']):
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
return (new_dataset, ind_rad) | def process_grid_time_stats(procstatus, dscfg, radar_list=None):
'\n computes the temporal statistics of a field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n period : float. Dataset keyword\n the period to average [s]. If -1 the statistics are going to be\n performed over the entire data. Default 3600.\n start_average : float. Dataset keyword\n when to start the average [s from midnight UTC]. Default 0.\n lin_trans: int. Dataset keyword\n If 1 apply linear transformation before averaging\n use_nan : bool. Dataset keyword\n If true non valid data will be used\n nan_value : float. Dataset keyword\n The value of the non valid data. Default 0\n stat: string. Dataset keyword\n Statistic to compute: Can be mean, std, cov, min, max. Default\n mean\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = (int(radarnr[5:8]) - 1)
start_average = dscfg.get('start_average', 0.0)
period = dscfg.get('period', 3600.0)
lin_trans = dscfg.get('lin_trans', 0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.0)
stat = dscfg.get('stat', 'mean')
if (procstatus == 0):
return (None, None)
if (procstatus == 1):
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((field_name + ' not available.'))
return (None, None)
field_dict = deepcopy(grid.fields[field_name])
if (stat in ('mean', 'std', 'cov')):
if lin_trans:
field_dict['data'] = np.ma.power(10.0, (0.1 * field_dict['data']))
if use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
if (stat in ('std', 'cov')):
sum2_dict = pyart.config.get_metadata('sum_squared')
sum2_dict['data'] = (field_dict['data'] * field_dict['data'])
elif use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(np.logical_not(np.ma.getmaskarray(field_dict['data'])), dtype=int)
grid_aux = deepcopy(grid)
grid_aux.fields = dict()
grid_aux.add_field(field_name, field_dict)
grid_aux.add_field('number_of_samples', npoints_dict)
if (stat in ('std', 'cov')):
grid_aux.add_field('sum_squared', sum2_dict)
if (dscfg['initialized'] == 0):
avg_par = dict()
if (period != (- 1)):
date_00 = dscfg['timeinfo'].replace(hour=0, minute=0, second=0, microsecond=0)
avg_par.update({'starttime': (date_00 + datetime.timedelta(seconds=start_average))})
avg_par.update({'endtime': (avg_par['starttime'] + datetime.timedelta(seconds=period))})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if (dscfg['initialized'] == 0):
return (None, None)
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
if ('grid_out' not in dscfg['global_data']):
if (period != (- 1)):
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
else:
dscfg['global_data'].update({'grid_out': grid_aux})
return (None, None)
if ((period == (- 1)) or (dscfg['timeinfo'] < dscfg['global_data']['endtime'])):
if (period == (- 1)):
dscfg['global_data']['endtime'] = dscfg['timeinfo']
dscfg['global_data']['grid_out'].fields['number_of_samples']['data'] += npoints_dict['data']
if (stat in ('mean', 'std', 'cov')):
masked_sum = np.ma.getmaskarray(dscfg['global_data']['grid_out'].fields[field_name]['data'])
valid_sum = np.logical_and(np.logical_not(masked_sum), np.logical_not(np.ma.getmaskarray(field_dict['data'])))
dscfg['global_data']['grid_out'].fields[field_name]['data'][masked_sum] = field_dict['data'][masked_sum]
dscfg['global_data']['grid_out'].fields[field_name]['data'][valid_sum] += field_dict['data'][valid_sum]
if (stat in ('cov', 'std')):
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][masked_sum] = (field_dict['data'][masked_sum] * field_dict['data'][masked_sum])
dscfg['global_data']['grid_out'].fields['sum_squared']['data'][valid_sum] += (field_dict['data'][valid_sum] * field_dict['data'][valid_sum])
elif (stat == 'max'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.maximum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=(- 1e+300)), field_dict['data'].filled(fill_value=(- 1e+300)))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], (- 1e+300))
elif (stat == 'min'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.minimum(dscfg['global_data']['grid_out'].fields[field_name]['data'].filled(fill_value=1e+300), field_dict['data'].filled(fill_value=1e+300))
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_values(dscfg['global_data']['grid_out'].fields[field_name]['data'], 1e+300)
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
elif lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10((field_std / field_mean)))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
dscfg['global_data'].pop('grid_out', None)
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
return (new_dataset, ind_rad)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
if ('grid_out' not in dscfg['global_data']):
return (None, None)
if (stat in ('mean', 'std', 'cov')):
field_mean = (dscfg['global_data']['grid_out'].fields[field_name]['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data'])
if (stat == 'mean'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_mean))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_mean
elif (stat in ('std', 'cov')):
field_std = np.ma.sqrt(((dscfg['global_data']['grid_out'].fields['sum_squared']['data'] / dscfg['global_data']['grid_out'].fields['number_of_samples']['data']) - (field_mean * field_mean)))
if (stat == 'std'):
if lin_trans:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (10.0 * np.ma.log10(field_std))
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = field_std
else:
dscfg['global_data']['grid_out'].fields[field_name]['data'] = (field_std / field_mean)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
return (new_dataset, ind_rad)<|docstring|>computes the temporal statistics of a field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
lin_trans: int. Dataset keyword
If 1 apply linear transformation before averaging
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
stat: string. Dataset keyword
Statistic to compute: Can be mean, std, cov, min, max. Default
mean
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
def process_grid_time_stats2(procstatus, dscfg, radar_list=None):
    """
    Computes temporal statistics of a gridded field.

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : list of string. Dataset keyword
            The input data types
        period : float. Dataset keyword
            the period to average [s]. If -1 the statistics are going to
            be performed over the entire data. Default 3600.
        start_average : float. Dataset keyword
            when to start the average [s from midnight UTC]. Default 0.
        stat : string. Dataset keyword
            Statistic to compute. Can be median, mode, percentileXX
        use_nan : bool. Dataset keyword
            If true non valid data will be used
        nan_value : float. Dataset keyword
            The value of the non valid data. Default 0
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index

    """
    # only the first data type specified is processed
    for datatypedescr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
        field_name = get_fieldname_pyart(datatype)
        break
    ind_rad = int(radarnr[5:8]) - 1

    start_average = dscfg.get('start_average', 0.)
    period = dscfg.get('period', 3600.)
    use_nan = dscfg.get('use_nan', 0)
    nan_value = dscfg.get('nan_value', 0.)
    stat = dscfg.get('stat', 'median')
    if 'percentile' in stat:
        # 'percentileXX' -> XX
        percentile = float(stat.replace('percentile', ''))

    if procstatus == 0:
        return None, None

    if procstatus == 1:
        if radar_list[ind_rad] is None:
            warn('No valid radar')
            return None, None
        grid = radar_list[ind_rad]

        if field_name not in grid.fields:
            warn(field_name + ' not available.')
            return None, None

        # prepare auxiliary grid holding the field and the sample count
        field_dict = deepcopy(grid.fields[field_name])
        if use_nan:
            field_dict['data'] = np.ma.asarray(
                field_dict['data'].filled(nan_value))
        npoints_dict = pyart.config.get_metadata('number_of_samples')
        npoints_dict['data'] = np.ma.asarray(
            np.logical_not(np.ma.getmaskarray(field_dict['data'])),
            dtype=int)

        grid_aux = deepcopy(grid)
        grid_aux.fields = dict()
        grid_aux.add_field(field_name, field_dict)
        grid_aux.add_field('number_of_samples', npoints_dict)

        # first volume: initialize start and end time of averaging
        if dscfg['initialized'] == 0:
            avg_par = dict()
            if period != -1:
                date_00 = dscfg['timeinfo'].replace(
                    hour=0, minute=0, second=0, microsecond=0)
                avg_par.update({
                    'starttime': date_00 + datetime.timedelta(
                        seconds=start_average)})
                avg_par.update({
                    'endtime': avg_par['starttime'] + datetime.timedelta(
                        seconds=period)})
            else:
                avg_par.update({'starttime': dscfg['timeinfo']})
                avg_par.update({'endtime': dscfg['timeinfo']})
            avg_par.update({'timeinfo': dscfg['timeinfo']})
            dscfg['global_data'] = avg_par
            dscfg['initialized'] = 1

        if dscfg['initialized'] == 0:
            return None, None

        dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
        # no grid object in global data: create it
        if 'grid_out' not in dscfg['global_data']:
            if period != -1:
                # get start and stop times of new grid object
                (dscfg['global_data']['starttime'],
                 dscfg['global_data']['endtime']) = time_avg_range(
                     dscfg['timeinfo'], dscfg['global_data']['starttime'],
                     dscfg['global_data']['endtime'], period)

                # only accept the volume if it is within the current period
                if dscfg['timeinfo'] > dscfg['global_data']['starttime']:
                    dscfg['global_data'].update({'grid_out': grid_aux})
                    dscfg['global_data'].update({
                        'field_data': np.expand_dims(
                            grid_aux.fields[field_name]['data'], axis=0)})
            else:
                dscfg['global_data'].update({'grid_out': grid_aux})
                dscfg['global_data'].update({
                    'field_data': np.expand_dims(
                        grid_aux.fields[field_name]['data'], axis=0)})

            return None, None

        # still accumulating: stack the new field on the global data
        if period == -1 or dscfg['timeinfo'] < dscfg['global_data']['endtime']:
            if period == -1:
                dscfg['global_data']['endtime'] = dscfg['timeinfo']
            dscfg['global_data']['grid_out'].fields[
                'number_of_samples']['data'] += npoints_dict['data']
            dscfg['global_data']['field_data'] = np.ma.append(
                dscfg['global_data']['field_data'],
                np.expand_dims(field_dict['data'], axis=0), axis=0)

            return None, None

        # accumulation period over: compute the statistic
        if stat == 'median':
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.median(dscfg['global_data']['field_data'], axis=0))
        elif stat == 'mode':
            mode_data, _ = scipy.stats.mode(
                dscfg['global_data']['field_data'].filled(fill_value=np.nan),
                axis=0, nan_policy='omit')
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.masked_invalid(np.squeeze(mode_data, axis=0)))
        elif 'percentile' in stat:
            percent_data = np.nanpercentile(
                dscfg['global_data']['field_data'].filled(fill_value=np.nan),
                percentile, axis=0)
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.masked_invalid(percent_data))

        new_dataset = {
            'radar_out': deepcopy(dscfg['global_data']['grid_out']),
            'timeinfo': dscfg['global_data']['endtime']}

        # start a new averaging period
        dscfg['global_data']['starttime'] += datetime.timedelta(
            seconds=period)
        dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)

        dscfg['global_data'].pop('grid_out', None)

        # get start and stop times of the new grid object
        (dscfg['global_data']['starttime'],
         dscfg['global_data']['endtime']) = time_avg_range(
             dscfg['timeinfo'], dscfg['global_data']['starttime'],
             dscfg['global_data']['endtime'], period)

        # only accept the volume if it is within the new period
        if dscfg['timeinfo'] > dscfg['global_data']['starttime']:
            dscfg['global_data'].update({'grid_out': grid_aux})

        return new_dataset, ind_rad

    if procstatus == 2:
        if dscfg['initialized'] == 0:
            return None, None
        if 'grid_out' not in dscfg['global_data']:
            return None, None

        # compute statistic on the accumulated data
        if stat == 'median':
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.median(dscfg['global_data']['field_data'], axis=0))
        elif stat == 'mode':
            mode_data, _ = scipy.stats.mode(
                dscfg['global_data']['field_data'].filled(fill_value=np.nan),
                axis=0, nan_policy='omit')
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.masked_invalid(np.squeeze(mode_data, axis=0)))
        elif 'percentile' in stat:
            percent_data = np.nanpercentile(
                dscfg['global_data']['field_data'].filled(fill_value=np.nan),
                percentile, axis=0)
            dscfg['global_data']['grid_out'].fields[field_name]['data'] = (
                np.ma.masked_invalid(percent_data))

        new_dataset = {
            'radar_out': deepcopy(dscfg['global_data']['grid_out']),
            'timeinfo': dscfg['global_data']['endtime']}

        return new_dataset, ind_rad
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
stat: string. Dataset keyword
Statistic to compute: Can be median, mode, percentileXX
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_time_stats2 | jfigui/pyrad | 41 | python | def process_grid_time_stats2(procstatus, dscfg, radar_list=None):
'\n computes temporal statistics of a field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n period : float. Dataset keyword\n the period to average [s]. If -1 the statistics are going to be\n performed over the entire data. Default 3600.\n start_average : float. Dataset keyword\n when to start the average [s from midnight UTC]. Default 0.\n stat: string. Dataset keyword\n Statistic to compute: Can be median, mode, percentileXX\n use_nan : bool. Dataset keyword\n If true non valid data will be used\n nan_value : float. Dataset keyword\n The value of the non valid data. Default 0\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = (int(radarnr[5:8]) - 1)
start_average = dscfg.get('start_average', 0.0)
period = dscfg.get('period', 3600.0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.0)
stat = dscfg.get('stat', 'median')
if ('percentile' in stat):
percentile = float(stat.replace('percentile', ))
if (procstatus == 0):
return (None, None)
if (procstatus == 1):
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((field_name + ' not available.'))
return (None, None)
field_dict = deepcopy(grid.fields[field_name])
if use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(np.logical_not(np.ma.getmaskarray(field_dict['data'])), dtype=int)
grid_aux = deepcopy(grid)
grid_aux.fields = dict()
grid_aux.add_field(field_name, field_dict)
grid_aux.add_field('number_of_samples', npoints_dict)
if (dscfg['initialized'] == 0):
avg_par = dict()
if (period != (- 1)):
date_00 = dscfg['timeinfo'].replace(hour=0, minute=0, second=0, microsecond=0)
avg_par.update({'starttime': (date_00 + datetime.timedelta(seconds=start_average))})
avg_par.update({'endtime': (avg_par['starttime'] + datetime.timedelta(seconds=period))})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if (dscfg['initialized'] == 0):
return (None, None)
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
if ('grid_out' not in dscfg['global_data']):
if (period != (- 1)):
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
dscfg['global_data'].update({'field_data': np.expand_dims(grid_aux.fields[field_name]['data'], axis=0)})
else:
dscfg['global_data'].update({'grid_out': grid_aux})
dscfg['global_data'].update({'field_data': np.expand_dims(grid_aux.fields[field_name]['data'], axis=0)})
return (None, None)
if ((period == (- 1)) or (dscfg['timeinfo'] < dscfg['global_data']['endtime'])):
if (period == (- 1)):
dscfg['global_data']['endtime'] = dscfg['timeinfo']
dscfg['global_data']['grid_out'].fields['number_of_samples']['data'] += npoints_dict['data']
dscfg['global_data']['field_data'] = np.ma.append(dscfg['global_data']['field_data'], np.expand_dims(field_dict['data'], axis=0), axis=0)
return (None, None)
if (stat == 'median'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.median(dscfg['global_data']['field_data'], axis=0)
elif (stat == 'mode'):
(mode_data, _) = scipy.stats.mode(dscfg['global_data']['field_data'].filled(fill_value=np.nan), axis=0, nan_policy='omit')
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(np.squeeze(mode_data, axis=0))
elif ('percentile' in stat):
percent_data = np.nanpercentile(dscfg['global_data']['field_data'].filled(fill_value=np.nan), percentile, axis=0)
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(percent_data)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
dscfg['global_data'].pop('grid_out', None)
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
return (new_dataset, ind_rad)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
if ('grid_out' not in dscfg['global_data']):
return (None, None)
if (stat == 'median'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.median(dscfg['global_data']['field_data'], axis=0)
elif (stat == 'mode'):
(mode_data, _) = scipy.stats.mode(dscfg['global_data']['field_data'].filled(fill_value=np.nan), axis=0, nan_policy='omit')
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(np.squeeze(mode_data, axis=0))
elif ('percentile' in stat):
percent_data = np.nanpercentile(dscfg['global_data']['field_data'].filled(fill_value=np.nan), percentile, axis=0)
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(percent_data)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
return (new_dataset, ind_rad) | def process_grid_time_stats2(procstatus, dscfg, radar_list=None):
'\n computes temporal statistics of a field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n period : float. Dataset keyword\n the period to average [s]. If -1 the statistics are going to be\n performed over the entire data. Default 3600.\n start_average : float. Dataset keyword\n when to start the average [s from midnight UTC]. Default 0.\n stat: string. Dataset keyword\n Statistic to compute: Can be median, mode, percentileXX\n use_nan : bool. Dataset keyword\n If true non valid data will be used\n nan_value : float. Dataset keyword\n The value of the non valid data. Default 0\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = (int(radarnr[5:8]) - 1)
start_average = dscfg.get('start_average', 0.0)
period = dscfg.get('period', 3600.0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.0)
stat = dscfg.get('stat', 'median')
if ('percentile' in stat):
percentile = float(stat.replace('percentile', ))
if (procstatus == 0):
return (None, None)
if (procstatus == 1):
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((field_name + ' not available.'))
return (None, None)
field_dict = deepcopy(grid.fields[field_name])
if use_nan:
field_dict['data'] = np.ma.asarray(field_dict['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(np.logical_not(np.ma.getmaskarray(field_dict['data'])), dtype=int)
grid_aux = deepcopy(grid)
grid_aux.fields = dict()
grid_aux.add_field(field_name, field_dict)
grid_aux.add_field('number_of_samples', npoints_dict)
if (dscfg['initialized'] == 0):
avg_par = dict()
if (period != (- 1)):
date_00 = dscfg['timeinfo'].replace(hour=0, minute=0, second=0, microsecond=0)
avg_par.update({'starttime': (date_00 + datetime.timedelta(seconds=start_average))})
avg_par.update({'endtime': (avg_par['starttime'] + datetime.timedelta(seconds=period))})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if (dscfg['initialized'] == 0):
return (None, None)
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
if ('grid_out' not in dscfg['global_data']):
if (period != (- 1)):
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
dscfg['global_data'].update({'field_data': np.expand_dims(grid_aux.fields[field_name]['data'], axis=0)})
else:
dscfg['global_data'].update({'grid_out': grid_aux})
dscfg['global_data'].update({'field_data': np.expand_dims(grid_aux.fields[field_name]['data'], axis=0)})
return (None, None)
if ((period == (- 1)) or (dscfg['timeinfo'] < dscfg['global_data']['endtime'])):
if (period == (- 1)):
dscfg['global_data']['endtime'] = dscfg['timeinfo']
dscfg['global_data']['grid_out'].fields['number_of_samples']['data'] += npoints_dict['data']
dscfg['global_data']['field_data'] = np.ma.append(dscfg['global_data']['field_data'], np.expand_dims(field_dict['data'], axis=0), axis=0)
return (None, None)
if (stat == 'median'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.median(dscfg['global_data']['field_data'], axis=0)
elif (stat == 'mode'):
(mode_data, _) = scipy.stats.mode(dscfg['global_data']['field_data'].filled(fill_value=np.nan), axis=0, nan_policy='omit')
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(np.squeeze(mode_data, axis=0))
elif ('percentile' in stat):
percent_data = np.nanpercentile(dscfg['global_data']['field_data'].filled(fill_value=np.nan), percentile, axis=0)
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(percent_data)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
dscfg['global_data'].pop('grid_out', None)
(dscfg['global_data']['starttime'], dscfg['global_data']['endtime']) = time_avg_range(dscfg['timeinfo'], dscfg['global_data']['starttime'], dscfg['global_data']['endtime'], period)
if (dscfg['timeinfo'] > dscfg['global_data']['starttime']):
dscfg['global_data'].update({'grid_out': grid_aux})
return (new_dataset, ind_rad)
if (procstatus == 2):
if (dscfg['initialized'] == 0):
return (None, None)
if ('grid_out' not in dscfg['global_data']):
return (None, None)
if (stat == 'median'):
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.median(dscfg['global_data']['field_data'], axis=0)
elif (stat == 'mode'):
(mode_data, _) = scipy.stats.mode(dscfg['global_data']['field_data'].filled(fill_value=np.nan), axis=0, nan_policy='omit')
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(np.squeeze(mode_data, axis=0))
elif ('percentile' in stat):
percent_data = np.nanpercentile(dscfg['global_data']['field_data'].filled(fill_value=np.nan), percentile, axis=0)
dscfg['global_data']['grid_out'].fields[field_name]['data'] = np.ma.masked_invalid(percent_data)
new_dataset = {'radar_out': deepcopy(dscfg['global_data']['grid_out']), 'timeinfo': dscfg['global_data']['endtime']}
return (new_dataset, ind_rad)<|docstring|>computes temporal statistics of a field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
stat: string. Dataset keyword
Statistic to compute: Can be median, mode, percentileXX
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
def process_grid_fields_diff(procstatus, dscfg, radar_list=None):
    """
    Computes grid field differences.

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : list of string. Dataset keyword
            The input data types (exactly two fields)
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing a radar object with the field differences
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    # exactly two fields are needed to compute a difference
    if len(dscfg['datatype']) != 2:
        warn('Two and only two fields are required'
             ' to compute the field differences')
        return None, None

    radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
    field_name_1 = get_fieldname_pyart(datatype)
    radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][1])
    field_name_2 = get_fieldname_pyart(datatype)
    ind_rad = int(radarnr[5:8]) - 1

    if radar_list[ind_rad] is None:
        warn('No valid radar')
        return None, None
    grid = radar_list[ind_rad]

    if field_name_1 not in grid.fields or field_name_2 not in grid.fields:
        # BUGFIX: message previously read '...fields Xand Y. Fields missings'
        warn('Unable to compare fields ' + field_name_1 + ' and ' +
             field_name_2 + '. Fields missing')
        return None, None

    # difference field (field 1 minus field 2)
    field_diff = pyart.config.get_metadata('fields_difference')
    field_diff['data'] = (
        grid.fields[field_name_1]['data'] -
        grid.fields[field_name_2]['data'])
    field_diff['long_name'] = field_name_1 + ' - ' + field_name_2

    grid_diff = deepcopy(grid)
    grid_diff.fields = dict()
    grid_diff.add_field('fields_difference', field_diff)

    new_dataset = {'radar_out': grid_diff}

    return new_dataset, ind_rad
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing a radar object with the computed field differences
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_fields_diff | jfigui/pyrad | 41 | python | def process_grid_fields_diff(procstatus, dscfg, radar_list=None):
'\n Computes grid field differences\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing a radar object containing the field differences\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
if (len(dscfg['datatype']) != 2):
warn('Two and only two fields are required to compute the field differences')
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name_1 = get_fieldname_pyart(datatype)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][1])
field_name_2 = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if ((field_name_1 not in grid.fields) or (field_name_2 not in grid.fields)):
warn((((('Unable to compare fields ' + field_name_1) + 'and ') + field_name_2) + '. Fields missings'))
return (None, None)
field_diff = pyart.config.get_metadata('fields_difference')
field_diff['data'] = (grid.fields[field_name_1]['data'] - grid.fields[field_name_2]['data'])
field_diff['long_name'] = ((field_name_1 + ' - ') + field_name_2)
grid_diff = deepcopy(grid)
grid_diff.fields = dict()
grid_diff.add_field('fields_difference', field_diff)
new_dataset = {'radar_out': grid_diff}
return (new_dataset, ind_rad) | def process_grid_fields_diff(procstatus, dscfg, radar_list=None):
'\n Computes grid field differences\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing a radar object containing the field differences\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
if (len(dscfg['datatype']) != 2):
warn('Two and only two fields are required to compute the field differences')
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name_1 = get_fieldname_pyart(datatype)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][1])
field_name_2 = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if ((field_name_1 not in grid.fields) or (field_name_2 not in grid.fields)):
warn((((('Unable to compare fields ' + field_name_1) + 'and ') + field_name_2) + '. Fields missings'))
return (None, None)
field_diff = pyart.config.get_metadata('fields_difference')
field_diff['data'] = (grid.fields[field_name_1]['data'] - grid.fields[field_name_2]['data'])
field_diff['long_name'] = ((field_name_1 + ' - ') + field_name_2)
grid_diff = deepcopy(grid)
grid_diff.fields = dict()
grid_diff.add_field('fields_difference', field_diff)
new_dataset = {'radar_out': grid_diff}
return (new_dataset, ind_rad)<|docstring|>Computes grid field differences
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing a radar object containing the field differences
ind_rad : int
radar index<|endoftext|> |
def process_grid_texture(procstatus, dscfg, radar_list=None):
    """
    Computes the 2D texture of a gridded field.

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : list of string. Dataset keyword
            The input data types (one field only)
        xwind, ywind : int
            The size of the local window in the x and y axis. Default 7
        fill_value : float
            The value with which to fill masked data. Default np.nan
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing a radar object with the texture field
    ind_rad : int
        radar index

    """
    if procstatus != 1:
        return None, None

    if len(dscfg['datatype']) != 1:
        warn('Texture can only be computed on one field')
        return None, None

    radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
    field_name = get_fieldname_pyart(datatype)
    ind_rad = int(radarnr[5:8]) - 1

    if radar_list[ind_rad] is None:
        warn('No valid radar')
        return None, None
    grid = radar_list[ind_rad]

    if field_name not in grid.fields:
        warn('Unable to compute field ' + field_name +
             ' texture. Field missing')
        return None, None

    xwind = dscfg.get('xwind', 7)
    ywind = dscfg.get('ywind', 7)
    # BUGFIX: np.NaN alias was removed in NumPy 2.0; use np.nan
    fill_value = dscfg.get('fill_value', np.nan)

    field_text = pyart.config.get_metadata('field_texture')
    field_text['data'] = np.ma.masked_all(
        (grid.nz, grid.ny, grid.nx),
        dtype=grid.fields[field_name]['data'].dtype)

    # compute the 2D texture level by level
    for level in range(grid.nz):
        field_array = deepcopy(grid.fields[field_name]['data'][level, :, :])
        field_array = field_array.filled(fill_value=fill_value)
        field_text['data'][level, :, :] = pyart.util.grid_texture_2d(
            field_array, xwind=xwind, ywind=ywind)

    grid_text = deepcopy(grid)
    grid_text.fields = dict()
    grid_text.add_field('field_texture', field_text)

    new_dataset = {'radar_out': grid_text}

    return new_dataset, ind_rad
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
xwind, ywind : int
The size of the local window in the x and y axis. Default 7
fill_value : float
The value with which to fill masked data. Default np.NaN
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing a radar object containing the field differences
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_texture | jfigui/pyrad | 41 | python | def process_grid_texture(procstatus, dscfg, radar_list=None):
'\n Computes the 2D texture of a gridded field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n xwind, ywind : int\n The size of the local window in the x and y axis. Default 7\n fill_value : float\n The value with which to fill masked data. Default np.NaN\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing a radar object containing the field differences\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
if (len(dscfg['datatype']) != 1):
warn('Texture can only be computed on one field')
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((('Unable to compute field ' + field_name) + ' texture. Field missing'))
return (None, None)
xwind = dscfg.get('xwind', 7)
ywind = dscfg.get('ywind', 7)
fill_value = dscfg.get('fill_value', np.NaN)
field_text = pyart.config.get_metadata('field_texture')
field_text['data'] = np.ma.masked_all((grid.nz, grid.ny, grid.nx), dtype=grid.fields[field_name]['data'].dtype)
for level in range(grid.nz):
field_array = deepcopy(grid.fields[field_name]['data'][(level, :, :)])
field_array = field_array.filled(fill_value=fill_value)
field_text['data'][(level, :, :)] = pyart.util.grid_texture_2d(field_array, xwind=xwind, ywind=ywind)
grid_text = deepcopy(grid)
grid_text.fields = dict()
grid_text.add_field('field_texture', field_text)
new_dataset = {'radar_out': grid_text}
return (new_dataset, ind_rad) | def process_grid_texture(procstatus, dscfg, radar_list=None):
'\n Computes the 2D texture of a gridded field\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of string. Dataset keyword\n The input data types\n xwind, ywind : int\n The size of the local window in the x and y axis. Default 7\n fill_value : float\n The value with which to fill masked data. Default np.NaN\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing a radar object containing the field differences\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
if (len(dscfg['datatype']) != 1):
warn('Texture can only be computed on one field')
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if (radar_list[ind_rad] is None):
warn('No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((('Unable to compute field ' + field_name) + ' texture. Field missing'))
return (None, None)
xwind = dscfg.get('xwind', 7)
ywind = dscfg.get('ywind', 7)
fill_value = dscfg.get('fill_value', np.NaN)
field_text = pyart.config.get_metadata('field_texture')
field_text['data'] = np.ma.masked_all((grid.nz, grid.ny, grid.nx), dtype=grid.fields[field_name]['data'].dtype)
for level in range(grid.nz):
field_array = deepcopy(grid.fields[field_name]['data'][(level, :, :)])
field_array = field_array.filled(fill_value=fill_value)
field_text['data'][(level, :, :)] = pyart.util.grid_texture_2d(field_array, xwind=xwind, ywind=ywind)
grid_text = deepcopy(grid)
grid_text.fields = dict()
grid_text.add_field('field_texture', field_text)
new_dataset = {'radar_out': grid_text}
return (new_dataset, ind_rad)<|docstring|>Computes the 2D texture of a gridded field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
xwind, ywind : int
The size of the local window in the x and y axis. Default 7
fill_value : float
The value with which to fill masked data. Default np.NaN
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing a radar object containing the field differences
ind_rad : int
radar index<|endoftext|> |
e1787dbe6078d34293bd50f29f63ce561db41fbe22bf604e7d2c41b0f67e0127 | def process_grid_mask(procstatus, dscfg, radar_list=None):
'\n Mask data. Puts True if data is above a certain threshold and false\n otherwise.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n threshold : float\n Threshold used for the mask. Values below threshold are set to False.\n Above threshold are set to True. Default 0.\n x_dir_ext, y_dir_ext : int\n Number of pixels by which to extend the mask on each side of the\n west-east direction and south-north direction\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((('Unable to mask field ' + field_name) + ' Field missing in grid'))
return (None, None)
threshold = dscfg.get('threshold', 0.0)
x_dir_ext = dscfg.get('x_dir_ext', 0)
y_dir_ext = dscfg.get('y_dir_ext', 0)
field_mask = pyart.config.get_metadata('field_mask')
field_mask['data'] = np.ma.masked_all((grid.nz, grid.ny, grid.nx), dtype=np.int8)
field_mask['data'][:] = 0
valid = np.logical_not(np.ma.getmaskarray(grid.fields[field_name]['data']))
field_mask['data'][valid] = 1
field_mask['data'][(grid.fields[field_name]['data'] >= threshold)] = 2
field_mask['long_name'] = ((field_name + ' threshold ') + str(threshold))
if ((x_dir_ext > 0) or (y_dir_ext > 0)):
(ind_z, ind_y, ind_x) = np.where((field_mask['data'] == 2))
if (ind_z.size > 0):
for (z, y, x) in zip(ind_z, ind_y, ind_x):
field_mask['data'][(0, (y - y_dir_ext):((y + y_dir_ext) + 1), (x - x_dir_ext):((x + x_dir_ext) + 1))] = 2
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
grid_mask.add_field('field_mask', field_mask)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad) | Mask data. Puts True if data is above a certain threshold and false
otherwise.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
threshold : float
Threshold used for the mask. Values below threshold are set to False.
Above threshold are set to True. Default 0.
x_dir_ext, y_dir_ext : int
Number of pixels by which to extend the mask on each side of the
west-east direction and south-north direction
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_grid_mask | jfigui/pyrad | 41 | python | def process_grid_mask(procstatus, dscfg, radar_list=None):
'\n Mask data. Puts True if data is above a certain threshold and false\n otherwise.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n threshold : float\n Threshold used for the mask. Values below threshold are set to False.\n Above threshold are set to True. Default 0.\n x_dir_ext, y_dir_ext : int\n Number of pixels by which to extend the mask on each side of the\n west-east direction and south-north direction\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((('Unable to mask field ' + field_name) + ' Field missing in grid'))
return (None, None)
threshold = dscfg.get('threshold', 0.0)
x_dir_ext = dscfg.get('x_dir_ext', 0)
y_dir_ext = dscfg.get('y_dir_ext', 0)
field_mask = pyart.config.get_metadata('field_mask')
field_mask['data'] = np.ma.masked_all((grid.nz, grid.ny, grid.nx), dtype=np.int8)
field_mask['data'][:] = 0
valid = np.logical_not(np.ma.getmaskarray(grid.fields[field_name]['data']))
field_mask['data'][valid] = 1
field_mask['data'][(grid.fields[field_name]['data'] >= threshold)] = 2
field_mask['long_name'] = ((field_name + ' threshold ') + str(threshold))
if ((x_dir_ext > 0) or (y_dir_ext > 0)):
(ind_z, ind_y, ind_x) = np.where((field_mask['data'] == 2))
if (ind_z.size > 0):
for (z, y, x) in zip(ind_z, ind_y, ind_x):
field_mask['data'][(0, (y - y_dir_ext):((y + y_dir_ext) + 1), (x - x_dir_ext):((x + x_dir_ext) + 1))] = 2
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
grid_mask.add_field('field_mask', field_mask)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad) | def process_grid_mask(procstatus, dscfg, radar_list=None):
'\n Mask data. Puts True if data is above a certain threshold and false\n otherwise.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n threshold : float\n Threshold used for the mask. Values below threshold are set to False.\n Above threshold are set to True. Default 0.\n x_dir_ext, y_dir_ext : int\n Number of pixels by which to extend the mask on each side of the\n west-east direction and south-north direction\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, datatype, _, _) = get_datatype_fields(dscfg['datatype'][0])
field_name = get_fieldname_pyart(datatype)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (field_name not in grid.fields):
warn((('Unable to mask field ' + field_name) + ' Field missing in grid'))
return (None, None)
threshold = dscfg.get('threshold', 0.0)
x_dir_ext = dscfg.get('x_dir_ext', 0)
y_dir_ext = dscfg.get('y_dir_ext', 0)
field_mask = pyart.config.get_metadata('field_mask')
field_mask['data'] = np.ma.masked_all((grid.nz, grid.ny, grid.nx), dtype=np.int8)
field_mask['data'][:] = 0
valid = np.logical_not(np.ma.getmaskarray(grid.fields[field_name]['data']))
field_mask['data'][valid] = 1
field_mask['data'][(grid.fields[field_name]['data'] >= threshold)] = 2
field_mask['long_name'] = ((field_name + ' threshold ') + str(threshold))
if ((x_dir_ext > 0) or (y_dir_ext > 0)):
(ind_z, ind_y, ind_x) = np.where((field_mask['data'] == 2))
if (ind_z.size > 0):
for (z, y, x) in zip(ind_z, ind_y, ind_x):
field_mask['data'][(0, (y - y_dir_ext):((y + y_dir_ext) + 1), (x - x_dir_ext):((x + x_dir_ext) + 1))] = 2
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
grid_mask.add_field('field_mask', field_mask)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad)<|docstring|>Mask data. Puts True if data is above a certain threshold and false
otherwise.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
threshold : float
Threshold used for the mask. Values below threshold are set to False.
Above threshold are set to True. Default 0.
x_dir_ext, y_dir_ext : int
Number of pixels by which to extend the mask on each side of the
west-east direction and south-north direction
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
65165ceb047bfe3109d4a28e2aff6ff1fdddb67bc0a89b436be00747ffd830c3 | def process_normalize_luminosity(procstatus, dscfg, radar_list=None):
'\n Normalize the data by the sinus of the sun elevation. The sun elevation is\n computed at the central pixel.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, _, _, _) = get_datatype_fields(dscfg['datatype'][0])
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
lat_point = grid.point_latitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
lon_point = grid.point_longitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
(el_sun, _) = pyart.correct.sun_position_pysolar(dscfg['timeinfo'], lat_point, lon_point)
norm = np.sin(np.deg2rad(el_sun))
grid_norm = deepcopy(grid)
grid_norm.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
norm_field = pyart.config.get_metadata((field_name + '_norm'))
norm_field['data'] = (grid.fields[field_name]['data'] / norm)
grid_norm.add_field((field_name + '_norm'), norm_field)
new_dataset = {'radar_out': grid_norm}
return (new_dataset, ind_rad) | Normalize the data by the sinus of the sun elevation. The sun elevation is
computed at the central pixel.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_normalize_luminosity | jfigui/pyrad | 41 | python | def process_normalize_luminosity(procstatus, dscfg, radar_list=None):
'\n Normalize the data by the sinus of the sun elevation. The sun elevation is\n computed at the central pixel.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, _, _, _) = get_datatype_fields(dscfg['datatype'][0])
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
lat_point = grid.point_latitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
lon_point = grid.point_longitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
(el_sun, _) = pyart.correct.sun_position_pysolar(dscfg['timeinfo'], lat_point, lon_point)
norm = np.sin(np.deg2rad(el_sun))
grid_norm = deepcopy(grid)
grid_norm.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
norm_field = pyart.config.get_metadata((field_name + '_norm'))
norm_field['data'] = (grid.fields[field_name]['data'] / norm)
grid_norm.add_field((field_name + '_norm'), norm_field)
new_dataset = {'radar_out': grid_norm}
return (new_dataset, ind_rad) | def process_normalize_luminosity(procstatus, dscfg, radar_list=None):
'\n Normalize the data by the sinus of the sun elevation. The sun elevation is\n computed at the central pixel.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
(radarnr, _, _, _, _) = get_datatype_fields(dscfg['datatype'][0])
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
lat_point = grid.point_latitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
lon_point = grid.point_longitude['data'][(0, int((grid.ny / 2)), int((grid.nx / 2)))]
(el_sun, _) = pyart.correct.sun_position_pysolar(dscfg['timeinfo'], lat_point, lon_point)
norm = np.sin(np.deg2rad(el_sun))
grid_norm = deepcopy(grid)
grid_norm.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
norm_field = pyart.config.get_metadata((field_name + '_norm'))
norm_field['data'] = (grid.fields[field_name]['data'] / norm)
grid_norm.add_field((field_name + '_norm'), norm_field)
new_dataset = {'radar_out': grid_norm}
return (new_dataset, ind_rad)<|docstring|>Normalize the data by the sinus of the sun elevation. The sun elevation is
computed at the central pixel.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
a3b23492ffdeeecc895cba4d5100d61e1b1ab9fd204327e13c16ea282c05ee3b | def process_pixel_filter(procstatus, dscfg, radar_list=None):
'\n Masks all pixels that are not of the class specified in keyword pixel_type\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n pixel_type : int or list of ints\n The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above\n threshold. Default 2\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
mask_field = None
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
mask_field = get_fieldname_pyart(datatype)
break
if (mask_field is None):
warn('mask field required to filter data')
return (None, None)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (mask_field not in grid.fields):
warn('Unable to filter data. Missing mask field')
return (None, None)
pixel_type = dscfg.get('pixel_type', 2)
mask = np.ma.isin(grid.fields[mask_field]['data'], pixel_type, invert=True)
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
continue
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
field = deepcopy(grid.fields[field_name])
field['data'] = np.ma.masked_where(mask, field['data'])
grid_mask.add_field(field_name, field)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad) | Masks all pixels that are not of the class specified in keyword pixel_type
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
pixel_type : int or list of ints
The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above
threshold. Default 2
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index | src/pyrad_proc/pyrad/proc/process_grid.py | process_pixel_filter | jfigui/pyrad | 41 | python | def process_pixel_filter(procstatus, dscfg, radar_list=None):
'\n Masks all pixels that are not of the class specified in keyword pixel_type\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n pixel_type : int or list of ints\n The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above\n threshold. Default 2\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
mask_field = None
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
mask_field = get_fieldname_pyart(datatype)
break
if (mask_field is None):
warn('mask field required to filter data')
return (None, None)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (mask_field not in grid.fields):
warn('Unable to filter data. Missing mask field')
return (None, None)
pixel_type = dscfg.get('pixel_type', 2)
mask = np.ma.isin(grid.fields[mask_field]['data'], pixel_type, invert=True)
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
continue
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
field = deepcopy(grid.fields[field_name])
field['data'] = np.ma.masked_where(mask, field['data'])
grid_mask.add_field(field_name, field)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad) | def process_pixel_filter(procstatus, dscfg, radar_list=None):
'\n Masks all pixels that are not of the class specified in keyword pixel_type\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n pixel_type : int or list of ints\n The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above\n threshold. Default 2\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n '
if (procstatus != 1):
return (None, None)
mask_field = None
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
mask_field = get_fieldname_pyart(datatype)
break
if (mask_field is None):
warn('mask field required to filter data')
return (None, None)
ind_rad = (int(radarnr[5:8]) - 1)
if ((radar_list is None) or (radar_list[ind_rad] is None)):
warn('ERROR: No valid radar')
return (None, None)
grid = radar_list[ind_rad]
if (mask_field not in grid.fields):
warn('Unable to filter data. Missing mask field')
return (None, None)
pixel_type = dscfg.get('pixel_type', 2)
mask = np.ma.isin(grid.fields[mask_field]['data'], pixel_type, invert=True)
grid_mask = deepcopy(grid)
grid_mask.fields = dict()
for datatypedescr in dscfg['datatype']:
(radarnr, _, datatype, _, _) = get_datatype_fields(datatypedescr)
if (datatype == 'mask'):
continue
field_name = get_fieldname_pyart(datatype)
if (field_name not in grid.fields):
warn((('Unable to normalize field ' + field_name) + '. Field missing in grid'))
continue
field = deepcopy(grid.fields[field_name])
field['data'] = np.ma.masked_where(mask, field['data'])
grid_mask.add_field(field_name, field)
new_dataset = {'radar_out': grid_mask}
return (new_dataset, ind_rad)<|docstring|>Masks all pixels that are not of the class specified in keyword pixel_type
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
pixel_type : int or list of ints
The type of pixels to keep: 0 No data, 1 Below threshold, 2 Above
threshold. Default 2
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index<|endoftext|> |
807e8538b4b10c966b686d3e7073e401c0c035a0c655fafecc45f056d860455b | def split_into_train_test(data, in_num, fhorizon):
'\n Splits the series into train and test sets.\n\n Each step takes multiple points as inputs\n :param data: an individual TS\n :param fhorizon: number of out of sample points\n :param in_num: number of input points for the forecast\n :return:\n '
(train, test) = (data[:(- fhorizon)], data[(- (fhorizon + in_num)):])
(_x_train, _y_train) = (train[:(- 1)], np.roll(train, (- in_num))[:(- in_num)])
(_x_test, _y_test) = (test[:(- 1)], np.roll(test, (- in_num))[:(- in_num)])
_x_train = np.reshape(_x_train, ((- 1), 1))
_x_test = np.reshape(_x_test, ((- 1), 1))
temp_test = np.roll(_x_test, (- 1))
temp_train = np.roll(_x_train, (- 1))
for x in range(1, in_num):
_x_train = np.concatenate((_x_train[:(- 1)], temp_train[:(- 1)]), 1)
_x_test = np.concatenate((_x_test[:(- 1)], temp_test[:(- 1)]), 1)
temp_test = np.roll(temp_test, (- 1))[:(- 1)]
temp_train = np.roll(temp_train, (- 1))[:(- 1)]
return (_x_train, _y_train, _x_test, _y_test) | Splits the series into train and test sets.
Each step takes multiple points as inputs
:param data: an individual TS
:param fhorizon: number of out of sample points
:param in_num: number of input points for the forecast
:return: | examples/scripts/01_forecasting.py | split_into_train_test | wilsonify/sktime | 0 | python | def split_into_train_test(data, in_num, fhorizon):
'\n Splits the series into train and test sets.\n\n Each step takes multiple points as inputs\n :param data: an individual TS\n :param fhorizon: number of out of sample points\n :param in_num: number of input points for the forecast\n :return:\n '
(train, test) = (data[:(- fhorizon)], data[(- (fhorizon + in_num)):])
(_x_train, _y_train) = (train[:(- 1)], np.roll(train, (- in_num))[:(- in_num)])
(_x_test, _y_test) = (test[:(- 1)], np.roll(test, (- in_num))[:(- in_num)])
_x_train = np.reshape(_x_train, ((- 1), 1))
_x_test = np.reshape(_x_test, ((- 1), 1))
temp_test = np.roll(_x_test, (- 1))
temp_train = np.roll(_x_train, (- 1))
for x in range(1, in_num):
_x_train = np.concatenate((_x_train[:(- 1)], temp_train[:(- 1)]), 1)
_x_test = np.concatenate((_x_test[:(- 1)], temp_test[:(- 1)]), 1)
temp_test = np.roll(temp_test, (- 1))[:(- 1)]
temp_train = np.roll(temp_train, (- 1))[:(- 1)]
return (_x_train, _y_train, _x_test, _y_test) | def split_into_train_test(data, in_num, fhorizon):
'\n Splits the series into train and test sets.\n\n Each step takes multiple points as inputs\n :param data: an individual TS\n :param fhorizon: number of out of sample points\n :param in_num: number of input points for the forecast\n :return:\n '
(train, test) = (data[:(- fhorizon)], data[(- (fhorizon + in_num)):])
(_x_train, _y_train) = (train[:(- 1)], np.roll(train, (- in_num))[:(- in_num)])
(_x_test, _y_test) = (test[:(- 1)], np.roll(test, (- in_num))[:(- in_num)])
_x_train = np.reshape(_x_train, ((- 1), 1))
_x_test = np.reshape(_x_test, ((- 1), 1))
temp_test = np.roll(_x_test, (- 1))
temp_train = np.roll(_x_train, (- 1))
for x in range(1, in_num):
_x_train = np.concatenate((_x_train[:(- 1)], temp_train[:(- 1)]), 1)
_x_test = np.concatenate((_x_test[:(- 1)], temp_test[:(- 1)]), 1)
temp_test = np.roll(temp_test, (- 1))[:(- 1)]
temp_train = np.roll(temp_train, (- 1))[:(- 1)]
return (_x_train, _y_train, _x_test, _y_test)<|docstring|>Splits the series into train and test sets.
Each step takes multiple points as inputs
:param data: an individual TS
:param fhorizon: number of out of sample points
:param in_num: number of input points for the forecast
:return:<|endoftext|> |
ad4c55d3882783361128caa3216f6928af2163c689d4e65d52cd4e9c3a91f10a | def error_rates(A, A_reco):
'\n Obtain the error rates by comparing the reconstructed adjacency matrix A_reco\n to the actual adjacency matrix A and print the error rates to the console\n\n Arguments:\n 1. A: Actual adjacency matrix\n 2. A_reco: Reconstructed adjacency matrix\n\n Returns:\n 1. fn: Number of false negative\n 2. fp: Number of false positive\n 3. num_link: Number of bi-directional links\n '
assert (type(A) == np.ndarray), "A must be of type 'numpy.ndarray'"
assert (A.size > 0), 'A must not be empty'
assert (A.dtype == int), "Elements in A must be of dtype 'int'"
size = A.shape
assert (len(size) == 2), 'A must be of 2D shape'
assert (size[0] == size[1]), 'A must be a square matrix'
assert np.allclose(A, A.T), 'A must be symmetric'
assert (np.diag(A) == 0).all(), 'Diagonal elements of A must all be zero'
assert (np.min(A) == 0), 'Elements in A must be either 0 or 1'
assert (np.max(A) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A) == 1), 'All elements in A are zero'
assert (type(A_reco) == np.ndarray), "A_reco must be of type 'numpy.ndarray'"
assert (A_reco.size > 0), 'A_reco must not be empty'
assert (A_reco.dtype == int), "Elements in A_reco must be of dtype 'int'"
size_reco = A_reco.shape
assert (len(size_reco) == 2), 'A_reco must be of 2D shape'
assert (size_reco[0] == size_reco[1]), 'A_reco must be a square matrix'
assert np.allclose(A_reco, A_reco.T), 'A_reco must be symmetric'
assert (np.diag(A_reco) == 0).all(), 'Diagonal elements of A_reco must all be zero'
assert (np.min(A_reco) == 0), 'Elements in A_reco must be either 0 or 1'
assert (np.max(A_reco) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A_reco) == 1), 'All elements in A_reco are zero'
assert (size == size_reco), 'A and A_reco must have the same shape'
A_off = base.off_diag_upper(A)
A_reco_off = base.off_diag_upper(A_reco)
fp = np.sum((A_reco_off[(A_off == 0)] == 1))
fn = np.sum((A_reco_off[(A_off == 1)] == 0))
num_link = np.sum(A_off)
fpr = (fp / num_link)
fnr = (fn / num_link)
print('Number of bidirectional links = {}'.format(num_link))
print('Number of false positive = {}'.format(fp))
print('Number of false negative = {}'.format(fn))
print('False positive rate = {:.4f}%'.format((fpr * 100)))
print('False negative rate = {:.4f}%'.format((fnr * 100)))
return (fn, fp, num_link) | Obtain the error rates by comparing the reconstructed adjacency matrix A_reco
to the actual adjacency matrix A and print the error rates to the console
Arguments:
1. A: Actual adjacency matrix
2. A_reco: Reconstructed adjacency matrix
Returns:
1. fn: Number of false negative
2. fp: Number of false positive
3. num_link: Number of bi-directional links | evaluate/error_rates.py | error_rates | newTypeGeek/Network-Reconstruction | 3 | python | def error_rates(A, A_reco):
'\n Obtain the error rates by comparing the reconstructed adjacency matrix A_reco\n to the actual adjacency matrix A and print the error rates to the console\n\n Arguments:\n 1. A: Actual adjacency matrix\n 2. A_reco: Reconstructed adjacency matrix\n\n Returns:\n 1. fn: Number of false negative\n 2. fp: Number of false positive\n 3. num_link: Number of bi-directional links\n '
assert (type(A) == np.ndarray), "A must be of type 'numpy.ndarray'"
assert (A.size > 0), 'A must not be empty'
assert (A.dtype == int), "Elements in A must be of dtype 'int'"
size = A.shape
assert (len(size) == 2), 'A must be of 2D shape'
assert (size[0] == size[1]), 'A must be a square matrix'
assert np.allclose(A, A.T), 'A must be symmetric'
assert (np.diag(A) == 0).all(), 'Diagonal elements of A must all be zero'
assert (np.min(A) == 0), 'Elements in A must be either 0 or 1'
assert (np.max(A) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A) == 1), 'All elements in A are zero'
assert (type(A_reco) == np.ndarray), "A_reco must be of type 'numpy.ndarray'"
assert (A_reco.size > 0), 'A_reco must not be empty'
assert (A_reco.dtype == int), "Elements in A_reco must be of dtype 'int'"
size_reco = A_reco.shape
assert (len(size_reco) == 2), 'A_reco must be of 2D shape'
assert (size_reco[0] == size_reco[1]), 'A_reco must be a square matrix'
assert np.allclose(A_reco, A_reco.T), 'A_reco must be symmetric'
assert (np.diag(A_reco) == 0).all(), 'Diagonal elements of A_reco must all be zero'
assert (np.min(A_reco) == 0), 'Elements in A_reco must be either 0 or 1'
assert (np.max(A_reco) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A_reco) == 1), 'All elements in A_reco are zero'
assert (size == size_reco), 'A and A_reco must have the same shape'
A_off = base.off_diag_upper(A)
A_reco_off = base.off_diag_upper(A_reco)
fp = np.sum((A_reco_off[(A_off == 0)] == 1))
fn = np.sum((A_reco_off[(A_off == 1)] == 0))
num_link = np.sum(A_off)
fpr = (fp / num_link)
fnr = (fn / num_link)
print('Number of bidirectional links = {}'.format(num_link))
print('Number of false positive = {}'.format(fp))
print('Number of false negative = {}'.format(fn))
print('False positive rate = {:.4f}%'.format((fpr * 100)))
print('False negative rate = {:.4f}%'.format((fnr * 100)))
return (fn, fp, num_link) | def error_rates(A, A_reco):
'\n Obtain the error rates by comparing the reconstructed adjacency matrix A_reco\n to the actual adjacency matrix A and print the error rates to the console\n\n Arguments:\n 1. A: Actual adjacency matrix\n 2. A_reco: Reconstructed adjacency matrix\n\n Returns:\n 1. fn: Number of false negative\n 2. fp: Number of false positive\n 3. num_link: Number of bi-directional links\n '
assert (type(A) == np.ndarray), "A must be of type 'numpy.ndarray'"
assert (A.size > 0), 'A must not be empty'
assert (A.dtype == int), "Elements in A must be of dtype 'int'"
size = A.shape
assert (len(size) == 2), 'A must be of 2D shape'
assert (size[0] == size[1]), 'A must be a square matrix'
assert np.allclose(A, A.T), 'A must be symmetric'
assert (np.diag(A) == 0).all(), 'Diagonal elements of A must all be zero'
assert (np.min(A) == 0), 'Elements in A must be either 0 or 1'
assert (np.max(A) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A) == 1), 'All elements in A are zero'
assert (type(A_reco) == np.ndarray), "A_reco must be of type 'numpy.ndarray'"
assert (A_reco.size > 0), 'A_reco must not be empty'
assert (A_reco.dtype == int), "Elements in A_reco must be of dtype 'int'"
size_reco = A_reco.shape
assert (len(size_reco) == 2), 'A_reco must be of 2D shape'
assert (size_reco[0] == size_reco[1]), 'A_reco must be a square matrix'
assert np.allclose(A_reco, A_reco.T), 'A_reco must be symmetric'
assert (np.diag(A_reco) == 0).all(), 'Diagonal elements of A_reco must all be zero'
assert (np.min(A_reco) == 0), 'Elements in A_reco must be either 0 or 1'
assert (np.max(A_reco) <= 1), 'Elements in A must be either 0 or 1'
assert (np.max(A_reco) == 1), 'All elements in A_reco are zero'
assert (size == size_reco), 'A and A_reco must have the same shape'
A_off = base.off_diag_upper(A)
A_reco_off = base.off_diag_upper(A_reco)
fp = np.sum((A_reco_off[(A_off == 0)] == 1))
fn = np.sum((A_reco_off[(A_off == 1)] == 0))
num_link = np.sum(A_off)
fpr = (fp / num_link)
fnr = (fn / num_link)
print('Number of bidirectional links = {}'.format(num_link))
print('Number of false positive = {}'.format(fp))
print('Number of false negative = {}'.format(fn))
print('False positive rate = {:.4f}%'.format((fpr * 100)))
print('False negative rate = {:.4f}%'.format((fnr * 100)))
return (fn, fp, num_link)<|docstring|>Obtain the error rates by comparing the reconstructed adjacency matrix A_reco
to the actual adjacency matrix A and print the error rates to the console
Arguments:
1. A: Actual adjacency matrix
2. A_reco: Reconstructed adjacency matrix
Returns:
1. fn: Number of false negative
2. fp: Number of false positive
3. num_link: Number of bi-directional links<|endoftext|> |
8f960a5d887f42c12acf2cb10387d882b8187adb68e0a5a39cf49b0c2dcd6cfd | def identify_spacegroup(spg_cell, max_iterations=200, minimum_distance=0.9):
'This function aims to identify the best spacegroup\n based on allowing some amount of deviation. Accepts a spglib\n compatible cell tuple as the argument.'
prec = (5 * minimum_distance)
precs = []
grps = []
grp = 'None'
highest_symmetry_group = 'None'
max_group = 0
counter = 0
while ((grp is None) or (grp.split()[(- 1)] not in ['(1)', '(2)'])):
counter += 1
if (counter > max_iterations):
break
grp = spglib.get_spacegroup(spg_cell, symprec=prec)
grps.append(grp)
precs.append(prec)
prec /= 2
if (grp is not None):
group_num = int(grp.split()[(- 1)].replace('(', '').replace(')', ''))
if (group_num > max_group):
max_group = group_num
highest_symmetry_group = grp
if all(((g is None) for g in grps)):
raise ValueError('No symmetry groups found!')
highest_symmetry_group_prec = precs[::(- 1)][grps[::(- 1)].index(highest_symmetry_group)]
counts = Counter(grps)
if (None in counts):
del counts[None]
most_common_group = counts.most_common(1)[0][0]
most_common_group_prec = precs[::(- 1)][grps[::(- 1)].index(most_common_group)]
return {'common': (most_common_group, most_common_group_prec), 'highest': (highest_symmetry_group, highest_symmetry_group_prec), 'histogram': counts} | This function aims to identify the best spacegroup
based on allowing some amount of deviation. Accepts a spglib
compatible cell tuple as the argument. | flowws_unit_cell/CenterSpaceGroup.py | identify_spacegroup | glotzerlab/flowws-unit-cell | 0 | python | def identify_spacegroup(spg_cell, max_iterations=200, minimum_distance=0.9):
'This function aims to identify the best spacegroup\n based on allowing some amount of deviation. Accepts a spglib\n compatible cell tuple as the argument.'
prec = (5 * minimum_distance)
precs = []
grps = []
grp = 'None'
highest_symmetry_group = 'None'
max_group = 0
counter = 0
while ((grp is None) or (grp.split()[(- 1)] not in ['(1)', '(2)'])):
counter += 1
if (counter > max_iterations):
break
grp = spglib.get_spacegroup(spg_cell, symprec=prec)
grps.append(grp)
precs.append(prec)
prec /= 2
if (grp is not None):
group_num = int(grp.split()[(- 1)].replace('(', ).replace(')', ))
if (group_num > max_group):
max_group = group_num
highest_symmetry_group = grp
if all(((g is None) for g in grps)):
raise ValueError('No symmetry groups found!')
highest_symmetry_group_prec = precs[::(- 1)][grps[::(- 1)].index(highest_symmetry_group)]
counts = Counter(grps)
if (None in counts):
del counts[None]
most_common_group = counts.most_common(1)[0][0]
most_common_group_prec = precs[::(- 1)][grps[::(- 1)].index(most_common_group)]
return {'common': (most_common_group, most_common_group_prec), 'highest': (highest_symmetry_group, highest_symmetry_group_prec), 'histogram': counts} | def identify_spacegroup(spg_cell, max_iterations=200, minimum_distance=0.9):
'This function aims to identify the best spacegroup\n based on allowing some amount of deviation. Accepts a spglib\n compatible cell tuple as the argument.'
prec = (5 * minimum_distance)
precs = []
grps = []
grp = 'None'
highest_symmetry_group = 'None'
max_group = 0
counter = 0
while ((grp is None) or (grp.split()[(- 1)] not in ['(1)', '(2)'])):
counter += 1
if (counter > max_iterations):
break
grp = spglib.get_spacegroup(spg_cell, symprec=prec)
grps.append(grp)
precs.append(prec)
prec /= 2
if (grp is not None):
group_num = int(grp.split()[(- 1)].replace('(', ).replace(')', ))
if (group_num > max_group):
max_group = group_num
highest_symmetry_group = grp
if all(((g is None) for g in grps)):
raise ValueError('No symmetry groups found!')
highest_symmetry_group_prec = precs[::(- 1)][grps[::(- 1)].index(highest_symmetry_group)]
counts = Counter(grps)
if (None in counts):
del counts[None]
most_common_group = counts.most_common(1)[0][0]
most_common_group_prec = precs[::(- 1)][grps[::(- 1)].index(most_common_group)]
return {'common': (most_common_group, most_common_group_prec), 'highest': (highest_symmetry_group, highest_symmetry_group_prec), 'histogram': counts}<|docstring|>This function aims to identify the best spacegroup
based on allowing some amount of deviation. Accepts a spglib
compatible cell tuple as the argument.<|endoftext|> |
f21d39e7e1979d78b8030124961e2d079d97243153f2e1bab5f5d768f8dc5314 | def matrix_to_box(bm):
'Convert a box matrix into a box object'
bm = np.asarray(bm)
(Lx, Ly, Lz) = np.diag(bm).flatten().tolist()
xy = (bm[(0, 1)] / Ly)
xz = (bm[(0, 2)] / Lz)
yz = (bm[(1, 2)] / Lz)
box = (Lx, Ly, Lz, xy, xz, yz)
return box | Convert a box matrix into a box object | flowws_unit_cell/CenterSpaceGroup.py | matrix_to_box | glotzerlab/flowws-unit-cell | 0 | python | def matrix_to_box(bm):
bm = np.asarray(bm)
(Lx, Ly, Lz) = np.diag(bm).flatten().tolist()
xy = (bm[(0, 1)] / Ly)
xz = (bm[(0, 2)] / Lz)
yz = (bm[(1, 2)] / Lz)
box = (Lx, Ly, Lz, xy, xz, yz)
return box | def matrix_to_box(bm):
bm = np.asarray(bm)
(Lx, Ly, Lz) = np.diag(bm).flatten().tolist()
xy = (bm[(0, 1)] / Ly)
xz = (bm[(0, 2)] / Lz)
yz = (bm[(1, 2)] / Lz)
box = (Lx, Ly, Lz, xy, xz, yz)
return box<|docstring|>Convert a box matrix into a box object<|endoftext|> |
5f18e5c5f64799dba6deead8f8c7f4b508a07a982e6e5236d416ee5955163ce9 | def standardize_cell(spg_cell, best_prec):
'Convert cell into its standard crystallographic representation'
dataset = spglib.get_symmetry_dataset(spg_cell, best_prec)
box = matrix_to_box(dataset['std_lattice'].T)
positions = dataset['std_positions']
return (box, positions, dataset['std_types']) | Convert cell into its standard crystallographic representation | flowws_unit_cell/CenterSpaceGroup.py | standardize_cell | glotzerlab/flowws-unit-cell | 0 | python | def standardize_cell(spg_cell, best_prec):
dataset = spglib.get_symmetry_dataset(spg_cell, best_prec)
box = matrix_to_box(dataset['std_lattice'].T)
positions = dataset['std_positions']
return (box, positions, dataset['std_types']) | def standardize_cell(spg_cell, best_prec):
dataset = spglib.get_symmetry_dataset(spg_cell, best_prec)
box = matrix_to_box(dataset['std_lattice'].T)
positions = dataset['std_positions']
return (box, positions, dataset['std_types'])<|docstring|>Convert cell into its standard crystallographic representation<|endoftext|> |
b9cc602c52a4cca58d52faf47b5cd656fb8fe50fd3f697835dac780d67ca45b4 | def run(self, scope, storage):
'Detect the space group and center the system.'
box = scope['box']
boxmat = plato.math.box_to_matrix(box)
fractions = plato.math.make_fractions(box, scope['position'])
types = scope['type']
if (not self.arguments['use_types']):
types = np.zeros_like(types)
spglib_cell = (boxmat, fractions, types)
try:
self.spg_info = identify_spacegroup(spglib_cell, max_iterations=64, minimum_distance=self.arguments['minimum_distance'])
except ValueError:
return
(box, fractions, types) = standardize_cell(spglib_cell, self.spg_info['common'][1])
scope['box'] = box
scope['position'] = plato.math.fractions_to_coordinates(box, fractions)
scope['type'] = types
scope.setdefault('visuals', []).append(self) | Detect the space group and center the system. | flowws_unit_cell/CenterSpaceGroup.py | run | glotzerlab/flowws-unit-cell | 0 | python | def run(self, scope, storage):
box = scope['box']
boxmat = plato.math.box_to_matrix(box)
fractions = plato.math.make_fractions(box, scope['position'])
types = scope['type']
if (not self.arguments['use_types']):
types = np.zeros_like(types)
spglib_cell = (boxmat, fractions, types)
try:
self.spg_info = identify_spacegroup(spglib_cell, max_iterations=64, minimum_distance=self.arguments['minimum_distance'])
except ValueError:
return
(box, fractions, types) = standardize_cell(spglib_cell, self.spg_info['common'][1])
scope['box'] = box
scope['position'] = plato.math.fractions_to_coordinates(box, fractions)
scope['type'] = types
scope.setdefault('visuals', []).append(self) | def run(self, scope, storage):
box = scope['box']
boxmat = plato.math.box_to_matrix(box)
fractions = plato.math.make_fractions(box, scope['position'])
types = scope['type']
if (not self.arguments['use_types']):
types = np.zeros_like(types)
spglib_cell = (boxmat, fractions, types)
try:
self.spg_info = identify_spacegroup(spglib_cell, max_iterations=64, minimum_distance=self.arguments['minimum_distance'])
except ValueError:
return
(box, fractions, types) = standardize_cell(spglib_cell, self.spg_info['common'][1])
scope['box'] = box
scope['position'] = plato.math.fractions_to_coordinates(box, fractions)
scope['type'] = types
scope.setdefault('visuals', []).append(self)<|docstring|>Detect the space group and center the system.<|endoftext|> |
30d2248b35eee75e7f11bdbd4a9432892df50dc2913e03f8472b5529bd2c0607 | def sw_sxo(Rxo, Rmf, Rw, Rt):
'\n sw/sxo - is called movable hydrocarbon index\n --if sw/sxo ratio is less than 0.7 (ss) or 0.6 (carb), mv hc are present--\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
sw_sxo = ((((Rxo / Rmf) * Rw) / Rt) ** (1 / 2))
return sw_sxo | sw/sxo - is called movable hydrocarbon index
--if sw/sxo ratio is less than 0.7 (ss) or 0.6 (carb), mv hc are present--
- Rmf - mud filtrate resistivity @ formation temperature
- Rxo - shallow resistivity (from LL8, MSFL, ML)
- Rw - water resistivity
- Rt - true resistivity | petrophysics/saturation/mvi.py | sw_sxo | petroGG/petrophysics | 47 | python | def sw_sxo(Rxo, Rmf, Rw, Rt):
'\n sw/sxo - is called movable hydrocarbon index\n --if sw/sxo ratio is less than 0.7 (ss) or 0.6 (carb), mv hc are present--\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
sw_sxo = ((((Rxo / Rmf) * Rw) / Rt) ** (1 / 2))
return sw_sxo | def sw_sxo(Rxo, Rmf, Rw, Rt):
'\n sw/sxo - is called movable hydrocarbon index\n --if sw/sxo ratio is less than 0.7 (ss) or 0.6 (carb), mv hc are present--\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
sw_sxo = ((((Rxo / Rmf) * Rw) / Rt) ** (1 / 2))
return sw_sxo<|docstring|>sw/sxo - is called movable hydrocarbon index
--if sw/sxo ratio is less than 0.7 (ss) or 0.6 (carb), mv hc are present--
- Rmf - mud filtrate resistivity @ formation temperature
- Rxo - shallow resistivity (from LL8, MSFL, ML)
- Rw - water resistivity
- Rt - true resistivity<|endoftext|> |
1e91afaba70ee31476224d66a107c5cf9235f40c3add06c47195c5d4d002af81 | def swr(Rxo, Rmf, Rw, Rt):
'\n swr - movable hydrocarbon index\n 0.625 or 5/8 - works well for moderate invasion\n then sxo=sw^(1/5) and results: swr = ....\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
swr = ((((Rxo / Rmf) * Rw) / Rt) ** 0.625)
return swr | swr - movable hydrocarbon index
0.625 or 5/8 - works well for moderate invasion
then sxo=sw^(1/5) and results: swr = ....
- Rmf - mud filtrate resistivity @ formation temperature
- Rxo - shallow resistivity (from LL8, MSFL, ML)
- Rw - water resistivity
- Rt - true resistivity | petrophysics/saturation/mvi.py | swr | petroGG/petrophysics | 47 | python | def swr(Rxo, Rmf, Rw, Rt):
'\n swr - movable hydrocarbon index\n 0.625 or 5/8 - works well for moderate invasion\n then sxo=sw^(1/5) and results: swr = ....\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
swr = ((((Rxo / Rmf) * Rw) / Rt) ** 0.625)
return swr | def swr(Rxo, Rmf, Rw, Rt):
'\n swr - movable hydrocarbon index\n 0.625 or 5/8 - works well for moderate invasion\n then sxo=sw^(1/5) and results: swr = ....\n - Rmf - mud filtrate resistivity @ formation temperature\n - Rxo - shallow resistivity (from LL8, MSFL, ML)\n - Rw - water resistivity\n - Rt - true resistivity\n '
swr = ((((Rxo / Rmf) * Rw) / Rt) ** 0.625)
return swr<|docstring|>swr - movable hydrocarbon index
0.625 or 5/8 - works well for moderate invasion
then sxo=sw^(1/5) and results: swr = ....
- Rmf - mud filtrate resistivity @ formation temperature
- Rxo - shallow resistivity (from LL8, MSFL, ML)
- Rw - water resistivity
- Rt - true resistivity<|endoftext|> |
5c9f455a5194e488fd87b9962a4f95b2cf1465ac5a158baca7ed5c804a4900bf | def click_button_ok(self):
'\n Click button to ok the dialog\n\n '
self.click_element(self.BUTTON_OK) | Click button to ok the dialog | common/xrd-ui-tests-qautomate/pagemodel/cs_initial_conf_initilialized_dlg.py | click_button_ok | nordic-institute/X-Road-tests | 1 | python | def click_button_ok(self):
'\n \n\n '
self.click_element(self.BUTTON_OK) | def click_button_ok(self):
'\n \n\n '
self.click_element(self.BUTTON_OK)<|docstring|>Click button to ok the dialog<|endoftext|> |
b02c476b11365e7f062c9e79f57addf2a114f00359a38d5f2863badd21df8986 | def canCompleteCircuit(self, gas, cost):
'\n :type gas: List[int]\n :type cost: List[int]\n :rtype: int\n '
start = (len(gas) - 1)
end = 0
total = (gas[start] - cost[start])
while (start > end):
if (total >= 0):
total += (gas[end] - cost[end])
end += 1
else:
start -= 1
total += (gas[start] - cost[start])
return (start if (total >= 0) else (- 1)) | :type gas: List[int]
:type cost: List[int]
:rtype: int | 0134.Gas Station/solution.py | canCompleteCircuit | zhlinh/leetcode | 0 | python | def canCompleteCircuit(self, gas, cost):
'\n :type gas: List[int]\n :type cost: List[int]\n :rtype: int\n '
start = (len(gas) - 1)
end = 0
total = (gas[start] - cost[start])
while (start > end):
if (total >= 0):
total += (gas[end] - cost[end])
end += 1
else:
start -= 1
total += (gas[start] - cost[start])
return (start if (total >= 0) else (- 1)) | def canCompleteCircuit(self, gas, cost):
'\n :type gas: List[int]\n :type cost: List[int]\n :rtype: int\n '
start = (len(gas) - 1)
end = 0
total = (gas[start] - cost[start])
while (start > end):
if (total >= 0):
total += (gas[end] - cost[end])
end += 1
else:
start -= 1
total += (gas[start] - cost[start])
return (start if (total >= 0) else (- 1))<|docstring|>:type gas: List[int]
:type cost: List[int]
:rtype: int<|endoftext|> |
0527299e62b535c517e1f63229f33369d9f108b6a50706e4e78481e4fcad3b12 | def atomic():
' To run a bunch of stuff within a transaction; e.g.:\n\n with model.atomic() as xact:\n foo\n bar\n baz\n if error:\n xact.rollback()\n\n '
return database.atomic() | To run a bunch of stuff within a transaction; e.g.:
with model.atomic() as xact:
foo
bar
baz
if error:
xact.rollback() | dc_common/model.py | atomic | plaidfluff/dreamcatcher | 0 | python | def atomic():
' To run a bunch of stuff within a transaction; e.g.:\n\n with model.atomic() as xact:\n foo\n bar\n baz\n if error:\n xact.rollback()\n\n '
return database.atomic() | def atomic():
' To run a bunch of stuff within a transaction; e.g.:\n\n with model.atomic() as xact:\n foo\n bar\n baz\n if error:\n xact.rollback()\n\n '
return database.atomic()<|docstring|>To run a bunch of stuff within a transaction; e.g.:
with model.atomic() as xact:
foo
bar
baz
if error:
xact.rollback()<|endoftext|> |
e3f69c43b92de50a16cb144ae73f3203389fed583342ad2f7f6db0ac563c716a | def drop_all_tables(i_am_really_sure=False):
' Call this if you need to nuke everything and restart. Only for development purposes, hopefully. '
if (not i_am_really_sure):
raise 'You are not really sure. Call with i_am_really_sure=True to proceed.'
with database.atomic():
for table in all_types:
database.drop_table(table) | Call this if you need to nuke everything and restart. Only for development purposes, hopefully. | dc_common/model.py | drop_all_tables | plaidfluff/dreamcatcher | 0 | python | def drop_all_tables(i_am_really_sure=False):
' '
if (not i_am_really_sure):
raise 'You are not really sure. Call with i_am_really_sure=True to proceed.'
with database.atomic():
for table in all_types:
database.drop_table(table) | def drop_all_tables(i_am_really_sure=False):
' '
if (not i_am_really_sure):
raise 'You are not really sure. Call with i_am_really_sure=True to proceed.'
with database.atomic():
for table in all_types:
database.drop_table(table)<|docstring|>Call this if you need to nuke everything and restart. Only for development purposes, hopefully.<|endoftext|> |
7f98abcd64a8093a8d98b33f3b077a29f6190419f89ad5aaba0f53640a2d2945 | @staticmethod
def update_schema(check_update, from_version):
" Implement this to migrate a database from an older version, and return the current version of this table.\n\n Only process updates if check_update is true. Example:\n\n if check_update and from_version < 1:\n migrator.migrate(\n migrator.add_column('BlahTable', 'foo', BlahTable.foo),\n migrator.add_column('BlahTable', 'bar', BlahTable.baz), # changed from 'bar' to 'baz' in verison 2\n )\n if check_update and from_version < 2:\n migrator.migrate(\n migrator.rename_column('BlahTable', 'bar', 'baz'),\n )\n return 2\n "
return 0 | Implement this to migrate a database from an older version, and return the current version of this table.
Only process updates if check_update is true. Example:
if check_update and from_version < 1:
migrator.migrate(
migrator.add_column('BlahTable', 'foo', BlahTable.foo),
migrator.add_column('BlahTable', 'bar', BlahTable.baz), # changed from 'bar' to 'baz' in verison 2
)
if check_update and from_version < 2:
migrator.migrate(
migrator.rename_column('BlahTable', 'bar', 'baz'),
)
return 2 | dc_common/model.py | update_schema | plaidfluff/dreamcatcher | 0 | python | @staticmethod
def update_schema(check_update, from_version):
" Implement this to migrate a database from an older version, and return the current version of this table.\n\n Only process updates if check_update is true. Example:\n\n if check_update and from_version < 1:\n migrator.migrate(\n migrator.add_column('BlahTable', 'foo', BlahTable.foo),\n migrator.add_column('BlahTable', 'bar', BlahTable.baz), # changed from 'bar' to 'baz' in verison 2\n )\n if check_update and from_version < 2:\n migrator.migrate(\n migrator.rename_column('BlahTable', 'bar', 'baz'),\n )\n return 2\n "
return 0 | @staticmethod
def update_schema(check_update, from_version):
" Implement this to migrate a database from an older version, and return the current version of this table.\n\n Only process updates if check_update is true. Example:\n\n if check_update and from_version < 1:\n migrator.migrate(\n migrator.add_column('BlahTable', 'foo', BlahTable.foo),\n migrator.add_column('BlahTable', 'bar', BlahTable.baz), # changed from 'bar' to 'baz' in verison 2\n )\n if check_update and from_version < 2:\n migrator.migrate(\n migrator.rename_column('BlahTable', 'bar', 'baz'),\n )\n return 2\n "
return 0<|docstring|>Implement this to migrate a database from an older version, and return the current version of this table.
Only process updates if check_update is true. Example:
if check_update and from_version < 1:
migrator.migrate(
migrator.add_column('BlahTable', 'foo', BlahTable.foo),
migrator.add_column('BlahTable', 'bar', BlahTable.baz), # changed from 'bar' to 'baz' in verison 2
)
if check_update and from_version < 2:
migrator.migrate(
migrator.rename_column('BlahTable', 'bar', 'baz'),
)
return 2<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.