body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
1cbd7de79d3988fdc3ad3642ef83d29790d9a88f6c89bbc841ed51c188af15a5
|
def convert_parsec_to_m(self, parsecs):
'Convert length in parsec to meters.'
return (parsecs * self.PARSEC)
|
Convert length in parsec to meters.
|
bidobe/astunit.py
|
convert_parsec_to_m
|
pbrus/binary-doppler-beaming
| 1
|
python
|
def convert_parsec_to_m(self, parsecs):
return (parsecs * self.PARSEC)
|
def convert_parsec_to_m(self, parsecs):
return (parsecs * self.PARSEC)<|docstring|>Convert length in parsec to meters.<|endoftext|>
|
259f0f562f0545acd4d3a928a964a79c5e1b06500fc2c02958abef482636af33
|
def test_get_profile(self):
' Test can get single user profile '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/baduism/profile/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))
|
Test can get single user profile
|
src/api/tests/test_profile_views.py
|
test_get_profile
|
ThaDeveloper/grind
| 1
|
python
|
def test_get_profile(self):
' '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/baduism/profile/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))
|
def test_get_profile(self):
' '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/baduism/profile/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))<|docstring|>Test can get single user profile<|endoftext|>
|
24dea13eb76db3a242de3de5213bdb5260dbc3d4cc2738bf9f4bca34881a826f
|
def test_get_users(self):
' Test can get all users '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))
|
Test can get all users
|
src/api/tests/test_profile_views.py
|
test_get_users
|
ThaDeveloper/grind
| 1
|
python
|
def test_get_users(self):
' '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))
|
def test_get_users(self):
' '
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/')
self.assertEqual(200, response.status_code)
self.assertIn('baduism', str(response.data))<|docstring|>Test can get all users<|endoftext|>
|
4e53bd0335d22abd568ba4ae0e863943fc8010c9609c099c5ad56cfac76c4797
|
def forward(self, x):
'"Get the logits.'
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = x[0]
return [self.fc(x)]
|
"Get the logits.
|
openmixup/models/heads/mim_head.py
|
forward
|
Westlake-AI/openmixup
| 10
|
python
|
def forward(self, x):
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = x[0]
return [self.fc(x)]
|
def forward(self, x):
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = x[0]
return [self.fc(x)]<|docstring|>"Get the logits.<|endoftext|>
|
7b71fd7728285c0805ac5ed840e67100e46697ad42049c51d6c668b3c08619a7
|
def forward(self, x):
'"Get the logits.'
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = self.bn(x[0])
return [self.fc(x)]
|
"Get the logits.
|
openmixup/models/heads/mim_head.py
|
forward
|
Westlake-AI/openmixup
| 10
|
python
|
def forward(self, x):
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = self.bn(x[0])
return [self.fc(x)]
|
def forward(self, x):
assert (isinstance(x, (tuple, list)) and (len(x) == 1))
x = self.bn(x[0])
return [self.fc(x)]<|docstring|>"Get the logits.<|endoftext|>
|
106fdb2e2055b05c938c8aa0f7369cddf3be26f4c97e875c552b81c1d26bbeab
|
def move(start, buffr, end, n):
'\n By using the recursion from base case to move upwards\n we can change the roles of each of the three towers.\n The base case will be moving one disk from start to end.\n This can be done without any buffer, in one step.\n The behaviour of the function is modified by what type\n of the three stacks we send into it.\n '
if (n < 2):
x = start.pop()
return end.append(x)
top = start.pop()
buffr = move(start, end, buffr, (n - 1))
|
By using the recursion from base case to move upwards
we can change the roles of each of the three towers.
The base case will be moving one disk from start to end.
This can be done without any buffer, in one step.
The behaviour of the function is modified by what type
of the three stacks we send into it.
|
dp/HanoiTowers.py
|
move
|
ykumards/Algorithms
| 0
|
python
|
def move(start, buffr, end, n):
'\n By using the recursion from base case to move upwards\n we can change the roles of each of the three towers.\n The base case will be moving one disk from start to end.\n This can be done without any buffer, in one step.\n The behaviour of the function is modified by what type\n of the three stacks we send into it.\n '
if (n < 2):
x = start.pop()
return end.append(x)
top = start.pop()
buffr = move(start, end, buffr, (n - 1))
|
def move(start, buffr, end, n):
'\n By using the recursion from base case to move upwards\n we can change the roles of each of the three towers.\n The base case will be moving one disk from start to end.\n This can be done without any buffer, in one step.\n The behaviour of the function is modified by what type\n of the three stacks we send into it.\n '
if (n < 2):
x = start.pop()
return end.append(x)
top = start.pop()
buffr = move(start, end, buffr, (n - 1))<|docstring|>By using the recursion from base case to move upwards
we can change the roles of each of the three towers.
The base case will be moving one disk from start to end.
This can be done without any buffer, in one step.
The behaviour of the function is modified by what type
of the three stacks we send into it.<|endoftext|>
|
0f835f6c21e2bdece68228aaaea66aee5c5321afdd7e557b6a545930120e4855
|
def prep_constant_median_DV(median):
'\n Returns a constant median Decision Variable (DV) function.\n\n Parameters\n ----------\n median: float\n The median DV for a consequence function with fixed median.\n\n Returns\n -------\n f: callable\n A function that returns the constant median DV for all component\n quantities.\n '
def f(quantity):
return median
return f
|
Returns a constant median Decision Variable (DV) function.
Parameters
----------
median: float
The median DV for a consequence function with fixed median.
Returns
-------
f: callable
A function that returns the constant median DV for all component
quantities.
|
pelicun/model.py
|
prep_constant_median_DV
|
13273781142/pelicun1
| 20
|
python
|
def prep_constant_median_DV(median):
'\n Returns a constant median Decision Variable (DV) function.\n\n Parameters\n ----------\n median: float\n The median DV for a consequence function with fixed median.\n\n Returns\n -------\n f: callable\n A function that returns the constant median DV for all component\n quantities.\n '
def f(quantity):
return median
return f
|
def prep_constant_median_DV(median):
'\n Returns a constant median Decision Variable (DV) function.\n\n Parameters\n ----------\n median: float\n The median DV for a consequence function with fixed median.\n\n Returns\n -------\n f: callable\n A function that returns the constant median DV for all component\n quantities.\n '
def f(quantity):
return median
return f<|docstring|>Returns a constant median Decision Variable (DV) function.
Parameters
----------
median: float
The median DV for a consequence function with fixed median.
Returns
-------
f: callable
A function that returns the constant median DV for all component
quantities.<|endoftext|>
|
7e6ead31daac351bb7d0730cae499bc8d60788b1caea57dddb85d6509b2789c5
|
def prep_bounded_linear_median_DV(median_max, median_min, quantity_lower, quantity_upper):
'\n Returns a bounded linear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by a linear function with a\n negative slope between max and min values.\n\n Parameters\n ----------\n median_max: float, optional\n median_min: float, optional\n Minimum and maximum limits that define the bounded_linear median DV\n function.\n quantity_lower: float, optional\n quantity_upper: float, optional\n Lower and upper bounds of component quantity that define the\n bounded_linear median DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, [quantity_lower, quantity_upper], [median_max, median_min])
return output
return f
|
Returns a bounded linear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by a linear function with a
negative slope between max and min values.
Parameters
----------
median_max: float, optional
median_min: float, optional
Minimum and maximum limits that define the bounded_linear median DV
function.
quantity_lower: float, optional
quantity_upper: float, optional
Lower and upper bounds of component quantity that define the
bounded_linear median DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.
|
pelicun/model.py
|
prep_bounded_linear_median_DV
|
13273781142/pelicun1
| 20
|
python
|
def prep_bounded_linear_median_DV(median_max, median_min, quantity_lower, quantity_upper):
'\n Returns a bounded linear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by a linear function with a\n negative slope between max and min values.\n\n Parameters\n ----------\n median_max: float, optional\n median_min: float, optional\n Minimum and maximum limits that define the bounded_linear median DV\n function.\n quantity_lower: float, optional\n quantity_upper: float, optional\n Lower and upper bounds of component quantity that define the\n bounded_linear median DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, [quantity_lower, quantity_upper], [median_max, median_min])
return output
return f
|
def prep_bounded_linear_median_DV(median_max, median_min, quantity_lower, quantity_upper):
'\n Returns a bounded linear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by a linear function with a\n negative slope between max and min values.\n\n Parameters\n ----------\n median_max: float, optional\n median_min: float, optional\n Minimum and maximum limits that define the bounded_linear median DV\n function.\n quantity_lower: float, optional\n quantity_upper: float, optional\n Lower and upper bounds of component quantity that define the\n bounded_linear median DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, [quantity_lower, quantity_upper], [median_max, median_min])
return output
return f<|docstring|>Returns a bounded linear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by a linear function with a
negative slope between max and min values.
Parameters
----------
median_max: float, optional
median_min: float, optional
Minimum and maximum limits that define the bounded_linear median DV
function.
quantity_lower: float, optional
quantity_upper: float, optional
Lower and upper bounds of component quantity that define the
bounded_linear median DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.<|endoftext|>
|
ec107bb78890a593dc4deb085914934df3352cad37feb5b78c40428158d791f7
|
def prep_bounded_multilinear_median_DV(medians, quantities):
'\n Returns a bounded multilinear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by linear interpolation.\n\n Parameters\n ----------\n medians: ndarray\n Series of values that define the y coordinates of the multilinear DV\n function.\n quantities: ndarray\n Series of values that define the component quantities corresponding to\n the series of medians and serving as the x coordinates of the \n multilinear DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, quantities, medians)
return output
return f
|
Returns a bounded multilinear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by linear interpolation.
Parameters
----------
medians: ndarray
Series of values that define the y coordinates of the multilinear DV
function.
quantities: ndarray
Series of values that define the component quantities corresponding to
the series of medians and serving as the x coordinates of the
multilinear DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.
|
pelicun/model.py
|
prep_bounded_multilinear_median_DV
|
13273781142/pelicun1
| 20
|
python
|
def prep_bounded_multilinear_median_DV(medians, quantities):
'\n Returns a bounded multilinear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by linear interpolation.\n\n Parameters\n ----------\n medians: ndarray\n Series of values that define the y coordinates of the multilinear DV\n function.\n quantities: ndarray\n Series of values that define the component quantities corresponding to\n the series of medians and serving as the x coordinates of the \n multilinear DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, quantities, medians)
return output
return f
|
def prep_bounded_multilinear_median_DV(medians, quantities):
'\n Returns a bounded multilinear median Decision Variable (DV) function.\n\n The median DV equals the min and max values when the quantity is\n outside of the prescribed quantity bounds. When the quantity is within the\n bounds, the returned median is calculated by linear interpolation.\n\n Parameters\n ----------\n medians: ndarray\n Series of values that define the y coordinates of the multilinear DV\n function.\n quantities: ndarray\n Series of values that define the component quantities corresponding to\n the series of medians and serving as the x coordinates of the \n multilinear DV function.\n\n Returns\n -------\n f: callable\n A function that returns the median DV given the quantity of damaged\n components.\n '
def f(quantity):
if (quantity is None):
raise ValueError('A bounded linear median Decision Variable function called without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
output = np.interp(q_array, quantities, medians)
return output
return f<|docstring|>Returns a bounded multilinear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by linear interpolation.
Parameters
----------
medians: ndarray
Series of values that define the y coordinates of the multilinear DV
function.
quantities: ndarray
Series of values that define the component quantities corresponding to
the series of medians and serving as the x coordinates of the
multilinear DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.<|endoftext|>
|
5dd1ee37157c98941ab669574320d42b489921eed3886ab326e1e03148ce793f
|
def P_exc(self, EDP, DSG_ID):
'\n Return the probability of damage state exceedance.\n\n Calculate the probability of exceeding the damage corresponding to the\n DSG identified by the DSG_ID conditioned on a particular EDP value.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the conditioning DSG. The DSG numbering is 1-based,\n because zero typically corresponds to the undamaged state.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n DSG exceedance probability at the given EDP point(s).\n '
EDP = np.asarray(EDP, dtype=np.float64)
nvals = EDP.size
if (DSG_ID == 0):
P_exc = np.ones(EDP.size)
else:
ndims = len(self._EDP_limit)
limit_list = np.full((ndims, nvals), (- np.inf), dtype=np.float64)
limit_list[(DSG_ID - 1):] = EDP
limit_list[:(DSG_ID - 1)] = None
P_exc = (1.0 - self._EDP_limit[0].RV_set.orthotope_density(lower=limit_list, var_subset=self._EDP_tags))
if (EDP.size == 1):
return P_exc[0]
else:
return P_exc
|
Return the probability of damage state exceedance.
Calculate the probability of exceeding the damage corresponding to the
DSG identified by the DSG_ID conditioned on a particular EDP value.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the conditioning DSG. The DSG numbering is 1-based,
because zero typically corresponds to the undamaged state.
Returns
-------
P_exc: float scalar or ndarray
DSG exceedance probability at the given EDP point(s).
|
pelicun/model.py
|
P_exc
|
13273781142/pelicun1
| 20
|
python
|
def P_exc(self, EDP, DSG_ID):
'\n Return the probability of damage state exceedance.\n\n Calculate the probability of exceeding the damage corresponding to the\n DSG identified by the DSG_ID conditioned on a particular EDP value.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the conditioning DSG. The DSG numbering is 1-based,\n because zero typically corresponds to the undamaged state.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n DSG exceedance probability at the given EDP point(s).\n '
EDP = np.asarray(EDP, dtype=np.float64)
nvals = EDP.size
if (DSG_ID == 0):
P_exc = np.ones(EDP.size)
else:
ndims = len(self._EDP_limit)
limit_list = np.full((ndims, nvals), (- np.inf), dtype=np.float64)
limit_list[(DSG_ID - 1):] = EDP
limit_list[:(DSG_ID - 1)] = None
P_exc = (1.0 - self._EDP_limit[0].RV_set.orthotope_density(lower=limit_list, var_subset=self._EDP_tags))
if (EDP.size == 1):
return P_exc[0]
else:
return P_exc
|
def P_exc(self, EDP, DSG_ID):
'\n Return the probability of damage state exceedance.\n\n Calculate the probability of exceeding the damage corresponding to the\n DSG identified by the DSG_ID conditioned on a particular EDP value.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the conditioning DSG. The DSG numbering is 1-based,\n because zero typically corresponds to the undamaged state.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n DSG exceedance probability at the given EDP point(s).\n '
EDP = np.asarray(EDP, dtype=np.float64)
nvals = EDP.size
if (DSG_ID == 0):
P_exc = np.ones(EDP.size)
else:
ndims = len(self._EDP_limit)
limit_list = np.full((ndims, nvals), (- np.inf), dtype=np.float64)
limit_list[(DSG_ID - 1):] = EDP
limit_list[:(DSG_ID - 1)] = None
P_exc = (1.0 - self._EDP_limit[0].RV_set.orthotope_density(lower=limit_list, var_subset=self._EDP_tags))
if (EDP.size == 1):
return P_exc[0]
else:
return P_exc<|docstring|>Return the probability of damage state exceedance.
Calculate the probability of exceeding the damage corresponding to the
DSG identified by the DSG_ID conditioned on a particular EDP value.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the conditioning DSG. The DSG numbering is 1-based,
because zero typically corresponds to the undamaged state.
Returns
-------
P_exc: float scalar or ndarray
DSG exceedance probability at the given EDP point(s).<|endoftext|>
|
ec57cb272b48dcc748b056d143a88efc467ac1e9a6360b9751db1df77f8a8efd
|
def DSG_given_EDP(self, EDP, force_resampling=False):
'\n Given an EDP, get a damage level based on the fragility function.\n\n The damage is evaluated by sampling the joint distribution of\n fragilities corresponding to all possible damage levels and checking\n which damage level the given EDP falls into. This approach allows for\n efficient damage state evaluation for a large number of EDP\n realizations.\n\n Parameters\n ----------\n EDP: float scalar or ndarray or Series\n Single EDP, or numpy array or pandas Series of EDP values.\n force_resampling: bool, optional, default: False\n If True, the probability distribution is resampled before\n evaluating the damage for each EDP. This is not recommended if the\n fragility functions are correlated with other sources of\n uncertainty because those variables will also be resampled in this\n case. If False, which is the default approach, we assume that\n the random variable has already been sampled and the number of\n samples greater or equal to the number of EDP values.\n\n Returns\n -------\n DSG_ID: Series\n Identifies the damage that corresponds to the given EDP. A DSG_ID\n of 0 means no damage.\n\n '
nsamples = EDP.size
samples = pd.DataFrame(dict([(lim_i.name, lim_i.samples) for lim_i in self._EDP_limit]))
if (type(EDP) not in [pd.Series, pd.DataFrame]):
EDP = pd.Series(EDP, name='EDP')
nstates = samples.shape[1]
samples = samples.loc[(EDP.index, :)]
sample_cols = samples.columns
samples = samples[sample_cols[np.argsort(sample_cols)]]
EXC = (samples.sub(EDP, axis=0) < 0.0)
DSG_ID = pd.Series(np.zeros(len(samples.index)), name='DSG_ID', index=samples.index, dtype=np.int)
for s in range(nstates):
DSG_ID[EXC.iloc[(:, s)]] = (s + 1)
return DSG_ID
|
Given an EDP, get a damage level based on the fragility function.
The damage is evaluated by sampling the joint distribution of
fragilities corresponding to all possible damage levels and checking
which damage level the given EDP falls into. This approach allows for
efficient damage state evaluation for a large number of EDP
realizations.
Parameters
----------
EDP: float scalar or ndarray or Series
Single EDP, or numpy array or pandas Series of EDP values.
force_resampling: bool, optional, default: False
If True, the probability distribution is resampled before
evaluating the damage for each EDP. This is not recommended if the
fragility functions are correlated with other sources of
uncertainty because those variables will also be resampled in this
case. If False, which is the default approach, we assume that
the random variable has already been sampled and the number of
samples greater or equal to the number of EDP values.
Returns
-------
DSG_ID: Series
Identifies the damage that corresponds to the given EDP. A DSG_ID
of 0 means no damage.
|
pelicun/model.py
|
DSG_given_EDP
|
13273781142/pelicun1
| 20
|
python
|
def DSG_given_EDP(self, EDP, force_resampling=False):
'\n Given an EDP, get a damage level based on the fragility function.\n\n The damage is evaluated by sampling the joint distribution of\n fragilities corresponding to all possible damage levels and checking\n which damage level the given EDP falls into. This approach allows for\n efficient damage state evaluation for a large number of EDP\n realizations.\n\n Parameters\n ----------\n EDP: float scalar or ndarray or Series\n Single EDP, or numpy array or pandas Series of EDP values.\n force_resampling: bool, optional, default: False\n If True, the probability distribution is resampled before\n evaluating the damage for each EDP. This is not recommended if the\n fragility functions are correlated with other sources of\n uncertainty because those variables will also be resampled in this\n case. If False, which is the default approach, we assume that\n the random variable has already been sampled and the number of\n samples greater or equal to the number of EDP values.\n\n Returns\n -------\n DSG_ID: Series\n Identifies the damage that corresponds to the given EDP. A DSG_ID\n of 0 means no damage.\n\n '
nsamples = EDP.size
samples = pd.DataFrame(dict([(lim_i.name, lim_i.samples) for lim_i in self._EDP_limit]))
if (type(EDP) not in [pd.Series, pd.DataFrame]):
EDP = pd.Series(EDP, name='EDP')
nstates = samples.shape[1]
samples = samples.loc[(EDP.index, :)]
sample_cols = samples.columns
samples = samples[sample_cols[np.argsort(sample_cols)]]
EXC = (samples.sub(EDP, axis=0) < 0.0)
DSG_ID = pd.Series(np.zeros(len(samples.index)), name='DSG_ID', index=samples.index, dtype=np.int)
for s in range(nstates):
DSG_ID[EXC.iloc[(:, s)]] = (s + 1)
return DSG_ID
|
def DSG_given_EDP(self, EDP, force_resampling=False):
'\n Given an EDP, get a damage level based on the fragility function.\n\n The damage is evaluated by sampling the joint distribution of\n fragilities corresponding to all possible damage levels and checking\n which damage level the given EDP falls into. This approach allows for\n efficient damage state evaluation for a large number of EDP\n realizations.\n\n Parameters\n ----------\n EDP: float scalar or ndarray or Series\n Single EDP, or numpy array or pandas Series of EDP values.\n force_resampling: bool, optional, default: False\n If True, the probability distribution is resampled before\n evaluating the damage for each EDP. This is not recommended if the\n fragility functions are correlated with other sources of\n uncertainty because those variables will also be resampled in this\n case. If False, which is the default approach, we assume that\n the random variable has already been sampled and the number of\n samples greater or equal to the number of EDP values.\n\n Returns\n -------\n DSG_ID: Series\n Identifies the damage that corresponds to the given EDP. A DSG_ID\n of 0 means no damage.\n\n '
nsamples = EDP.size
samples = pd.DataFrame(dict([(lim_i.name, lim_i.samples) for lim_i in self._EDP_limit]))
if (type(EDP) not in [pd.Series, pd.DataFrame]):
EDP = pd.Series(EDP, name='EDP')
nstates = samples.shape[1]
samples = samples.loc[(EDP.index, :)]
sample_cols = samples.columns
samples = samples[sample_cols[np.argsort(sample_cols)]]
EXC = (samples.sub(EDP, axis=0) < 0.0)
DSG_ID = pd.Series(np.zeros(len(samples.index)), name='DSG_ID', index=samples.index, dtype=np.int)
for s in range(nstates):
DSG_ID[EXC.iloc[(:, s)]] = (s + 1)
return DSG_ID<|docstring|>Given an EDP, get a damage level based on the fragility function.
The damage is evaluated by sampling the joint distribution of
fragilities corresponding to all possible damage levels and checking
which damage level the given EDP falls into. This approach allows for
efficient damage state evaluation for a large number of EDP
realizations.
Parameters
----------
EDP: float scalar or ndarray or Series
Single EDP, or numpy array or pandas Series of EDP values.
force_resampling: bool, optional, default: False
If True, the probability distribution is resampled before
evaluating the damage for each EDP. This is not recommended if the
fragility functions are correlated with other sources of
uncertainty because those variables will also be resampled in this
case. If False, which is the default approach, we assume that
the random variable has already been sampled and the number of
samples greater or equal to the number of EDP values.
Returns
-------
DSG_ID: Series
Identifies the damage that corresponds to the given EDP. A DSG_ID
of 0 means no damage.<|endoftext|>
|
01ce5874260faec77a3b18f34a9c8160b0e8fe871cd6d64126e7fc3340dfd70b
|
def median(self, quantity=None):
'\n Return the value of the median DV.\n\n The median DV corresponds to the component damage state (DS). If the\n damage consequence depends on the quantity of damaged components, the\n total quantity of damaged components shall be specified through the\n quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar or ndarray, optional\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n\n Returns\n -------\n median: float scalar or ndarray\n A single scalar for fixed median; a scalar or an array depending on\n the shape of the quantity parameter for bounded_linear median.\n\n '
return self._DV_median(quantity)
|
Return the value of the median DV.
The median DV corresponds to the component damage state (DS). If the
damage consequence depends on the quantity of damaged components, the
total quantity of damaged components shall be specified through the
quantity parameter.
Parameters
----------
quantity: float scalar or ndarray, optional
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
Returns
-------
median: float scalar or ndarray
A single scalar for fixed median; a scalar or an array depending on
the shape of the quantity parameter for bounded_linear median.
|
pelicun/model.py
|
median
|
13273781142/pelicun1
| 20
|
python
|
def median(self, quantity=None):
'\n Return the value of the median DV.\n\n The median DV corresponds to the component damage state (DS). If the\n damage consequence depends on the quantity of damaged components, the\n total quantity of damaged components shall be specified through the\n quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar or ndarray, optional\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n\n Returns\n -------\n median: float scalar or ndarray\n A single scalar for fixed median; a scalar or an array depending on\n the shape of the quantity parameter for bounded_linear median.\n\n '
return self._DV_median(quantity)
|
def median(self, quantity=None):
'\n Return the value of the median DV.\n\n The median DV corresponds to the component damage state (DS). If the\n damage consequence depends on the quantity of damaged components, the\n total quantity of damaged components shall be specified through the\n quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar or ndarray, optional\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n\n Returns\n -------\n median: float scalar or ndarray\n A single scalar for fixed median; a scalar or an array depending on\n the shape of the quantity parameter for bounded_linear median.\n\n '
return self._DV_median(quantity)<|docstring|>Return the value of the median DV.
The median DV corresponds to the component damage state (DS). If the
damage consequence depends on the quantity of damaged components, the
total quantity of damaged components shall be specified through the
quantity parameter.
Parameters
----------
quantity: float scalar or ndarray, optional
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
Returns
-------
median: float scalar or ndarray
A single scalar for fixed median; a scalar or an array depending on
the shape of the quantity parameter for bounded_linear median.<|endoftext|>
|
0fa1e58977199dc96104b0fbad5641443d9b4d2bbbcd777090a461ce3348a479
|
def sample_unit_DV(self, quantity=None, sample_size=1, force_resampling=False):
'\n Sample the decision variable quantity per component unit.\n\n The Unit Decision Variable (UDV) corresponds to the component Damage\n State (DS). It shall be multiplied by the quantity of damaged\n components to get the total DV that corresponds to the quantity of the\n damaged components in the asset. If the DV depends on the total\n quantity of damaged components, that value shall be specified through\n the quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n sample_size: int, optional, default: 1\n Number of samples drawn from the DV distribution. The default value\n yields one sample. If quantity is an array with more than one\n element, the sample_size parameter is ignored.\n force_resampling: bool, optional, default: False\n If True, the DV distribution (and the corresponding RV if there\n are correlations) is resampled even if there are samples already\n available. This is not recommended if the DV distribution is\n correlated with other sources of uncertainty because those\n variables will also be resampled in this case. If False, which is\n the default approach, we assume that the random variable has\n already been sampled and the number of samples is greater or equal\n to the number of samples requested.\n\n Returns\n -------\n unit_DV: float scalar or ndarray\n Unit DV samples.\n\n '
median = self.median(quantity=np.asarray(quantity))
if (self._DV_distribution is None):
return median
else:
if (quantity is not None):
if (type(quantity) not in [pd.Series, pd.DataFrame]):
quantity = pd.Series(quantity, name='QNT')
if (quantity.size > 1):
sample_size = quantity.size
elif (sample_size > 1):
quantity = pd.Series((np.ones(sample_size) * quantity.values), name='QNT')
if (quantity is not None):
samples = pd.Series(self._DV_distribution.samples).loc[quantity.index]
else:
samples = pd.Series(self._DV_distribution.samples).iloc[:sample_size]
samples = (samples * median)
return samples
|
Sample the decision variable quantity per component unit.
The Unit Decision Variable (UDV) corresponds to the component Damage
State (DS). It shall be multiplied by the quantity of damaged
components to get the total DV that corresponds to the quantity of the
damaged components in the asset. If the DV depends on the total
quantity of damaged components, that value shall be specified through
the quantity parameter.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
sample_size: int, optional, default: 1
Number of samples drawn from the DV distribution. The default value
yields one sample. If quantity is an array with more than one
element, the sample_size parameter is ignored.
force_resampling: bool, optional, default: False
If True, the DV distribution (and the corresponding RV if there
are correlations) is resampled even if there are samples already
available. This is not recommended if the DV distribution is
correlated with other sources of uncertainty because those
variables will also be resampled in this case. If False, which is
the default approach, we assume that the random variable has
already been sampled and the number of samples is greater or equal
to the number of samples requested.
Returns
-------
unit_DV: float scalar or ndarray
Unit DV samples.
|
pelicun/model.py
|
sample_unit_DV
|
13273781142/pelicun1
| 20
|
python
|
def sample_unit_DV(self, quantity=None, sample_size=1, force_resampling=False):
'\n Sample the decision variable quantity per component unit.\n\n The Unit Decision Variable (UDV) corresponds to the component Damage\n State (DS). It shall be multiplied by the quantity of damaged\n components to get the total DV that corresponds to the quantity of the\n damaged components in the asset. If the DV depends on the total\n quantity of damaged components, that value shall be specified through\n the quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n sample_size: int, optional, default: 1\n Number of samples drawn from the DV distribution. The default value\n yields one sample. If quantity is an array with more than one\n element, the sample_size parameter is ignored.\n force_resampling: bool, optional, default: False\n If True, the DV distribution (and the corresponding RV if there\n are correlations) is resampled even if there are samples already\n available. This is not recommended if the DV distribution is\n correlated with other sources of uncertainty because those\n variables will also be resampled in this case. If False, which is\n the default approach, we assume that the random variable has\n already been sampled and the number of samples is greater or equal\n to the number of samples requested.\n\n Returns\n -------\n unit_DV: float scalar or ndarray\n Unit DV samples.\n\n '
median = self.median(quantity=np.asarray(quantity))
if (self._DV_distribution is None):
return median
else:
if (quantity is not None):
if (type(quantity) not in [pd.Series, pd.DataFrame]):
quantity = pd.Series(quantity, name='QNT')
if (quantity.size > 1):
sample_size = quantity.size
elif (sample_size > 1):
quantity = pd.Series((np.ones(sample_size) * quantity.values), name='QNT')
if (quantity is not None):
samples = pd.Series(self._DV_distribution.samples).loc[quantity.index]
else:
samples = pd.Series(self._DV_distribution.samples).iloc[:sample_size]
samples = (samples * median)
return samples
|
def sample_unit_DV(self, quantity=None, sample_size=1, force_resampling=False):
'\n Sample the decision variable quantity per component unit.\n\n The Unit Decision Variable (UDV) corresponds to the component Damage\n State (DS). It shall be multiplied by the quantity of damaged\n components to get the total DV that corresponds to the quantity of the\n damaged components in the asset. If the DV depends on the total\n quantity of damaged components, that value shall be specified through\n the quantity parameter.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median DV. Not needed for consequence functions with a fixed\n median DV.\n sample_size: int, optional, default: 1\n Number of samples drawn from the DV distribution. The default value\n yields one sample. If quantity is an array with more than one\n element, the sample_size parameter is ignored.\n force_resampling: bool, optional, default: False\n If True, the DV distribution (and the corresponding RV if there\n are correlations) is resampled even if there are samples already\n available. This is not recommended if the DV distribution is\n correlated with other sources of uncertainty because those\n variables will also be resampled in this case. If False, which is\n the default approach, we assume that the random variable has\n already been sampled and the number of samples is greater or equal\n to the number of samples requested.\n\n Returns\n -------\n unit_DV: float scalar or ndarray\n Unit DV samples.\n\n '
median = self.median(quantity=np.asarray(quantity))
if (self._DV_distribution is None):
return median
else:
if (quantity is not None):
if (type(quantity) not in [pd.Series, pd.DataFrame]):
quantity = pd.Series(quantity, name='QNT')
if (quantity.size > 1):
sample_size = quantity.size
elif (sample_size > 1):
quantity = pd.Series((np.ones(sample_size) * quantity.values), name='QNT')
if (quantity is not None):
samples = pd.Series(self._DV_distribution.samples).loc[quantity.index]
else:
samples = pd.Series(self._DV_distribution.samples).iloc[:sample_size]
samples = (samples * median)
return samples<|docstring|>Sample the decision variable quantity per component unit.
The Unit Decision Variable (UDV) corresponds to the component Damage
State (DS). It shall be multiplied by the quantity of damaged
components to get the total DV that corresponds to the quantity of the
damaged components in the asset. If the DV depends on the total
quantity of damaged components, that value shall be specified through
the quantity parameter.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
sample_size: int, optional, default: 1
Number of samples drawn from the DV distribution. The default value
yields one sample. If quantity is an array with more than one
element, the sample_size parameter is ignored.
force_resampling: bool, optional, default: False
If True, the DV distribution (and the corresponding RV if there
are correlations) is resampled even if there are samples already
available. This is not recommended if the DV distribution is
correlated with other sources of uncertainty because those
variables will also be resampled in this case. If False, which is
the default approach, we assume that the random variable has
already been sampled and the number of samples is greater or equal
to the number of samples requested.
Returns
-------
unit_DV: float scalar or ndarray
Unit DV samples.<|endoftext|>
|
e2bfc1ae3ff34754bbde5596e79398677a7c2d703c2458c201f54e5b2f47ec42
|
@property
def description(self):
'\n Return the damage description.\n '
return self._description
|
Return the damage description.
|
pelicun/model.py
|
description
|
13273781142/pelicun1
| 20
|
python
|
@property
def description(self):
'\n \n '
return self._description
|
@property
def description(self):
'\n \n '
return self._description<|docstring|>Return the damage description.<|endoftext|>
|
a691e87d1ead2c5147db363706659378eae7f882b197332bb0c487db8bb8b99b
|
@property
def weight(self):
'\n Return the weight of DS among the set of damage states in the DSG.\n '
return self._weight
|
Return the weight of DS among the set of damage states in the DSG.
|
pelicun/model.py
|
weight
|
13273781142/pelicun1
| 20
|
python
|
@property
def weight(self):
'\n \n '
return self._weight
|
@property
def weight(self):
'\n \n '
return self._weight<|docstring|>Return the weight of DS among the set of damage states in the DSG.<|endoftext|>
|
2498eac209693eef3401da6d1249a8e5667664d27f3c9a8aaf46d2c39c784950
|
def unit_repair_cost(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the repair cost distribution and return the unit repair costs.\n\n The unit repair costs shall be multiplied by the quantity of damaged\n components to get the total repair costs for the components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the median\n repair cost. Not used for repair cost models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the repair cost distribution. The\n default value yields one sample.\n\n Returns\n -------\n unit_repair_cost: float scalar or ndarray\n Unit repair cost samples.\n\n '
output = None
if (self._repair_cost_CF is not None):
output = self._repair_cost_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output
|
Sample the repair cost distribution and return the unit repair costs.
The unit repair costs shall be multiplied by the quantity of damaged
components to get the total repair costs for the components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the median
repair cost. Not used for repair cost models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the repair cost distribution. The
default value yields one sample.
Returns
-------
unit_repair_cost: float scalar or ndarray
Unit repair cost samples.
|
pelicun/model.py
|
unit_repair_cost
|
13273781142/pelicun1
| 20
|
python
|
def unit_repair_cost(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the repair cost distribution and return the unit repair costs.\n\n The unit repair costs shall be multiplied by the quantity of damaged\n components to get the total repair costs for the components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the median\n repair cost. Not used for repair cost models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the repair cost distribution. The\n default value yields one sample.\n\n Returns\n -------\n unit_repair_cost: float scalar or ndarray\n Unit repair cost samples.\n\n '
output = None
if (self._repair_cost_CF is not None):
output = self._repair_cost_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output
|
def unit_repair_cost(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the repair cost distribution and return the unit repair costs.\n\n The unit repair costs shall be multiplied by the quantity of damaged\n components to get the total repair costs for the components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the median\n repair cost. Not used for repair cost models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the repair cost distribution. The\n default value yields one sample.\n\n Returns\n -------\n unit_repair_cost: float scalar or ndarray\n Unit repair cost samples.\n\n '
output = None
if (self._repair_cost_CF is not None):
output = self._repair_cost_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output<|docstring|>Sample the repair cost distribution and return the unit repair costs.
The unit repair costs shall be multiplied by the quantity of damaged
components to get the total repair costs for the components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the median
repair cost. Not used for repair cost models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the repair cost distribution. The
default value yields one sample.
Returns
-------
unit_repair_cost: float scalar or ndarray
Unit repair cost samples.<|endoftext|>
|
163d2238e8dc8f470f6fb8e2e69cc5acd2e197b346190a7642d1c743e81cd5d1
|
def unit_reconstruction_time(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the reconstruction time distribution and return the unit\n reconstruction times.\n\n The unit reconstruction times shall be multiplied by the quantity of\n damaged components to get the total reconstruction time for the\n components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median reconstruction time. Not used for reconstruction time\n models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the reconstruction time distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_reconstruction_time: float scalar or ndarray\n Unit reconstruction time samples.\n\n '
output = None
if (self._reconstruction_time_CF is not None):
output = self._reconstruction_time_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output
|
Sample the reconstruction time distribution and return the unit
reconstruction times.
The unit reconstruction times shall be multiplied by the quantity of
damaged components to get the total reconstruction time for the
components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median reconstruction time. Not used for reconstruction time
models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the reconstruction time distribution.
The default value yields one sample.
Returns
-------
unit_reconstruction_time: float scalar or ndarray
Unit reconstruction time samples.
|
pelicun/model.py
|
unit_reconstruction_time
|
13273781142/pelicun1
| 20
|
python
|
def unit_reconstruction_time(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the reconstruction time distribution and return the unit\n reconstruction times.\n\n The unit reconstruction times shall be multiplied by the quantity of\n damaged components to get the total reconstruction time for the\n components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median reconstruction time. Not used for reconstruction time\n models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the reconstruction time distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_reconstruction_time: float scalar or ndarray\n Unit reconstruction time samples.\n\n '
output = None
if (self._reconstruction_time_CF is not None):
output = self._reconstruction_time_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output
|
def unit_reconstruction_time(self, quantity=None, sample_size=1, **kwargs):
'\n Sample the reconstruction time distribution and return the unit\n reconstruction times.\n\n The unit reconstruction times shall be multiplied by the quantity of\n damaged components to get the total reconstruction time for the\n components in this DS.\n\n Parameters\n ----------\n quantity: float scalar, ndarray or Series, optional, default: None\n Total quantity of damaged components that determines the magnitude\n of median reconstruction time. Not used for reconstruction time\n models with fixed median.\n sample_size: int, optional, default: 1\n Number of samples drawn from the reconstruction time distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_reconstruction_time: float scalar or ndarray\n Unit reconstruction time samples.\n\n '
output = None
if (self._reconstruction_time_CF is not None):
output = self._reconstruction_time_CF.sample_unit_DV(quantity=quantity, sample_size=sample_size, **kwargs)
return output<|docstring|>Sample the reconstruction time distribution and return the unit
reconstruction times.
The unit reconstruction times shall be multiplied by the quantity of
damaged components to get the total reconstruction time for the
components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median reconstruction time. Not used for reconstruction time
models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the reconstruction time distribution.
The default value yields one sample.
Returns
-------
unit_reconstruction_time: float scalar or ndarray
Unit reconstruction time samples.<|endoftext|>
|
5452521ceb9287522864287edcba6c4fe23e586d241341156a0e3705b94f983b
|
def red_tag_dmg_limit(self, sample_size=1, **kwargs):
'\n Sample the red tag consequence function and return the proportion of\n components that needs to be damaged to trigger a red tag.\n\n The red tag consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components.\n\n Parameters\n ----------\n sample_size: int, optional, default: 1\n Number of samples drawn from the red tag consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n red_tag_trigger: float scalar or ndarray\n Samples of damaged component proportions that trigger a red tag.\n\n '
output = None
if (self._red_tag_CF is not None):
output = self._red_tag_CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output
|
Sample the red tag consequence function and return the proportion of
components that needs to be damaged to trigger a red tag.
The red tag consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components.
Parameters
----------
sample_size: int, optional, default: 1
Number of samples drawn from the red tag consequence distribution.
The default value yields one sample.
Returns
-------
red_tag_trigger: float scalar or ndarray
Samples of damaged component proportions that trigger a red tag.
|
pelicun/model.py
|
red_tag_dmg_limit
|
13273781142/pelicun1
| 20
|
python
|
def red_tag_dmg_limit(self, sample_size=1, **kwargs):
'\n Sample the red tag consequence function and return the proportion of\n components that needs to be damaged to trigger a red tag.\n\n The red tag consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components.\n\n Parameters\n ----------\n sample_size: int, optional, default: 1\n Number of samples drawn from the red tag consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n red_tag_trigger: float scalar or ndarray\n Samples of damaged component proportions that trigger a red tag.\n\n '
output = None
if (self._red_tag_CF is not None):
output = self._red_tag_CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output
|
def red_tag_dmg_limit(self, sample_size=1, **kwargs):
'\n Sample the red tag consequence function and return the proportion of\n components that needs to be damaged to trigger a red tag.\n\n The red tag consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components.\n\n Parameters\n ----------\n sample_size: int, optional, default: 1\n Number of samples drawn from the red tag consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n red_tag_trigger: float scalar or ndarray\n Samples of damaged component proportions that trigger a red tag.\n\n '
output = None
if (self._red_tag_CF is not None):
output = self._red_tag_CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output<|docstring|>Sample the red tag consequence function and return the proportion of
components that needs to be damaged to trigger a red tag.
The red tag consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components.
Parameters
----------
sample_size: int, optional, default: 1
Number of samples drawn from the red tag consequence distribution.
The default value yields one sample.
Returns
-------
red_tag_trigger: float scalar or ndarray
Samples of damaged component proportions that trigger a red tag.<|endoftext|>
|
4b54d134ffc0a53df49110bee1b93bf7d7e777556bfe5ed96c3527cdf501b085
|
def unit_injuries(self, severity_level=0, sample_size=1, **kwargs):
'\n Sample the injury consequence function that corresponds to the\n specified level of severity and return the injuries per component unit.\n\n The injury consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components (i.e.\n the number of injuries per component unit does not change with the\n quantity of components.)\n\n Parameters\n ----------\n severity_level: int, optional, default: 1\n Identifies which injury consequence to sample. The indexing of\n severity levels is zero-based.\n sample_size: int, optional, default: 1\n Number of samples drawn from the injury consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_injuries: float scalar or ndarray\n Unit injury samples.\n\n '
output = None
if (len(self._injuries_CF_set) > severity_level):
CF = self._injuries_CF_set[severity_level]
if (CF is not None):
output = CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output
|
Sample the injury consequence function that corresponds to the
specified level of severity and return the injuries per component unit.
The injury consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components (i.e.
the number of injuries per component unit does not change with the
quantity of components.)
Parameters
----------
severity_level: int, optional, default: 1
Identifies which injury consequence to sample. The indexing of
severity levels is zero-based.
sample_size: int, optional, default: 1
Number of samples drawn from the injury consequence distribution.
The default value yields one sample.
Returns
-------
unit_injuries: float scalar or ndarray
Unit injury samples.
|
pelicun/model.py
|
unit_injuries
|
13273781142/pelicun1
| 20
|
python
|
def unit_injuries(self, severity_level=0, sample_size=1, **kwargs):
'\n Sample the injury consequence function that corresponds to the\n specified level of severity and return the injuries per component unit.\n\n The injury consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components (i.e.\n the number of injuries per component unit does not change with the\n quantity of components.)\n\n Parameters\n ----------\n severity_level: int, optional, default: 1\n Identifies which injury consequence to sample. The indexing of\n severity levels is zero-based.\n sample_size: int, optional, default: 1\n Number of samples drawn from the injury consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_injuries: float scalar or ndarray\n Unit injury samples.\n\n '
output = None
if (len(self._injuries_CF_set) > severity_level):
CF = self._injuries_CF_set[severity_level]
if (CF is not None):
output = CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output
|
def unit_injuries(self, severity_level=0, sample_size=1, **kwargs):
'\n Sample the injury consequence function that corresponds to the\n specified level of severity and return the injuries per component unit.\n\n The injury consequence function is assumed to have a fixed median\n value that does not depend on the quantity of damaged components (i.e.\n the number of injuries per component unit does not change with the\n quantity of components.)\n\n Parameters\n ----------\n severity_level: int, optional, default: 1\n Identifies which injury consequence to sample. The indexing of\n severity levels is zero-based.\n sample_size: int, optional, default: 1\n Number of samples drawn from the injury consequence distribution.\n The default value yields one sample.\n\n Returns\n -------\n unit_injuries: float scalar or ndarray\n Unit injury samples.\n\n '
output = None
if (len(self._injuries_CF_set) > severity_level):
CF = self._injuries_CF_set[severity_level]
if (CF is not None):
output = CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output<|docstring|>Sample the injury consequence function that corresponds to the
specified level of severity and return the injuries per component unit.
The injury consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components (i.e.
the number of injuries per component unit does not change with the
quantity of components.)
Parameters
----------
severity_level: int, optional, default: 1
Identifies which injury consequence to sample. The indexing of
severity levels is zero-based.
sample_size: int, optional, default: 1
Number of samples drawn from the injury consequence distribution.
The default value yields one sample.
Returns
-------
unit_injuries: float scalar or ndarray
Unit injury samples.<|endoftext|>
|
89c0b2e7c2cc3f211aa1b2eac69451210fe8455adc89a6f23399f3326c0dafbb
|
def P_exc(self, EDP, DSG_ID):
'\n This is a convenience function that provides a shortcut to\n fragility_function.P_exc(). It calculates the exceedance probability\n of a given DSG conditioned on the provided EDP value(s). The fragility\n functions assigned to the first subset are used for this calculation\n because P_exc shall be identical among subsets.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the DSG of interest.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n Exceedance probability of the given DSG at the EDP point(s).\n '
return self._FF_set[0].P_exc(EDP, DSG_ID)
|
This is a convenience function that provides a shortcut to
fragility_function.P_exc(). It calculates the exceedance probability
of a given DSG conditioned on the provided EDP value(s). The fragility
functions assigned to the first subset are used for this calculation
because P_exc shall be identical among subsets.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the DSG of interest.
Returns
-------
P_exc: float scalar or ndarray
Exceedance probability of the given DSG at the EDP point(s).
|
pelicun/model.py
|
P_exc
|
13273781142/pelicun1
| 20
|
python
|
def P_exc(self, EDP, DSG_ID):
'\n This is a convenience function that provides a shortcut to\n fragility_function.P_exc(). It calculates the exceedance probability\n of a given DSG conditioned on the provided EDP value(s). The fragility\n functions assigned to the first subset are used for this calculation\n because P_exc shall be identical among subsets.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the DSG of interest.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n Exceedance probability of the given DSG at the EDP point(s).\n '
return self._FF_set[0].P_exc(EDP, DSG_ID)
|
def P_exc(self, EDP, DSG_ID):
'\n This is a convenience function that provides a shortcut to\n fragility_function.P_exc(). It calculates the exceedance probability\n of a given DSG conditioned on the provided EDP value(s). The fragility\n functions assigned to the first subset are used for this calculation\n because P_exc shall be identical among subsets.\n\n Parameters\n ----------\n EDP: float scalar or ndarray\n Single EDP or numpy array of EDP values.\n DSG_ID: int\n Identifies the DSG of interest.\n\n Returns\n -------\n P_exc: float scalar or ndarray\n Exceedance probability of the given DSG at the EDP point(s).\n '
return self._FF_set[0].P_exc(EDP, DSG_ID)<|docstring|>This is a convenience function that provides a shortcut to
fragility_function.P_exc(). It calculates the exceedance probability
of a given DSG conditioned on the provided EDP value(s). The fragility
functions assigned to the first subset are used for this calculation
because P_exc shall be identical among subsets.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the DSG of interest.
Returns
-------
P_exc: float scalar or ndarray
Exceedance probability of the given DSG at the EDP point(s).<|endoftext|>
|
954cc76ef15f64607534983990ff457ef82f8a28e350d5c605ac4fa5eee39689
|
@property
def description(self):
'\n Return the fragility group description.\n '
return self._description
|
Return the fragility group description.
|
pelicun/model.py
|
description
|
13273781142/pelicun1
| 20
|
python
|
@property
def description(self):
'\n \n '
return self._description
|
@property
def description(self):
'\n \n '
return self._description<|docstring|>Return the fragility group description.<|endoftext|>
|
235a343fb6013f96c528eeafa72d4222af19397981bfcc6e66a78e873c595d66
|
@property
def name(self):
'\n Return the name of the fragility group.\n\n '
return self._name
|
Return the name of the fragility group.
|
pelicun/model.py
|
name
|
13273781142/pelicun1
| 20
|
python
|
@property
def name(self):
'\n \n\n '
return self._name
|
@property
def name(self):
'\n \n\n '
return self._name<|docstring|>Return the name of the fragility group.<|endoftext|>
|
b4b7fef6ba8916986c85dbe29d42e333a0569cf06c4ad6ed91789c8356659c8f
|
def get_domainid_bysession(request):
' 获取操作域名ID\n :param request:\n :return:\n '
try:
domain_id = int(request.session.get('domain_id', None))
except:
domain_id = 0
if (not domain_id):
obj = Domain.objects.order_by('id').first()
if obj:
domain_id = obj.id
request.session['domain_id'] = domain_id
return domain_id
|
获取操作域名ID
:param request:
:return:
|
Linux-Operation0605/app/utils/domain_session.py
|
get_domainid_bysession
|
zhouli121018/nodejsgm
| 0
|
python
|
def get_domainid_bysession(request):
' 获取操作域名ID\n :param request:\n :return:\n '
try:
domain_id = int(request.session.get('domain_id', None))
except:
domain_id = 0
if (not domain_id):
obj = Domain.objects.order_by('id').first()
if obj:
domain_id = obj.id
request.session['domain_id'] = domain_id
return domain_id
|
def get_domainid_bysession(request):
' 获取操作域名ID\n :param request:\n :return:\n '
try:
domain_id = int(request.session.get('domain_id', None))
except:
domain_id = 0
if (not domain_id):
obj = Domain.objects.order_by('id').first()
if obj:
domain_id = obj.id
request.session['domain_id'] = domain_id
return domain_id<|docstring|>获取操作域名ID
:param request:
:return:<|endoftext|>
|
f71072310afded9d1adf8142466b7696e67aab643c403dd6e8742da335fa50a3
|
async def connect(conf):
'Connect to local monitor server\n\n Connection is established once chatter communication is established.\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n\n Returns:\n Client\n\n '
client = Client()
client._name = conf['name']
client._group = conf['group']
client._address = conf['component_address']
client._components = []
client._info = None
client._ready = None
client._change_cbs = util.CallbackRegistry()
client._async_group = aio.Group()
client._conn = (await chatter.connect(common.sbs_repo, conf['monitor_address']))
client._async_group.spawn(aio.call_on_cancel, client._conn.async_close)
mlog.debug('connected to local monitor server %s', conf['monitor_address'])
client._async_group.spawn(client._receive_loop)
return client
|
Connect to local monitor server
Connection is established once chatter communication is established.
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
Returns:
Client
|
src_py/hat/monitor/client.py
|
connect
|
hrvojekeserica/hat-core
| 0
|
python
|
async def connect(conf):
'Connect to local monitor server\n\n Connection is established once chatter communication is established.\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n\n Returns:\n Client\n\n '
client = Client()
client._name = conf['name']
client._group = conf['group']
client._address = conf['component_address']
client._components = []
client._info = None
client._ready = None
client._change_cbs = util.CallbackRegistry()
client._async_group = aio.Group()
client._conn = (await chatter.connect(common.sbs_repo, conf['monitor_address']))
client._async_group.spawn(aio.call_on_cancel, client._conn.async_close)
mlog.debug('connected to local monitor server %s', conf['monitor_address'])
client._async_group.spawn(client._receive_loop)
return client
|
async def connect(conf):
'Connect to local monitor server\n\n Connection is established once chatter communication is established.\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n\n Returns:\n Client\n\n '
client = Client()
client._name = conf['name']
client._group = conf['group']
client._address = conf['component_address']
client._components = []
client._info = None
client._ready = None
client._change_cbs = util.CallbackRegistry()
client._async_group = aio.Group()
client._conn = (await chatter.connect(common.sbs_repo, conf['monitor_address']))
client._async_group.spawn(aio.call_on_cancel, client._conn.async_close)
mlog.debug('connected to local monitor server %s', conf['monitor_address'])
client._async_group.spawn(client._receive_loop)
return client<|docstring|>Connect to local monitor server
Connection is established once chatter communication is established.
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
Returns:
Client<|endoftext|>
|
25e8726e092685cfd4989f43a73dde1d8fd496de2c09bbfd57e89dc0c0f58f0b
|
async def run_component(conf, async_run_cb):
"Run component\n\n This method opens new connection to Monitor server and starts client's\n loop which manages blessing/ready states.\n\n When blessing token matches ready token, `async_run_cb` is called. While\n `async_run_cb` is running, if blessing token changes, `async_run_cb` is\n canceled.\n\n If `async_run_cb` finishes or raises exception, this function closes\n connection to monitor server and returns `async_run_cb` result. If\n connection to monitor server is closed, this function raises exception.\n\n TODO:\n * provide opportunity for user to react to blessing token prior to\n setting ready token (additional async_ready_cb)\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n async_run_cb (Callable[[Client],None]): run callback\n\n Returns:\n Any\n\n "
client = (await connect(conf))
try:
while True:
(await _wait_until_blessed_and_ready(client))
async_group = aio.Group()
run_future = async_group.spawn(async_run_cb, client)
blessed_and_ready_future = async_group.spawn(_wait_while_blessed_and_ready, client)
try:
(done, _) = (await asyncio.wait([run_future, blessed_and_ready_future, client.closed], return_when=asyncio.FIRST_COMPLETED))
if run_future.done():
mlog.debug('async_run_cb finished or raised an exception')
return run_future.result()
if client.closed.done():
raise Exception('connection to monitor server closed!')
finally:
if (not client.closed.done()):
client.set_ready(None)
(await async_group.async_close())
except asyncio.CancelledError:
raise
except Exception as e:
mlog.error('run component exception: %s', e, exc_info=e)
raise
finally:
(await client.async_close())
mlog.debug('component closed')
|
Run component
This method opens new connection to Monitor server and starts client's
loop which manages blessing/ready states.
When blessing token matches ready token, `async_run_cb` is called. While
`async_run_cb` is running, if blessing token changes, `async_run_cb` is
canceled.
If `async_run_cb` finishes or raises exception, this function closes
connection to monitor server and returns `async_run_cb` result. If
connection to monitor server is closed, this function raises exception.
TODO:
* provide opportunity for user to react to blessing token prior to
setting ready token (additional async_ready_cb)
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
async_run_cb (Callable[[Client],None]): run callback
Returns:
Any
|
src_py/hat/monitor/client.py
|
run_component
|
hrvojekeserica/hat-core
| 0
|
python
|
async def run_component(conf, async_run_cb):
"Run component\n\n This method opens new connection to Monitor server and starts client's\n loop which manages blessing/ready states.\n\n When blessing token matches ready token, `async_run_cb` is called. While\n `async_run_cb` is running, if blessing token changes, `async_run_cb` is\n canceled.\n\n If `async_run_cb` finishes or raises exception, this function closes\n connection to monitor server and returns `async_run_cb` result. If\n connection to monitor server is closed, this function raises exception.\n\n TODO:\n * provide opportunity for user to react to blessing token prior to\n setting ready token (additional async_ready_cb)\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n async_run_cb (Callable[[Client],None]): run callback\n\n Returns:\n Any\n\n "
client = (await connect(conf))
try:
while True:
(await _wait_until_blessed_and_ready(client))
async_group = aio.Group()
run_future = async_group.spawn(async_run_cb, client)
blessed_and_ready_future = async_group.spawn(_wait_while_blessed_and_ready, client)
try:
(done, _) = (await asyncio.wait([run_future, blessed_and_ready_future, client.closed], return_when=asyncio.FIRST_COMPLETED))
if run_future.done():
mlog.debug('async_run_cb finished or raised an exception')
return run_future.result()
if client.closed.done():
raise Exception('connection to monitor server closed!')
finally:
if (not client.closed.done()):
client.set_ready(None)
(await async_group.async_close())
except asyncio.CancelledError:
raise
except Exception as e:
mlog.error('run component exception: %s', e, exc_info=e)
raise
finally:
(await client.async_close())
mlog.debug('component closed')
|
async def run_component(conf, async_run_cb):
"Run component\n\n This method opens new connection to Monitor server and starts client's\n loop which manages blessing/ready states.\n\n When blessing token matches ready token, `async_run_cb` is called. While\n `async_run_cb` is running, if blessing token changes, `async_run_cb` is\n canceled.\n\n If `async_run_cb` finishes or raises exception, this function closes\n connection to monitor server and returns `async_run_cb` result. If\n connection to monitor server is closed, this function raises exception.\n\n TODO:\n * provide opportunity for user to react to blessing token prior to\n setting ready token (additional async_ready_cb)\n\n Args:\n conf (hat.json.Data): configuration as defined by\n ``hat://monitor/client.yaml#``\n async_run_cb (Callable[[Client],None]): run callback\n\n Returns:\n Any\n\n "
client = (await connect(conf))
try:
while True:
(await _wait_until_blessed_and_ready(client))
async_group = aio.Group()
run_future = async_group.spawn(async_run_cb, client)
blessed_and_ready_future = async_group.spawn(_wait_while_blessed_and_ready, client)
try:
(done, _) = (await asyncio.wait([run_future, blessed_and_ready_future, client.closed], return_when=asyncio.FIRST_COMPLETED))
if run_future.done():
mlog.debug('async_run_cb finished or raised an exception')
return run_future.result()
if client.closed.done():
raise Exception('connection to monitor server closed!')
finally:
if (not client.closed.done()):
client.set_ready(None)
(await async_group.async_close())
except asyncio.CancelledError:
raise
except Exception as e:
mlog.error('run component exception: %s', e, exc_info=e)
raise
finally:
(await client.async_close())
mlog.debug('component closed')<|docstring|>Run component
This method opens new connection to Monitor server and starts client's
loop which manages blessing/ready states.
When blessing token matches ready token, `async_run_cb` is called. While
`async_run_cb` is running, if blessing token changes, `async_run_cb` is
canceled.
If `async_run_cb` finishes or raises exception, this function closes
connection to monitor server and returns `async_run_cb` result. If
connection to monitor server is closed, this function raises exception.
TODO:
* provide opportunity for user to react to blessing token prior to
setting ready token (additional async_ready_cb)
Args:
conf (hat.json.Data): configuration as defined by
``hat://monitor/client.yaml#``
async_run_cb (Callable[[Client],None]): run callback
Returns:
Any<|endoftext|>
|
e48740c35471377a64aa7a2391451b8ca342f14c43d1d8cec065ad8c21ab74de
|
@property
def closed(self):
'asyncio.Future: closed future'
return self._async_group.closed
|
asyncio.Future: closed future
|
src_py/hat/monitor/client.py
|
closed
|
hrvojekeserica/hat-core
| 0
|
python
|
@property
def closed(self):
return self._async_group.closed
|
@property
def closed(self):
return self._async_group.closed<|docstring|>asyncio.Future: closed future<|endoftext|>
|
c2526eac77845bb4494ae0bf4944235b92ca0140314e60d518579b1c862ea434
|
@property
def info(self):
"Optional[common.ComponentInfo]: client's component info"
return self._info
|
Optional[common.ComponentInfo]: client's component info
|
src_py/hat/monitor/client.py
|
info
|
hrvojekeserica/hat-core
| 0
|
python
|
@property
def info(self):
return self._info
|
@property
def info(self):
return self._info<|docstring|>Optional[common.ComponentInfo]: client's component info<|endoftext|>
|
12a1b3f54507c7b92b14877d698ab9f161a35476c138082c54e773d48bb9b224
|
@property
def components(self):
'List[common.ComponentInfo]: global component state'
return self._components
|
List[common.ComponentInfo]: global component state
|
src_py/hat/monitor/client.py
|
components
|
hrvojekeserica/hat-core
| 0
|
python
|
@property
def components(self):
return self._components
|
@property
def components(self):
return self._components<|docstring|>List[common.ComponentInfo]: global component state<|endoftext|>
|
4864cfa986db6fd4b83d6626088678b03891fb2fba735b2d3f913b48784d4732
|
def register_change_cb(self, cb):
'Register change callback\n\n Registered callback is called once info and/or components changes.\n\n Args:\n cb (Callable[[],None]): callback\n\n Returns:\n util.RegisterCallbackHandle\n\n '
return self._change_cbs.register(cb)
|
Register change callback
Registered callback is called once info and/or components changes.
Args:
cb (Callable[[],None]): callback
Returns:
util.RegisterCallbackHandle
|
src_py/hat/monitor/client.py
|
register_change_cb
|
hrvojekeserica/hat-core
| 0
|
python
|
def register_change_cb(self, cb):
'Register change callback\n\n Registered callback is called once info and/or components changes.\n\n Args:\n cb (Callable[[],None]): callback\n\n Returns:\n util.RegisterCallbackHandle\n\n '
return self._change_cbs.register(cb)
|
def register_change_cb(self, cb):
'Register change callback\n\n Registered callback is called once info and/or components changes.\n\n Args:\n cb (Callable[[],None]): callback\n\n Returns:\n util.RegisterCallbackHandle\n\n '
return self._change_cbs.register(cb)<|docstring|>Register change callback
Registered callback is called once info and/or components changes.
Args:
cb (Callable[[],None]): callback
Returns:
util.RegisterCallbackHandle<|endoftext|>
|
f10ad5d13af4c97448f5e6557b69d735d7d9ce4125028f0d7d9651b43d0ae161
|
async def async_close(self):
'Async close'
(await self._async_group.async_close())
|
Async close
|
src_py/hat/monitor/client.py
|
async_close
|
hrvojekeserica/hat-core
| 0
|
python
|
async def async_close(self):
(await self._async_group.async_close())
|
async def async_close(self):
(await self._async_group.async_close())<|docstring|>Async close<|endoftext|>
|
12541e275535d2c067a4033240e344961a890e1d246d8986d1bb8447e55dd68b
|
def set_ready(self, token):
'Set ready token\n\n Args:\n token (Optional[int]): ready token\n\n '
if (token == self._ready):
return
self._ready = token
self._send_msg_client()
|
Set ready token
Args:
token (Optional[int]): ready token
|
src_py/hat/monitor/client.py
|
set_ready
|
hrvojekeserica/hat-core
| 0
|
python
|
def set_ready(self, token):
'Set ready token\n\n Args:\n token (Optional[int]): ready token\n\n '
if (token == self._ready):
return
self._ready = token
self._send_msg_client()
|
def set_ready(self, token):
'Set ready token\n\n Args:\n token (Optional[int]): ready token\n\n '
if (token == self._ready):
return
self._ready = token
self._send_msg_client()<|docstring|>Set ready token
Args:
token (Optional[int]): ready token<|endoftext|>
|
b33726a361e119a1ecdb583fedf5e222d22b7c4d327bcfabd6b198ec2f023d2f
|
@property
def size(self):
'Return the number of train datas.'
raise NotImplementedError
|
Return the number of train datas.
|
datasets.py
|
size
|
smarsu/mtcnn
| 0
|
python
|
@property
def size(self):
raise NotImplementedError
|
@property
def size(self):
raise NotImplementedError<|docstring|>Return the number of train datas.<|endoftext|>
|
04d62c2f37ff52d39d45704d572c1140b8ef56b4c98348fd1c323454c152d5ed
|
def train_datas_debug(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(batch_size))
datas = []
datas.append([self._train_datas[:batch_size], self._train_labels[:batch_size]])
return datas
|
Yield batch size train datas per step.
Train datas should be shuffled.
Args:
batch_size: int, > 0
|
datasets.py
|
train_datas_debug
|
smarsu/mtcnn
| 0
|
python
|
def train_datas_debug(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(batch_size))
datas = []
datas.append([self._train_datas[:batch_size], self._train_labels[:batch_size]])
return datas
|
def train_datas_debug(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(batch_size))
datas = []
datas.append([self._train_datas[:batch_size], self._train_labels[:batch_size]])
return datas<|docstring|>Yield batch size train datas per step.
Train datas should be shuffled.
Args:
batch_size: int, > 0<|endoftext|>
|
432d65fce1acdf0acb5ba9a9ebfe12cf702d178f1ad29ea9d8bc6fa1f0cf4692
|
def train_datas(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(self.size))
np.random.shuffle(indices)
epoch_size = ((self.size // batch_size) * batch_size)
self._train_datas = self._train_datas[indices][:epoch_size]
self._train_labels = self._train_labels[indices][:epoch_size]
datas = []
for i in range((self.size // batch_size)):
datas.append([self._train_datas[(i * batch_size):((i + 1) * batch_size)], self._train_labels[(i * batch_size):((i + 1) * batch_size)]])
return datas
|
Yield batch size train datas per step.
Train datas should be shuffled.
Args:
batch_size: int, > 0
|
datasets.py
|
train_datas
|
smarsu/mtcnn
| 0
|
python
|
def train_datas(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(self.size))
np.random.shuffle(indices)
epoch_size = ((self.size // batch_size) * batch_size)
self._train_datas = self._train_datas[indices][:epoch_size]
self._train_labels = self._train_labels[indices][:epoch_size]
datas = []
for i in range((self.size // batch_size)):
datas.append([self._train_datas[(i * batch_size):((i + 1) * batch_size)], self._train_labels[(i * batch_size):((i + 1) * batch_size)]])
return datas
|
def train_datas(self, batch_size):
'Yield batch size train datas per step. \n\n Train datas should be shuffled.\n \n Args:\n batch_size: int, > 0\n '
if (not isinstance(batch_size, int)):
raise ValueError('In Dataset, batch_size should be int, get {}'.format(type(batch_size)))
if (batch_size <= 0):
raise ValueError('In Dataset, batch_size should larger equal to 1, get {}'.format(batch_size))
indices = list(range(self.size))
np.random.shuffle(indices)
epoch_size = ((self.size // batch_size) * batch_size)
self._train_datas = self._train_datas[indices][:epoch_size]
self._train_labels = self._train_labels[indices][:epoch_size]
datas = []
for i in range((self.size // batch_size)):
datas.append([self._train_datas[(i * batch_size):((i + 1) * batch_size)], self._train_labels[(i * batch_size):((i + 1) * batch_size)]])
return datas<|docstring|>Yield batch size train datas per step.
Train datas should be shuffled.
Args:
batch_size: int, > 0<|endoftext|>
|
5e6d9cfa2071c14669bf49d5e58e3e6279be33476ba1a729dfe67334162059de
|
def merge(self, other):
'Merge the other datas to self.\n \n Args:\n other: Dataset\n '
self._train_datas = np.concatenate([self._train_datas, other._train_datas], 0)
self._train_labels = np.concatenate([self._train_labels, other._train_labels], 0)
|
Merge the other datas to self.
Args:
other: Dataset
|
datasets.py
|
merge
|
smarsu/mtcnn
| 0
|
python
|
def merge(self, other):
'Merge the other datas to self.\n \n Args:\n other: Dataset\n '
self._train_datas = np.concatenate([self._train_datas, other._train_datas], 0)
self._train_labels = np.concatenate([self._train_labels, other._train_labels], 0)
|
def merge(self, other):
'Merge the other datas to self.\n \n Args:\n other: Dataset\n '
self._train_datas = np.concatenate([self._train_datas, other._train_datas], 0)
self._train_labels = np.concatenate([self._train_labels, other._train_labels], 0)<|docstring|>Merge the other datas to self.
Args:
other: Dataset<|endoftext|>
|
e1a3365e792296ad9c1b87ea0fc403768dd7ca9520f20e95bb5279cbc6060ef3
|
def __init__(self, train_image_path, label_path, value_image_path=None, test_image_path=None):
'\n TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.\n Add way to read `value_label_path` and `test_label_path`.\n\n Args:\n train_image_path: str, the path of train images.\n label_path: str\n '
self._data_map = {}
self.train_image_path = train_image_path
self.label_path = label_path
self.train_label_path = self.label_path
(self._train_datas, self._train_labels) = self._read_train_datas()
|
TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.
Add way to read `value_label_path` and `test_label_path`.
Args:
train_image_path: str, the path of train images.
label_path: str
|
datasets.py
|
__init__
|
smarsu/mtcnn
| 0
|
python
|
def __init__(self, train_image_path, label_path, value_image_path=None, test_image_path=None):
'\n TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.\n Add way to read `value_label_path` and `test_label_path`.\n\n Args:\n train_image_path: str, the path of train images.\n label_path: str\n '
self._data_map = {}
self.train_image_path = train_image_path
self.label_path = label_path
self.train_label_path = self.label_path
(self._train_datas, self._train_labels) = self._read_train_datas()
|
def __init__(self, train_image_path, label_path, value_image_path=None, test_image_path=None):
'\n TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.\n Add way to read `value_label_path` and `test_label_path`.\n\n Args:\n train_image_path: str, the path of train images.\n label_path: str\n '
self._data_map = {}
self.train_image_path = train_image_path
self.label_path = label_path
self.train_label_path = self.label_path
(self._train_datas, self._train_labels) = self._read_train_datas()<|docstring|>TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.
Add way to read `value_label_path` and `test_label_path`.
Args:
train_image_path: str, the path of train images.
label_path: str<|endoftext|>
|
c36c7f92b71b7da42ba2c9c2f128fe65ddff0b10ac3f5f38873f34ca3d7e8b0f
|
@property
def size(self):
'Return the number of train datas.\n \n Assert the size of self._train_datas and self._train_labels is equal.\n '
return len(self._train_datas)
|
Return the number of train datas.
Assert the size of self._train_datas and self._train_labels is equal.
|
datasets.py
|
size
|
smarsu/mtcnn
| 0
|
python
|
@property
def size(self):
'Return the number of train datas.\n \n Assert the size of self._train_datas and self._train_labels is equal.\n '
return len(self._train_datas)
|
@property
def size(self):
'Return the number of train datas.\n \n Assert the size of self._train_datas and self._train_labels is equal.\n '
return len(self._train_datas)<|docstring|>Return the number of train datas.
Assert the size of self._train_datas and self._train_labels is equal.<|endoftext|>
|
a1eca21253a5767c0bff691cbb8619f3241a3ff2cd0ef3af6c6af78edb4c43c7
|
def _real_image_path(self, path):
"Get real path of image.\n\n self.train_image_path + '/' + path\n \n Args:\n path: str, the image name(id) of labels.\n "
return osp.join(self.train_image_path, path)
|
Get real path of image.
self.train_image_path + '/' + path
Args:
path: str, the image name(id) of labels.
|
datasets.py
|
_real_image_path
|
smarsu/mtcnn
| 0
|
python
|
def _real_image_path(self, path):
"Get real path of image.\n\n self.train_image_path + '/' + path\n \n Args:\n path: str, the image name(id) of labels.\n "
return osp.join(self.train_image_path, path)
|
def _real_image_path(self, path):
"Get real path of image.\n\n self.train_image_path + '/' + path\n \n Args:\n path: str, the image name(id) of labels.\n "
return osp.join(self.train_image_path, path)<|docstring|>Get real path of image.
self.train_image_path + '/' + path
Args:
path: str, the image name(id) of labels.<|endoftext|>
|
d25f1022b3c0da9a04277bebc4cacd506e21d4a80ad4b97c89fb819acd0e9b47
|
def _read_train_datas(self):
'The special way to read wider face labels.\n\n Args:\n label_path: str, \n '
with open(self.train_label_path, 'r') as fb:
lines = fb.readlines()
return self._parse_raw_labels(lines)
|
The special way to read wider face labels.
Args:
label_path: str,
|
datasets.py
|
_read_train_datas
|
smarsu/mtcnn
| 0
|
python
|
def _read_train_datas(self):
'The special way to read wider face labels.\n\n Args:\n label_path: str, \n '
with open(self.train_label_path, 'r') as fb:
lines = fb.readlines()
return self._parse_raw_labels(lines)
|
def _read_train_datas(self):
'The special way to read wider face labels.\n\n Args:\n label_path: str, \n '
with open(self.train_label_path, 'r') as fb:
lines = fb.readlines()
return self._parse_raw_labels(lines)<|docstring|>The special way to read wider face labels.
Args:
label_path: str,<|endoftext|>
|
4693886e5ff94155e932137ffdeddaedfaeb8dcf4313884f3e8348e0fc3c4bee
|
def _parse_raw_labels(self, lines):
'Parse raw str lines to python object.\n \n Args:\n lines: list of str, with the structure of \n File name\n Number of bounding box\n x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose\n\n Returns:\n images: numpy array, [n], image paths\n labels: numpy array, [n, 4], [x1, y1, x2, y2]\n '
images = []
labels = []
idx = 0
while (idx < len(lines)):
image_path = lines[idx].strip()
images.append(self._real_image_path(image_path))
idx += 1
num = int(lines[idx])
idx += 1
labels_ = []
for _ in range(num):
(x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose) = [int(v) for v in lines[idx].strip().split()]
(x2, y2) = (((x1 + w) - 1), ((y1 + h) - 1))
labels_.append([x1, y1, x2, y2])
idx += 1
labels.append(np.array(labels_))
self._data_map[self._real_image_path(image_path)] = np.array(labels_)
return (np.array(images), np.array(labels))
|
Parse raw str lines to python object.
Args:
lines: list of str, with the structure of
File name
Number of bounding box
x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose
Returns:
images: numpy array, [n], image paths
labels: numpy array, [n, 4], [x1, y1, x2, y2]
|
datasets.py
|
_parse_raw_labels
|
smarsu/mtcnn
| 0
|
python
|
def _parse_raw_labels(self, lines):
'Parse raw str lines to python object.\n \n Args:\n lines: list of str, with the structure of \n File name\n Number of bounding box\n x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose\n\n Returns:\n images: numpy array, [n], image paths\n labels: numpy array, [n, 4], [x1, y1, x2, y2]\n '
images = []
labels = []
idx = 0
while (idx < len(lines)):
image_path = lines[idx].strip()
images.append(self._real_image_path(image_path))
idx += 1
num = int(lines[idx])
idx += 1
labels_ = []
for _ in range(num):
(x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose) = [int(v) for v in lines[idx].strip().split()]
(x2, y2) = (((x1 + w) - 1), ((y1 + h) - 1))
labels_.append([x1, y1, x2, y2])
idx += 1
labels.append(np.array(labels_))
self._data_map[self._real_image_path(image_path)] = np.array(labels_)
return (np.array(images), np.array(labels))
|
def _parse_raw_labels(self, lines):
'Parse raw str lines to python object.\n \n Args:\n lines: list of str, with the structure of \n File name\n Number of bounding box\n x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose\n\n Returns:\n images: numpy array, [n], image paths\n labels: numpy array, [n, 4], [x1, y1, x2, y2]\n '
images = []
labels = []
idx = 0
while (idx < len(lines)):
image_path = lines[idx].strip()
images.append(self._real_image_path(image_path))
idx += 1
num = int(lines[idx])
idx += 1
labels_ = []
for _ in range(num):
(x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose) = [int(v) for v in lines[idx].strip().split()]
(x2, y2) = (((x1 + w) - 1), ((y1 + h) - 1))
labels_.append([x1, y1, x2, y2])
idx += 1
labels.append(np.array(labels_))
self._data_map[self._real_image_path(image_path)] = np.array(labels_)
return (np.array(images), np.array(labels))<|docstring|>Parse raw str lines to python object.
Args:
lines: list of str, with the structure of
File name
Number of bounding box
x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose
Returns:
images: numpy array, [n], image paths
labels: numpy array, [n, 4], [x1, y1, x2, y2]<|endoftext|>
|
91b35eb6f22f3cad0c37c60219fd072b890b4c2b1a03aab703fb809badc6b63c
|
def _check_keys_and_values(result):
'\n Check that given dict represents equipment data in correct form.\n '
assert (len(result) == 3)
assert (result['id'] != '')
assert (result['name'] == {'fi': 'test equipment category'})
equipments = result['equipment']
assert (len(equipments) == 1)
equipment = equipments[0]
assert (len(equipment) == 2)
assert (equipment['name'] == {'fi': 'test equipment'})
assert (equipment['id'] != '')
|
Check that given dict represents equipment data in correct form.
|
resources/tests/test_equipment_category_api.py
|
_check_keys_and_values
|
tansionline/respa
| 49
|
python
|
def _check_keys_and_values(result):
'\n \n '
assert (len(result) == 3)
assert (result['id'] != )
assert (result['name'] == {'fi': 'test equipment category'})
equipments = result['equipment']
assert (len(equipments) == 1)
equipment = equipments[0]
assert (len(equipment) == 2)
assert (equipment['name'] == {'fi': 'test equipment'})
assert (equipment['id'] != )
|
def _check_keys_and_values(result):
'\n \n '
assert (len(result) == 3)
assert (result['id'] != )
assert (result['name'] == {'fi': 'test equipment category'})
equipments = result['equipment']
assert (len(equipments) == 1)
equipment = equipments[0]
assert (len(equipment) == 2)
assert (equipment['name'] == {'fi': 'test equipment'})
assert (equipment['id'] != )<|docstring|>Check that given dict represents equipment data in correct form.<|endoftext|>
|
12d528b851f6327f75c32293a8e61964a3c7e10a4460c4e46fecfbe23b6e1e6e
|
@pytest.mark.django_db
def test_disallowed_methods(all_user_types_api_client, list_url, detail_url):
'\n Tests that only safe methods are allowed to equipment list and detail endpoints.\n '
check_disallowed_methods(all_user_types_api_client, (list_url, detail_url), UNSAFE_METHODS)
|
Tests that only safe methods are allowed to equipment list and detail endpoints.
|
resources/tests/test_equipment_category_api.py
|
test_disallowed_methods
|
tansionline/respa
| 49
|
python
|
@pytest.mark.django_db
def test_disallowed_methods(all_user_types_api_client, list_url, detail_url):
'\n \n '
check_disallowed_methods(all_user_types_api_client, (list_url, detail_url), UNSAFE_METHODS)
|
@pytest.mark.django_db
def test_disallowed_methods(all_user_types_api_client, list_url, detail_url):
'\n \n '
check_disallowed_methods(all_user_types_api_client, (list_url, detail_url), UNSAFE_METHODS)<|docstring|>Tests that only safe methods are allowed to equipment list and detail endpoints.<|endoftext|>
|
40d8d68b51334600b7b966ec7d062c456d0828870c7108f41a7c437f6ca7d6dd
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, list_url, equipment):
'\n Tests that equipment category list endpoint returns equipment category data in correct form.\n '
response = api_client.get(list_url)
results = response.data['results']
assert (len(results) == 1)
_check_keys_and_values(results[0])
|
Tests that equipment category list endpoint returns equipment category data in correct form.
|
resources/tests/test_equipment_category_api.py
|
test_get_equipment_category_list
|
tansionline/respa
| 49
|
python
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, list_url, equipment):
'\n \n '
response = api_client.get(list_url)
results = response.data['results']
assert (len(results) == 1)
_check_keys_and_values(results[0])
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, list_url, equipment):
'\n \n '
response = api_client.get(list_url)
results = response.data['results']
assert (len(results) == 1)
_check_keys_and_values(results[0])<|docstring|>Tests that equipment category list endpoint returns equipment category data in correct form.<|endoftext|>
|
d1052ac8d32eb155fcc554054127d4f0c62d8a17a8b001fd454a7e41405b3c05
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, detail_url, equipment):
'\n Tests that equipment category detail endpoint returns equipment category data in correct form.\n '
response = api_client.get(detail_url)
_check_keys_and_values(response.data)
|
Tests that equipment category detail endpoint returns equipment category data in correct form.
|
resources/tests/test_equipment_category_api.py
|
test_get_equipment_category_list
|
tansionline/respa
| 49
|
python
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, detail_url, equipment):
'\n \n '
response = api_client.get(detail_url)
_check_keys_and_values(response.data)
|
@pytest.mark.django_db
def test_get_equipment_category_list(api_client, detail_url, equipment):
'\n \n '
response = api_client.get(detail_url)
_check_keys_and_values(response.data)<|docstring|>Tests that equipment category detail endpoint returns equipment category data in correct form.<|endoftext|>
|
326de99d12fdf7bb0786fdb030c1ea066b7cf5e3719935db9b5334620991f5cd
|
def regex_clean_ruidos(text):
"\n função manual para eliminação de ruídos através de regex, descartar qualquer coisa\n que na composição possua uma 'não letra', como símbolos e números...\n\n Ex: ',ola' - '...' - 'email@alguem'\n "
return ' '.join([t for t in text.strip().split() if re.match('[^\\W\\d]*$', t)])
|
função manual para eliminação de ruídos através de regex, descartar qualquer coisa
que na composição possua uma 'não letra', como símbolos e números...
Ex: ',ola' - '...' - 'email@alguem'
|
src/slave/preprocess.py
|
regex_clean_ruidos
|
emanuelibiapino/micro_cluster
| 0
|
python
|
def regex_clean_ruidos(text):
"\n função manual para eliminação de ruídos através de regex, descartar qualquer coisa\n que na composição possua uma 'não letra', como símbolos e números...\n\n Ex: ',ola' - '...' - 'email@alguem'\n "
return ' '.join([t for t in text.strip().split() if re.match('[^\\W\\d]*$', t)])
|
def regex_clean_ruidos(text):
"\n função manual para eliminação de ruídos através de regex, descartar qualquer coisa\n que na composição possua uma 'não letra', como símbolos e números...\n\n Ex: ',ola' - '...' - 'email@alguem'\n "
return ' '.join([t for t in text.strip().split() if re.match('[^\\W\\d]*$', t)])<|docstring|>função manual para eliminação de ruídos através de regex, descartar qualquer coisa
que na composição possua uma 'não letra', como símbolos e números...
Ex: ',ola' - '...' - 'email@alguem'<|endoftext|>
|
b9bbcf14a2095cef7830f159fc0333ea6929fcdcf911adc1ee33d92f03457c68
|
def regex_clean(text):
'\n função pronta da lib NLTK para limpeza de ruído\n '
return regex_clean_ruidos(' '.join(RegexpTokenizer('\\w+').tokenize(text)).lower())
|
função pronta da lib NLTK para limpeza de ruído
|
src/slave/preprocess.py
|
regex_clean
|
emanuelibiapino/micro_cluster
| 0
|
python
|
def regex_clean(text):
'\n \n '
return regex_clean_ruidos(' '.join(RegexpTokenizer('\\w+').tokenize(text)).lower())
|
def regex_clean(text):
'\n \n '
return regex_clean_ruidos(' '.join(RegexpTokenizer('\\w+').tokenize(text)).lower())<|docstring|>função pronta da lib NLTK para limpeza de ruído<|endoftext|>
|
5a9834d32436a63f4ee9583258613a9ae187057576d0e44737ece5087c475d53
|
def __init__(self, scenario, **kwargs):
'\n\n :type scenario: Scenario\n '
self.dataset = scenario.dataset
self.partners_list = scenario.partners_list
self.init_model_from = scenario.init_model_from
self.use_saved_weights = scenario.use_saved_weights
self.amounts_per_partner = scenario.amounts_per_partner
self.val_set = scenario.val_set
self.test_set = scenario.test_set
self.epoch_count = scenario.epoch_count
self.minibatch_count = scenario.minibatch_count
self.is_early_stopping = scenario.is_early_stopping
self.save_folder = scenario.save_folder
self.__dict__.update(((k, v) for (k, v) in kwargs.items() if (k in ALLOWED_PARAMETERS)))
self.val_data = (self.dataset.x_val, self.dataset.y_val)
self.test_data = (self.dataset.x_test, self.dataset.y_test)
self.dataset_name = self.dataset.name
self.generate_new_model = self.dataset.generate_new_model
model = self.init_model()
self.model_weights = model.get_weights()
self.metrics_names = self.dataset.model_metrics_names
self.epoch_index = 0
self.minibatch_index = 0
self.learning_computation_time = 0
for partner in self.partners_list:
assert isinstance(partner, Partner)
partners_list = sorted(self.partners_list, key=operator.attrgetter('id'))
logger.info(f"## Preparation of model's training on partners with ids: {[('#' + str(p.id)) for p in partners_list]}")
self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]
self.aggregator = self.init_aggregation_function(scenario.aggregation)
self.history = History(self)
if (self.save_folder is not None):
if ('custom_name' in kwargs):
self.save_folder = (self.save_folder / kwargs['custom_name'])
else:
self.save_folder = (self.save_folder / 'multi_partner_learning')
self.save_folder.mkdir(parents=True, exist_ok=False)
logger.debug('MultiPartnerLearning object instantiated.')
|
:type scenario: Scenario
|
mplc/multi_partner_learning/basic_mpl.py
|
__init__
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def __init__(self, scenario, **kwargs):
'\n\n \n '
self.dataset = scenario.dataset
self.partners_list = scenario.partners_list
self.init_model_from = scenario.init_model_from
self.use_saved_weights = scenario.use_saved_weights
self.amounts_per_partner = scenario.amounts_per_partner
self.val_set = scenario.val_set
self.test_set = scenario.test_set
self.epoch_count = scenario.epoch_count
self.minibatch_count = scenario.minibatch_count
self.is_early_stopping = scenario.is_early_stopping
self.save_folder = scenario.save_folder
self.__dict__.update(((k, v) for (k, v) in kwargs.items() if (k in ALLOWED_PARAMETERS)))
self.val_data = (self.dataset.x_val, self.dataset.y_val)
self.test_data = (self.dataset.x_test, self.dataset.y_test)
self.dataset_name = self.dataset.name
self.generate_new_model = self.dataset.generate_new_model
model = self.init_model()
self.model_weights = model.get_weights()
self.metrics_names = self.dataset.model_metrics_names
self.epoch_index = 0
self.minibatch_index = 0
self.learning_computation_time = 0
for partner in self.partners_list:
assert isinstance(partner, Partner)
partners_list = sorted(self.partners_list, key=operator.attrgetter('id'))
logger.info(f"## Preparation of model's training on partners with ids: {[('#' + str(p.id)) for p in partners_list]}")
self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]
self.aggregator = self.init_aggregation_function(scenario.aggregation)
self.history = History(self)
if (self.save_folder is not None):
if ('custom_name' in kwargs):
self.save_folder = (self.save_folder / kwargs['custom_name'])
else:
self.save_folder = (self.save_folder / 'multi_partner_learning')
self.save_folder.mkdir(parents=True, exist_ok=False)
logger.debug('MultiPartnerLearning object instantiated.')
|
def __init__(self, scenario, **kwargs):
'\n\n \n '
self.dataset = scenario.dataset
self.partners_list = scenario.partners_list
self.init_model_from = scenario.init_model_from
self.use_saved_weights = scenario.use_saved_weights
self.amounts_per_partner = scenario.amounts_per_partner
self.val_set = scenario.val_set
self.test_set = scenario.test_set
self.epoch_count = scenario.epoch_count
self.minibatch_count = scenario.minibatch_count
self.is_early_stopping = scenario.is_early_stopping
self.save_folder = scenario.save_folder
self.__dict__.update(((k, v) for (k, v) in kwargs.items() if (k in ALLOWED_PARAMETERS)))
self.val_data = (self.dataset.x_val, self.dataset.y_val)
self.test_data = (self.dataset.x_test, self.dataset.y_test)
self.dataset_name = self.dataset.name
self.generate_new_model = self.dataset.generate_new_model
model = self.init_model()
self.model_weights = model.get_weights()
self.metrics_names = self.dataset.model_metrics_names
self.epoch_index = 0
self.minibatch_index = 0
self.learning_computation_time = 0
for partner in self.partners_list:
assert isinstance(partner, Partner)
partners_list = sorted(self.partners_list, key=operator.attrgetter('id'))
logger.info(f"## Preparation of model's training on partners with ids: {[('#' + str(p.id)) for p in partners_list]}")
self.partners_list = [PartnerMpl(partner, self) for partner in self.partners_list]
self.aggregator = self.init_aggregation_function(scenario.aggregation)
self.history = History(self)
if (self.save_folder is not None):
if ('custom_name' in kwargs):
self.save_folder = (self.save_folder / kwargs['custom_name'])
else:
self.save_folder = (self.save_folder / 'multi_partner_learning')
self.save_folder.mkdir(parents=True, exist_ok=False)
logger.debug('MultiPartnerLearning object instantiated.')<|docstring|>:type scenario: Scenario<|endoftext|>
|
61d9a40dbb3d09dbbd013ff02c60800c46d759f976101f1db6e38d20896edfe3
|
def build_model_from_weights(self, new_weights):
'Generate a new model initialized with weights passed as arguments'
new_model = self.generate_new_model()
new_model.set_weights(new_weights)
return new_model
|
Generate a new model initialized with weights passed as arguments
|
mplc/multi_partner_learning/basic_mpl.py
|
build_model_from_weights
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def build_model_from_weights(self, new_weights):
new_model = self.generate_new_model()
new_model.set_weights(new_weights)
return new_model
|
def build_model_from_weights(self, new_weights):
new_model = self.generate_new_model()
new_model.set_weights(new_weights)
return new_model<|docstring|>Generate a new model initialized with weights passed as arguments<|endoftext|>
|
6eb77927c0fae5b7fed84a76942f1f8a3d5b9eed3beb9d15234279f5abecaa0b
|
def save_final_model(self):
'Save final model weights'
model_folder = os.path.join(self.save_folder, 'model')
if (not os.path.isdir(model_folder)):
os.makedirs(model_folder)
np.save(os.path.join(model_folder, (self.dataset_name + '_final_weights.npy')), self.model_weights)
model_to_save = self.build_model()
model_to_save.save_weights(os.path.join(model_folder, (self.dataset_name + '_final_weights.h5')))
|
Save final model weights
|
mplc/multi_partner_learning/basic_mpl.py
|
save_final_model
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def save_final_model(self):
model_folder = os.path.join(self.save_folder, 'model')
if (not os.path.isdir(model_folder)):
os.makedirs(model_folder)
np.save(os.path.join(model_folder, (self.dataset_name + '_final_weights.npy')), self.model_weights)
model_to_save = self.build_model()
model_to_save.save_weights(os.path.join(model_folder, (self.dataset_name + '_final_weights.h5')))
|
def save_final_model(self):
model_folder = os.path.join(self.save_folder, 'model')
if (not os.path.isdir(model_folder)):
os.makedirs(model_folder)
np.save(os.path.join(model_folder, (self.dataset_name + '_final_weights.npy')), self.model_weights)
model_to_save = self.build_model()
model_to_save.save_weights(os.path.join(model_folder, (self.dataset_name + '_final_weights.h5')))<|docstring|>Save final model weights<|endoftext|>
|
ecbe802f1f0b3c8c16b609e9034559f577c7b63e5672f1e77af5e2c480abbdb7
|
def split_in_minibatches(self):
'Split the dataset passed as argument in mini-batches'
for partner in self.partners_list:
partner.split_minibatches()
|
Split the dataset passed as argument in mini-batches
|
mplc/multi_partner_learning/basic_mpl.py
|
split_in_minibatches
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def split_in_minibatches(self):
for partner in self.partners_list:
partner.split_minibatches()
|
def split_in_minibatches(self):
for partner in self.partners_list:
partner.split_minibatches()<|docstring|>Split the dataset passed as argument in mini-batches<|endoftext|>
|
0727a6d6b4aa120ad0902673efb151a80c573f76949b2318f93db26e84a77bf5
|
def fit(self):
'Return the score on test data of a final aggregated model trained in a federated way on each partner'
start = timer()
while (self.epoch_index < self.epoch_count):
self.fit_epoch()
if self.early_stop():
break
self.epoch_index += 1
self.eval_and_log_final_model_test_perf()
end = timer()
self.learning_computation_time = (end - start)
logger.info(f'Training and evaluation on multiple partners: done. ({np.round(self.learning_computation_time, 3)} seconds)')
if (self.save_folder is not None):
self.save_data()
|
Return the score on test data of a final aggregated model trained in a federated way on each partner
|
mplc/multi_partner_learning/basic_mpl.py
|
fit
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit(self):
start = timer()
while (self.epoch_index < self.epoch_count):
self.fit_epoch()
if self.early_stop():
break
self.epoch_index += 1
self.eval_and_log_final_model_test_perf()
end = timer()
self.learning_computation_time = (end - start)
logger.info(f'Training and evaluation on multiple partners: done. ({np.round(self.learning_computation_time, 3)} seconds)')
if (self.save_folder is not None):
self.save_data()
|
def fit(self):
start = timer()
while (self.epoch_index < self.epoch_count):
self.fit_epoch()
if self.early_stop():
break
self.epoch_index += 1
self.eval_and_log_final_model_test_perf()
end = timer()
self.learning_computation_time = (end - start)
logger.info(f'Training and evaluation on multiple partners: done. ({np.round(self.learning_computation_time, 3)} seconds)')
if (self.save_folder is not None):
self.save_data()<|docstring|>Return the score on test data of a final aggregated model trained in a federated way on each partner<|endoftext|>
|
e83ff6e406046092bb2c52848356d8ee16e422cdafa2e18f59871e13fe5b1e27
|
def fit(self):
'Return the score on test data of a model trained on a single partner'
start = timer()
logger.info(f'## Training and evaluating model on partner with partner_id #{self.partner.id}')
cb = []
es = None
if self.is_early_stopping:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)
cb.append(es)
logger.info(' Training model...')
model = self.build_model()
if (self.val_set == 'global'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=self.val_data, callbacks=cb)
elif (self.val_set == 'local'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=(self.partner.x_val, self.partner.y_val), callbacks=cb)
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.model_weights = model.get_weights()
self.log_partner_perf(self.partner.id, 0, history.history)
del self.history.history['mpl_model']
self.eval_and_log_final_model_test_perf()
self.history.nb_epochs_done = ((es.stopped_epoch + 1) if (es.stopped_epoch != 0) else self.epoch_count)
end = timer()
self.learning_computation_time = (end - start)
|
Return the score on test data of a model trained on a single partner
|
mplc/multi_partner_learning/basic_mpl.py
|
fit
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit(self):
start = timer()
logger.info(f'## Training and evaluating model on partner with partner_id #{self.partner.id}')
cb = []
es = None
if self.is_early_stopping:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)
cb.append(es)
logger.info(' Training model...')
model = self.build_model()
if (self.val_set == 'global'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=self.val_data, callbacks=cb)
elif (self.val_set == 'local'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=(self.partner.x_val, self.partner.y_val), callbacks=cb)
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.model_weights = model.get_weights()
self.log_partner_perf(self.partner.id, 0, history.history)
del self.history.history['mpl_model']
self.eval_and_log_final_model_test_perf()
self.history.nb_epochs_done = ((es.stopped_epoch + 1) if (es.stopped_epoch != 0) else self.epoch_count)
end = timer()
self.learning_computation_time = (end - start)
|
def fit(self):
start = timer()
logger.info(f'## Training and evaluating model on partner with partner_id #{self.partner.id}')
cb = []
es = None
if self.is_early_stopping:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=0, patience=constants.PATIENCE)
cb.append(es)
logger.info(' Training model...')
model = self.build_model()
if (self.val_set == 'global'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=self.val_data, callbacks=cb)
elif (self.val_set == 'local'):
history = model.fit(self.partner.x_train, self.partner.y_train, batch_size=self.partner.batch_size, epochs=self.epoch_count, verbose=0, validation_data=(self.partner.x_val, self.partner.y_val), callbacks=cb)
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.model_weights = model.get_weights()
self.log_partner_perf(self.partner.id, 0, history.history)
del self.history.history['mpl_model']
self.eval_and_log_final_model_test_perf()
self.history.nb_epochs_done = ((es.stopped_epoch + 1) if (es.stopped_epoch != 0) else self.epoch_count)
end = timer()
self.learning_computation_time = (end - start)<|docstring|>Return the score on test data of a model trained on a single partner<|endoftext|>
|
ca299c382e69d0c55800fc0413049e928c995cec2527710a76df958300037a84
|
def fit_minibatch(self):
'Proceed to a collaborative round with a federated averaging approach'
logger.debug('Start new fedavg collaborative round ...')
logger.info(f"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
if (self.val_set == 'global'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, partner_index, history.history)
partner.model_weights = partner_model.get_weights()
logger.debug('End of fedavg collaborative round.')
|
Proceed to a collaborative round with a federated averaging approach
|
mplc/multi_partner_learning/basic_mpl.py
|
fit_minibatch
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit_minibatch(self):
logger.debug('Start new fedavg collaborative round ...')
logger.info(f"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
if (self.val_set == 'global'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, partner_index, history.history)
partner.model_weights = partner_model.get_weights()
logger.debug('End of fedavg collaborative round.')
|
def fit_minibatch(self):
logger.debug('Start new fedavg collaborative round ...')
logger.info(f"(fedavg) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
if (self.val_set == 'global'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = partner_model.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, partner_index, history.history)
partner.model_weights = partner_model.get_weights()
logger.debug('End of fedavg collaborative round.')<|docstring|>Proceed to a collaborative round with a federated averaging approach<|endoftext|>
|
faec923f8fc98079220c1e897f1c8cf51c81af863af3fc07c6905f4951d77416
|
def fit_minibatch(self):
'Proceed to a collaborative round with a distributionally robust federated averaging approach'
logger.info(f"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.active_partners_list):
partner_model = partner.build_model()
minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]
for (idx, batch_x_y) in enumerate(minibatched_x_y):
with tf.GradientTape() as tape:
p_pred = partner_model(batch_x_y[0])
loss = partner_model.compiled_loss(batch_x_y[1], p_pred)
partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)
self.local_steps_index += 1
if (self.local_steps_index == self.local_steps_index_t):
self.model_weights_at_index_t.append(partner.model_weights)
partner.model_weights = partner_model.get_weights()
self.local_steps_index = 0
self.model_weights = self.aggregate_model_weights(self.active_partners_list)
for (active_partner, weights_t) in zip(self.active_partners_list, self.model_weights_at_index_t):
active_partner.model_weights = weights_t
self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)
subset_index = random.sample(range(self.partners_count), self.active_partners_count)
self.subset_u_partners = [self.partners_list[index] for index in subset_index]
logger.info(f"Subset of partners chosen for lambda update {[('#' + str(partner.id)) for partner in self.subset_u_partners]}")
for (partner, index) in zip(self.subset_u_partners, subset_index):
random_minibatch_index = np.random.randint(0, (self.minibatch_count - 1))
random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]
random_batch_index = np.random.randint(0, (len(random_minibatch) - 1))
random_batch = list(random_minibatch)[random_batch_index]
partner_model = self.build_model_from_weights(self.global_model_at_index_t)
loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))
self.loss_for_model_at_index_t[index] = ((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))
|
Proceed to a collaborative round with a distributionally robust federated averaging approach
|
mplc/multi_partner_learning/basic_mpl.py
|
fit_minibatch
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit_minibatch(self):
logger.info(f"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.active_partners_list):
partner_model = partner.build_model()
minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]
for (idx, batch_x_y) in enumerate(minibatched_x_y):
with tf.GradientTape() as tape:
p_pred = partner_model(batch_x_y[0])
loss = partner_model.compiled_loss(batch_x_y[1], p_pred)
partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)
self.local_steps_index += 1
if (self.local_steps_index == self.local_steps_index_t):
self.model_weights_at_index_t.append(partner.model_weights)
partner.model_weights = partner_model.get_weights()
self.local_steps_index = 0
self.model_weights = self.aggregate_model_weights(self.active_partners_list)
for (active_partner, weights_t) in zip(self.active_partners_list, self.model_weights_at_index_t):
active_partner.model_weights = weights_t
self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)
subset_index = random.sample(range(self.partners_count), self.active_partners_count)
self.subset_u_partners = [self.partners_list[index] for index in subset_index]
logger.info(f"Subset of partners chosen for lambda update {[('#' + str(partner.id)) for partner in self.subset_u_partners]}")
for (partner, index) in zip(self.subset_u_partners, subset_index):
random_minibatch_index = np.random.randint(0, (self.minibatch_count - 1))
random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]
random_batch_index = np.random.randint(0, (len(random_minibatch) - 1))
random_batch = list(random_minibatch)[random_batch_index]
partner_model = self.build_model_from_weights(self.global_model_at_index_t)
loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))
self.loss_for_model_at_index_t[index] = ((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))
|
def fit_minibatch(self):
logger.info(f"(drfa) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.active_partners_list):
partner_model = partner.build_model()
minibatched_x_y = self.partners_training_data[partner.id][self.minibatch_index]
for (idx, batch_x_y) in enumerate(minibatched_x_y):
with tf.GradientTape() as tape:
p_pred = partner_model(batch_x_y[0])
loss = partner_model.compiled_loss(batch_x_y[1], p_pred)
partner_model.optimizer.minimize(loss, partner_model.trainable_weights, tape=tape)
self.local_steps_index += 1
if (self.local_steps_index == self.local_steps_index_t):
self.model_weights_at_index_t.append(partner.model_weights)
partner.model_weights = partner_model.get_weights()
self.local_steps_index = 0
self.model_weights = self.aggregate_model_weights(self.active_partners_list)
for (active_partner, weights_t) in zip(self.active_partners_list, self.model_weights_at_index_t):
active_partner.model_weights = weights_t
self.global_model_at_index_t = self.aggregate_model_weights(self.active_partners_list)
subset_index = random.sample(range(self.partners_count), self.active_partners_count)
self.subset_u_partners = [self.partners_list[index] for index in subset_index]
logger.info(f"Subset of partners chosen for lambda update {[('#' + str(partner.id)) for partner in self.subset_u_partners]}")
for (partner, index) in zip(self.subset_u_partners, subset_index):
random_minibatch_index = np.random.randint(0, (self.minibatch_count - 1))
random_minibatch = self.partners_training_data[partner.id][random_minibatch_index]
random_batch_index = np.random.randint(0, (len(random_minibatch) - 1))
random_batch = list(random_minibatch)[random_batch_index]
partner_model = self.build_model_from_weights(self.global_model_at_index_t)
loss = partner_model.compiled_loss(random_batch[1], partner_model(random_batch[0]))
self.loss_for_model_at_index_t[index] = ((self.partners_count / self.active_partners_count) * np.mean(loss.numpy()))<|docstring|>Proceed to a collaborative round with a distributionally robust federated averaging approach<|endoftext|>
|
6c09b4c4939c07ec4fe3c54c21b24e89efba66e31b1a3fe8b441aad3a9e3e708
|
def init_lambda(self):
"\n - initialize lambda vector according to each partner's dataset size\n - this is a probability vector of size partners_count\n "
return np.array(self.amounts_per_partner)
|
- initialize lambda vector according to each partner's dataset size
- this is a probability vector of size partners_count
|
mplc/multi_partner_learning/basic_mpl.py
|
init_lambda
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def init_lambda(self):
"\n - initialize lambda vector according to each partner's dataset size\n - this is a probability vector of size partners_count\n "
return np.array(self.amounts_per_partner)
|
def init_lambda(self):
"\n - initialize lambda vector according to each partner's dataset size\n - this is a probability vector of size partners_count\n "
return np.array(self.amounts_per_partner)<|docstring|>- initialize lambda vector according to each partner's dataset size
- this is a probability vector of size partners_count<|endoftext|>
|
555a08429e596fb3f90d3116a2284fb37701d3f8b6cfc4ca02eb46ad564ed8d7
|
def update_lambda(self):
'\n The update rule for lambda is : lambda_vector(i) =\n Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))\n '
self.lambda_vector += ((self.local_steps_index_t * self.lambda_learning_rate) * self.loss_for_model_at_index_t)
self.lambda_vector = project_onto_the_simplex(self.lambda_vector)
if any((self.lambda_vector < 0.001)):
self.lambda_vector[(self.lambda_vector < 0.001)] = 0.001
self.lambda_vector = (self.lambda_vector / np.sum(self.lambda_vector))
|
The update rule for lambda is : lambda_vector(i) =
Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))
|
mplc/multi_partner_learning/basic_mpl.py
|
update_lambda
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def update_lambda(self):
'\n The update rule for lambda is : lambda_vector(i) =\n Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))\n '
self.lambda_vector += ((self.local_steps_index_t * self.lambda_learning_rate) * self.loss_for_model_at_index_t)
self.lambda_vector = project_onto_the_simplex(self.lambda_vector)
if any((self.lambda_vector < 0.001)):
self.lambda_vector[(self.lambda_vector < 0.001)] = 0.001
self.lambda_vector = (self.lambda_vector / np.sum(self.lambda_vector))
|
def update_lambda(self):
'\n The update rule for lambda is : lambda_vector(i) =\n Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))\n '
self.lambda_vector += ((self.local_steps_index_t * self.lambda_learning_rate) * self.loss_for_model_at_index_t)
self.lambda_vector = project_onto_the_simplex(self.lambda_vector)
if any((self.lambda_vector < 0.001)):
self.lambda_vector[(self.lambda_vector < 0.001)] = 0.001
self.lambda_vector = (self.lambda_vector / np.sum(self.lambda_vector))<|docstring|>The update rule for lambda is : lambda_vector(i) =
Projection(lambda_vector(i-1) + (local_step_index_t * lambda_learning_rate * local_losses_at_index_t))<|endoftext|>
|
b98928ceaf3d957a5484977a0c2152fca96fceb3f94b13044d96ed7025adcd30
|
def update_active_partners_list(self):
'\n Update the active partners list according to lambda vector\n '
active_partners_indices = (- self.lambda_vector).argsort()[:self.active_partners_count]
self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]
|
Update the active partners list according to lambda vector
|
mplc/multi_partner_learning/basic_mpl.py
|
update_active_partners_list
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def update_active_partners_list(self):
'\n \n '
active_partners_indices = (- self.lambda_vector).argsort()[:self.active_partners_count]
self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]
|
def update_active_partners_list(self):
'\n \n '
active_partners_indices = (- self.lambda_vector).argsort()[:self.active_partners_count]
self.active_partners_list = [self.partners_list[index] for index in active_partners_indices]<|docstring|>Update the active partners list according to lambda vector<|endoftext|>
|
1ff25a8e7b220fc62ecbaac81613618d3e8bf25049bb1005bdfe25e9e2db7e36
|
@staticmethod
def aggregate_model_weights(partners_list):
" This method is identical to the one in the aggregator class with few modifications.\n I couldn't use the original aggregator method since it operates on the entire list of partners and\n DRFA requires model aggregation over a subset of partners list only\n "
aggregation_weights = np.ones(len(partners_list), dtype='float32')
weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))
new_weights = list()
for weights_for_layer in weights_per_layer:
avg_weights_for_layer = np.average(np.array(weights_for_layer), axis=0, weights=aggregation_weights)
new_weights.append(avg_weights_for_layer)
return new_weights
|
This method is identical to the one in the aggregator class with few modifications.
I couldn't use the original aggregator method since it operates on the entire list of partners and
DRFA requires model aggregation over a subset of partners list only
|
mplc/multi_partner_learning/basic_mpl.py
|
aggregate_model_weights
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
@staticmethod
def aggregate_model_weights(partners_list):
" This method is identical to the one in the aggregator class with few modifications.\n I couldn't use the original aggregator method since it operates on the entire list of partners and\n DRFA requires model aggregation over a subset of partners list only\n "
aggregation_weights = np.ones(len(partners_list), dtype='float32')
weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))
new_weights = list()
for weights_for_layer in weights_per_layer:
avg_weights_for_layer = np.average(np.array(weights_for_layer), axis=0, weights=aggregation_weights)
new_weights.append(avg_weights_for_layer)
return new_weights
|
@staticmethod
def aggregate_model_weights(partners_list):
" This method is identical to the one in the aggregator class with few modifications.\n I couldn't use the original aggregator method since it operates on the entire list of partners and\n DRFA requires model aggregation over a subset of partners list only\n "
aggregation_weights = np.ones(len(partners_list), dtype='float32')
weights_per_layer = list(zip(*[partner.model_weights for partner in partners_list]))
new_weights = list()
for weights_for_layer in weights_per_layer:
avg_weights_for_layer = np.average(np.array(weights_for_layer), axis=0, weights=aggregation_weights)
new_weights.append(avg_weights_for_layer)
return new_weights<|docstring|>This method is identical to the one in the aggregator class with few modifications.
I couldn't use the original aggregator method since it operates on the entire list of partners and
DRFA requires model aggregation over a subset of partners list only<|endoftext|>
|
561a41ec9b7384790e9d738ff29b6b746466611f1299ad898376acb0c9d42709
|
def fit_minibatch(self):
'Proceed to a collaborative round with a sequential averaging approach'
logger.debug('Start new seq collaborative round ...')
model_for_round = self.build_model()
self.eval_and_log_model_val_perf()
shuffled_indexes = np.random.permutation(self.partners_count)
logger.debug(f'(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}')
for (idx, partner_index) in enumerate(shuffled_indexes):
partner = self.partners_list[partner_index]
if (self.val_set == 'global'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, idx, history.history)
partner.model_weights = model_for_round.get_weights()
self.model_weights = model_for_round.get_weights()
logger.debug('End of seq collaborative round.')
|
Proceed to a collaborative round with a sequential averaging approach
|
mplc/multi_partner_learning/basic_mpl.py
|
fit_minibatch
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit_minibatch(self):
logger.debug('Start new seq collaborative round ...')
model_for_round = self.build_model()
self.eval_and_log_model_val_perf()
shuffled_indexes = np.random.permutation(self.partners_count)
logger.debug(f'(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}')
for (idx, partner_index) in enumerate(shuffled_indexes):
partner = self.partners_list[partner_index]
if (self.val_set == 'global'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, idx, history.history)
partner.model_weights = model_for_round.get_weights()
self.model_weights = model_for_round.get_weights()
logger.debug('End of seq collaborative round.')
|
def fit_minibatch(self):
logger.debug('Start new seq collaborative round ...')
model_for_round = self.build_model()
self.eval_and_log_model_val_perf()
shuffled_indexes = np.random.permutation(self.partners_count)
logger.debug(f'(seq) Shuffled order for this seqavg collaborative round: {shuffled_indexes}')
for (idx, partner_index) in enumerate(shuffled_indexes):
partner = self.partners_list[partner_index]
if (self.val_set == 'global'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
elif (self.val_set == 'local'):
history = model_for_round.fit(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], batch_size=partner.batch_size, verbose=0, validation_data=(partner.x_val, partner.y_val))
else:
raise ValueError("validation set should be 'local' or 'global', not {self.val_set}")
self.log_partner_perf(partner.id, idx, history.history)
partner.model_weights = model_for_round.get_weights()
self.model_weights = model_for_round.get_weights()
logger.debug('End of seq collaborative round.')<|docstring|>Proceed to a collaborative round with a sequential averaging approach<|endoftext|>
|
ee18c11d6dbe0850a00d4c2a59baf0912e093ed9cce74662032070b4467f6ea5
|
def fit_minibatch(self):
'Proceed to a collaborative round with a S-Model federated averaging approach'
logger.debug('Start new S-Model collaborative round ...')
logger.info(f"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
x_batch = partner.minibatched_x_train[self.minibatch_index]
y_batch = partner.minibatched_y_train[self.minibatch_index]
model_input = Input(shape=self.dataset.input_shape)
x = partner_model(model_input)
outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)
full_model = Model(inputs=model_input, outputs=outputs, name=f'full_model_partner_{partner_index}')
full_model.compile(loss=partner_model.loss, optimizer=partner_model.optimizer, metrics='accuracy')
history = full_model.fit(x_batch, y_batch, batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
self.log_partner_perf(partner.id, partner_index, history.history)
partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()
partner.model_weights = partner_model.get_weights()
logger.debug('End of S-Model collaborative round.')
|
Proceed to a collaborative round with a S-Model federated averaging approach
|
mplc/multi_partner_learning/basic_mpl.py
|
fit_minibatch
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit_minibatch(self):
logger.debug('Start new S-Model collaborative round ...')
logger.info(f"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
x_batch = partner.minibatched_x_train[self.minibatch_index]
y_batch = partner.minibatched_y_train[self.minibatch_index]
model_input = Input(shape=self.dataset.input_shape)
x = partner_model(model_input)
outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)
full_model = Model(inputs=model_input, outputs=outputs, name=f'full_model_partner_{partner_index}')
full_model.compile(loss=partner_model.loss, optimizer=partner_model.optimizer, metrics='accuracy')
history = full_model.fit(x_batch, y_batch, batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
self.log_partner_perf(partner.id, partner_index, history.history)
partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()
partner.model_weights = partner_model.get_weights()
logger.debug('End of S-Model collaborative round.')
|
def fit_minibatch(self):
logger.debug('Start new S-Model collaborative round ...')
logger.info(f"(S-Model) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
partner_model = partner.build_model()
x_batch = partner.minibatched_x_train[self.minibatch_index]
y_batch = partner.minibatched_y_train[self.minibatch_index]
model_input = Input(shape=self.dataset.input_shape)
x = partner_model(model_input)
outputs = NoiseAdaptationChannel(weights=partner.noise_layer_weights, name='s-model')(x)
full_model = Model(inputs=model_input, outputs=outputs, name=f'full_model_partner_{partner_index}')
full_model.compile(loss=partner_model.loss, optimizer=partner_model.optimizer, metrics='accuracy')
history = full_model.fit(x_batch, y_batch, batch_size=partner.batch_size, verbose=0, validation_data=self.val_data)
self.log_partner_perf(partner.id, partner_index, history.history)
partner.noise_layer_weights = full_model.get_layer('s-model').get_weights()
partner.model_weights = partner_model.get_weights()
logger.debug('End of S-Model collaborative round.')<|docstring|>Proceed to a collaborative round with a S-Model federated averaging approach<|endoftext|>
|
93ee8786a174c5f73ed655d3db133039ca497ceb9c00d2be744d55047b5f120a
|
def fit_minibatch(self):
'Proceed to a collaborative round with a federated averaging approach'
logger.debug('Start new gradients fusion collaborative round ...')
logger.info(f"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
with tf.GradientTape() as tape:
loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index], self.model(partner.minibatched_x_train[self.minibatch_index]))
partner.grads = tape.gradient(loss, self.model.trainable_weights)
global_grad = self.aggregator.aggregate_gradients()
self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))
self.model_weights = self.model.get_weights()
for (partner_index, partner) in enumerate(self.partners_list):
val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)
history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], verbose=False)
history = {'loss': [history[0]], 'accuracy': [history[1]], 'val_loss': [val_history[0]], 'val_accuracy': [val_history[1]]}
self.log_partner_perf(partner.id, partner_index, history)
logger.debug('End of grads-fusion collaborative round.')
|
Proceed to a collaborative round with a federated averaging approach
|
mplc/multi_partner_learning/basic_mpl.py
|
fit_minibatch
|
SubstraFoundation/distributed-learning-contributivity
| 47
|
python
|
def fit_minibatch(self):
logger.debug('Start new gradients fusion collaborative round ...')
logger.info(f"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
with tf.GradientTape() as tape:
loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index], self.model(partner.minibatched_x_train[self.minibatch_index]))
partner.grads = tape.gradient(loss, self.model.trainable_weights)
global_grad = self.aggregator.aggregate_gradients()
self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))
self.model_weights = self.model.get_weights()
for (partner_index, partner) in enumerate(self.partners_list):
val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)
history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], verbose=False)
history = {'loss': [history[0]], 'accuracy': [history[1]], 'val_loss': [val_history[0]], 'val_accuracy': [val_history[1]]}
self.log_partner_perf(partner.id, partner_index, history)
logger.debug('End of grads-fusion collaborative round.')
|
def fit_minibatch(self):
logger.debug('Start new gradients fusion collaborative round ...')
logger.info(f"(gradient fusion) Minibatch n°{self.minibatch_index} of epoch n°{self.epoch_index}, init each partner's models with a copy of the global model")
for partner in self.partners_list:
partner.model_weights = self.model_weights
self.eval_and_log_model_val_perf()
for (partner_index, partner) in enumerate(self.partners_list):
with tf.GradientTape() as tape:
loss = self.model.loss(partner.minibatched_y_train[self.minibatch_index], self.model(partner.minibatched_x_train[self.minibatch_index]))
partner.grads = tape.gradient(loss, self.model.trainable_weights)
global_grad = self.aggregator.aggregate_gradients()
self.model.optimizer.apply_gradients(zip(global_grad, self.model.trainable_weights))
self.model_weights = self.model.get_weights()
for (partner_index, partner) in enumerate(self.partners_list):
val_history = self.model.evaluate(self.val_data[0], self.val_data[1], verbose=False)
history = self.model.evaluate(partner.minibatched_x_train[self.minibatch_index], partner.minibatched_y_train[self.minibatch_index], verbose=False)
history = {'loss': [history[0]], 'accuracy': [history[1]], 'val_loss': [val_history[0]], 'val_accuracy': [val_history[1]]}
self.log_partner_perf(partner.id, partner_index, history)
logger.debug('End of grads-fusion collaborative round.')<|docstring|>Proceed to a collaborative round with a federated averaging approach<|endoftext|>
|
0674055d64bbadeed09d4aae39d98438754550822c3323603e8953cd43d8665a
|
def _wrap(fn, parameter_type, wrapper_fn):
"Wrap a given `fn` with a given `parameter_type` using `wrapper_fn`.\n\n This method does not handle the multiple modes of usage as wrapper/decorator,\n as those are handled by ComputationWrapper below. It focused on the simple\n case with a function/defun (always present) and either a valid parameter type\n or an indication that there's no parameter (None).\n\n The only ambiguity left to resolve is whether `fn` should be immediately\n wrapped, or treated as a polymorphic callable to be wrapped upon invocation\n based on actual parameter types. The determination is based on the presence\n or absence of parameters in the declaration of `fn`. In order to be\n treated as a concrete no-argument computation, `fn` shouldn't declare any\n arguments (even with default values).\n\n Args:\n fn: The function or defun to wrap as a computation.\n parameter_type: The parameter type accepted by the computation, or None if\n there is no parameter.\n wrapper_fn: The Python callable that performs actual wrapping. It must\n accept two arguments, and optional third `name`. The first argument will\n be a Python function that takes either zero parameters if the computation\n is to be a no-parameter computation, or exactly one parameter if the\n computation does have a parameter. The second argument will be either None\n for a no-parameter computation, or the type of the computation's parameter\n (an instance of types.Type) if the computation has one. The third,\n optional parameter `name` is the optional name of the function that is\n being wrapped (only for debugging purposes). 
The object to be returned by\n this function should be an instance of a ConcreteFunction.\n\n Returns:\n Either the result of wrapping (an object that represents the computation),\n or a polymorphic callable that performs wrapping upon invocation based on\n argument types.\n\n Raises:\n TypeError: if the arguments are of the wrong types, or the `wrapper_fn`\n constructs something that isn't a ConcreteFunction.\n "
try:
fn_name = fn.__name__
except AttributeError:
fn_name = None
argspec = function_utils.get_argspec(fn)
parameter_type = computation_types.to_type(parameter_type)
if (not parameter_type):
if (argspec.args or argspec.varargs or argspec.keywords):
def _wrap_polymorphic(wrapper_fn, fn, parameter_type, name=fn_name):
return wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type, unpack=True), parameter_type, name=name)
polymorphic_fn = function_utils.PolymorphicFunction((lambda pt: _wrap_polymorphic(wrapper_fn, fn, pt)))
polymorphic_fn.__doc__ = getattr(fn, '__doc__', None)
return polymorphic_fn
concrete_fn = wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type), parameter_type, name=fn_name)
py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction, 'value returned by the wrapper')
if (not type_utils.are_equivalent_types(concrete_fn.type_signature.parameter, parameter_type)):
raise TypeError('Expected a concrete function that takes parameter {}, got one that takes {}.'.format(str(parameter_type), str(concrete_fn.type_signature.parameter)))
concrete_fn.__doc__ = getattr(fn, '__doc__', None)
return concrete_fn
|
Wrap a given `fn` with a given `parameter_type` using `wrapper_fn`.
This method does not handle the multiple modes of usage as wrapper/decorator,
as those are handled by ComputationWrapper below. It focused on the simple
case with a function/defun (always present) and either a valid parameter type
or an indication that there's no parameter (None).
The only ambiguity left to resolve is whether `fn` should be immediately
wrapped, or treated as a polymorphic callable to be wrapped upon invocation
based on actual parameter types. The determination is based on the presence
or absence of parameters in the declaration of `fn`. In order to be
treated as a concrete no-argument computation, `fn` shouldn't declare any
arguments (even with default values).
Args:
fn: The function or defun to wrap as a computation.
parameter_type: The parameter type accepted by the computation, or None if
there is no parameter.
wrapper_fn: The Python callable that performs actual wrapping. It must
accept two arguments, and optional third `name`. The first argument will
be a Python function that takes either zero parameters if the computation
is to be a no-parameter computation, or exactly one parameter if the
computation does have a parameter. The second argument will be either None
for a no-parameter computation, or the type of the computation's parameter
(an instance of types.Type) if the computation has one. The third,
optional parameter `name` is the optional name of the function that is
being wrapped (only for debugging purposes). The object to be returned by
this function should be an instance of a ConcreteFunction.
Returns:
Either the result of wrapping (an object that represents the computation),
or a polymorphic callable that performs wrapping upon invocation based on
argument types.
Raises:
TypeError: if the arguments are of the wrong types, or the `wrapper_fn`
constructs something that isn't a ConcreteFunction.
|
federated-0.4.0/tensorflow_federated/python/core/impl/computation_wrapper.py
|
_wrap
|
abogdanova/FedMed
| 5
|
python
|
def _wrap(fn, parameter_type, wrapper_fn):
"Wrap a given `fn` with a given `parameter_type` using `wrapper_fn`.\n\n This method does not handle the multiple modes of usage as wrapper/decorator,\n as those are handled by ComputationWrapper below. It focused on the simple\n case with a function/defun (always present) and either a valid parameter type\n or an indication that there's no parameter (None).\n\n The only ambiguity left to resolve is whether `fn` should be immediately\n wrapped, or treated as a polymorphic callable to be wrapped upon invocation\n based on actual parameter types. The determination is based on the presence\n or absence of parameters in the declaration of `fn`. In order to be\n treated as a concrete no-argument computation, `fn` shouldn't declare any\n arguments (even with default values).\n\n Args:\n fn: The function or defun to wrap as a computation.\n parameter_type: The parameter type accepted by the computation, or None if\n there is no parameter.\n wrapper_fn: The Python callable that performs actual wrapping. It must\n accept two arguments, and optional third `name`. The first argument will\n be a Python function that takes either zero parameters if the computation\n is to be a no-parameter computation, or exactly one parameter if the\n computation does have a parameter. The second argument will be either None\n for a no-parameter computation, or the type of the computation's parameter\n (an instance of types.Type) if the computation has one. The third,\n optional parameter `name` is the optional name of the function that is\n being wrapped (only for debugging purposes). 
The object to be returned by\n this function should be an instance of a ConcreteFunction.\n\n Returns:\n Either the result of wrapping (an object that represents the computation),\n or a polymorphic callable that performs wrapping upon invocation based on\n argument types.\n\n Raises:\n TypeError: if the arguments are of the wrong types, or the `wrapper_fn`\n constructs something that isn't a ConcreteFunction.\n "
try:
fn_name = fn.__name__
except AttributeError:
fn_name = None
argspec = function_utils.get_argspec(fn)
parameter_type = computation_types.to_type(parameter_type)
if (not parameter_type):
if (argspec.args or argspec.varargs or argspec.keywords):
def _wrap_polymorphic(wrapper_fn, fn, parameter_type, name=fn_name):
return wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type, unpack=True), parameter_type, name=name)
polymorphic_fn = function_utils.PolymorphicFunction((lambda pt: _wrap_polymorphic(wrapper_fn, fn, pt)))
polymorphic_fn.__doc__ = getattr(fn, '__doc__', None)
return polymorphic_fn
concrete_fn = wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type), parameter_type, name=fn_name)
py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction, 'value returned by the wrapper')
if (not type_utils.are_equivalent_types(concrete_fn.type_signature.parameter, parameter_type)):
raise TypeError('Expected a concrete function that takes parameter {}, got one that takes {}.'.format(str(parameter_type), str(concrete_fn.type_signature.parameter)))
concrete_fn.__doc__ = getattr(fn, '__doc__', None)
return concrete_fn
|
def _wrap(fn, parameter_type, wrapper_fn):
"Wrap a given `fn` with a given `parameter_type` using `wrapper_fn`.\n\n This method does not handle the multiple modes of usage as wrapper/decorator,\n as those are handled by ComputationWrapper below. It focused on the simple\n case with a function/defun (always present) and either a valid parameter type\n or an indication that there's no parameter (None).\n\n The only ambiguity left to resolve is whether `fn` should be immediately\n wrapped, or treated as a polymorphic callable to be wrapped upon invocation\n based on actual parameter types. The determination is based on the presence\n or absence of parameters in the declaration of `fn`. In order to be\n treated as a concrete no-argument computation, `fn` shouldn't declare any\n arguments (even with default values).\n\n Args:\n fn: The function or defun to wrap as a computation.\n parameter_type: The parameter type accepted by the computation, or None if\n there is no parameter.\n wrapper_fn: The Python callable that performs actual wrapping. It must\n accept two arguments, and optional third `name`. The first argument will\n be a Python function that takes either zero parameters if the computation\n is to be a no-parameter computation, or exactly one parameter if the\n computation does have a parameter. The second argument will be either None\n for a no-parameter computation, or the type of the computation's parameter\n (an instance of types.Type) if the computation has one. The third,\n optional parameter `name` is the optional name of the function that is\n being wrapped (only for debugging purposes). 
The object to be returned by\n this function should be an instance of a ConcreteFunction.\n\n Returns:\n Either the result of wrapping (an object that represents the computation),\n or a polymorphic callable that performs wrapping upon invocation based on\n argument types.\n\n Raises:\n TypeError: if the arguments are of the wrong types, or the `wrapper_fn`\n constructs something that isn't a ConcreteFunction.\n "
try:
fn_name = fn.__name__
except AttributeError:
fn_name = None
argspec = function_utils.get_argspec(fn)
parameter_type = computation_types.to_type(parameter_type)
if (not parameter_type):
if (argspec.args or argspec.varargs or argspec.keywords):
def _wrap_polymorphic(wrapper_fn, fn, parameter_type, name=fn_name):
return wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type, unpack=True), parameter_type, name=name)
polymorphic_fn = function_utils.PolymorphicFunction((lambda pt: _wrap_polymorphic(wrapper_fn, fn, pt)))
polymorphic_fn.__doc__ = getattr(fn, '__doc__', None)
return polymorphic_fn
concrete_fn = wrapper_fn(function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type), parameter_type, name=fn_name)
py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction, 'value returned by the wrapper')
if (not type_utils.are_equivalent_types(concrete_fn.type_signature.parameter, parameter_type)):
raise TypeError('Expected a concrete function that takes parameter {}, got one that takes {}.'.format(str(parameter_type), str(concrete_fn.type_signature.parameter)))
concrete_fn.__doc__ = getattr(fn, '__doc__', None)
return concrete_fn<|docstring|>Wrap a given `fn` with a given `parameter_type` using `wrapper_fn`.
This method does not handle the multiple modes of usage as wrapper/decorator,
as those are handled by ComputationWrapper below. It focused on the simple
case with a function/defun (always present) and either a valid parameter type
or an indication that there's no parameter (None).
The only ambiguity left to resolve is whether `fn` should be immediately
wrapped, or treated as a polymorphic callable to be wrapped upon invocation
based on actual parameter types. The determination is based on the presence
or absence of parameters in the declaration of `fn`. In order to be
treated as a concrete no-argument computation, `fn` shouldn't declare any
arguments (even with default values).
Args:
fn: The function or defun to wrap as a computation.
parameter_type: The parameter type accepted by the computation, or None if
there is no parameter.
wrapper_fn: The Python callable that performs actual wrapping. It must
accept two arguments, and optional third `name`. The first argument will
be a Python function that takes either zero parameters if the computation
is to be a no-parameter computation, or exactly one parameter if the
computation does have a parameter. The second argument will be either None
for a no-parameter computation, or the type of the computation's parameter
(an instance of types.Type) if the computation has one. The third,
optional parameter `name` is the optional name of the function that is
being wrapped (only for debugging purposes). The object to be returned by
this function should be an instance of a ConcreteFunction.
Returns:
Either the result of wrapping (an object that represents the computation),
or a polymorphic callable that performs wrapping upon invocation based on
argument types.
Raises:
TypeError: if the arguments are of the wrong types, or the `wrapper_fn`
constructs something that isn't a ConcreteFunction.<|endoftext|>
|
2b513a5aabf0ce24ecf11f7e12d9820333e7b9b4ad607dd0f50028295504823f
|
def __init__(self, wrapper_fn):
'Construct a new wrapper/decorator for the given wrapping function.\n\n Args:\n wrapper_fn: The Python callable that performs actual wrapping (as in the\n specification of `_wrap`).\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
py_typecheck.check_callable(wrapper_fn)
self._wrapper_fn = wrapper_fn
|
Construct a new wrapper/decorator for the given wrapping function.
Args:
wrapper_fn: The Python callable that performs actual wrapping (as in the
specification of `_wrap`).
Raises:
TypeError: if the arguments are of the wrong types.
|
federated-0.4.0/tensorflow_federated/python/core/impl/computation_wrapper.py
|
__init__
|
abogdanova/FedMed
| 5
|
python
|
def __init__(self, wrapper_fn):
'Construct a new wrapper/decorator for the given wrapping function.\n\n Args:\n wrapper_fn: The Python callable that performs actual wrapping (as in the\n specification of `_wrap`).\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
py_typecheck.check_callable(wrapper_fn)
self._wrapper_fn = wrapper_fn
|
def __init__(self, wrapper_fn):
'Construct a new wrapper/decorator for the given wrapping function.\n\n Args:\n wrapper_fn: The Python callable that performs actual wrapping (as in the\n specification of `_wrap`).\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
py_typecheck.check_callable(wrapper_fn)
self._wrapper_fn = wrapper_fn<|docstring|>Construct a new wrapper/decorator for the given wrapping function.
Args:
wrapper_fn: The Python callable that performs actual wrapping (as in the
specification of `_wrap`).
Raises:
TypeError: if the arguments are of the wrong types.<|endoftext|>
|
61b8333e145916b037360ba588a5145b2fac63eee673526d1471aa325e3d0e96
|
def __call__(self, *args):
'Handles the differents modes of usage of the decorator/wrapper.\n\n This method only acts as a frontend that allows this class to be used as a\n decorator or wrapper in a variety of ways. The actual wrapping is performed\n by the private method `_wrap`.\n\n Args:\n *args: Positional arguments (the decorator at this point does not accept\n keyword arguments, although that might change in the future).\n\n Returns:\n Either a result of wrapping, or a callable that expects a function or a\n defun and performs wrapping on it, depending on specific usage pattern.\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
if (not args):
return (lambda fn: _wrap(fn, None, self._wrapper_fn))
elif (isinstance(args[0], types.FunctionType) or function_utils.is_defun(args[0])):
if (len(args) > 2):
args = (args[0], args[1:])
return _wrap(args[0], (computation_types.to_type(args[1]) if (len(args) > 1) else None), self._wrapper_fn)
else:
if (len(args) > 1):
args = (args,)
arg_type = computation_types.to_type(args[0])
return (lambda fn: _wrap(fn, arg_type, self._wrapper_fn))
|
Handles the differents modes of usage of the decorator/wrapper.
This method only acts as a frontend that allows this class to be used as a
decorator or wrapper in a variety of ways. The actual wrapping is performed
by the private method `_wrap`.
Args:
*args: Positional arguments (the decorator at this point does not accept
keyword arguments, although that might change in the future).
Returns:
Either a result of wrapping, or a callable that expects a function or a
defun and performs wrapping on it, depending on specific usage pattern.
Raises:
TypeError: if the arguments are of the wrong types.
|
federated-0.4.0/tensorflow_federated/python/core/impl/computation_wrapper.py
|
__call__
|
abogdanova/FedMed
| 5
|
python
|
def __call__(self, *args):
'Handles the differents modes of usage of the decorator/wrapper.\n\n This method only acts as a frontend that allows this class to be used as a\n decorator or wrapper in a variety of ways. The actual wrapping is performed\n by the private method `_wrap`.\n\n Args:\n *args: Positional arguments (the decorator at this point does not accept\n keyword arguments, although that might change in the future).\n\n Returns:\n Either a result of wrapping, or a callable that expects a function or a\n defun and performs wrapping on it, depending on specific usage pattern.\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
if (not args):
return (lambda fn: _wrap(fn, None, self._wrapper_fn))
elif (isinstance(args[0], types.FunctionType) or function_utils.is_defun(args[0])):
if (len(args) > 2):
args = (args[0], args[1:])
return _wrap(args[0], (computation_types.to_type(args[1]) if (len(args) > 1) else None), self._wrapper_fn)
else:
if (len(args) > 1):
args = (args,)
arg_type = computation_types.to_type(args[0])
return (lambda fn: _wrap(fn, arg_type, self._wrapper_fn))
|
def __call__(self, *args):
'Handles the differents modes of usage of the decorator/wrapper.\n\n This method only acts as a frontend that allows this class to be used as a\n decorator or wrapper in a variety of ways. The actual wrapping is performed\n by the private method `_wrap`.\n\n Args:\n *args: Positional arguments (the decorator at this point does not accept\n keyword arguments, although that might change in the future).\n\n Returns:\n Either a result of wrapping, or a callable that expects a function or a\n defun and performs wrapping on it, depending on specific usage pattern.\n\n Raises:\n TypeError: if the arguments are of the wrong types.\n '
if (not args):
return (lambda fn: _wrap(fn, None, self._wrapper_fn))
elif (isinstance(args[0], types.FunctionType) or function_utils.is_defun(args[0])):
if (len(args) > 2):
args = (args[0], args[1:])
return _wrap(args[0], (computation_types.to_type(args[1]) if (len(args) > 1) else None), self._wrapper_fn)
else:
if (len(args) > 1):
args = (args,)
arg_type = computation_types.to_type(args[0])
return (lambda fn: _wrap(fn, arg_type, self._wrapper_fn))<|docstring|>Handles the differents modes of usage of the decorator/wrapper.
This method only acts as a frontend that allows this class to be used as a
decorator or wrapper in a variety of ways. The actual wrapping is performed
by the private method `_wrap`.
Args:
*args: Positional arguments (the decorator at this point does not accept
keyword arguments, although that might change in the future).
Returns:
Either a result of wrapping, or a callable that expects a function or a
defun and performs wrapping on it, depending on specific usage pattern.
Raises:
TypeError: if the arguments are of the wrong types.<|endoftext|>
|
eb73e90802cad448750d92601a8839c0e746e3cd1537a1172a55698fae34144e
|
def __init__(self, luminosity: pint.Quantity, temperature: pint.Quantity, satellites: typing.Sequence[base.Orbit[planet.Planet]]) -> None:
'Initialize a new instance of a star.'
super().__init__(satellites)
self._luminosity: pint.Quantity = luminosity
self._temperature: pint.Quantity = temperature
self._mass: typing.Optional[pint.Quantity] = None
|
Initialize a new instance of a star.
|
src/peregrinus/world/celestial/star.py
|
__init__
|
tom65536/sabio
| 0
|
python
|
def __init__(self, luminosity: pint.Quantity, temperature: pint.Quantity, satellites: typing.Sequence[base.Orbit[planet.Planet]]) -> None:
super().__init__(satellites)
self._luminosity: pint.Quantity = luminosity
self._temperature: pint.Quantity = temperature
self._mass: typing.Optional[pint.Quantity] = None
|
def __init__(self, luminosity: pint.Quantity, temperature: pint.Quantity, satellites: typing.Sequence[base.Orbit[planet.Planet]]) -> None:
super().__init__(satellites)
self._luminosity: pint.Quantity = luminosity
self._temperature: pint.Quantity = temperature
self._mass: typing.Optional[pint.Quantity] = None<|docstring|>Initialize a new instance of a star.<|endoftext|>
|
8e4576d6ab05e3787a125828eb01e61e75214ee13f6e385a186512b84fe924f1
|
@property
def luminosity(self) -> pint.Quantity:
"Return the luminosity of the star.\n\n :return: the star's luminosity\n :rtype: pint.Quantity['energy']\n "
return self._luminosity
|
Return the luminosity of the star.
:return: the star's luminosity
:rtype: pint.Quantity['energy']
|
src/peregrinus/world/celestial/star.py
|
luminosity
|
tom65536/sabio
| 0
|
python
|
@property
def luminosity(self) -> pint.Quantity:
"Return the luminosity of the star.\n\n :return: the star's luminosity\n :rtype: pint.Quantity['energy']\n "
return self._luminosity
|
@property
def luminosity(self) -> pint.Quantity:
"Return the luminosity of the star.\n\n :return: the star's luminosity\n :rtype: pint.Quantity['energy']\n "
return self._luminosity<|docstring|>Return the luminosity of the star.
:return: the star's luminosity
:rtype: pint.Quantity['energy']<|endoftext|>
|
b61719d8d7ab1a7c832a42cfe71319b6ad59a32d9672dbf4be7905437b3b2062
|
@property
def temperature(self) -> pint.Quantity:
"Return the star's temperature.\n\n :return: the star's temperature.\n :rtype: pint.Quantity['temperature']\n "
return self._temperature
|
Return the star's temperature.
:return: the star's temperature.
:rtype: pint.Quantity['temperature']
|
src/peregrinus/world/celestial/star.py
|
temperature
|
tom65536/sabio
| 0
|
python
|
@property
def temperature(self) -> pint.Quantity:
"Return the star's temperature.\n\n :return: the star's temperature.\n :rtype: pint.Quantity['temperature']\n "
return self._temperature
|
@property
def temperature(self) -> pint.Quantity:
"Return the star's temperature.\n\n :return: the star's temperature.\n :rtype: pint.Quantity['temperature']\n "
return self._temperature<|docstring|>Return the star's temperature.
:return: the star's temperature.
:rtype: pint.Quantity['temperature']<|endoftext|>
|
f076cf8139edb19c4c5bcba573fb4e24ac47b97a460ab3aa1aa76dea85f124b1
|
@property
def radius(self) -> pint.Quantity:
'Return the estimated radius of the star.'
raise NotImplementedError('TODO: look up radius formula')
|
Return the estimated radius of the star.
|
src/peregrinus/world/celestial/star.py
|
radius
|
tom65536/sabio
| 0
|
python
|
@property
def radius(self) -> pint.Quantity:
raise NotImplementedError('TODO: look up radius formula')
|
@property
def radius(self) -> pint.Quantity:
raise NotImplementedError('TODO: look up radius formula')<|docstring|>Return the estimated radius of the star.<|endoftext|>
|
60d970bda9f0f97ce5057b38601d424b5cbb10314011802e6ea69a4830474ff4
|
@property
def mass(self) -> pint.Quantity:
"Return the estimated mass of the star.\n\n The mass is derived from the luminosity.\n The following interpolation formula is used, which has been taken from :cite:p:`Sewell:2012`:\n\n .. math::\n\n M / M_{sun} = 0.967 (L / L_{sun})^0.255 + 5.19\\times10^{-5}(L / L_{sun}) - 0.0670\n\n :return: the estimated mass of the star\n :rtype: pint.Quantity['mass']\n "
if (self._mass is None):
lum_rel = (self.luminosity / _LUM_SUN)
mass_rel = (((0.967 * (lum_rel ** 0.255)) + (5.19e-05 * lum_rel)) - 0.067)
self._mass = (mass_rel * _MASS_SUN)
return self._mass
|
Return the estimated mass of the star.
The mass is derived from the luminosity.
The following interpolation formula is used, which has been taken from :cite:p:`Sewell:2012`:
.. math::
M / M_{sun} = 0.967 (L / L_{sun})^0.255 + 5.19\times10^{-5}(L / L_{sun}) - 0.0670
:return: the estimated mass of the star
:rtype: pint.Quantity['mass']
|
src/peregrinus/world/celestial/star.py
|
mass
|
tom65536/sabio
| 0
|
python
|
@property
def mass(self) -> pint.Quantity:
"Return the estimated mass of the star.\n\n The mass is derived from the luminosity.\n The following interpolation formula is used, which has been taken from :cite:p:`Sewell:2012`:\n\n .. math::\n\n M / M_{sun} = 0.967 (L / L_{sun})^0.255 + 5.19\\times10^{-5}(L / L_{sun}) - 0.0670\n\n :return: the estimated mass of the star\n :rtype: pint.Quantity['mass']\n "
if (self._mass is None):
lum_rel = (self.luminosity / _LUM_SUN)
mass_rel = (((0.967 * (lum_rel ** 0.255)) + (5.19e-05 * lum_rel)) - 0.067)
self._mass = (mass_rel * _MASS_SUN)
return self._mass
|
@property
def mass(self) -> pint.Quantity:
"Return the estimated mass of the star.\n\n The mass is derived from the luminosity.\n The following interpolation formula is used, which has been taken from :cite:p:`Sewell:2012`:\n\n .. math::\n\n M / M_{sun} = 0.967 (L / L_{sun})^0.255 + 5.19\\times10^{-5}(L / L_{sun}) - 0.0670\n\n :return: the estimated mass of the star\n :rtype: pint.Quantity['mass']\n "
if (self._mass is None):
lum_rel = (self.luminosity / _LUM_SUN)
mass_rel = (((0.967 * (lum_rel ** 0.255)) + (5.19e-05 * lum_rel)) - 0.067)
self._mass = (mass_rel * _MASS_SUN)
return self._mass<|docstring|>Return the estimated mass of the star.
The mass is derived from the luminosity.
The following interpolation formula is used, which has been taken from :cite:p:`Sewell:2012`:
.. math::
M / M_{sun} = 0.967 (L / L_{sun})^0.255 + 5.19\times10^{-5}(L / L_{sun}) - 0.0670
:return: the estimated mass of the star
:rtype: pint.Quantity['mass']<|endoftext|>
|
b03243f7073fcb835abfaa709f6f23a9b159309a32b72297508bf263017ae54c
|
def diff_eq(x, t, a):
'微分方程式'
return (a * x)
|
微分方程式
|
example_py/example_2.py
|
diff_eq
|
YoshimitsuMatsutaIe/ans_2021
| 0
|
python
|
def diff_eq(x, t, a):
return (a * x)
|
def diff_eq(x, t, a):
return (a * x)<|docstring|>微分方程式<|endoftext|>
|
4bf1f624cd967bdffa1ec15851a003e4edbb98a747f36439d4f27360ed674e12
|
def url_v_html(url, mapa, ime_datoteke, headers=default_headers):
'Sprejme url in v dano destinacijo shrani HTML datoteko.'
try:
page_content = requests.get(url, headers=headers)
except requests.exceptions.ConnectionError:
print(f'Napaka pri povezovanju na {url}')
return None
if (page_content.status_code == requests.codes.ok):
os.makedirs(mapa, exist_ok=True)
path = os.path.join(mapa, ime_datoteke)
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(page_content.text)
else:
print(f'Napaka pri prenosu strani {url}')
return None
|
Sprejme url in v dano destinacijo shrani HTML datoteko.
|
orodja.py
|
url_v_html
|
benoucakar/analiza-podatkov---Najbolj-ih-10000-videoiger
| 1
|
python
|
def url_v_html(url, mapa, ime_datoteke, headers=default_headers):
try:
page_content = requests.get(url, headers=headers)
except requests.exceptions.ConnectionError:
print(f'Napaka pri povezovanju na {url}')
return None
if (page_content.status_code == requests.codes.ok):
os.makedirs(mapa, exist_ok=True)
path = os.path.join(mapa, ime_datoteke)
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(page_content.text)
else:
print(f'Napaka pri prenosu strani {url}')
return None
|
def url_v_html(url, mapa, ime_datoteke, headers=default_headers):
try:
page_content = requests.get(url, headers=headers)
except requests.exceptions.ConnectionError:
print(f'Napaka pri povezovanju na {url}')
return None
if (page_content.status_code == requests.codes.ok):
os.makedirs(mapa, exist_ok=True)
path = os.path.join(mapa, ime_datoteke)
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(page_content.text)
else:
print(f'Napaka pri prenosu strani {url}')
return None<|docstring|>Sprejme url in v dano destinacijo shrani HTML datoteko.<|endoftext|>
|
59771ae386cd9ca5573d08059afbf7af712d06cfdefd8372db6502191e4f7f53
|
def odpri_html(mapa, ime_datoteke):
'Vrne niz z vsebino dane datoteke.'
with open(os.path.join(mapa, ime_datoteke), encoding='utf-8') as datoteka:
return datoteka.read()
|
Vrne niz z vsebino dane datoteke.
|
orodja.py
|
odpri_html
|
benoucakar/analiza-podatkov---Najbolj-ih-10000-videoiger
| 1
|
python
|
def odpri_html(mapa, ime_datoteke):
with open(os.path.join(mapa, ime_datoteke), encoding='utf-8') as datoteka:
return datoteka.read()
|
def odpri_html(mapa, ime_datoteke):
with open(os.path.join(mapa, ime_datoteke), encoding='utf-8') as datoteka:
return datoteka.read()<|docstring|>Vrne niz z vsebino dane datoteke.<|endoftext|>
|
f26be214ba14ecad660bd7a5d7b372692de46f3d18e26afb3f8781f4df527867
|
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
'Iz seznama slovarjev ustvari CSV datoteko z glavo.'
with open(ime_datoteke, 'w', encoding='utf-8') as csv_datoteka:
writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)
writer.writeheader()
for slovar in slovarji:
writer.writerow(slovar)
|
Iz seznama slovarjev ustvari CSV datoteko z glavo.
|
orodja.py
|
zapisi_csv
|
benoucakar/analiza-podatkov---Najbolj-ih-10000-videoiger
| 1
|
python
|
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
with open(ime_datoteke, 'w', encoding='utf-8') as csv_datoteka:
writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)
writer.writeheader()
for slovar in slovarji:
writer.writerow(slovar)
|
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
with open(ime_datoteke, 'w', encoding='utf-8') as csv_datoteka:
writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)
writer.writeheader()
for slovar in slovarji:
writer.writerow(slovar)<|docstring|>Iz seznama slovarjev ustvari CSV datoteko z glavo.<|endoftext|>
|
8de25d313e55dd97530a06d43a035764cf8f670d0ad69a6ffd33a78df6ede4aa
|
def zapisi_json(objekt, ime_datoteke):
'Iz danega objekta ustvari JSON datoteko.'
with open(ime_datoteke, 'w', encoding='utf-8') as json_datoteka:
json.dump(objekt, json_datoteka, indent=4, ensure_ascii=False)
|
Iz danega objekta ustvari JSON datoteko.
|
orodja.py
|
zapisi_json
|
benoucakar/analiza-podatkov---Najbolj-ih-10000-videoiger
| 1
|
python
|
def zapisi_json(objekt, ime_datoteke):
with open(ime_datoteke, 'w', encoding='utf-8') as json_datoteka:
json.dump(objekt, json_datoteka, indent=4, ensure_ascii=False)
|
def zapisi_json(objekt, ime_datoteke):
with open(ime_datoteke, 'w', encoding='utf-8') as json_datoteka:
json.dump(objekt, json_datoteka, indent=4, ensure_ascii=False)<|docstring|>Iz danega objekta ustvari JSON datoteko.<|endoftext|>
|
c7ba3eabf1a6bb58ed387057dd9ffbc0c26978eee09ace8ab7e68a9570a10fa5
|
def odpri_json(ime_datoteke):
'Odpre dano JSON datoteko.'
with open(ime_datoteke, 'r', encoding='utf-8') as json_datoteka:
return json.load(json_datoteka)
|
Odpre dano JSON datoteko.
|
orodja.py
|
odpri_json
|
benoucakar/analiza-podatkov---Najbolj-ih-10000-videoiger
| 1
|
python
|
def odpri_json(ime_datoteke):
with open(ime_datoteke, 'r', encoding='utf-8') as json_datoteka:
return json.load(json_datoteka)
|
def odpri_json(ime_datoteke):
with open(ime_datoteke, 'r', encoding='utf-8') as json_datoteka:
return json.load(json_datoteka)<|docstring|>Odpre dano JSON datoteko.<|endoftext|>
|
cd6973743288229c4c02b78662ab5a50c816d0a18cfcb4876cd9de745501aaef
|
def _parse_envi(meta):
"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n "
def parsevec(value):
return np.fromstring(value.strip('{}'), dtype='float', sep=',')
def default(value):
return value.strip('{}')
parse = {'wavelength': parsevec, 'fwhm': parsevec}
parsed_meta = {key: parse.get(key, default)(value) for (key, value) in meta.items()}
return parsed_meta
|
Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
|
rioxarray/_io.py
|
_parse_envi
|
mraspaud/rioxarray
| 269
|
python
|
def _parse_envi(meta):
"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n "
def parsevec(value):
return np.fromstring(value.strip('{}'), dtype='float', sep=',')
def default(value):
return value.strip('{}')
parse = {'wavelength': parsevec, 'fwhm': parsevec}
parsed_meta = {key: parse.get(key, default)(value) for (key, value) in meta.items()}
return parsed_meta
|
def _parse_envi(meta):
"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n "
def parsevec(value):
return np.fromstring(value.strip('{}'), dtype='float', sep=',')
def default(value):
return value.strip('{}')
parse = {'wavelength': parsevec, 'fwhm': parsevec}
parsed_meta = {key: parse.get(key, default)(value) for (key, value) in meta.items()}
return parsed_meta<|docstring|>Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values<|endoftext|>
|
f21a08ed8401d77683e410ba75a9b29d4d586e60b3112decb6c6bb07fbe6784b
|
def _rasterio_to_numpy_dtype(dtypes):
'Numpy dtype from first entry of rasterio dataset.dtypes'
if (dtypes[0] == 'complex_int16'):
dtype = np.dtype('complex64')
else:
dtype = np.dtype(dtypes[0])
return dtype
|
Numpy dtype from first entry of rasterio dataset.dtypes
|
rioxarray/_io.py
|
_rasterio_to_numpy_dtype
|
mraspaud/rioxarray
| 269
|
python
|
def _rasterio_to_numpy_dtype(dtypes):
if (dtypes[0] == 'complex_int16'):
dtype = np.dtype('complex64')
else:
dtype = np.dtype(dtypes[0])
return dtype
|
def _rasterio_to_numpy_dtype(dtypes):
if (dtypes[0] == 'complex_int16'):
dtype = np.dtype('complex64')
else:
dtype = np.dtype(dtypes[0])
return dtype<|docstring|>Numpy dtype from first entry of rasterio dataset.dtypes<|endoftext|>
|
0cc57c87129eafe69b8b27e4f4fce60813d2a6c3fa641be88b8a385c96173f9a
|
def _to_numeric(value):
'\n Convert the value to a number\n '
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
|
Convert the value to a number
|
rioxarray/_io.py
|
_to_numeric
|
mraspaud/rioxarray
| 269
|
python
|
def _to_numeric(value):
'\n \n '
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
|
def _to_numeric(value):
'\n \n '
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value<|docstring|>Convert the value to a number<|endoftext|>
|
5ada4e69eabce66c1521721ca374c1a5c217070f188d48eb21832d1c5d4a8242
|
def _load_netcdf_attrs(tags, data_array):
'\n Loads the netCDF attributes into the data array\n\n Attributes stored in this format:\n - variable_name#attr_name: attr_value\n '
for (key, value) in tags.items():
(key, value) = _parse_tag(key, value)
key_split = key.split('#')
if (len(key_split) != 2):
continue
(variable_name, attr_name) = key_split
if (variable_name in data_array.coords):
data_array.coords[variable_name].attrs.update({attr_name: value})
|
Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value
|
rioxarray/_io.py
|
_load_netcdf_attrs
|
mraspaud/rioxarray
| 269
|
python
|
def _load_netcdf_attrs(tags, data_array):
'\n Loads the netCDF attributes into the data array\n\n Attributes stored in this format:\n - variable_name#attr_name: attr_value\n '
for (key, value) in tags.items():
(key, value) = _parse_tag(key, value)
key_split = key.split('#')
if (len(key_split) != 2):
continue
(variable_name, attr_name) = key_split
if (variable_name in data_array.coords):
data_array.coords[variable_name].attrs.update({attr_name: value})
|
def _load_netcdf_attrs(tags, data_array):
'\n Loads the netCDF attributes into the data array\n\n Attributes stored in this format:\n - variable_name#attr_name: attr_value\n '
for (key, value) in tags.items():
(key, value) = _parse_tag(key, value)
key_split = key.split('#')
if (len(key_split) != 2):
continue
(variable_name, attr_name) = key_split
if (variable_name in data_array.coords):
data_array.coords[variable_name].attrs.update({attr_name: value})<|docstring|>Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value<|endoftext|>
|
8db30b9ac3e9e93bb4928c284757d0ef7ded03be7d8c53860223958fa8a88fad
|
def _load_netcdf_1d_coords(tags):
"\n Dimension information:\n - NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)\n - NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)\n - NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)\n "
dim_names = tags.get('NETCDF_DIM_EXTRA')
if (not dim_names):
return {}
dim_names = dim_names.strip('{}').split(',')
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f'NETCDF_DIM_{dim_name}_DEF')
if (not dim_def):
continue
(dim_size, dim_dtype) = dim_def.strip('{}').split(',')
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f'NETCDF_DIM_{dim_name}_VALUES'].strip('{}')
coords[dim_name] = IndexVariable(dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=','))
return coords
|
Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)
|
rioxarray/_io.py
|
_load_netcdf_1d_coords
|
mraspaud/rioxarray
| 269
|
python
|
def _load_netcdf_1d_coords(tags):
"\n Dimension information:\n - NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)\n - NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)\n - NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)\n "
dim_names = tags.get('NETCDF_DIM_EXTRA')
if (not dim_names):
return {}
dim_names = dim_names.strip('{}').split(',')
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f'NETCDF_DIM_{dim_name}_DEF')
if (not dim_def):
continue
(dim_size, dim_dtype) = dim_def.strip('{}').split(',')
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f'NETCDF_DIM_{dim_name}_VALUES'].strip('{}')
coords[dim_name] = IndexVariable(dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=','))
return coords
|
def _load_netcdf_1d_coords(tags):
"\n Dimension information:\n - NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)\n - NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)\n - NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)\n "
dim_names = tags.get('NETCDF_DIM_EXTRA')
if (not dim_names):
return {}
dim_names = dim_names.strip('{}').split(',')
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f'NETCDF_DIM_{dim_name}_DEF')
if (not dim_def):
continue
(dim_size, dim_dtype) = dim_def.strip('{}').split(',')
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f'NETCDF_DIM_{dim_name}_VALUES'].strip('{}')
coords[dim_name] = IndexVariable(dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=','))
return coords<|docstring|>Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)<|endoftext|>
|
7ee4ae72bce2c50f69f3b081d22d03191d67a0c779d59445214bfaa94f69634a
|
def build_subdataset_filter(group_names, variable_names):
'\n Example::\n \'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":\n MODIS_Grid_2D:sur_refl_b01_1\'\n\n Parameters\n ----------\n group_names: str or list or tuple\n Name or names of netCDF groups to filter by.\n\n variable_names: str or list or tuple\n Name or names of netCDF variables to filter by.\n\n Returns\n -------\n re.SRE_Pattern: output of re.compile()\n '
variable_query = '\\w+'
if (variable_names is not None):
if (not isinstance(variable_names, (tuple, list))):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = f"(?:{'|'.join(variable_names)})"
if (group_names is not None):
if (not isinstance(group_names, (tuple, list))):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = f"(?:{'|'.join(group_names)})"
else:
return re.compile(''.join(['.*(?:\\:/|\\:)(/+)?', variable_query, '$']))
return re.compile(''.join(['.*(?:\\:/|\\:)(/+)?', group_query, '[:/](/+)?', variable_query, '$']))
|
Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()
|
rioxarray/_io.py
|
build_subdataset_filter
|
mraspaud/rioxarray
| 269
|
python
|
def build_subdataset_filter(group_names, variable_names):
'\n Example::\n \'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":\n MODIS_Grid_2D:sur_refl_b01_1\'\n\n Parameters\n ----------\n group_names: str or list or tuple\n Name or names of netCDF groups to filter by.\n\n variable_names: str or list or tuple\n Name or names of netCDF variables to filter by.\n\n Returns\n -------\n re.SRE_Pattern: output of re.compile()\n '
variable_query = '\\w+'
if (variable_names is not None):
if (not isinstance(variable_names, (tuple, list))):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = f"(?:{'|'.join(variable_names)})"
if (group_names is not None):
if (not isinstance(group_names, (tuple, list))):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = f"(?:{'|'.join(group_names)})"
else:
return re.compile(.join(['.*(?:\\:/|\\:)(/+)?', variable_query, '$']))
return re.compile(.join(['.*(?:\\:/|\\:)(/+)?', group_query, '[:/](/+)?', variable_query, '$']))
|
def build_subdataset_filter(group_names, variable_names):
'\n Example::\n \'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":\n MODIS_Grid_2D:sur_refl_b01_1\'\n\n Parameters\n ----------\n group_names: str or list or tuple\n Name or names of netCDF groups to filter by.\n\n variable_names: str or list or tuple\n Name or names of netCDF variables to filter by.\n\n Returns\n -------\n re.SRE_Pattern: output of re.compile()\n '
variable_query = '\\w+'
if (variable_names is not None):
if (not isinstance(variable_names, (tuple, list))):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = f"(?:{'|'.join(variable_names)})"
if (group_names is not None):
if (not isinstance(group_names, (tuple, list))):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = f"(?:{'|'.join(group_names)})"
else:
return re.compile(.join(['.*(?:\\:/|\\:)(/+)?', variable_query, '$']))
return re.compile(.join(['.*(?:\\:/|\\:)(/+)?', group_query, '[:/](/+)?', variable_query, '$']))<|docstring|>Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()<|endoftext|>
|
4fb7dbe773b68cd7344cad7c533a4a60e8f193a7e2334c392f2cdd596dce79bc
|
def _rio_transform(riods):
'\n Get the transform from a rasterio dataset\n reguardless of rasterio version.\n '
try:
return riods.transform
except AttributeError:
return riods.affine
|
Get the transform from a rasterio dataset
regardless of rasterio version.
|
rioxarray/_io.py
|
_rio_transform
|
mraspaud/rioxarray
| 269
|
python
|
def _rio_transform(riods):
'\n Get the transform from a rasterio dataset\n reguardless of rasterio version.\n '
try:
return riods.transform
except AttributeError:
return riods.affine
|
def _rio_transform(riods):
'\n Get the transform from a rasterio dataset\n reguardless of rasterio version.\n '
try:
return riods.transform
except AttributeError:
return riods.affine<|docstring|>Get the transform from a rasterio dataset
reguardless of rasterio version.<|endoftext|>
|
55df90a2525a9691a571e7569aab3a731bfab9b9dc7a2d5ca24f0dc046dce5a6
|
def _get_rasterio_attrs(riods):
    """
    Get rasterio specific attributes (nodata, scales/offsets,
    descriptions, and units) from an open rasterio dataset.

    Parameters
    ----------
    riods: rasterio.io.DatasetReader
        The open rasterio dataset to read the attributes from.

    Returns
    -------
    dict: attributes suitable for the resulting xarray object.
    """
    # Start from the band-level tags of band 1.
    attrs = _parse_tags(riods.tags(1))
    if hasattr(riods, 'nodata') and (riods.nodata is not None):
        # The nodata value for the raster bands.
        attrs['_FillValue'] = riods.nodata
    if hasattr(riods, 'scales'):
        if len(set(riods.scales)) > 1:
            # Per-band scales cannot be represented by a single
            # 'scale_factor'; keep the full tuple instead.
            attrs['scales'] = riods.scales
            # BUGFIX: this message previously said "Offsets differ across
            # bands" (copy-paste from the offsets branch below); it is the
            # scales that differ here.
            warnings.warn("Scales differ across bands. The 'scale_factor' attribute will not be added. See the 'scales' attribute.")
        else:
            attrs['scale_factor'] = riods.scales[0]
    if hasattr(riods, 'offsets'):
        if len(set(riods.offsets)) > 1:
            attrs['offsets'] = riods.offsets
            warnings.warn("Offsets differ across bands. The 'add_offset' attribute will not be added. See the 'offsets' attribute.")
        else:
            attrs['add_offset'] = riods.offsets[0]
    if hasattr(riods, 'descriptions') and any(riods.descriptions):
        if len(set(riods.descriptions)) == 1:
            # All bands share one description -> scalar long_name.
            attrs['long_name'] = riods.descriptions[0]
        else:
            attrs['long_name'] = riods.descriptions
    if hasattr(riods, 'units') and any(riods.units):
        # NOTE(review): unlike descriptions above this checks len(), not
        # len(set()), so identical units on a multi-band file stay a
        # tuple -- TODO confirm whether that asymmetry is intended.
        if len(riods.units) == 1:
            attrs['units'] = riods.units[0]
        else:
            attrs['units'] = riods.units
    return attrs
|
Get rasterio specific attributes
|
rioxarray/_io.py
|
_get_rasterio_attrs
|
mraspaud/rioxarray
| 269
|
python
|
def _get_rasterio_attrs(riods):
'\n \n '
attrs = _parse_tags(riods.tags(1))
if (hasattr(riods, 'nodata') and (riods.nodata is not None)):
attrs['_FillValue'] = riods.nodata
if hasattr(riods, 'scales'):
if (len(set(riods.scales)) > 1):
attrs['scales'] = riods.scales
warnings.warn("Offsets differ across bands. The 'scale_factor' attribute will not be added. See the 'scales' attribute.")
else:
attrs['scale_factor'] = riods.scales[0]
if hasattr(riods, 'offsets'):
if (len(set(riods.offsets)) > 1):
attrs['offsets'] = riods.offsets
warnings.warn("Offsets differ across bands. The 'add_offset' attribute will not be added. See the 'offsets' attribute.")
else:
attrs['add_offset'] = riods.offsets[0]
if (hasattr(riods, 'descriptions') and any(riods.descriptions)):
if (len(set(riods.descriptions)) == 1):
attrs['long_name'] = riods.descriptions[0]
else:
attrs['long_name'] = riods.descriptions
if (hasattr(riods, 'units') and any(riods.units)):
if (len(riods.units) == 1):
attrs['units'] = riods.units[0]
else:
attrs['units'] = riods.units
return attrs
|
def _get_rasterio_attrs(riods):
'\n \n '
attrs = _parse_tags(riods.tags(1))
if (hasattr(riods, 'nodata') and (riods.nodata is not None)):
attrs['_FillValue'] = riods.nodata
if hasattr(riods, 'scales'):
if (len(set(riods.scales)) > 1):
attrs['scales'] = riods.scales
warnings.warn("Offsets differ across bands. The 'scale_factor' attribute will not be added. See the 'scales' attribute.")
else:
attrs['scale_factor'] = riods.scales[0]
if hasattr(riods, 'offsets'):
if (len(set(riods.offsets)) > 1):
attrs['offsets'] = riods.offsets
warnings.warn("Offsets differ across bands. The 'add_offset' attribute will not be added. See the 'offsets' attribute.")
else:
attrs['add_offset'] = riods.offsets[0]
if (hasattr(riods, 'descriptions') and any(riods.descriptions)):
if (len(set(riods.descriptions)) == 1):
attrs['long_name'] = riods.descriptions[0]
else:
attrs['long_name'] = riods.descriptions
if (hasattr(riods, 'units') and any(riods.units)):
if (len(riods.units) == 1):
attrs['units'] = riods.units[0]
else:
attrs['units'] = riods.units
return attrs<|docstring|>Get rasterio specific attributes<|endoftext|>
|
3cadd590683b717be41c9fc96fcb09d9e004717521ee3ef6db489ce44ace9624
|
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
    """
    Decode datetime/timedelta coordinates based on CF conventions.

    Parameters
    ----------
    data_array: xarray.DataArray
        Array whose coordinates may carry CF time encodings in their
        'units' attribute.
    decode_times: bool
        If True, decode coordinates whose 'units' contain "since"
        (e.g. "days since 1970-01-01") into datetime objects.
    decode_timedelta: bool or None
        If True, decode coordinates whose 'units' is a bare time unit
        into timedelta objects. If None, follows ``decode_times``.

    Returns
    -------
    xarray.DataArray: the array with decoded coordinates assigned.
    """
    if (decode_timedelta is None):
        # Mirror xarray's default: timedelta decoding follows decode_times.
        decode_timedelta = decode_times
    for coord in data_array.coords:
        time_var = None
        if (decode_times and ('since' in data_array[coord].attrs.get('units', ''))):
            # "<unit> since <epoch>" marks a CF datetime coordinate.
            # use_cftime=True presumably guards against dates outside the
            # datetime64[ns] range -- TODO confirm against xarray docs.
            time_var = times.CFDatetimeCoder(use_cftime=True).decode(as_variable(data_array[coord]), name=coord)
        elif (decode_timedelta and (data_array[coord].attrs.get('units') in times.TIME_UNITS)):
            # A bare time unit (days/hours/...) marks a timedelta coordinate.
            time_var = times.CFTimedeltaCoder().decode(as_variable(data_array[coord]), name=coord)
        if (time_var is not None):
            # Re-wrap the decoded variable as an IndexVariable so it can
            # replace the original coordinate in place.
            (dimensions, data, attributes, encoding) = variables.unpack_for_decoding(time_var)
            data_array = data_array.assign_coords({coord: IndexVariable(dims=dimensions, data=data, attrs=attributes, encoding=encoding)})
    return data_array
|
Decode the datetime based on CF conventions
|
rioxarray/_io.py
|
_decode_datetime_cf
|
mraspaud/rioxarray
| 269
|
python
|
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
'\n \n '
if (decode_timedelta is None):
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if (decode_times and ('since' in data_array[coord].attrs.get('units', ))):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(as_variable(data_array[coord]), name=coord)
elif (decode_timedelta and (data_array[coord].attrs.get('units') in times.TIME_UNITS)):
time_var = times.CFTimedeltaCoder().decode(as_variable(data_array[coord]), name=coord)
if (time_var is not None):
(dimensions, data, attributes, encoding) = variables.unpack_for_decoding(time_var)
data_array = data_array.assign_coords({coord: IndexVariable(dims=dimensions, data=data, attrs=attributes, encoding=encoding)})
return data_array
|
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
'\n \n '
if (decode_timedelta is None):
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if (decode_times and ('since' in data_array[coord].attrs.get('units', ))):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(as_variable(data_array[coord]), name=coord)
elif (decode_timedelta and (data_array[coord].attrs.get('units') in times.TIME_UNITS)):
time_var = times.CFTimedeltaCoder().decode(as_variable(data_array[coord]), name=coord)
if (time_var is not None):
(dimensions, data, attributes, encoding) = variables.unpack_for_decoding(time_var)
data_array = data_array.assign_coords({coord: IndexVariable(dims=dimensions, data=data, attrs=attributes, encoding=encoding)})
return data_array<|docstring|>Decide the datetime based on CF conventions<|endoftext|>
|
5f5d0495a92aaa0f6bbbe3186dfbab237c1e469bb686b3d9352005b825e44274
|
def _load_subdatasets(riods, group, variable, parse_coordinates, chunks, cache, lock, masked, mask_and_scale, decode_times, decode_timedelta, **open_kwargs):
    """
    Load in rasterio subdatasets as xarray Dataset(s).

    Subdatasets are grouped by their raster shape; each distinct shape
    yields its own Dataset. Most keyword arguments are forwarded to
    ``open_rasterio`` for each matching subdataset.

    Returns
    -------
    xarray.Dataset or list of xarray.Dataset
        One Dataset when all subdatasets share a single shape, a list of
        Datasets when shapes differ, and an empty Dataset when nothing
        matched the group/variable filter.
    """
    # Dataset-level tags become the attrs of the resulting Dataset(s).
    base_tags = _parse_tags(riods.tags())
    # Maps raster shape -> {array name: DataArray}.
    dim_groups = {}
    subdataset_filter = None
    if any((group, variable)):
        # Only build a filter when a group and/or variable was requested.
        subdataset_filter = build_subdataset_filter(group, variable)
    for subdataset in riods.subdatasets:
        if ((subdataset_filter is not None) and (not subdataset_filter.match(subdataset))):
            continue
        # Peek at the shape only; open_rasterio below reopens the subdataset.
        with rasterio.open(subdataset) as rds:
            shape = rds.shape
        # Parse coordinates only for the first subdataset of each shape;
        # later ones share the same grid, so re-parsing is redundant.
        # The default name is derived from the subdataset path component.
        rioda = open_rasterio(subdataset, parse_coordinates=((shape not in dim_groups) and parse_coordinates), chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, default_name=subdataset.split(':')[(- 1)].lstrip('/').replace('/', '_'), decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
        if (shape not in dim_groups):
            dim_groups[shape] = {rioda.name: rioda}
        else:
            dim_groups[shape][rioda.name] = rioda
    if (len(dim_groups) > 1):
        # Incompatible shapes cannot share dims: one Dataset per shape.
        dataset = [Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()]
    elif (not dim_groups):
        # Nothing matched the filter: empty Dataset with just the tags.
        dataset = Dataset(attrs=base_tags)
    else:
        dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
    return dataset
|
Load in rasterio subdatasets
|
rioxarray/_io.py
|
_load_subdatasets
|
mraspaud/rioxarray
| 269
|
python
|
def _load_subdatasets(riods, group, variable, parse_coordinates, chunks, cache, lock, masked, mask_and_scale, decode_times, decode_timedelta, **open_kwargs):
'\n \n '
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if ((subdataset_filter is not None) and (not subdataset_filter.match(subdataset))):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(subdataset, parse_coordinates=((shape not in dim_groups) and parse_coordinates), chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, default_name=subdataset.split(':')[(- 1)].lstrip('/').replace('/', '_'), decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
if (shape not in dim_groups):
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if (len(dim_groups) > 1):
dataset = [Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()]
elif (not dim_groups):
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset
|
def _load_subdatasets(riods, group, variable, parse_coordinates, chunks, cache, lock, masked, mask_and_scale, decode_times, decode_timedelta, **open_kwargs):
'\n \n '
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if ((subdataset_filter is not None) and (not subdataset_filter.match(subdataset))):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(subdataset, parse_coordinates=((shape not in dim_groups) and parse_coordinates), chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, default_name=subdataset.split(':')[(- 1)].lstrip('/').replace('/', '_'), decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
if (shape not in dim_groups):
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if (len(dim_groups) > 1):
dataset = [Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()]
elif (not dim_groups):
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset<|docstring|>Load in rasterio subdatasets<|endoftext|>
|
2019c611b601698b235966c773badd17ba4bd0b898f8b2b26bbe096cffd0ea17
|
def _prepare_dask(result, riods, filename, chunks):
    """
    Prepare the data for dask computations by chunking the DataArray.

    Parameters
    ----------
    result: xarray.DataArray
        The array to chunk.
    riods: rasterio.io.DatasetReader
        Open dataset; its block shapes seed automatic chunking.
    filename: str
        Source path, folded (with its mtime) into the dask token.
    chunks: int, tuple, dict, True, or "auto"
        Desired chunking; True/"auto" derives chunks from the file's
        internal block layout.

    Returns
    -------
    xarray.DataArray: the dask-backed, chunked array.
    """
    from dask.base import tokenize
    # Fold the modification time into the token so a rewritten file
    # does not reuse stale cached graphs.
    try:
        mtime = os.path.getmtime(filename)
    except OSError:
        # Not a local file (e.g. a URL/bucket); no mtime available.
        mtime = None
    if (chunks in (True, 'auto')):
        import dask
        from dask.array.core import normalize_chunks
        if (version.parse(dask.__version__) < version.parse('0.18.0')):
            # normalize_chunks(previous_chunks=...) needs dask >= 0.18.0.
            msg = f'Automatic chunking requires dask.__version__ >= 0.18.0 . You currently have version {dask.__version__}'
            raise NotImplementedError(msg)
        # One band per chunk; spatial chunks follow the file's block size.
        block_shape = ((1,) + riods.block_shapes[0])
        chunks = normalize_chunks(chunks=(1, 'auto', 'auto'), shape=(riods.count, riods.height, riods.width), dtype=riods.dtypes[0], previous_chunks=tuple(((c,) for c in block_shape)))
    token = tokenize(filename, mtime, chunks)
    name_prefix = f'open_rasterio-{token}'
    return result.chunk(chunks, name_prefix=name_prefix, token=token)
|
Prepare the data for dask computations
|
rioxarray/_io.py
|
_prepare_dask
|
mraspaud/rioxarray
| 269
|
python
|
def _prepare_dask(result, riods, filename, chunks):
'\n \n '
from dask.base import tokenize
try:
mtime = os.path.getmtime(filename)
except OSError:
mtime = None
if (chunks in (True, 'auto')):
import dask
from dask.array.core import normalize_chunks
if (version.parse(dask.__version__) < version.parse('0.18.0')):
msg = f'Automatic chunking requires dask.__version__ >= 0.18.0 . You currently have version {dask.__version__}'
raise NotImplementedError(msg)
block_shape = ((1,) + riods.block_shapes[0])
chunks = normalize_chunks(chunks=(1, 'auto', 'auto'), shape=(riods.count, riods.height, riods.width), dtype=riods.dtypes[0], previous_chunks=tuple(((c,) for c in block_shape)))
token = tokenize(filename, mtime, chunks)
name_prefix = f'open_rasterio-{token}'
return result.chunk(chunks, name_prefix=name_prefix, token=token)
|
def _prepare_dask(result, riods, filename, chunks):
'\n \n '
from dask.base import tokenize
try:
mtime = os.path.getmtime(filename)
except OSError:
mtime = None
if (chunks in (True, 'auto')):
import dask
from dask.array.core import normalize_chunks
if (version.parse(dask.__version__) < version.parse('0.18.0')):
msg = f'Automatic chunking requires dask.__version__ >= 0.18.0 . You currently have version {dask.__version__}'
raise NotImplementedError(msg)
block_shape = ((1,) + riods.block_shapes[0])
chunks = normalize_chunks(chunks=(1, 'auto', 'auto'), shape=(riods.count, riods.height, riods.width), dtype=riods.dtypes[0], previous_chunks=tuple(((c,) for c in block_shape)))
token = tokenize(filename, mtime, chunks)
name_prefix = f'open_rasterio-{token}'
return result.chunk(chunks, name_prefix=name_prefix, token=token)<|docstring|>Prepare the data for dask computations<|endoftext|>
|
6d3ce7d2b4aafea6ffb65e25d95e176f0bdc0e0a3eed86fb43850f82413ed0dc
|
def _handle_encoding(result, mask_and_scale, masked, da_name):
    """
    Move attributes that describe the on-disk encoding from
    ``result.attrs`` into ``result.encoding``.
    """
    # 'grid_mapping' always belongs in the encoding.
    encoded_attrs = ['grid_mapping']
    if mask_and_scale:
        # Scaling is applied lazily, so the raw factors move to encoding.
        encoded_attrs += ['scale_factor', 'add_offset']
    if masked:
        # Fill values are applied by masking, so they move to encoding.
        encoded_attrs += ['_FillValue', 'missing_value']
    for attr_name in encoded_attrs:
        if attr_name in result.attrs:
            variables.pop_to(result.attrs, result.encoding, attr_name, name=da_name)
|
Make sure encoding handled properly
|
rioxarray/_io.py
|
_handle_encoding
|
mraspaud/rioxarray
| 269
|
python
|
def _handle_encoding(result, mask_and_scale, masked, da_name):
'\n \n '
if ('grid_mapping' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'grid_mapping', name=da_name)
if mask_and_scale:
if ('scale_factor' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'scale_factor', name=da_name)
if ('add_offset' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'add_offset', name=da_name)
if masked:
if ('_FillValue' in result.attrs):
variables.pop_to(result.attrs, result.encoding, '_FillValue', name=da_name)
if ('missing_value' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'missing_value', name=da_name)
|
def _handle_encoding(result, mask_and_scale, masked, da_name):
'\n \n '
if ('grid_mapping' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'grid_mapping', name=da_name)
if mask_and_scale:
if ('scale_factor' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'scale_factor', name=da_name)
if ('add_offset' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'add_offset', name=da_name)
if masked:
if ('_FillValue' in result.attrs):
variables.pop_to(result.attrs, result.encoding, '_FillValue', name=da_name)
if ('missing_value' in result.attrs):
variables.pop_to(result.attrs, result.encoding, 'missing_value', name=da_name)<|docstring|>Make sure encoding handled properly<|endoftext|>
|
cb488f4dd25f082edfb7abb0f66e0e140c3c6a13867350bf040d3888493663eb
|
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None, masked=False, mask_and_scale=False, variable=None, group=None, default_name=None, decode_times=True, decode_timedelta=None, **open_kwargs):
'Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file\'s geoinformation, shifted to the center of each pixel (see\n `"PixelIsArea" Raster Space\n <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_\n for more information).\n\n Parameters\n ----------\n filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates: bool, optional\n Whether to parse the x and y coordinates out of the file\'s\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don\'t need the coordinates.\n chunks: int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{\'x\': 5, \'y\': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array. Chunks can also be set to\n ``True`` or ``"auto"`` to choose sensible chunk sizes according to\n ``dask.config.get("array.chunk-size")``.\n cache: bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. 
Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock: bool or dask.utils.SerializableLock, optional\n\n If chunks is provided, this argument is used to ensure that only one\n thread per process is reading from a rasterio file object at a time.\n\n By default and when a lock instance is provided,\n a :class:`xarray.backends.CachingFileManager` is used to cache File objects.\n Since rasterio also caches some data, this will make repeated reads from the\n same object fast.\n\n When ``lock=False``, no lock is used, allowing for completely parallel reads\n from multiple threads or processes. However, a new file handle is opened on\n each request.\n\n masked: bool, optional\n If True, read the mask and set values to NaN. Defaults to False.\n mask_and_scale: bool, optional\n Lazily scale (using the `scales` and `offsets` from rasterio) and mask.\n If the _Unsigned attribute is present treat integer arrays as unsigned.\n variable: str or list or tuple, optional\n Variable name or names to use to filter loading.\n group: str or list or tuple, optional\n Group name or names to use to filter loading.\n default_name: str, optional\n The name of the data array if none exists. Default is None.\n decode_times: bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n decode_timedelta: bool, optional\n If True, decode variables and coordinates with time units in\n {“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}\n into timedelta objects. If False, leave them encoded as numbers.\n If None (default), assume the same value of decode_time.\n **open_kwargs: kwargs, optional\n Optional keyword arguments to pass into rasterio.open().\n\n Returns\n -------\n :obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:\n The newly created dataset(s).\n '
parse_coordinates = (True if (parse_coordinates is None) else parse_coordinates)
masked = (masked or mask_and_scale)
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(src_crs=(vrt.src_crs.to_string() if vrt.src_crs else None), crs=(vrt.crs.to_string() if vrt.crs else None), resampling=vrt.resampling, tolerance=vrt.tolerance, src_nodata=vrt.src_nodata, nodata=vrt.nodata, width=vrt.width, height=vrt.height, src_transform=vrt.src_transform, transform=vrt.transform, dtype=vrt.working_dtype, warp_extras=vrt.warp_extras)
if (lock in (True, None)):
lock = RASTERIO_LOCK
elif (lock is False):
lock = NO_LOCK
open_kwargs['sharing'] = open_kwargs.get('sharing', False)
with warnings.catch_warnings(record=True) as rio_warnings:
if (lock is not NO_LOCK):
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode='r', kwargs=open_kwargs)
else:
manager = URIManager(rasterio.open, filename, mode='r', kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
for rio_warning in captured_warnings:
if ((not riods.subdatasets) or (not isinstance(rio_warning.message, NotGeoreferencedWarning))):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
if riods.subdatasets:
return _load_subdatasets(riods=riods, group=group, variable=variable, parse_coordinates=parse_coordinates, chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
if (vrt_params is not None):
riods = WarpedVRT(riods, **vrt_params)
if (cache is None):
cache = (chunks is None)
if (riods.count < 1):
raise ValueError('Unknown dims')
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if (f'NETCDF_DIM_{coord}' in attrs):
coord_name = coord
attrs.pop(f'NETCDF_DIM_{coord}')
break
else:
coord_name = 'band'
coords[coord_name] = np.asarray(riods.indexes)
if parse_coordinates:
coords.update(_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height))
unsigned = False
encoding = {}
if (mask_and_scale and ('_Unsigned' in attrs)):
unsigned = (variables.pop_to(attrs, encoding, '_Unsigned') == 'true')
if masked:
encoding['dtype'] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop('NETCDF_VARNAME', default_name)
data = indexing.LazilyOuterIndexedArray(RasterioArrayWrapper(manager, lock, name=da_name, vrt_params=vrt_params, masked=masked, mask_and_scale=mask_and_scale, unsigned=unsigned))
data = indexing.CopyOnWriteArray(data)
if (cache and (chunks is None)):
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=(coord_name, 'y', 'x'), coords=coords, attrs=attrs, name=da_name)
result.encoding = encoding
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(result, decode_times=decode_times, decode_timedelta=decode_timedelta)
if ('_FillValue' in attrs):
attrs['_FillValue'] = result.dtype.type(attrs['_FillValue'])
_handle_encoding(result, mask_and_scale, masked, da_name)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if (hasattr(riods, 'crs') and riods.crs):
result.rio.write_crs(riods.crs, inplace=True)
if (chunks is not None):
result = _prepare_dask(result, riods, filename, chunks)
result.set_close(manager.close)
result.rio._manager = manager
result.encoding['source'] = riods.name
result.encoding['rasterio_dtype'] = str(riods.dtypes[0])
return result
|
Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).
|
rioxarray/_io.py
|
open_rasterio
|
mraspaud/rioxarray
| 269
|
python
|
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None, masked=False, mask_and_scale=False, variable=None, group=None, default_name=None, decode_times=True, decode_timedelta=None, **open_kwargs):
'Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file\'s geoinformation, shifted to the center of each pixel (see\n `"PixelIsArea" Raster Space\n <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_\n for more information).\n\n Parameters\n ----------\n filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates: bool, optional\n Whether to parse the x and y coordinates out of the file\'s\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don\'t need the coordinates.\n chunks: int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{\'x\': 5, \'y\': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array. Chunks can also be set to\n ``True`` or ``"auto"`` to choose sensible chunk sizes according to\n ``dask.config.get("array.chunk-size")``.\n cache: bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. 
Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock: bool or dask.utils.SerializableLock, optional\n\n If chunks is provided, this argument is used to ensure that only one\n thread per process is reading from a rasterio file object at a time.\n\n By default and when a lock instance is provided,\n a :class:`xarray.backends.CachingFileManager` is used to cache File objects.\n Since rasterio also caches some data, this will make repeated reads from the\n same object fast.\n\n When ``lock=False``, no lock is used, allowing for completely parallel reads\n from multiple threads or processes. However, a new file handle is opened on\n each request.\n\n masked: bool, optional\n If True, read the mask and set values to NaN. Defaults to False.\n mask_and_scale: bool, optional\n Lazily scale (using the `scales` and `offsets` from rasterio) and mask.\n If the _Unsigned attribute is present treat integer arrays as unsigned.\n variable: str or list or tuple, optional\n Variable name or names to use to filter loading.\n group: str or list or tuple, optional\n Group name or names to use to filter loading.\n default_name: str, optional\n The name of the data array if none exists. Default is None.\n decode_times: bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n decode_timedelta: bool, optional\n If True, decode variables and coordinates with time units in\n {“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}\n into timedelta objects. If False, leave them encoded as numbers.\n If None (default), assume the same value of decode_time.\n **open_kwargs: kwargs, optional\n Optional keyword arguments to pass into rasterio.open().\n\n Returns\n -------\n :obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:\n The newly created dataset(s).\n '
parse_coordinates = (True if (parse_coordinates is None) else parse_coordinates)
masked = (masked or mask_and_scale)
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(src_crs=(vrt.src_crs.to_string() if vrt.src_crs else None), crs=(vrt.crs.to_string() if vrt.crs else None), resampling=vrt.resampling, tolerance=vrt.tolerance, src_nodata=vrt.src_nodata, nodata=vrt.nodata, width=vrt.width, height=vrt.height, src_transform=vrt.src_transform, transform=vrt.transform, dtype=vrt.working_dtype, warp_extras=vrt.warp_extras)
if (lock in (True, None)):
lock = RASTERIO_LOCK
elif (lock is False):
lock = NO_LOCK
open_kwargs['sharing'] = open_kwargs.get('sharing', False)
with warnings.catch_warnings(record=True) as rio_warnings:
if (lock is not NO_LOCK):
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode='r', kwargs=open_kwargs)
else:
manager = URIManager(rasterio.open, filename, mode='r', kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
for rio_warning in captured_warnings:
if ((not riods.subdatasets) or (not isinstance(rio_warning.message, NotGeoreferencedWarning))):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
if riods.subdatasets:
return _load_subdatasets(riods=riods, group=group, variable=variable, parse_coordinates=parse_coordinates, chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
if (vrt_params is not None):
riods = WarpedVRT(riods, **vrt_params)
if (cache is None):
cache = (chunks is None)
if (riods.count < 1):
raise ValueError('Unknown dims')
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if (f'NETCDF_DIM_{coord}' in attrs):
coord_name = coord
attrs.pop(f'NETCDF_DIM_{coord}')
break
else:
coord_name = 'band'
coords[coord_name] = np.asarray(riods.indexes)
if parse_coordinates:
coords.update(_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height))
unsigned = False
encoding = {}
if (mask_and_scale and ('_Unsigned' in attrs)):
unsigned = (variables.pop_to(attrs, encoding, '_Unsigned') == 'true')
if masked:
encoding['dtype'] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop('NETCDF_VARNAME', default_name)
data = indexing.LazilyOuterIndexedArray(RasterioArrayWrapper(manager, lock, name=da_name, vrt_params=vrt_params, masked=masked, mask_and_scale=mask_and_scale, unsigned=unsigned))
data = indexing.CopyOnWriteArray(data)
if (cache and (chunks is None)):
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=(coord_name, 'y', 'x'), coords=coords, attrs=attrs, name=da_name)
result.encoding = encoding
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(result, decode_times=decode_times, decode_timedelta=decode_timedelta)
if ('_FillValue' in attrs):
attrs['_FillValue'] = result.dtype.type(attrs['_FillValue'])
_handle_encoding(result, mask_and_scale, masked, da_name)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if (hasattr(riods, 'crs') and riods.crs):
result.rio.write_crs(riods.crs, inplace=True)
if (chunks is not None):
result = _prepare_dask(result, riods, filename, chunks)
result.set_close(manager.close)
result.rio._manager = manager
result.encoding['source'] = riods.name
result.encoding['rasterio_dtype'] = str(riods.dtypes[0])
return result
|
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None, masked=False, mask_and_scale=False, variable=None, group=None, default_name=None, decode_times=True, decode_timedelta=None, **open_kwargs):
'Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file\'s geoinformation, shifted to the center of each pixel (see\n `"PixelIsArea" Raster Space\n <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_\n for more information).\n\n Parameters\n ----------\n filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates: bool, optional\n Whether to parse the x and y coordinates out of the file\'s\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don\'t need the coordinates.\n chunks: int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{\'x\': 5, \'y\': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array. Chunks can also be set to\n ``True`` or ``"auto"`` to choose sensible chunk sizes according to\n ``dask.config.get("array.chunk-size")``.\n cache: bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. 
Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock: bool or dask.utils.SerializableLock, optional\n\n If chunks is provided, this argument is used to ensure that only one\n thread per process is reading from a rasterio file object at a time.\n\n By default and when a lock instance is provided,\n a :class:`xarray.backends.CachingFileManager` is used to cache File objects.\n Since rasterio also caches some data, this will make repeated reads from the\n same object fast.\n\n When ``lock=False``, no lock is used, allowing for completely parallel reads\n from multiple threads or processes. However, a new file handle is opened on\n each request.\n\n masked: bool, optional\n If True, read the mask and set values to NaN. Defaults to False.\n mask_and_scale: bool, optional\n Lazily scale (using the `scales` and `offsets` from rasterio) and mask.\n If the _Unsigned attribute is present treat integer arrays as unsigned.\n variable: str or list or tuple, optional\n Variable name or names to use to filter loading.\n group: str or list or tuple, optional\n Group name or names to use to filter loading.\n default_name: str, optional\n The name of the data array if none exists. Default is None.\n decode_times: bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n decode_timedelta: bool, optional\n If True, decode variables and coordinates with time units in\n {“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}\n into timedelta objects. If False, leave them encoded as numbers.\n If None (default), assume the same value of decode_time.\n **open_kwargs: kwargs, optional\n Optional keyword arguments to pass into rasterio.open().\n\n Returns\n -------\n :obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:\n The newly created dataset(s).\n '
parse_coordinates = (True if (parse_coordinates is None) else parse_coordinates)
masked = (masked or mask_and_scale)
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(src_crs=(vrt.src_crs.to_string() if vrt.src_crs else None), crs=(vrt.crs.to_string() if vrt.crs else None), resampling=vrt.resampling, tolerance=vrt.tolerance, src_nodata=vrt.src_nodata, nodata=vrt.nodata, width=vrt.width, height=vrt.height, src_transform=vrt.src_transform, transform=vrt.transform, dtype=vrt.working_dtype, warp_extras=vrt.warp_extras)
if (lock in (True, None)):
lock = RASTERIO_LOCK
elif (lock is False):
lock = NO_LOCK
open_kwargs['sharing'] = open_kwargs.get('sharing', False)
with warnings.catch_warnings(record=True) as rio_warnings:
if (lock is not NO_LOCK):
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode='r', kwargs=open_kwargs)
else:
manager = URIManager(rasterio.open, filename, mode='r', kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
for rio_warning in captured_warnings:
if ((not riods.subdatasets) or (not isinstance(rio_warning.message, NotGeoreferencedWarning))):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
if riods.subdatasets:
return _load_subdatasets(riods=riods, group=group, variable=variable, parse_coordinates=parse_coordinates, chunks=chunks, cache=cache, lock=lock, masked=masked, mask_and_scale=mask_and_scale, decode_times=decode_times, decode_timedelta=decode_timedelta, **open_kwargs)
if (vrt_params is not None):
riods = WarpedVRT(riods, **vrt_params)
if (cache is None):
cache = (chunks is None)
if (riods.count < 1):
raise ValueError('Unknown dims')
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if (f'NETCDF_DIM_{coord}' in attrs):
coord_name = coord
attrs.pop(f'NETCDF_DIM_{coord}')
break
else:
coord_name = 'band'
coords[coord_name] = np.asarray(riods.indexes)
if parse_coordinates:
coords.update(_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height))
unsigned = False
encoding = {}
if (mask_and_scale and ('_Unsigned' in attrs)):
unsigned = (variables.pop_to(attrs, encoding, '_Unsigned') == 'true')
if masked:
encoding['dtype'] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop('NETCDF_VARNAME', default_name)
data = indexing.LazilyOuterIndexedArray(RasterioArrayWrapper(manager, lock, name=da_name, vrt_params=vrt_params, masked=masked, mask_and_scale=mask_and_scale, unsigned=unsigned))
data = indexing.CopyOnWriteArray(data)
if (cache and (chunks is None)):
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=(coord_name, 'y', 'x'), coords=coords, attrs=attrs, name=da_name)
result.encoding = encoding
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(result, decode_times=decode_times, decode_timedelta=decode_timedelta)
if ('_FillValue' in attrs):
attrs['_FillValue'] = result.dtype.type(attrs['_FillValue'])
_handle_encoding(result, mask_and_scale, masked, da_name)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if (hasattr(riods, 'crs') and riods.crs):
result.rio.write_crs(riods.crs, inplace=True)
if (chunks is not None):
result = _prepare_dask(result, riods, filename, chunks)
result.set_close(manager.close)
result.rio._manager = manager
result.encoding['source'] = riods.name
result.encoding['rasterio_dtype'] = str(riods.dtypes[0])
return result<|docstring|>Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).<|endoftext|>
|
ed02cd0c5048540172da022a2c87d7423f20fb750da5852f2d0a272948474aa3
|
@property
def file_handle(self):
'\n File handle returned by the opener.\n '
if (self._file_handle is not None):
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
|
File handle returned by the opener.
|
rioxarray/_io.py
|
file_handle
|
mraspaud/rioxarray
| 269
|
python
|
@property
def file_handle(self):
'\n \n '
if (self._file_handle is not None):
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
|
@property
def file_handle(self):
'\n \n '
if (self._file_handle is not None):
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle<|docstring|>File handle returned by the opener.<|endoftext|>
|
34f526afc39163921be80b6af1f2fea5a9854a4ef09789f3ce5b0f0f9e57b8c2
|
def close(self):
'\n Close file handle.\n '
if (self._file_handle is not None):
self._file_handle.close()
self._file_handle = None
|
Close file handle.
|
rioxarray/_io.py
|
close
|
mraspaud/rioxarray
| 269
|
python
|
def close(self):
'\n \n '
if (self._file_handle is not None):
self._file_handle.close()
self._file_handle = None
|
def close(self):
'\n \n '
if (self._file_handle is not None):
self._file_handle.close()
self._file_handle = None<|docstring|>Close file handle.<|endoftext|>
|
3e014f6fd389249734cee8d0ded162d2e5673c76fe86d8bce20718aa2f66b789
|
def __getstate__(self):
'State for pickling.'
return (self._opener, self._args, self._mode, self._kwargs)
|
State for pickling.
|
rioxarray/_io.py
|
__getstate__
|
mraspaud/rioxarray
| 269
|
python
|
def __getstate__(self):
return (self._opener, self._args, self._mode, self._kwargs)
|
def __getstate__(self):
return (self._opener, self._args, self._mode, self._kwargs)<|docstring|>State for pickling.<|endoftext|>
|
0c5fdeb6f29dddda971bc1651abe16efb53b5990a00f29f19f8e06154af25c53
|
def __setstate__(self, state):
'Restore from a pickle.'
(opener, args, mode, kwargs) = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
|
Restore from a pickle.
|
rioxarray/_io.py
|
__setstate__
|
mraspaud/rioxarray
| 269
|
python
|
def __setstate__(self, state):
(opener, args, mode, kwargs) = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
|
def __setstate__(self, state):
(opener, args, mode, kwargs) = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)<|docstring|>Restore from a pickle.<|endoftext|>
|
308f8ab026e4c9948d0b14c78d94f6afe52427b15f1d579a68cc3de5c893e408
|
@property
def dtype(self):
'\n Data type of the array\n '
return self._dtype
|
Data type of the array
|
rioxarray/_io.py
|
dtype
|
mraspaud/rioxarray
| 269
|
python
|
@property
def dtype(self):
'\n \n '
return self._dtype
|
@property
def dtype(self):
'\n \n '
return self._dtype<|docstring|>Data type of the array<|endoftext|>
|
1d1f9aa8279df2c4255760da5b8057cbbbe41ed37cf293b2c5ac7650d61ac4c7
|
@property
def fill_value(self):
'\n Fill value of the array\n '
return self._fill_value
|
Fill value of the array
|
rioxarray/_io.py
|
fill_value
|
mraspaud/rioxarray
| 269
|
python
|
@property
def fill_value(self):
'\n \n '
return self._fill_value
|
@property
def fill_value(self):
'\n \n '
return self._fill_value<|docstring|>Fill value of the array<|endoftext|>
|
19ba5a59762ff50ba4d6278d085ccc13063dc3c586b0860176a9308ac39bd9a6
|
@property
def shape(self):
'\n Shape of the array\n '
return self._shape
|
Shape of the array
|
rioxarray/_io.py
|
shape
|
mraspaud/rioxarray
| 269
|
python
|
@property
def shape(self):
'\n \n '
return self._shape
|
@property
def shape(self):
'\n \n '
return self._shape<|docstring|>Shape of the array<|endoftext|>
|
5210e283ead4dda5b08a95f7ea22be3357920b7b4d9dd355231617b9dd96314a
|
def _get_indexer(self, key):
'Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n '
if (len(key) != 3):
raise RioXarrayError('rasterio datasets should always be 3D')
band_key = key[0]
np_inds = []
if isinstance(band_key, slice):
(start, stop, step) = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list):
np_inds.append(slice(None))
window = []
squeeze_axis = []
for (iii, (ikey, size)) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
(start, stop, step) = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
squeeze_axis.append((- (2 - iii)))
start = ikey
stop = (ikey + 1)
else:
(start, stop) = (np.min(ikey), (np.max(ikey) + 1))
np_inds.append((ikey - start))
window.append((start, stop))
if (isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray)):
np_inds[(- 2):] = np.ix_(*np_inds[(- 2):])
return (band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds))
|
Get indexer for rasterio array.
Parameter
---------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
|
rioxarray/_io.py
|
_get_indexer
|
mraspaud/rioxarray
| 269
|
python
|
def _get_indexer(self, key):
'Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n '
if (len(key) != 3):
raise RioXarrayError('rasterio datasets should always be 3D')
band_key = key[0]
np_inds = []
if isinstance(band_key, slice):
(start, stop, step) = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list):
np_inds.append(slice(None))
window = []
squeeze_axis = []
for (iii, (ikey, size)) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
(start, stop, step) = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
squeeze_axis.append((- (2 - iii)))
start = ikey
stop = (ikey + 1)
else:
(start, stop) = (np.min(ikey), (np.max(ikey) + 1))
np_inds.append((ikey - start))
window.append((start, stop))
if (isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray)):
np_inds[(- 2):] = np.ix_(*np_inds[(- 2):])
return (band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds))
|
def _get_indexer(self, key):
'Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n '
if (len(key) != 3):
raise RioXarrayError('rasterio datasets should always be 3D')
band_key = key[0]
np_inds = []
if isinstance(band_key, slice):
(start, stop, step) = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list):
np_inds.append(slice(None))
window = []
squeeze_axis = []
for (iii, (ikey, size)) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
(start, stop, step) = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
squeeze_axis.append((- (2 - iii)))
start = ikey
stop = (ikey + 1)
else:
(start, stop) = (np.min(ikey), (np.max(ikey) + 1))
np_inds.append((ikey - start))
window.append((start, stop))
if (isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray)):
np_inds[(- 2):] = np.ix_(*np_inds[(- 2):])
return (band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds))<|docstring|>Get indexer for rasterio array.
Parameter
---------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer<|endoftext|>
|
452abbf67dea984d4c0f3d283f98fdbc20995932d1ffd23866b408a55734d9db
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5):
'\n Create a lookup table that approximately multiplies pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value: int = round(((i + unbiasing) * (j + unbiasing)))
self.table_entries[(i, j)] = value
|
Create a lookup table that approximately multiplies pairs of positive integers
:param num_significant_bits: number of bits to preserve when approximating operands.
Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8
:param unbiasing: a value in the range [0,1) that is used to unbias lookup table error
|
python/lookup_tables.py
|
__init__
|
robertmacdavid/approx-upf
| 0
|
python
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5):
'\n Create a lookup table that approximately multiplies pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value: int = round(((i + unbiasing) * (j + unbiasing)))
self.table_entries[(i, j)] = value
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5):
'\n Create a lookup table that approximately multiplies pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value: int = round(((i + unbiasing) * (j + unbiasing)))
self.table_entries[(i, j)] = value<|docstring|>Create a lookup table that approximately multiplies pairs of positive integers
:param num_significant_bits: number of bits to preserve when approximating operands.
Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8
:param unbiasing: a value in the range [0,1) that is used to unbias lookup table error<|endoftext|>
|
4befcec507b22cd40b0788b9fa0a6a5d3f78244c9a2976ae783208a9d318c55f
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5, lookup_value_mantissa_bits: int=8):
'\n Create a lookup table that approximately divides pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n :param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value = ((i + unbiasing) / (j + unbiasing))
exp: int
mantissa: int
if (value < self.MIN_LOOKUP_ENTRY):
exp = 0
mantissa = 0
else:
exp = ((math.floor(math.log(value, 2)) - lookup_value_mantissa_bits) + 1)
mantissa = round((value * (2 ** (- exp))))
self.table_entries[(i, j)] = (mantissa, exp)
|
Create a lookup table that approximately divides pairs of positive integers
:param num_significant_bits: number of bits to preserve when approximating operands.
Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8
:param unbiasing: a value in the range [0,1) that is used to unbias lookup table error
:param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table
|
python/lookup_tables.py
|
__init__
|
robertmacdavid/approx-upf
| 0
|
python
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5, lookup_value_mantissa_bits: int=8):
'\n Create a lookup table that approximately divides pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n :param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value = ((i + unbiasing) / (j + unbiasing))
exp: int
mantissa: int
if (value < self.MIN_LOOKUP_ENTRY):
exp = 0
mantissa = 0
else:
exp = ((math.floor(math.log(value, 2)) - lookup_value_mantissa_bits) + 1)
mantissa = round((value * (2 ** (- exp))))
self.table_entries[(i, j)] = (mantissa, exp)
|
def __init__(self, num_significant_bits: int, unbiasing: float=0.5, lookup_value_mantissa_bits: int=8):
'\n Create a lookup table that approximately divides pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n :param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table\n '
self.num_significant_bits = num_significant_bits
self.table_entries = {}
for i in range((1 << num_significant_bits)):
for j in range((1 << num_significant_bits)):
value = ((i + unbiasing) / (j + unbiasing))
exp: int
mantissa: int
if (value < self.MIN_LOOKUP_ENTRY):
exp = 0
mantissa = 0
else:
exp = ((math.floor(math.log(value, 2)) - lookup_value_mantissa_bits) + 1)
mantissa = round((value * (2 ** (- exp))))
self.table_entries[(i, j)] = (mantissa, exp)<|docstring|>Create a lookup table that approximately divides pairs of positive integers
:param num_significant_bits: number of bits to preserve when approximating operands.
Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8
:param unbiasing: a value in the range [0,1) that is used to unbias lookup table error
:param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table<|endoftext|>
|
b0e7807bb1c33139e77a1638f018df6304704464ae08fc24619a737901096b0d
|
def parseArgs():
'\n Parse arguments from command-line\n\n RETURNS\n -------\n PDBs: list\n list of PDB files\n '
parser = ap.ArgumentParser(description='Script used to clean PDB files for PELE simulation')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-i', '--input', required=True, metavar='FILE', type=str, nargs='*', help='path to PDB files')
optional.add_argument('-O', '--output', metavar='STRING', type=str, help='filename for the output proecessed PDB file', default='')
optional.add_argument('-RN', '--residue_name', metavar='LIST', nargs='*', type=str, help='residue name of modified amino acids', default=[])
parser._action_groups.append(optional)
args = parser.parse_args()
PDBs = storePDBfilenames(args.input, parser)
Output = args.output
return (PDBs, Output, args.residue_name)
|
Parse arguments from command-line
RETURNS
-------
PDBs: list
list of PDB files
|
PELEAnalysis-Processing/Preprocessing/PDBProcessor4PELE.py
|
parseArgs
|
SergiR1996/PELEAnalysis-Processing
| 3
|
python
|
def parseArgs():
'\n Parse arguments from command-line\n\n RETURNS\n -------\n PDBs: list\n list of PDB files\n '
parser = ap.ArgumentParser(description='Script used to clean PDB files for PELE simulation')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-i', '--input', required=True, metavar='FILE', type=str, nargs='*', help='path to PDB files')
optional.add_argument('-O', '--output', metavar='STRING', type=str, help='filename for the output proecessed PDB file', default=)
optional.add_argument('-RN', '--residue_name', metavar='LIST', nargs='*', type=str, help='residue name of modified amino acids', default=[])
parser._action_groups.append(optional)
args = parser.parse_args()
PDBs = storePDBfilenames(args.input, parser)
Output = args.output
return (PDBs, Output, args.residue_name)
|
def parseArgs():
'\n Parse arguments from command-line\n\n RETURNS\n -------\n PDBs: list\n list of PDB files\n '
parser = ap.ArgumentParser(description='Script used to clean PDB files for PELE simulation')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-i', '--input', required=True, metavar='FILE', type=str, nargs='*', help='path to PDB files')
optional.add_argument('-O', '--output', metavar='STRING', type=str, help='filename for the output proecessed PDB file', default=)
optional.add_argument('-RN', '--residue_name', metavar='LIST', nargs='*', type=str, help='residue name of modified amino acids', default=[])
parser._action_groups.append(optional)
args = parser.parse_args()
PDBs = storePDBfilenames(args.input, parser)
Output = args.output
return (PDBs, Output, args.residue_name)<|docstring|>Parse arguments from command-line
RETURNS
-------
PDBs: list
list of PDB files<|endoftext|>
|
98ff134312f5e28abf18b28c74d969697d3ccc5b1033f04e7c76d235d2171126
|
def PDB_processing(PDB_filename, Output, Residue_name):
'\n Opens the PDB file, modifies its content and overwrites it\n in order to be used in a PELE simulation.\n\n PARAMETERS\n ----------\n PDB_filename : string\n filename of the input PDB file that wants to be processed\n Output : string\n filename of the output PDB file after processing\n\n RETURNS\n -------\n PDB modified file\n '
(Non_aminoacid_dict, L_number) = ({}, 0)
(HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list) = (False, False, False, False, False)
if (Output != ''):
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}.pdb'.format(Output), 'wt'))
else:
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}_modified.pdb'.format(PDB_filename[:(- 4)]), 'wt'))
Lines = PDB_original.readlines()
i = 0
(water_i, water_j) = (0, 0)
while (i < len(Lines)):
line = Lines[i]
if ((line[0:3] == 'TER' != (- 1)) or (line[0:3] == 'END')):
PDB_modified.write('TER\n')
elif (line.find('CONECT') != (- 1)):
PDB_modified.write('TER\n')
break
elif ((line[0:4] == 'ATOM') or (line[0:6] == 'HETATM')):
if (((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))) and (Lines[(i + 1)].find('CONECT') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
break
elif ((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
water_i += 1
water_j = 0
else:
if (line[17:20].strip() not in Protein_list):
if (line[17:20].strip() not in Non_aminoacid_dict):
if ((line[17:20].strip() in Residue_name) and (len(Residue_name) != 0)):
PDB_modified.write(line)
else:
Non_aminoacid_dict[line[17:20].strip()] = 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
elif (line[17:20].strip() != Lines[(i - 1)][17:20].strip()):
Non_aminoacid_dict[line.strip()[17:20]] += 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
else:
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
i += 1
continue
if ((line[17:20].strip() == 'HOH') and (Lines[(i - 1)][17:20].strip() != 'HOH')):
water_i += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 0)):
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 1)):
PDB_modified.write(((line[0:12] + '1HW HOH W{:>4}'.format(water_i)) + line[26:]))
elif (line[12:16] == ' HXT'):
PDB_modified.write((((line[0:12] + ' OXT') + line[16:77]) + 'O1-\n'))
HXT_terminal = True
elif ((line[12:16] == ' HA2') and (line[17:20] != 'GLY')):
PDB_modified.write(((line[0:12] + ' HA ') + line[16:]))
if (Gly_to_other_residue == False):
Gly_to_other_residue = list()
if (line[22:26].strip() not in Gly_to_other_residue):
Gly_to_other_residue.append(line[22:26].strip())
elif ((line[12:16] == ' HA ') and (line[17:20] == 'GLY')):
PDB_modified.write(((line[0:12] + ' HA2') + line[16:]))
if (other_residue_to_Gly == False):
other_residue_to_Gly = list()
if (line[22:26].strip() not in other_residue_to_Gly):
other_residue_to_Gly.append(line[22:26].strip())
elif ((line[12:16] == ' HD2') and (line[17:20] == 'ASP')):
PDB_modified.write(((line[0:17] + 'ASH') + line[20:]))
if (ASH_list == False):
ASH_list = list()
if (line[22:26].strip() not in ASH_list):
ASH_list.append(line[22:26].strip())
elif ((line[12:16] == ' HE2') and (line[17:20] == 'GLU')):
PDB_modified.write(((line[0:17] + 'GLH') + line[20:]))
if (GLH_list == False):
GLH_list = list()
if (line[22:26].strip() not in GLH_list):
GLH_list.append(line[22:26].strip())
else:
if (HXT_terminal == True):
print(line)
PDB_modified.write(line)
else:
pass
i += 1
PDB_modified.close()
PDB_original.close()
return (water_i, Non_aminoacid_dict, HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list)
|
Opens the PDB file, modifies its content and overwrites it
in order to be used in a PELE simulation.
PARAMETERS
----------
PDB_filename : string
filename of the input PDB file that wants to be processed
Output : string
filename of the output PDB file after processing
RETURNS
-------
PDB modified file
|
PELEAnalysis-Processing/Preprocessing/PDBProcessor4PELE.py
|
PDB_processing
|
SergiR1996/PELEAnalysis-Processing
| 3
|
python
|
def PDB_processing(PDB_filename, Output, Residue_name):
'\n Opens the PDB file, modifies its content and overwrites it\n in order to be used in a PELE simulation.\n\n PARAMETERS\n ----------\n PDB_filename : string\n filename of the input PDB file that wants to be processed\n Output : string\n filename of the output PDB file after processing\n\n RETURNS\n -------\n PDB modified file\n '
(Non_aminoacid_dict, L_number) = ({}, 0)
(HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list) = (False, False, False, False, False)
if (Output != ):
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}.pdb'.format(Output), 'wt'))
else:
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}_modified.pdb'.format(PDB_filename[:(- 4)]), 'wt'))
Lines = PDB_original.readlines()
i = 0
(water_i, water_j) = (0, 0)
while (i < len(Lines)):
line = Lines[i]
if ((line[0:3] == 'TER' != (- 1)) or (line[0:3] == 'END')):
PDB_modified.write('TER\n')
elif (line.find('CONECT') != (- 1)):
PDB_modified.write('TER\n')
break
elif ((line[0:4] == 'ATOM') or (line[0:6] == 'HETATM')):
if (((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))) and (Lines[(i + 1)].find('CONECT') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
break
elif ((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
water_i += 1
water_j = 0
else:
if (line[17:20].strip() not in Protein_list):
if (line[17:20].strip() not in Non_aminoacid_dict):
if ((line[17:20].strip() in Residue_name) and (len(Residue_name) != 0)):
PDB_modified.write(line)
else:
Non_aminoacid_dict[line[17:20].strip()] = 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
elif (line[17:20].strip() != Lines[(i - 1)][17:20].strip()):
Non_aminoacid_dict[line.strip()[17:20]] += 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
else:
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
i += 1
continue
if ((line[17:20].strip() == 'HOH') and (Lines[(i - 1)][17:20].strip() != 'HOH')):
water_i += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 0)):
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 1)):
PDB_modified.write(((line[0:12] + '1HW HOH W{:>4}'.format(water_i)) + line[26:]))
elif (line[12:16] == ' HXT'):
PDB_modified.write((((line[0:12] + ' OXT') + line[16:77]) + 'O1-\n'))
HXT_terminal = True
elif ((line[12:16] == ' HA2') and (line[17:20] != 'GLY')):
PDB_modified.write(((line[0:12] + ' HA ') + line[16:]))
if (Gly_to_other_residue == False):
Gly_to_other_residue = list()
if (line[22:26].strip() not in Gly_to_other_residue):
Gly_to_other_residue.append(line[22:26].strip())
elif ((line[12:16] == ' HA ') and (line[17:20] == 'GLY')):
PDB_modified.write(((line[0:12] + ' HA2') + line[16:]))
if (other_residue_to_Gly == False):
other_residue_to_Gly = list()
if (line[22:26].strip() not in other_residue_to_Gly):
other_residue_to_Gly.append(line[22:26].strip())
elif ((line[12:16] == ' HD2') and (line[17:20] == 'ASP')):
PDB_modified.write(((line[0:17] + 'ASH') + line[20:]))
if (ASH_list == False):
ASH_list = list()
if (line[22:26].strip() not in ASH_list):
ASH_list.append(line[22:26].strip())
elif ((line[12:16] == ' HE2') and (line[17:20] == 'GLU')):
PDB_modified.write(((line[0:17] + 'GLH') + line[20:]))
if (GLH_list == False):
GLH_list = list()
if (line[22:26].strip() not in GLH_list):
GLH_list.append(line[22:26].strip())
else:
if (HXT_terminal == True):
print(line)
PDB_modified.write(line)
else:
pass
i += 1
PDB_modified.close()
PDB_original.close()
return (water_i, Non_aminoacid_dict, HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list)
|
def PDB_processing(PDB_filename, Output, Residue_name):
'\n Opens the PDB file, modifies its content and overwrites it\n in order to be used in a PELE simulation.\n\n PARAMETERS\n ----------\n PDB_filename : string\n filename of the input PDB file that wants to be processed\n Output : string\n filename of the output PDB file after processing\n\n RETURNS\n -------\n PDB modified file\n '
(Non_aminoacid_dict, L_number) = ({}, 0)
(HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list) = (False, False, False, False, False)
if (Output != ):
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}.pdb'.format(Output), 'wt'))
else:
(PDB_original, PDB_modified) = (open('{}'.format(PDB_filename), 'rt'), open('{}_modified.pdb'.format(PDB_filename[:(- 4)]), 'wt'))
Lines = PDB_original.readlines()
i = 0
(water_i, water_j) = (0, 0)
while (i < len(Lines)):
line = Lines[i]
if ((line[0:3] == 'TER' != (- 1)) or (line[0:3] == 'END')):
PDB_modified.write('TER\n')
elif (line.find('CONECT') != (- 1)):
PDB_modified.write('TER\n')
break
elif ((line[0:4] == 'ATOM') or (line[0:6] == 'HETATM')):
if (((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))) and (Lines[(i + 1)].find('CONECT') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
break
elif ((line.find('2HW HOH') != (- 1)) or (line.find('H2 HOH') != (- 1))):
PDB_modified.write((((line[0:12] + '2HW HOH W{:>4}'.format(water_i)) + line[26:]) + 'TER\n'))
water_i += 1
water_j = 0
else:
if (line[17:20].strip() not in Protein_list):
if (line[17:20].strip() not in Non_aminoacid_dict):
if ((line[17:20].strip() in Residue_name) and (len(Residue_name) != 0)):
PDB_modified.write(line)
else:
Non_aminoacid_dict[line[17:20].strip()] = 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
elif (line[17:20].strip() != Lines[(i - 1)][17:20].strip()):
Non_aminoacid_dict[line.strip()[17:20]] += 1
L_number += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
else:
PDB_modified.write(((line[0:21] + 'L{:>4}'.format(L_number)) + line[26:]))
i += 1
continue
if ((line[17:20].strip() == 'HOH') and (Lines[(i - 1)][17:20].strip() != 'HOH')):
water_i += 1
if (Lines[(i - 1)][0:3] != 'TER'):
PDB_modified.write('TER\n')
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 0)):
PDB_modified.write(((line[0:12] + ' OW HOH W{:>4}'.format(water_i)) + line[26:]))
water_j += 1
elif ((line[17:20].strip() == 'HOH') and (water_j == 1)):
PDB_modified.write(((line[0:12] + '1HW HOH W{:>4}'.format(water_i)) + line[26:]))
elif (line[12:16] == ' HXT'):
PDB_modified.write((((line[0:12] + ' OXT') + line[16:77]) + 'O1-\n'))
HXT_terminal = True
elif ((line[12:16] == ' HA2') and (line[17:20] != 'GLY')):
PDB_modified.write(((line[0:12] + ' HA ') + line[16:]))
if (Gly_to_other_residue == False):
Gly_to_other_residue = list()
if (line[22:26].strip() not in Gly_to_other_residue):
Gly_to_other_residue.append(line[22:26].strip())
elif ((line[12:16] == ' HA ') and (line[17:20] == 'GLY')):
PDB_modified.write(((line[0:12] + ' HA2') + line[16:]))
if (other_residue_to_Gly == False):
other_residue_to_Gly = list()
if (line[22:26].strip() not in other_residue_to_Gly):
other_residue_to_Gly.append(line[22:26].strip())
elif ((line[12:16] == ' HD2') and (line[17:20] == 'ASP')):
PDB_modified.write(((line[0:17] + 'ASH') + line[20:]))
if (ASH_list == False):
ASH_list = list()
if (line[22:26].strip() not in ASH_list):
ASH_list.append(line[22:26].strip())
elif ((line[12:16] == ' HE2') and (line[17:20] == 'GLU')):
PDB_modified.write(((line[0:17] + 'GLH') + line[20:]))
if (GLH_list == False):
GLH_list = list()
if (line[22:26].strip() not in GLH_list):
GLH_list.append(line[22:26].strip())
else:
if (HXT_terminal == True):
print(line)
PDB_modified.write(line)
else:
pass
i += 1
PDB_modified.close()
PDB_original.close()
return (water_i, Non_aminoacid_dict, HXT_terminal, Gly_to_other_residue, other_residue_to_Gly, ASH_list, GLH_list)<|docstring|>Opens the PDB file, modifies its content and overwrites it
in order to be used in a PELE simulation.
PARAMETERS
----------
PDB_filename : string
filename of the input PDB file that wants to be processed
Output : string
filename of the output PDB file after processing
RETURNS
-------
PDB modified file<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.