body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
78356a0a12daa0abbd3aa8318e33c50d1db5624d65770a7f8941b591a62d584f | def white_leds_off(sleep_speed):
'\n Turns off the white LEDs one at a time\n '
sleep_speed = sleep_speed
PYGLOW.led(18, 0)
sleep(sleep_speed)
PYGLOW.led(12, 0)
sleep(sleep_speed)
PYGLOW.led(6, 0)
sleep(sleep_speed) | Turns off the white LEDs one at a time | Programs/inside_out_3.py | white_leds_off | ShineTop/PiGlow | 5 | python | def white_leds_off(sleep_speed):
'\n \n '
sleep_speed = sleep_speed
PYGLOW.led(18, 0)
sleep(sleep_speed)
PYGLOW.led(12, 0)
sleep(sleep_speed)
PYGLOW.led(6, 0)
sleep(sleep_speed) | def white_leds_off(sleep_speed):
'\n \n '
sleep_speed = sleep_speed
PYGLOW.led(18, 0)
sleep(sleep_speed)
PYGLOW.led(12, 0)
sleep(sleep_speed)
PYGLOW.led(6, 0)
sleep(sleep_speed)<|docstring|>Turns off the white LEDs one at a time<|endoftext|> |
f226ef7a4fa8fe59c46d1aa39d4346e45e43a8e355972ee520af009862658cff | def inside_out_3a():
'\n Lights up 1 color at a time\n\n Speed goes from 0.25 to 0.05 in decrements of 0.05\n '
LOGGER.debug('Function: inside_out_3a')
LOGGER.debug('Increasing speed...')
sleep_speed = 0.25
while (sleep_speed > 0.05):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.05 | Lights up 1 color at a time
Speed goes from 0.25 to 0.05 in decrements of 0.05 | Programs/inside_out_3.py | inside_out_3a | ShineTop/PiGlow | 5 | python | def inside_out_3a():
'\n Lights up 1 color at a time\n\n Speed goes from 0.25 to 0.05 in decrements of 0.05\n '
LOGGER.debug('Function: inside_out_3a')
LOGGER.debug('Increasing speed...')
sleep_speed = 0.25
while (sleep_speed > 0.05):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.05 | def inside_out_3a():
'\n Lights up 1 color at a time\n\n Speed goes from 0.25 to 0.05 in decrements of 0.05\n '
LOGGER.debug('Function: inside_out_3a')
LOGGER.debug('Increasing speed...')
sleep_speed = 0.25
while (sleep_speed > 0.05):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.05<|docstring|>Lights up 1 color at a time
Speed goes from 0.25 to 0.05 in decrements of 0.05<|endoftext|> |
306bb68a09a3c43784e302453ee8d64bb9323731502acd0375f55d80909c7d92 | def inside_out_3b():
'\n Sleep_speed goes from 0.05 to 0.01 in decrements of 0.0025\n '
LOGGER.debug('Function: inside_out_3b')
LOGGER.debug('Going fast...')
sleep_speed = 0.05
while (sleep_speed > 0.01):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.0025 | Sleep_speed goes from 0.05 to 0.01 in decrements of 0.0025 | Programs/inside_out_3.py | inside_out_3b | ShineTop/PiGlow | 5 | python | def inside_out_3b():
'\n \n '
LOGGER.debug('Function: inside_out_3b')
LOGGER.debug('Going fast...')
sleep_speed = 0.05
while (sleep_speed > 0.01):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.0025 | def inside_out_3b():
'\n \n '
LOGGER.debug('Function: inside_out_3b')
LOGGER.debug('Going fast...')
sleep_speed = 0.05
while (sleep_speed > 0.01):
LOGGER.debug('The speed is now: %s', sleep_speed)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)
sleep_speed -= 0.0025<|docstring|>Sleep_speed goes from 0.05 to 0.01 in decrements of 0.0025<|endoftext|> |
e552e1d5e551d5000525ecd0d63ad9a63cd0a4323ecdfbaadf8a7e0c41f3abc7 | def inside_out_3c():
'\n Sleep_speed is 0.01. Cycle through the LEDS 20 times\n '
LOGGER.debug('Function: inside_out_3c')
LOGGER.debug('Going faster...')
sleep_speed = 0.01
for i in range(1, 21, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed) | Sleep_speed is 0.01. Cycle through the LEDS 20 times | Programs/inside_out_3.py | inside_out_3c | ShineTop/PiGlow | 5 | python | def inside_out_3c():
'\n \n '
LOGGER.debug('Function: inside_out_3c')
LOGGER.debug('Going faster...')
sleep_speed = 0.01
for i in range(1, 21, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed) | def inside_out_3c():
'\n \n '
LOGGER.debug('Function: inside_out_3c')
LOGGER.debug('Going faster...')
sleep_speed = 0.01
for i in range(1, 21, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)<|docstring|>Sleep_speed is 0.01. Cycle through the LEDS 20 times<|endoftext|> |
3e47a6fdc68791d8afda47e3447c8764478ca2405ed645dd78d462faaf17f45a | def inside_out_3d():
'\n Sleep_speed is 0. Cycle through the LEDS 100 times\n '
LOGGER.debug('Function: inside_out_3d')
LOGGER.debug('Going really fast...')
sleep_speed = 0
for i in range(1, 101, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed) | Sleep_speed is 0. Cycle through the LEDS 100 times | Programs/inside_out_3.py | inside_out_3d | ShineTop/PiGlow | 5 | python | def inside_out_3d():
'\n \n '
LOGGER.debug('Function: inside_out_3d')
LOGGER.debug('Going really fast...')
sleep_speed = 0
for i in range(1, 101, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed) | def inside_out_3d():
'\n \n '
LOGGER.debug('Function: inside_out_3d')
LOGGER.debug('Going really fast...')
sleep_speed = 0
for i in range(1, 101, 1):
LOGGER.debug('counter = %s', i)
white_leds_on(sleep_speed)
blue_leds_on(sleep_speed)
green_leds_on(sleep_speed)
yellow_leds_on(sleep_speed)
orange_leds_on(sleep_speed)
red_leds_on(sleep_speed)
white_leds_off(sleep_speed)
blue_leds_off(sleep_speed)
green_leds_off(sleep_speed)
yellow_leds_off(sleep_speed)
orange_leds_off(sleep_speed)
red_leds_off(sleep_speed)<|docstring|>Sleep_speed is 0. Cycle through the LEDS 100 times<|endoftext|> |
1b04047dc87827430e12dc820ab0102281b6ff1f124665c1f4e4c9ef3a69a8ef | def main():
'\n The main function\n '
LOGGER.debug('START')
inside_out_3a()
inside_out_3b()
inside_out_3c()
inside_out_3d()
LOGGER.debug('END')
delete_empty_logs(LOG)
stop() | The main function | Programs/inside_out_3.py | main | ShineTop/PiGlow | 5 | python | def main():
'\n \n '
LOGGER.debug('START')
inside_out_3a()
inside_out_3b()
inside_out_3c()
inside_out_3d()
LOGGER.debug('END')
delete_empty_logs(LOG)
stop() | def main():
'\n \n '
LOGGER.debug('START')
inside_out_3a()
inside_out_3b()
inside_out_3c()
inside_out_3d()
LOGGER.debug('END')
delete_empty_logs(LOG)
stop()<|docstring|>The main function<|endoftext|> |
df9c431a79791e8e53d2cde300f7d9848be8600a1030f21da6a94fa753e6daf2 | def sparql(self, query_string):
'Execute the given SPARQL query on the backend.\n\n Args:\n query_string (): The SPARQL query as a string.\n '
return self._sparql(query_string=query_string.replace(str(self.root), str(uuid.UUID(int=0)))) | Execute the given SPARQL query on the backend.
Args:
query_string (): The SPARQL query as a string. | osp/core/session/sparql_backend.py | sparql | simphony/osp-core | 17 | python | def sparql(self, query_string):
'Execute the given SPARQL query on the backend.\n\n Args:\n query_string (): The SPARQL query as a string.\n '
return self._sparql(query_string=query_string.replace(str(self.root), str(uuid.UUID(int=0)))) | def sparql(self, query_string):
'Execute the given SPARQL query on the backend.\n\n Args:\n query_string (): The SPARQL query as a string.\n '
return self._sparql(query_string=query_string.replace(str(self.root), str(uuid.UUID(int=0))))<|docstring|>Execute the given SPARQL query on the backend.
Args:
query_string (): The SPARQL query as a string.<|endoftext|> |
5695ca1311f4d59082373b4b373b2e54c45152201c1b4b7cdfc7bc901ba4b6f0 | @abstractmethod
def _sparql(self, query_string):
"The abstract method performing the query and returning results.\n\n Args:\n query_string (str): A string with the SPARQL query to perform.\n\n Returns:\n SparqlResult: A SparqlResult object, which can be iterated to\n obtain he output rows. Then for each `row`, the value for each\n query variable can be retrieved as follows: `row['variable']`.\n "
pass | The abstract method performing the query and returning results.
Args:
query_string (str): A string with the SPARQL query to perform.
Returns:
SparqlResult: A SparqlResult object, which can be iterated to
obtain he output rows. Then for each `row`, the value for each
query variable can be retrieved as follows: `row['variable']`. | osp/core/session/sparql_backend.py | _sparql | simphony/osp-core | 17 | python | @abstractmethod
def _sparql(self, query_string):
"The abstract method performing the query and returning results.\n\n Args:\n query_string (str): A string with the SPARQL query to perform.\n\n Returns:\n SparqlResult: A SparqlResult object, which can be iterated to\n obtain he output rows. Then for each `row`, the value for each\n query variable can be retrieved as follows: `row['variable']`.\n "
pass | @abstractmethod
def _sparql(self, query_string):
"The abstract method performing the query and returning results.\n\n Args:\n query_string (str): A string with the SPARQL query to perform.\n\n Returns:\n SparqlResult: A SparqlResult object, which can be iterated to\n obtain he output rows. Then for each `row`, the value for each\n query variable can be retrieved as follows: `row['variable']`.\n "
pass<|docstring|>The abstract method performing the query and returning results.
Args:
query_string (str): A string with the SPARQL query to perform.
Returns:
SparqlResult: A SparqlResult object, which can be iterated to
obtain he output rows. Then for each `row`, the value for each
query variable can be retrieved as follows: `row['variable']`.<|endoftext|> |
eb3a72d7fbbbe5e781b2ef091d22759be1848c8baa151c0e0f92350ee4ae46ca | def __init__(self, session):
'Initialize the object.'
self.session = session | Initialize the object. | osp/core/session/sparql_backend.py | __init__ | simphony/osp-core | 17 | python | def __init__(self, session):
self.session = session | def __init__(self, session):
self.session = session<|docstring|>Initialize the object.<|endoftext|> |
be8576f6877075ad0ec1b28f474346f7ee9c4025670b4bf5621f038d0ec2c713 | @abstractmethod
def close(self):
'Close the connection.' | Close the connection. | osp/core/session/sparql_backend.py | close | simphony/osp-core | 17 | python | @abstractmethod
def close(self):
| @abstractmethod
def close(self):
<|docstring|>Close the connection.<|endoftext|> |
4ba18f61165ce42a43cc83170898edaf56b7e1b9857853779fc6d4ab8236c403 | @abstractmethod
def __iter__(self):
'Iterate the result.' | Iterate the result. | osp/core/session/sparql_backend.py | __iter__ | simphony/osp-core | 17 | python | @abstractmethod
def __iter__(self):
| @abstractmethod
def __iter__(self):
<|docstring|>Iterate the result.<|endoftext|> |
88f608d674b455c94d20605ef77a229197754c0f8e1df428b4c5e067d4bb16d0 | @abstractmethod
def __len__(self):
'Return the number of elements in the result.' | Return the number of elements in the result. | osp/core/session/sparql_backend.py | __len__ | simphony/osp-core | 17 | python | @abstractmethod
def __len__(self):
| @abstractmethod
def __len__(self):
<|docstring|>Return the number of elements in the result.<|endoftext|> |
df4a21285652496d388ad153c8bbd6a2f45a0ca08d18bba28a030cb4f586d610 | def __enter__(self):
'Enter the with statement.'
return self | Enter the with statement. | osp/core/session/sparql_backend.py | __enter__ | simphony/osp-core | 17 | python | def __enter__(self):
return self | def __enter__(self):
return self<|docstring|>Enter the with statement.<|endoftext|> |
5633719dec69784fac5e55a2b7b76c0372c07b60718bc0e2f255100adddbf728 | def __call__(self, **kwargs):
'Add kwargs to datatypes when class is called.'
def add_datatypes(x):
x.datatypes = kwargs
return x
return map(add_datatypes, self.__iter__()) | Add kwargs to datatypes when class is called. | osp/core/session/sparql_backend.py | __call__ | simphony/osp-core | 17 | python | def __call__(self, **kwargs):
def add_datatypes(x):
x.datatypes = kwargs
return x
return map(add_datatypes, self.__iter__()) | def __call__(self, **kwargs):
def add_datatypes(x):
x.datatypes = kwargs
return x
return map(add_datatypes, self.__iter__())<|docstring|>Add kwargs to datatypes when class is called.<|endoftext|> |
07b4cd5e992d3c5ac36b6d93bb5d5b4da35e2627bdde27530818a46aa2d878c4 | def __exit__(self, exc_type, exc_val, exc_tb):
'Close the connection.'
self.close() | Close the connection. | osp/core/session/sparql_backend.py | __exit__ | simphony/osp-core | 17 | python | def __exit__(self, exc_type, exc_val, exc_tb):
self.close() | def __exit__(self, exc_type, exc_val, exc_tb):
self.close()<|docstring|>Close the connection.<|endoftext|> |
137d27744cdae657771a85ae12ca32f5eeea09583b12184997d9ee8545a28de5 | def __init__(self, session, datatypes=None):
'Initialize the object.'
self.session = session
self.datatypes = (datatypes or dict()) | Initialize the object. | osp/core/session/sparql_backend.py | __init__ | simphony/osp-core | 17 | python | def __init__(self, session, datatypes=None):
self.session = session
self.datatypes = (datatypes or dict()) | def __init__(self, session, datatypes=None):
self.session = session
self.datatypes = (datatypes or dict())<|docstring|>Initialize the object.<|endoftext|> |
03b620ac231cc016e71785ed315d710535361473e5fc2ee22a6ab92b5472d0e3 | @abstractmethod
def _get(self, variable_name):
'Get the value of the given variable.'
pass | Get the value of the given variable. | osp/core/session/sparql_backend.py | _get | simphony/osp-core | 17 | python | @abstractmethod
def _get(self, variable_name):
pass | @abstractmethod
def _get(self, variable_name):
pass<|docstring|>Get the value of the given variable.<|endoftext|> |
4cd4b096715b944b64e6a40cd54b1713e7d92d3e3df7b7d4f145657eaaa14631 | def __getitem__(self, variable_name):
'Get the value of the given variable.\n\n Handles wrapper IRIs and datatype conversion.\n '
iri = self._get(variable_name)
if ((iri is not None) and iri.startswith(CUDS_IRI_PREFIX) and (uid_from_iri(iri) == uuid.UUID(int=0))):
iri = iri_from_uid(self.session.root)
return self._check_datatype(variable_name, iri) | Get the value of the given variable.
Handles wrapper IRIs and datatype conversion. | osp/core/session/sparql_backend.py | __getitem__ | simphony/osp-core | 17 | python | def __getitem__(self, variable_name):
'Get the value of the given variable.\n\n Handles wrapper IRIs and datatype conversion.\n '
iri = self._get(variable_name)
if ((iri is not None) and iri.startswith(CUDS_IRI_PREFIX) and (uid_from_iri(iri) == uuid.UUID(int=0))):
iri = iri_from_uid(self.session.root)
return self._check_datatype(variable_name, iri) | def __getitem__(self, variable_name):
'Get the value of the given variable.\n\n Handles wrapper IRIs and datatype conversion.\n '
iri = self._get(variable_name)
if ((iri is not None) and iri.startswith(CUDS_IRI_PREFIX) and (uid_from_iri(iri) == uuid.UUID(int=0))):
iri = iri_from_uid(self.session.root)
return self._check_datatype(variable_name, iri)<|docstring|>Get the value of the given variable.
Handles wrapper IRIs and datatype conversion.<|endoftext|> |
576ea3a6ed8dcaa1c9aa9caa8a7da1d74887d99ae3c8c227392b23d4679ac914 | def _check_datatype(self, variable_name, iri):
'Check if iri shall be converted to a certain datatype.\n\n The `variable_name` is checked against the dictionary `self.datatypes`,\n and if a datatype is defined there for such variable name, then the\n function returns the value of the variable converted to such datatype.\n\n Args:\n variable_name (str): the variable of the SPARQL query on which the\n check should be performed.\n iri (Union[URIRef, Literal]): a result returned by the SPARQL\n query for such variable name. This is what is then converted\n to the desired datatype.\n\n Returns:\n Any: the result of the SPARQL query converted to the desired\n datatype.\n\n Raises:\n TypeError: when the an invalid string is specified as target\n datatype or the target datatype is neither a string or a\n callable.\n\n ValueError: when there is an exception on the conversion process.\n '
if ((iri is None) or (not self.datatypes)):
return iri
variable_type = self.datatypes.get(variable_name)
if (variable_type is None):
return iri
unknown_type_error = TypeError(f'Variable type {variable_type} not understood.')
try:
if (variable_type == 'cuds'):
cuds_query = self.session.load_from_iri(iri)
return cuds_query.first()
elif callable(variable_type):
return variable_type(iri)
else:
raise unknown_type_error
except Exception as exception:
if (exception is not unknown_type_error):
raise ValueError(exception) from exception
else:
raise unknown_type_error | Check if iri shall be converted to a certain datatype.
The `variable_name` is checked against the dictionary `self.datatypes`,
and if a datatype is defined there for such variable name, then the
function returns the value of the variable converted to such datatype.
Args:
variable_name (str): the variable of the SPARQL query on which the
check should be performed.
iri (Union[URIRef, Literal]): a result returned by the SPARQL
query for such variable name. This is what is then converted
to the desired datatype.
Returns:
Any: the result of the SPARQL query converted to the desired
datatype.
Raises:
TypeError: when the an invalid string is specified as target
datatype or the target datatype is neither a string or a
callable.
ValueError: when there is an exception on the conversion process. | osp/core/session/sparql_backend.py | _check_datatype | simphony/osp-core | 17 | python | def _check_datatype(self, variable_name, iri):
'Check if iri shall be converted to a certain datatype.\n\n The `variable_name` is checked against the dictionary `self.datatypes`,\n and if a datatype is defined there for such variable name, then the\n function returns the value of the variable converted to such datatype.\n\n Args:\n variable_name (str): the variable of the SPARQL query on which the\n check should be performed.\n iri (Union[URIRef, Literal]): a result returned by the SPARQL\n query for such variable name. This is what is then converted\n to the desired datatype.\n\n Returns:\n Any: the result of the SPARQL query converted to the desired\n datatype.\n\n Raises:\n TypeError: when the an invalid string is specified as target\n datatype or the target datatype is neither a string or a\n callable.\n\n ValueError: when there is an exception on the conversion process.\n '
if ((iri is None) or (not self.datatypes)):
return iri
variable_type = self.datatypes.get(variable_name)
if (variable_type is None):
return iri
unknown_type_error = TypeError(f'Variable type {variable_type} not understood.')
try:
if (variable_type == 'cuds'):
cuds_query = self.session.load_from_iri(iri)
return cuds_query.first()
elif callable(variable_type):
return variable_type(iri)
else:
raise unknown_type_error
except Exception as exception:
if (exception is not unknown_type_error):
raise ValueError(exception) from exception
else:
raise unknown_type_error | def _check_datatype(self, variable_name, iri):
'Check if iri shall be converted to a certain datatype.\n\n The `variable_name` is checked against the dictionary `self.datatypes`,\n and if a datatype is defined there for such variable name, then the\n function returns the value of the variable converted to such datatype.\n\n Args:\n variable_name (str): the variable of the SPARQL query on which the\n check should be performed.\n iri (Union[URIRef, Literal]): a result returned by the SPARQL\n query for such variable name. This is what is then converted\n to the desired datatype.\n\n Returns:\n Any: the result of the SPARQL query converted to the desired\n datatype.\n\n Raises:\n TypeError: when the an invalid string is specified as target\n datatype or the target datatype is neither a string or a\n callable.\n\n ValueError: when there is an exception on the conversion process.\n '
if ((iri is None) or (not self.datatypes)):
return iri
variable_type = self.datatypes.get(variable_name)
if (variable_type is None):
return iri
unknown_type_error = TypeError(f'Variable type {variable_type} not understood.')
try:
if (variable_type == 'cuds'):
cuds_query = self.session.load_from_iri(iri)
return cuds_query.first()
elif callable(variable_type):
return variable_type(iri)
else:
raise unknown_type_error
except Exception as exception:
if (exception is not unknown_type_error):
raise ValueError(exception) from exception
else:
raise unknown_type_error<|docstring|>Check if iri shall be converted to a certain datatype.
The `variable_name` is checked against the dictionary `self.datatypes`,
and if a datatype is defined there for such variable name, then the
function returns the value of the variable converted to such datatype.
Args:
variable_name (str): the variable of the SPARQL query on which the
check should be performed.
iri (Union[URIRef, Literal]): a result returned by the SPARQL
query for such variable name. This is what is then converted
to the desired datatype.
Returns:
Any: the result of the SPARQL query converted to the desired
datatype.
Raises:
TypeError: when the an invalid string is specified as target
datatype or the target datatype is neither a string or a
callable.
ValueError: when there is an exception on the conversion process.<|endoftext|> |
24214185330c6afe9b113af9e93b05724c309e1bcbb3f9658c166c1bdc9f929f | def hseinv(path):
'hseinv\n\n Data loads lazily. Type data(hseinv) into the console.\n\n A data.frame with 42 rows and 14 variables:\n\n - year. 1947-1988\n\n - inv. real housing inv, millions $\n\n - pop. population, 1000s\n\n - price. housing price index; 1982 = 1\n\n - linv. log(inv)\n\n - lpop. log(pop)\n\n - lprice. log(price)\n\n - t. time trend: t=1,...,42\n\n - invpc. per capita inv: inv/pop\n\n - linvpc. log(invpc)\n\n - lprice\\_1. lprice[\\_n-1]\n\n - linvpc\\_1. linvpc[\\_n-1]\n\n - gprice. lprice - lprice\\_1\n\n - ginvpc. linvpc - linvpc\\_1\n\nhttps://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_\n isbn_issn=9781111531041\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `hseinv.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 42 rows and 14 columns and\n dictionary `metadata` of column headers (feature names).\n '
import pandas as pd
path = os.path.expanduser(path)
filename = 'hseinv.csv'
if (not os.path.exists(os.path.join(path, filename))):
url = 'http://dustintran.com/data/r/wooldridge/hseinv.csv'
maybe_download_and_extract(path, url, save_file_name='hseinv.csv', resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return (x_train, metadata) | hseinv
Data loads lazily. Type data(hseinv) into the console.
A data.frame with 42 rows and 14 variables:
- year. 1947-1988
- inv. real housing inv, millions $
- pop. population, 1000s
- price. housing price index; 1982 = 1
- linv. log(inv)
- lpop. log(pop)
- lprice. log(price)
- t. time trend: t=1,...,42
- invpc. per capita inv: inv/pop
- linvpc. log(invpc)
- lprice\_1. lprice[\_n-1]
- linvpc\_1. linvpc[\_n-1]
- gprice. lprice - lprice\_1
- ginvpc. linvpc - linvpc\_1
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `hseinv.csv`.
Returns:
Tuple of np.ndarray `x_train` with 42 rows and 14 columns and
dictionary `metadata` of column headers (feature names). | observations/r/hseinv.py | hseinv | hajime9652/observations | 199 | python | def hseinv(path):
'hseinv\n\n Data loads lazily. Type data(hseinv) into the console.\n\n A data.frame with 42 rows and 14 variables:\n\n - year. 1947-1988\n\n - inv. real housing inv, millions $\n\n - pop. population, 1000s\n\n - price. housing price index; 1982 = 1\n\n - linv. log(inv)\n\n - lpop. log(pop)\n\n - lprice. log(price)\n\n - t. time trend: t=1,...,42\n\n - invpc. per capita inv: inv/pop\n\n - linvpc. log(invpc)\n\n - lprice\\_1. lprice[\\_n-1]\n\n - linvpc\\_1. linvpc[\\_n-1]\n\n - gprice. lprice - lprice\\_1\n\n - ginvpc. linvpc - linvpc\\_1\n\nhttps://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_\n isbn_issn=9781111531041\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `hseinv.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 42 rows and 14 columns and\n dictionary `metadata` of column headers (feature names).\n '
import pandas as pd
path = os.path.expanduser(path)
filename = 'hseinv.csv'
if (not os.path.exists(os.path.join(path, filename))):
url = 'http://dustintran.com/data/r/wooldridge/hseinv.csv'
maybe_download_and_extract(path, url, save_file_name='hseinv.csv', resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return (x_train, metadata) | def hseinv(path):
'hseinv\n\n Data loads lazily. Type data(hseinv) into the console.\n\n A data.frame with 42 rows and 14 variables:\n\n - year. 1947-1988\n\n - inv. real housing inv, millions $\n\n - pop. population, 1000s\n\n - price. housing price index; 1982 = 1\n\n - linv. log(inv)\n\n - lpop. log(pop)\n\n - lprice. log(price)\n\n - t. time trend: t=1,...,42\n\n - invpc. per capita inv: inv/pop\n\n - linvpc. log(invpc)\n\n - lprice\\_1. lprice[\\_n-1]\n\n - linvpc\\_1. linvpc[\\_n-1]\n\n - gprice. lprice - lprice\\_1\n\n - ginvpc. linvpc - linvpc\\_1\n\nhttps://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_\n isbn_issn=9781111531041\n\n Args:\n\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there.\n Filename is `hseinv.csv`.\n\n Returns:\n\n Tuple of np.ndarray `x_train` with 42 rows and 14 columns and\n dictionary `metadata` of column headers (feature names).\n '
import pandas as pd
path = os.path.expanduser(path)
filename = 'hseinv.csv'
if (not os.path.exists(os.path.join(path, filename))):
url = 'http://dustintran.com/data/r/wooldridge/hseinv.csv'
maybe_download_and_extract(path, url, save_file_name='hseinv.csv', resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return (x_train, metadata)<|docstring|>hseinv
Data loads lazily. Type data(hseinv) into the console.
A data.frame with 42 rows and 14 variables:
- year. 1947-1988
- inv. real housing inv, millions $
- pop. population, 1000s
- price. housing price index; 1982 = 1
- linv. log(inv)
- lpop. log(pop)
- lprice. log(price)
- t. time trend: t=1,...,42
- invpc. per capita inv: inv/pop
- linvpc. log(invpc)
- lprice\_1. lprice[\_n-1]
- linvpc\_1. linvpc[\_n-1]
- gprice. lprice - lprice\_1
- ginvpc. linvpc - linvpc\_1
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `hseinv.csv`.
Returns:
Tuple of np.ndarray `x_train` with 42 rows and 14 columns and
dictionary `metadata` of column headers (feature names).<|endoftext|> |
@click.command()
@click.pass_context
@click.option('--lsp-spec-version', default=constants.LSP_SPEC_VERSION)
@click.option('--lsp-repo', default=constants.LSP_REPO)
@click.option('--lsp-committish', default=constants.LSP_COMMIT)
@click.option('--vlspn-repo', default=constants.VLSPN_REPO)
@click.option('--vlspn-committish', default=constants.VLSPN_COMMIT)
def lsp(ctx: Context, lsp_spec_version: Text, lsp_repo: Text, lsp_committish: Text, vlspn_repo: Text, vlspn_committish: Text):
    """Generate a JSON schema from:

    - the Language Server Protocol (LSP) specification
    - the vscode-languageserver-node reference implementation

    Terminates the process via sys.exit with whatever gen.generate() returns.
    """
    # Fall back to the default spec version when the requested one is not known.
    lsp_spec = (CONVENTIONS.get(lsp_spec_version) or CONVENTIONS[constants.LSP_SPEC_VERSION])
    assert lsp_spec, f"Couldn't find spec {lsp_spec_version}"
    # NOTE(review): ctx.obj is presumably populated by the parent CLI group — confirm.
    assert ctx.obj.workdir, 'Need a working directory'
    assert ctx.obj.output, 'Need an output directory'
    assert ctx.obj.log, 'Need a log'
    gen = SpecGenerator(log=ctx.obj.log, workdir=ctx.obj.workdir, output=ctx.obj.output, lsp_spec=lsp_spec, lsp_repo=lsp_repo, lsp_committish=lsp_committish, vlspn_repo=vlspn_repo, vlspn_committish=vlspn_committish)
    # The return value of generate() becomes the process exit status.
    sys.exit(gen.generate())
def classifier_iterator():
    """Return the classifier classes under test (SVDD and SVM)."""
    under_test = (classifiers.SVDD, classifiers.SVM)
    return under_test
@pytest.mark.parametrize('data', MULTICLASS_Y)
def test_label_transformation(data):
    """Check multiclass2one_vs_all against precomputed one-vs-all labels."""
    labels = data['labels']
    # Some fixtures also specify which class is the "default" (negative) one.
    if 'default' in data:
        converted = classifiers.multiclass2one_vs_all(labels, data['default'])
    else:
        converted = classifiers.multiclass2one_vs_all(labels)
    assert converted == data['expected']
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_oneclass(classifier, dataset):
    """Perform one-class classification: fit on all-positive labels, then predict."""
    X, y = dataset
    if isinstance(X, list):
        X = np.array(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
    # One-class setting: every training sample belongs to the single positive class.
    y_train = np.ones(X_train.shape[0])
    model = classifier()
    try:
        model.fit(X_train, y_train)
    except RuntimeError as exn:
        pytest.skip('fit method did not work: %s' % exn)
    model.predict(X_test)
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', two_class_generator())
def test_twoclasses(classifier, dataset):
    """Perform two-class classification and sanity-check the ROC AUC.

    (The docstring previously claimed one-class classification; this test
    trains on two-class labels and evaluates the resulting ROC curve.)
    """
    X, y = dataset
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
    classif = classifier()
    try:
        classif.fit(X_train, y_train)
    except RuntimeError as exn:
        pytest.skip('fit method did not work: %s' % exn)
    y_pred = classif.predict(X_test)
    # Smoke check: building the confusion matrix must not raise.
    confusion_matrix(y_test, y_pred)
    # Orient scores so higher means "positive class" before computing the ROC.
    if isinstance(classif, classifiers.SVDD):
        y_pred = 1 - classif.decision_function(X_test)
    elif isinstance(classif, classifiers.SVM):
        y_pred = classif.decision_function(X_test)
    fprs, tprs, _ = roc_curve(y_test, y_pred)
    aur = auc(fprs, tprs)
    # AUC can be NaN for degenerate splits (e.g. single-class y_test); only
    # bound-check it otherwise (replaces the former `if isnan: pass / else` shape).
    if not np.isnan(aur):
        assert 0 <= aur <= 1
@pytest.mark.parametrize('classifier', classifier_iterator())
@pytest.mark.parametrize('dataset', multiclass_generator())
def test_multiclass(classifier, dataset):
    """Perform multiclass classification.

    (The docstring previously claimed one-class classification; the datasets
    come from multiclass_generator and keep their multiclass labels.)
    """
    X, y = dataset
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
    classif = classifier()
    try:
        classif.fit(X_train, y_train)
    except RuntimeError as exn:
        pytest.skip('fit method did not work: %s' % exn)
    y_pred = classif.predict(X_test)
    # Smoke check: building the confusion matrix must not raise.
    confusion_matrix(y_test, y_pred)
@pytest.mark.parametrize('dataset', chain(two_class_generator(), multiclass_generator()))
def test_svdd(dataset):
    """Test SVDD specificities: non-negative multipliers, positive radii, distances.

    Bug fix: the per-sample distance assertion previously passed a generator
    expression to np.all(); a generator object is always truthy, so that
    assertion could never fail. It now uses the builtin all().
    """
    X, y = dataset
    X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)
    svdd = classifiers.SVDD()
    try:
        svdd.fit(X_train, y_train)
    except RuntimeError as exn:
        pytest.skip('optimization did not work: %s' % exn)
    if svdd.hypersphere_nb == 1:
        # Lagrange multipliers must be non-negative, the hypersphere non-degenerate.
        assert np.all(svdd.alphas_ >= 0)
        assert svdd.radius_ > 0
    else:
        # Multi-sphere case: every per-class SVDD must satisfy the same invariants.
        for sub_svdd in svdd.individual_svdd.values():
            assert np.all(sub_svdd.alphas_ >= 0)
            assert sub_svdd.radius_ > 0
    # builtin all() actually consumes the generator (np.all would not).
    assert all(svdd.dist_center_training_sample(r) >= 0 for r in range(len(X_train)))
    for X in (X_train, X_test, None):
        assert np.all(svdd.dist_all_centers(X) >= 0)
        assert np.all(svdd.relative_dist_all_centers(X) >= 0)
@pytest.mark.skip(reason='no 2D-array input')
@pytest.mark.parametrize('classifier', classifier_iterator())
def test_sklearn_compatibility(classifier):
    """Run scikit-learn's estimator compatibility checks.

    Skipped: these estimators do not accept the plain 2D-array input that
    check_estimator requires.
    """
    check_estimator(classifier)
def add_module(self, module: MPIKernelModule):
    """Register *module* and create its message queues.

    Two queues are created per module, keyed '<name>.out' and '<name>.in',
    so messages can be exchanged between the kernel and the module.
    """
    for direction in ('out', 'in'):
        self._queues['%s.%s' % (module.name, direction)] = SimpleQueue()
    self._modules.append(module)
def broadcast_message(self, message: Any):
    """Send a message to any registered handlers.

    A module may receive a message on its MPI tag and then broadcast it to
    the kernel for other modules to receive via the kernel.

    NOTE(review): intentionally a no-op stub — no handler dispatch is
    implemented here yet.
    """
    pass
def run(self):
    """Main kernel loop.

    Will initiate module run methods that may set up threads to listen on
    various MPI tags to process messages, plus a thread to monitor each
    module's queues and forward messages.

    NOTE(review): currently only logs startup — the loop described above is
    not implemented in this body yet.
    """
    logging.info('Kernel running')
@st.cache
def cached_file(file: str) -> pd.DataFrame:
    """Load the protein table from *file* once; Streamlit caches the result.

    Args:
        file (str): Path to an HDF file containing a 'protein_table' key.

    Returns:
        pd.DataFrame: The protein table.
    """
    return pd.read_hdf(file, 'protein_table')
def readable_files_from_yaml(results_yaml: dict) -> list:
    """Return all readable result-file paths referenced by a results.yaml.

    The results path comes first, followed by each raw file path converted
    to its '.ms_data.hdf' counterpart; paths that do not exist are dropped.

    Args:
        results_yaml (dict): Results dictionary.

    Returns:
        list: Existing file paths.
    """
    experiment = results_yaml['experiment']
    candidates = [experiment['results_path']]
    for path in experiment['file_paths']:
        candidates.append(os.path.splitext(path)[0] + '.ms_data.hdf')
    return [path for path in candidates if os.path.exists(path)]
def get_table_download_link(df: pd.DataFrame, name: str):
    """Render a Streamlit link that downloads *df* as a CSV named *name*.

    Args:
        df (pd.DataFrame): Pandas DataFrame to be downloaded.
        name (str): File name used in the link's download attribute.
    """
    csv = df.to_csv(index=False)
    # Embed the CSV directly in the page as a base64 data URI.
    b64 = base64.b64encode(csv.encode()).decode()
    # Bug fix: the download attribute used to be "%{name}", which prefixed
    # every downloaded file name with a literal '%'.
    href = f'<a href="data:file/csv;base64,{b64}" download="{name}" >Download as *.csv</a>'
    st.markdown('')
    st.markdown(href, unsafe_allow_html=True)
def make_df_downloadble(df: pd.DataFrame, file: str):
    """Show a Streamlit button that renders a CSV download link for *df*.

    (Docstring fix: this uses st.button, not a checkbox as previously stated.)

    Args:
        df (pd.DataFrame): DataFrame to offer for download.
        file (str): Path whose basename (without extension) names the CSV.
    """
    if st.button('Create download link'):
        # Derive "<basename>.csv" from the source file path.
        file_name = os.path.splitext(os.path.split(file)[-1])[0] + '.csv'
        get_table_download_link(df, file_name)
def ion_plot(ms_file: MS_Data_File, options: list):
    """Display summary statistics (mass offset in ppm) for matched ions.

    Args:
        ms_file (MS_Data_File): MS data file to read the 'ions' dataset from.
        options (list): Available datasets; the plot is only offered when
            'ions' is present.
    """
    if ('ions' in options):
        if st.button('Ion calibration'):
            with st.spinner('Creating plot.'):
                ions = ms_file.read(dataset_name='ions')
                # Relative mass error in ppm between database and measured ion mass.
                delta_ppm = (((ions['db_mass'] - ions['ion_mass']) / ((ions['db_mass'] + ions['ion_mass']) / 2)) * 1000000.0).values
                (counts, bins) = np.histogram(delta_ppm, bins=100, density=True)
                # NOTE(review): centers computed from bins[1:] look shifted by one
                # full bin; bins[:-1] + width/2 would be the usual centers — confirm.
                bin_edges = (bins[1:] + ((bins[1] - bins[0]) / 2))
                # 1-Da-wide bins across the observed database mass range.
                bins = np.arange(ions['db_mass'].min(), ions['db_mass'].max(), 1)
                offset = stats.binned_statistic(ions['ion_mass'].values, delta_ppm, 'mean', bins=bins)
                counts_ = stats.binned_statistic(ions['ion_mass'].values, delta_ppm, 'count', bins=bins)
                counts_ = counts_.statistic
                fig = make_subplots(rows=1, cols=2, column_widths=[0.8, 0.2], subplot_titles=('Mean ion offset (ppm) over m/z', 'Histogram of Offset (ppm)'))
                # Marker opacity encodes (sqrt-scaled) ion counts per mass bin.
                fig.add_trace(go.Scatter(x=offset.bin_edges[1:], y=offset.statistic, marker_color='#17212b', mode='markers', marker={'opacity': np.sqrt((counts_ / np.max(counts_)))}), row=1, col=1)
                fig.add_hline(y=0, line_color='black')
                fig.add_bar(y=counts, x=bin_edges, row=1, col=2, marker_color='#17212b')
                fig.update_layout(showlegend=False)
                st.write(fig)
def multiple_file_check(param: list) -> bool:
    """Return True when *param* holds more than one entry.

    Otherwise a Streamlit info box is shown and False is returned.

    Args:
        param (list): Items (e.g. sample columns) to count.

    Returns:
        bool: Whether more than one entry is present.
    """
    if len(param) > 1:
        return True
    st.info("Only one file present, can't perform operation.")
    return False
94f1ef5e3b9cc63a1b922b3189399bd5a152ca86249fd8272cb793ac564b3b41 | def correlation_heatmap(file: str, options: list):
'Plots a correlation heatmap of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Correlation heatmap'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
df = np.log(df[cols])
corr = df.corr()
fig = make_subplots(rows=1, cols=1)
fig.add_trace(trace=go.Heatmap(z=corr.values, x=corr.index.values, y=corr.columns.values, colorscale='Greys'))
fig.update_layout(height=600, width=600)
st.write(fig) | Plots a correlation heatmap of the proteins.
Args:
file (str): Path to file.
options (list): List of plot options. | alphapept/gui/results.py | correlation_heatmap | MannLabs/alphapept | 97 | python | def correlation_heatmap(file: str, options: list):
'Plots a correlation heatmap of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Correlation heatmap'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
df = np.log(df[cols])
corr = df.corr()
fig = make_subplots(rows=1, cols=1)
fig.add_trace(trace=go.Heatmap(z=corr.values, x=corr.index.values, y=corr.columns.values, colorscale='Greys'))
fig.update_layout(height=600, width=600)
st.write(fig) | def correlation_heatmap(file: str, options: list):
'Plots a correlation heatmap of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Correlation heatmap'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
df = np.log(df[cols])
corr = df.corr()
fig = make_subplots(rows=1, cols=1)
fig.add_trace(trace=go.Heatmap(z=corr.values, x=corr.index.values, y=corr.columns.values, colorscale='Greys'))
fig.update_layout(height=600, width=600)
st.write(fig)<|docstring|>Plots a correlation heatmap of the proteins.
Args:
file (str): Path to file.
options (list): List of plot options.<|endoftext|> |
3a44066d9e33ec2cb96634946e89787b25dceaf2bf7e33ee59ab432a512f9a00 | def pca_plot(file: str, options: list):
'Plots a PCA plot of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('PCA'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
pca = PCA(n_components=2)
components = pca.fit_transform(df[cols].fillna(0).T)
plot_df = pd.DataFrame(components, columns=['Component 1', 'Component 2'])
plot_df['Filename'] = cols
fig = px.scatter(plot_df, x='Component 1', y='Component 2', hover_data=['Filename'], title='PCA')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
st.write(fig) | Plots a PCA plot of the proteins.
Args:
file (str): Path to file.
options (list): List of plot options. | alphapept/gui/results.py | pca_plot | MannLabs/alphapept | 97 | python | def pca_plot(file: str, options: list):
'Plots a PCA plot of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('PCA'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
pca = PCA(n_components=2)
components = pca.fit_transform(df[cols].fillna(0).T)
plot_df = pd.DataFrame(components, columns=['Component 1', 'Component 2'])
plot_df['Filename'] = cols
fig = px.scatter(plot_df, x='Component 1', y='Component 2', hover_data=['Filename'], title='PCA')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
st.write(fig) | def pca_plot(file: str, options: list):
'Plots a PCA plot of the proteins.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('PCA'):
df = cached_file(file)
cols = [_ for _ in df.columns if ('LFQ' in _)]
if (len(cols) == 0):
cols = df.columns
if multiple_file_check(cols):
pca = PCA(n_components=2)
components = pca.fit_transform(df[cols].fillna(0).T)
plot_df = pd.DataFrame(components, columns=['Component 1', 'Component 2'])
plot_df['Filename'] = cols
fig = px.scatter(plot_df, x='Component 1', y='Component 2', hover_data=['Filename'], title='PCA')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
st.write(fig)<|docstring|>Plots a PCA plot of the proteins.
Args:
file (str): Path to file.
options (list): List of plot options.<|endoftext|> |
69b4d20d990e7b1891f5490ff32cb9bf0772887f4556da26fb14353e97bbb1b2 | def volcano_plot(file: str, options: list):
'Plots a volcano plot of the proteins.\n Shows streamlit widgets to select the groups.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Volcano plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
if multiple_file_check(df.columns):
group_1 = col1.multiselect('Group1', df.columns)
group_2 = col2.multiselect('Group2', df.columns)
show_proteins = st.multiselect('Highlight proteins', df.index)
if ((len(group_1) > 0) and (len(group_2) > 0)):
with st.spinner('Creating plot..'):
test = stats.ttest_ind(df_log[group_1].values, df_log[group_2].values, nan_policy='omit', axis=1)
t_diff = (np.nanmean(df_log[group_1].values, axis=1) - np.nanmean(df_log[group_2].values, axis=1))
plot_df = pd.DataFrame()
plot_df['t_test_diff'] = t_diff
plot_df['-log(pvalue)'] = (- np.log(test.pvalue.data))
plot_df['id'] = df.index
plot_df.index = df.index
fig = make_subplots()
fig.add_trace(go.Scatter(x=plot_df['t_test_diff'], y=plot_df['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=plot_df.index, opacity=0.8, mode='markers', marker=dict(color='#3dc5ef')))
if (len(show_proteins) > 0):
fig.add_trace(go.Scatter(x=plot_df.loc[show_proteins]['t_test_diff'], y=plot_df.loc[show_proteins]['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=show_proteins, mode='markers+text', textposition='top center', marker_color='#18212b', textfont=dict(family='Courier New, monospace', size=16, color='#18212b')))
fig.update_layout(height=600, width=600)
fig.update_layout(showlegend=False)
st.write(fig) | Plots a volcano plot of the proteins.
Shows streamlit widgets to select the groups.
Args:
file (str): Path to file.
options (list): List of plot options. | alphapept/gui/results.py | volcano_plot | MannLabs/alphapept | 97 | python | def volcano_plot(file: str, options: list):
'Plots a volcano plot of the proteins.\n Shows streamlit widgets to select the groups.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Volcano plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
if multiple_file_check(df.columns):
group_1 = col1.multiselect('Group1', df.columns)
group_2 = col2.multiselect('Group2', df.columns)
show_proteins = st.multiselect('Highlight proteins', df.index)
if ((len(group_1) > 0) and (len(group_2) > 0)):
with st.spinner('Creating plot..'):
test = stats.ttest_ind(df_log[group_1].values, df_log[group_2].values, nan_policy='omit', axis=1)
t_diff = (np.nanmean(df_log[group_1].values, axis=1) - np.nanmean(df_log[group_2].values, axis=1))
plot_df = pd.DataFrame()
plot_df['t_test_diff'] = t_diff
plot_df['-log(pvalue)'] = (- np.log(test.pvalue.data))
plot_df['id'] = df.index
plot_df.index = df.index
fig = make_subplots()
fig.add_trace(go.Scatter(x=plot_df['t_test_diff'], y=plot_df['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=plot_df.index, opacity=0.8, mode='markers', marker=dict(color='#3dc5ef')))
if (len(show_proteins) > 0):
fig.add_trace(go.Scatter(x=plot_df.loc[show_proteins]['t_test_diff'], y=plot_df.loc[show_proteins]['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=show_proteins, mode='markers+text', textposition='top center', marker_color='#18212b', textfont=dict(family='Courier New, monospace', size=16, color='#18212b')))
fig.update_layout(height=600, width=600)
fig.update_layout(showlegend=False)
st.write(fig) | def volcano_plot(file: str, options: list):
'Plots a volcano plot of the proteins.\n Shows streamlit widgets to select the groups.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Volcano plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
if multiple_file_check(df.columns):
group_1 = col1.multiselect('Group1', df.columns)
group_2 = col2.multiselect('Group2', df.columns)
show_proteins = st.multiselect('Highlight proteins', df.index)
if ((len(group_1) > 0) and (len(group_2) > 0)):
with st.spinner('Creating plot..'):
test = stats.ttest_ind(df_log[group_1].values, df_log[group_2].values, nan_policy='omit', axis=1)
t_diff = (np.nanmean(df_log[group_1].values, axis=1) - np.nanmean(df_log[group_2].values, axis=1))
plot_df = pd.DataFrame()
plot_df['t_test_diff'] = t_diff
plot_df['-log(pvalue)'] = (- np.log(test.pvalue.data))
plot_df['id'] = df.index
plot_df.index = df.index
fig = make_subplots()
fig.add_trace(go.Scatter(x=plot_df['t_test_diff'], y=plot_df['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=plot_df.index, opacity=0.8, mode='markers', marker=dict(color='#3dc5ef')))
if (len(show_proteins) > 0):
fig.add_trace(go.Scatter(x=plot_df.loc[show_proteins]['t_test_diff'], y=plot_df.loc[show_proteins]['-log(pvalue)'], hovertemplate=(('<b>%{text}</b>' + '<br>t_test diff: %{y:.3f}') + '<br>-log(pvalue): %{x:.3f}'), text=show_proteins, mode='markers+text', textposition='top center', marker_color='#18212b', textfont=dict(family='Courier New, monospace', size=16, color='#18212b')))
fig.update_layout(height=600, width=600)
fig.update_layout(showlegend=False)
st.write(fig)<|docstring|>Plots a volcano plot of the proteins.
Shows streamlit widgets to select the groups.
Args:
file (str): Path to file.
options (list): List of plot options.<|endoftext|> |
8c560d4e3f8fa2de00e1130611e23bf3a26926caec8543b687cd727165f8f47b | def scatter_plot(file: str, options: list):
'Plots a scatter plot of the proteins.\n Shows streamlit widgets to select the files.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Scatter plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
all_cols = df.columns
if multiple_file_check(all_cols):
group_1 = col1.selectbox('Group1', all_cols)
group_2 = col2.selectbox('Group2', all_cols)
with st.spinner('Creating plot..'):
df_log['id'] = df_log.index
fig = px.scatter(df_log, x=group_1, y=group_2, hover_data=['id'], title='Scatterplot', opacity=0.2, trendline='ols')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
results = px.get_trendline_results(fig)
st.write(fig)
st.code(results.px_fit_results.iloc[0].summary()) | Plots a scatter plot of the proteins.
Shows streamlit widgets to select the files.
Args:
file (str): Path to file.
options (list): List of plot options. | alphapept/gui/results.py | scatter_plot | MannLabs/alphapept | 97 | python | def scatter_plot(file: str, options: list):
'Plots a scatter plot of the proteins.\n Shows streamlit widgets to select the files.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Scatter plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
all_cols = df.columns
if multiple_file_check(all_cols):
group_1 = col1.selectbox('Group1', all_cols)
group_2 = col2.selectbox('Group2', all_cols)
with st.spinner('Creating plot..'):
df_log['id'] = df_log.index
fig = px.scatter(df_log, x=group_1, y=group_2, hover_data=['id'], title='Scatterplot', opacity=0.2, trendline='ols')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
results = px.get_trendline_results(fig)
st.write(fig)
st.code(results.px_fit_results.iloc[0].summary()) | def scatter_plot(file: str, options: list):
'Plots a scatter plot of the proteins.\n Shows streamlit widgets to select the files.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n '
if ('/protein_table' in options):
with st.expander('Scatter plot'):
df = cached_file(file)
df_log = np.log(df.copy())
(col1, col2) = st.columns(2)
all_cols = df.columns
if multiple_file_check(all_cols):
group_1 = col1.selectbox('Group1', all_cols)
group_2 = col2.selectbox('Group2', all_cols)
with st.spinner('Creating plot..'):
df_log['id'] = df_log.index
fig = px.scatter(df_log, x=group_1, y=group_2, hover_data=['id'], title='Scatterplot', opacity=0.2, trendline='ols')
fig.update_layout(height=600, width=600)
fig.update_traces(marker=dict(color='#18212b'))
results = px.get_trendline_results(fig)
st.write(fig)
st.code(results.px_fit_results.iloc[0].summary())<|docstring|>Plots a scatter plot of the proteins.
Shows streamlit widgets to select the files.
Args:
file (str): Path to file.
options (list): List of plot options.<|endoftext|> |
9d3cda92e51512e3d2150c77c2039ae8b7060bb10c911929fbdd4f521cb3cc4d | def sequence_coverage_map(file: str, options: list, results_yaml: dict):
'Plots PSM coverage of total protein sequence.\n Shows streamlit widgets to select the file and target protein.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n results_yaml (dict): Results yaml dict.\n '
if (not all([check_file(results_yaml['experiment']['database_path'])])):
return
if ('/protein_fdr' in options):
protein_fdr = pd.read_hdf(file, 'protein_fdr')
with st.expander('Sequence coverage map'):
protein_id = st.selectbox('Select protein', protein_fdr['protein'].unique().tolist())
selections = (['all'] + readable_files_from_yaml(results_yaml)[1:])
selection = st.selectbox('File selection', selections)
if (not protein_id):
return
protein_id = protein_id.replace('|', '\\|')
with st.spinner('Fetching sequence..'):
database = read_database(results_yaml['experiment']['database_path'], array_name='proteins')
filter_target = database[database['name'].str.contains(protein_id)]
if (len(filter_target) != 1):
st.info('Protein name/identifier is ambiguous: {0} matches returned from FASTA file'.format(len(filter_target)))
return
target_sequence = filter_target.loc[(filter_target.index[0], 'sequence')]
target_name = filter_target.loc[(filter_target.index[0], 'name')]
target_protein_peptide_matches = protein_fdr[protein_fdr['protein'].str.contains(protein_id)]
try:
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['type'] == 'msms')]
except KeyError:
pass
if (selection != 'all'):
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['filename'] == selection)]
selection_label = ('in ' + os.path.basename(selection))
else:
selection_label = 'across all files'
if any(target_protein_peptide_matches['naked_sequence'].tolist()):
peptide_list = target_protein_peptide_matches['naked_sequence'].tolist()
else:
peptide_list = target_protein_peptide_matches['sequence'].tolist()
(total, total_covered, coverage_percent, residue_list) = calculate_sequence_coverage(target_sequence, peptide_list)
row_length = 50
group_length = 10
formatted_sequence = ''
counter = 0
for residue in residue_list:
if ((counter % group_length) == 0):
formatted_sequence += ' '
if ((counter % row_length) == 0):
formatted_sequence += '<br>'
formatted_sequence += (str(counter) + (' ' * (5 - len(str(counter)))))
if residue['covered']:
formatted_sequence += ('<strong style="color: red;">%s</strong>' % residue['res'])
else:
formatted_sequence += ('<strong style="color: black;">%s</strong>' % residue['res'])
counter += 1
st.markdown('{0}'.format(target_name), unsafe_allow_html=False)
st.markdown('Sequence coverage: {0} of {1} residues ({2:.1f}%) from {3} PSMs {4}'.format(total_covered, total, coverage_percent, len(target_protein_peptide_matches), selection_label), unsafe_allow_html=True)
st.markdown((('<pre>' + formatted_sequence) + '</pre>'), unsafe_allow_html=True) | Plots PSM coverage of total protein sequence.
Shows streamlit widgets to select the file and target protein.
Args:
file (str): Path to file.
options (list): List of plot options.
results_yaml (dict): Results yaml dict. | alphapept/gui/results.py | sequence_coverage_map | MannLabs/alphapept | 97 | python | def sequence_coverage_map(file: str, options: list, results_yaml: dict):
'Plots PSM coverage of total protein sequence.\n Shows streamlit widgets to select the file and target protein.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n results_yaml (dict): Results yaml dict.\n '
if (not all([check_file(results_yaml['experiment']['database_path'])])):
return
if ('/protein_fdr' in options):
protein_fdr = pd.read_hdf(file, 'protein_fdr')
with st.expander('Sequence coverage map'):
protein_id = st.selectbox('Select protein', protein_fdr['protein'].unique().tolist())
selections = (['all'] + readable_files_from_yaml(results_yaml)[1:])
selection = st.selectbox('File selection', selections)
if (not protein_id):
return
protein_id = protein_id.replace('|', '\\|')
with st.spinner('Fetching sequence..'):
database = read_database(results_yaml['experiment']['database_path'], array_name='proteins')
filter_target = database[database['name'].str.contains(protein_id)]
if (len(filter_target) != 1):
st.info('Protein name/identifier is ambiguous: {0} matches returned from FASTA file'.format(len(filter_target)))
return
target_sequence = filter_target.loc[(filter_target.index[0], 'sequence')]
target_name = filter_target.loc[(filter_target.index[0], 'name')]
target_protein_peptide_matches = protein_fdr[protein_fdr['protein'].str.contains(protein_id)]
try:
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['type'] == 'msms')]
except KeyError:
pass
if (selection != 'all'):
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['filename'] == selection)]
selection_label = ('in ' + os.path.basename(selection))
else:
selection_label = 'across all files'
if any(target_protein_peptide_matches['naked_sequence'].tolist()):
peptide_list = target_protein_peptide_matches['naked_sequence'].tolist()
else:
peptide_list = target_protein_peptide_matches['sequence'].tolist()
(total, total_covered, coverage_percent, residue_list) = calculate_sequence_coverage(target_sequence, peptide_list)
row_length = 50
group_length = 10
formatted_sequence =
counter = 0
for residue in residue_list:
if ((counter % group_length) == 0):
formatted_sequence += ' '
if ((counter % row_length) == 0):
formatted_sequence += '<br>'
formatted_sequence += (str(counter) + (' ' * (5 - len(str(counter)))))
if residue['covered']:
formatted_sequence += ('<strong style="color: red;">%s</strong>' % residue['res'])
else:
formatted_sequence += ('<strong style="color: black;">%s</strong>' % residue['res'])
counter += 1
st.markdown('{0}'.format(target_name), unsafe_allow_html=False)
st.markdown('Sequence coverage: {0} of {1} residues ({2:.1f}%) from {3} PSMs {4}'.format(total_covered, total, coverage_percent, len(target_protein_peptide_matches), selection_label), unsafe_allow_html=True)
st.markdown((('<pre>' + formatted_sequence) + '</pre>'), unsafe_allow_html=True) | def sequence_coverage_map(file: str, options: list, results_yaml: dict):
'Plots PSM coverage of total protein sequence.\n Shows streamlit widgets to select the file and target protein.\n\n Args:\n file (str): Path to file.\n options (list): List of plot options.\n results_yaml (dict): Results yaml dict.\n '
if (not all([check_file(results_yaml['experiment']['database_path'])])):
return
if ('/protein_fdr' in options):
protein_fdr = pd.read_hdf(file, 'protein_fdr')
with st.expander('Sequence coverage map'):
protein_id = st.selectbox('Select protein', protein_fdr['protein'].unique().tolist())
selections = (['all'] + readable_files_from_yaml(results_yaml)[1:])
selection = st.selectbox('File selection', selections)
if (not protein_id):
return
protein_id = protein_id.replace('|', '\\|')
with st.spinner('Fetching sequence..'):
database = read_database(results_yaml['experiment']['database_path'], array_name='proteins')
filter_target = database[database['name'].str.contains(protein_id)]
if (len(filter_target) != 1):
st.info('Protein name/identifier is ambiguous: {0} matches returned from FASTA file'.format(len(filter_target)))
return
target_sequence = filter_target.loc[(filter_target.index[0], 'sequence')]
target_name = filter_target.loc[(filter_target.index[0], 'name')]
target_protein_peptide_matches = protein_fdr[protein_fdr['protein'].str.contains(protein_id)]
try:
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['type'] == 'msms')]
except KeyError:
pass
if (selection != 'all'):
target_protein_peptide_matches = target_protein_peptide_matches[(target_protein_peptide_matches['filename'] == selection)]
selection_label = ('in ' + os.path.basename(selection))
else:
selection_label = 'across all files'
if any(target_protein_peptide_matches['naked_sequence'].tolist()):
peptide_list = target_protein_peptide_matches['naked_sequence'].tolist()
else:
peptide_list = target_protein_peptide_matches['sequence'].tolist()
(total, total_covered, coverage_percent, residue_list) = calculate_sequence_coverage(target_sequence, peptide_list)
row_length = 50
group_length = 10
formatted_sequence =
counter = 0
for residue in residue_list:
if ((counter % group_length) == 0):
formatted_sequence += ' '
if ((counter % row_length) == 0):
formatted_sequence += '<br>'
formatted_sequence += (str(counter) + (' ' * (5 - len(str(counter)))))
if residue['covered']:
formatted_sequence += ('<strong style="color: red;">%s</strong>' % residue['res'])
else:
formatted_sequence += ('<strong style="color: black;">%s</strong>' % residue['res'])
counter += 1
st.markdown('{0}'.format(target_name), unsafe_allow_html=False)
st.markdown('Sequence coverage: {0} of {1} residues ({2:.1f}%) from {3} PSMs {4}'.format(total_covered, total, coverage_percent, len(target_protein_peptide_matches), selection_label), unsafe_allow_html=True)
st.markdown((('<pre>' + formatted_sequence) + '</pre>'), unsafe_allow_html=True)<|docstring|>Plots PSM coverage of total protein sequence.
Shows streamlit widgets to select the file and target protein.
Args:
file (str): Path to file.
options (list): List of plot options.
results_yaml (dict): Results yaml dict.<|endoftext|> |
0babc68287f0f51ae79210e0f2a6d9a20c7eb92f1d4d57cfa00268fcee1e3f10 | def parse_file_and_display(file: str, results_yaml: dict):
'Wrapper function to load file and displays dataframe in streamlit.\n\n Args:\n file (str): Path to file.\n results_yaml (dict): Results yaml dict.\n '
pandas_hdf = False
try:
ms_file = alphapept.io.MS_Data_File(file)
options = [_ for _ in ms_file.read() if (_ != 'Raw')]
except KeyError:
pandas_hdf = True
ms_file = None
with pd.HDFStore(file) as hdf:
options = list(hdf.keys())
if pandas_hdf:
volcano_plot(file, options)
correlation_heatmap(file, options)
scatter_plot(file, options)
pca_plot(file, options)
if results_yaml:
sequence_coverage_map(file, options, results_yaml)
if (ms_file is not None):
st.write('Basic Plots')
ion_plot(ms_file, options)
opt = st.selectbox('Select group', ([None] + options))
if (opt is not None):
if pandas_hdf:
df = pd.read_hdf(file, opt)
else:
df = ms_file.read(dataset_name=opt)
if (not isinstance(df, pd.DataFrame)):
df = pd.DataFrame(df)
data_range = st.slider('Data range', 0, len(df), (0, 1000))
st.write(df.iloc[data_range[0]:data_range[1]])
make_df_downloadble(df, file) | Wrapper function to load file and displays dataframe in streamlit.
Args:
file (str): Path to file.
results_yaml (dict): Results yaml dict. | alphapept/gui/results.py | parse_file_and_display | MannLabs/alphapept | 97 | python | def parse_file_and_display(file: str, results_yaml: dict):
'Wrapper function to load file and displays dataframe in streamlit.\n\n Args:\n file (str): Path to file.\n results_yaml (dict): Results yaml dict.\n '
pandas_hdf = False
try:
ms_file = alphapept.io.MS_Data_File(file)
options = [_ for _ in ms_file.read() if (_ != 'Raw')]
except KeyError:
pandas_hdf = True
ms_file = None
with pd.HDFStore(file) as hdf:
options = list(hdf.keys())
if pandas_hdf:
volcano_plot(file, options)
correlation_heatmap(file, options)
scatter_plot(file, options)
pca_plot(file, options)
if results_yaml:
sequence_coverage_map(file, options, results_yaml)
if (ms_file is not None):
st.write('Basic Plots')
ion_plot(ms_file, options)
opt = st.selectbox('Select group', ([None] + options))
if (opt is not None):
if pandas_hdf:
df = pd.read_hdf(file, opt)
else:
df = ms_file.read(dataset_name=opt)
if (not isinstance(df, pd.DataFrame)):
df = pd.DataFrame(df)
data_range = st.slider('Data range', 0, len(df), (0, 1000))
st.write(df.iloc[data_range[0]:data_range[1]])
make_df_downloadble(df, file) | def parse_file_and_display(file: str, results_yaml: dict):
'Wrapper function to load file and displays dataframe in streamlit.\n\n Args:\n file (str): Path to file.\n results_yaml (dict): Results yaml dict.\n '
pandas_hdf = False
try:
ms_file = alphapept.io.MS_Data_File(file)
options = [_ for _ in ms_file.read() if (_ != 'Raw')]
except KeyError:
pandas_hdf = True
ms_file = None
with pd.HDFStore(file) as hdf:
options = list(hdf.keys())
if pandas_hdf:
volcano_plot(file, options)
correlation_heatmap(file, options)
scatter_plot(file, options)
pca_plot(file, options)
if results_yaml:
sequence_coverage_map(file, options, results_yaml)
if (ms_file is not None):
st.write('Basic Plots')
ion_plot(ms_file, options)
opt = st.selectbox('Select group', ([None] + options))
if (opt is not None):
if pandas_hdf:
df = pd.read_hdf(file, opt)
else:
df = ms_file.read(dataset_name=opt)
if (not isinstance(df, pd.DataFrame)):
df = pd.DataFrame(df)
data_range = st.slider('Data range', 0, len(df), (0, 1000))
st.write(df.iloc[data_range[0]:data_range[1]])
make_df_downloadble(df, file)<|docstring|>Wrapper function to load file and displays dataframe in streamlit.
Args:
file (str): Path to file.
results_yaml (dict): Results yaml dict.<|endoftext|> |
58ff4532b08998f29100198c5333ccad9c0401c9331707b2648ac9a7713ffea4 | def plot_summary(results_yaml: dict, selection: str):
'Plot summary information of a selected resutlts.yaml\n\n Args:\n results_yaml (dict): Results yaml dict.\n selection (str): Selected file.\n '
files = [os.path.splitext(_)[0] for _ in results_yaml['summary']['processed_files']]
data = [results_yaml['summary'][_] for _ in files]
data_df = pd.DataFrame(data)
data_df['filename'] = files
for _ in ['feature_table (n in table)', 'sequence (protein_fdr, n unique)', 'protein_group (protein_fdr, n unique)']:
if (_ not in data_df):
data_df[_] = 0
median_features = int(data_df['feature_table (n in table)'].median())
median_peptides = int(data_df['sequence (protein_fdr, n unique)'].median())
median_protein_groups = int(data_df['protein_group (protein_fdr, n unique)'].median())
st.write(f'### {escape_markdown(selection)}')
st.write(f'### {median_features:,} features | {median_peptides:,} peptides | {median_protein_groups:,} protein groups (median)')
fig = make_subplots(rows=1, cols=3, subplot_titles=('Features', 'Peptides', 'Protein Groups'))
hovertext = list(data_df['filename'].values)
fig.add_bar(x=data_df.index, y=data_df['feature_table (n in table)'], hovertext=hovertext, row=1, col=1, marker_color='#3dc5ef')
fig.add_bar(x=data_df.index, y=data_df['sequence (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=2, marker_color='#42dee1')
fig.add_bar(x=data_df.index, y=data_df['protein_group (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=3, marker_color='#6eecb9')
fig.update_layout(showlegend=False)
fig.update_layout(title_text='Run Summary')
st.write(fig) | Plot summary information of a selected resutlts.yaml
Args:
results_yaml (dict): Results yaml dict.
selection (str): Selected file. | alphapept/gui/results.py | plot_summary | MannLabs/alphapept | 97 | python | def plot_summary(results_yaml: dict, selection: str):
'Plot summary information of a selected resutlts.yaml\n\n Args:\n results_yaml (dict): Results yaml dict.\n selection (str): Selected file.\n '
files = [os.path.splitext(_)[0] for _ in results_yaml['summary']['processed_files']]
data = [results_yaml['summary'][_] for _ in files]
data_df = pd.DataFrame(data)
data_df['filename'] = files
for _ in ['feature_table (n in table)', 'sequence (protein_fdr, n unique)', 'protein_group (protein_fdr, n unique)']:
if (_ not in data_df):
data_df[_] = 0
median_features = int(data_df['feature_table (n in table)'].median())
median_peptides = int(data_df['sequence (protein_fdr, n unique)'].median())
median_protein_groups = int(data_df['protein_group (protein_fdr, n unique)'].median())
st.write(f'### {escape_markdown(selection)}')
st.write(f'### {median_features:,} features | {median_peptides:,} peptides | {median_protein_groups:,} protein groups (median)')
fig = make_subplots(rows=1, cols=3, subplot_titles=('Features', 'Peptides', 'Protein Groups'))
hovertext = list(data_df['filename'].values)
fig.add_bar(x=data_df.index, y=data_df['feature_table (n in table)'], hovertext=hovertext, row=1, col=1, marker_color='#3dc5ef')
fig.add_bar(x=data_df.index, y=data_df['sequence (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=2, marker_color='#42dee1')
fig.add_bar(x=data_df.index, y=data_df['protein_group (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=3, marker_color='#6eecb9')
fig.update_layout(showlegend=False)
fig.update_layout(title_text='Run Summary')
st.write(fig) | def plot_summary(results_yaml: dict, selection: str):
'Plot summary information of a selected resutlts.yaml\n\n Args:\n results_yaml (dict): Results yaml dict.\n selection (str): Selected file.\n '
files = [os.path.splitext(_)[0] for _ in results_yaml['summary']['processed_files']]
data = [results_yaml['summary'][_] for _ in files]
data_df = pd.DataFrame(data)
data_df['filename'] = files
for _ in ['feature_table (n in table)', 'sequence (protein_fdr, n unique)', 'protein_group (protein_fdr, n unique)']:
if (_ not in data_df):
data_df[_] = 0
median_features = int(data_df['feature_table (n in table)'].median())
median_peptides = int(data_df['sequence (protein_fdr, n unique)'].median())
median_protein_groups = int(data_df['protein_group (protein_fdr, n unique)'].median())
st.write(f'### {escape_markdown(selection)}')
st.write(f'### {median_features:,} features | {median_peptides:,} peptides | {median_protein_groups:,} protein groups (median)')
fig = make_subplots(rows=1, cols=3, subplot_titles=('Features', 'Peptides', 'Protein Groups'))
hovertext = list(data_df['filename'].values)
fig.add_bar(x=data_df.index, y=data_df['feature_table (n in table)'], hovertext=hovertext, row=1, col=1, marker_color='#3dc5ef')
fig.add_bar(x=data_df.index, y=data_df['sequence (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=2, marker_color='#42dee1')
fig.add_bar(x=data_df.index, y=data_df['protein_group (protein_fdr, n unique)'], hovertext=hovertext, row=1, col=3, marker_color='#6eecb9')
fig.update_layout(showlegend=False)
fig.update_layout(title_text='Run Summary')
st.write(fig)<|docstring|>Plot summary information of a selected resutlts.yaml
Args:
results_yaml (dict): Results yaml dict.
selection (str): Selected file.<|endoftext|> |
72997e1abcfc6dfeaa747e32a8b3672fb8e03eb4831760c66158fc3c8b1ccabc | def results():
'Streamlit page that displays information on how to get started.'
st.write('# Results')
st.text('This page allows to explore the analysis results.\nAlphaPept uses the HDF container format which can be accessed here.')
selection = st.selectbox('File selection', ('Previous results', 'Enter file'))
results_yaml = {}
if (selection == 'Previous results'):
results_files = files_in_folder(PROCESSED_PATH, '.yaml', sort='date')
selection = st.selectbox('Last run', results_files)
if selection:
filepath_selection = os.path.join(PROCESSED_PATH, selection)
results_yaml = load_settings(filepath_selection)
with st.spinner('Loading data..'):
plot_summary(results_yaml, selection)
with st.expander('Run summary'):
st.write(results_yaml['summary'])
read_log((os.path.splitext(filepath_selection)[0] + '.log'))
raw_files = readable_files_from_yaml(results_yaml)
st.write('### Explore tables from experiment')
file = st.selectbox('Select file from experiment', raw_files)
else:
file = None
elif (selection == 'Enter file'):
file = st.text_input('Enter path to hdf file.', os.getcwd())
else:
file = ''
if (file is None):
file = ''
if (not os.path.isfile(file)):
st.warning('Not a valid file.')
else:
with st.spinner('Parsing file'):
parse_file_and_display(file, results_yaml) | Streamlit page that displays information on how to get started. | alphapept/gui/results.py | results | MannLabs/alphapept | 97 | python | def results():
st.write('# Results')
st.text('This page allows to explore the analysis results.\nAlphaPept uses the HDF container format which can be accessed here.')
selection = st.selectbox('File selection', ('Previous results', 'Enter file'))
results_yaml = {}
if (selection == 'Previous results'):
results_files = files_in_folder(PROCESSED_PATH, '.yaml', sort='date')
selection = st.selectbox('Last run', results_files)
if selection:
filepath_selection = os.path.join(PROCESSED_PATH, selection)
results_yaml = load_settings(filepath_selection)
with st.spinner('Loading data..'):
plot_summary(results_yaml, selection)
with st.expander('Run summary'):
st.write(results_yaml['summary'])
read_log((os.path.splitext(filepath_selection)[0] + '.log'))
raw_files = readable_files_from_yaml(results_yaml)
st.write('### Explore tables from experiment')
file = st.selectbox('Select file from experiment', raw_files)
else:
file = None
elif (selection == 'Enter file'):
file = st.text_input('Enter path to hdf file.', os.getcwd())
else:
file =
if (file is None):
file =
if (not os.path.isfile(file)):
st.warning('Not a valid file.')
else:
with st.spinner('Parsing file'):
parse_file_and_display(file, results_yaml) | def results():
st.write('# Results')
st.text('This page allows to explore the analysis results.\nAlphaPept uses the HDF container format which can be accessed here.')
selection = st.selectbox('File selection', ('Previous results', 'Enter file'))
results_yaml = {}
if (selection == 'Previous results'):
results_files = files_in_folder(PROCESSED_PATH, '.yaml', sort='date')
selection = st.selectbox('Last run', results_files)
if selection:
filepath_selection = os.path.join(PROCESSED_PATH, selection)
results_yaml = load_settings(filepath_selection)
with st.spinner('Loading data..'):
plot_summary(results_yaml, selection)
with st.expander('Run summary'):
st.write(results_yaml['summary'])
read_log((os.path.splitext(filepath_selection)[0] + '.log'))
raw_files = readable_files_from_yaml(results_yaml)
st.write('### Explore tables from experiment')
file = st.selectbox('Select file from experiment', raw_files)
else:
file = None
elif (selection == 'Enter file'):
file = st.text_input('Enter path to hdf file.', os.getcwd())
else:
file =
if (file is None):
file =
if (not os.path.isfile(file)):
st.warning('Not a valid file.')
else:
with st.spinner('Parsing file'):
parse_file_and_display(file, results_yaml)<|docstring|>Streamlit page that displays information on how to get started.<|endoftext|> |
5f1523d2e6efe12099140d40232613d1e8b36b3d5ac354cfc58974f4a4698587 | def setup_schema():
'For each model, install a marshmallow schema generator as\n `model.__schema__()`, and add an entry to the `schema`\n module.\n\n '
for class_ in _Base._decl_class_registry.values():
if hasattr(class_, '__tablename__'):
if class_.__name__.endswith('Schema'):
raise _ModelConversionError("For safety, setup_schema can not be used when aModel class ends with 'Schema'")
def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class())
schema_class_name = class_.__name__
add_schema(schema_class_name, exclude=['created_at', 'modified'], add_to_model=True)
add_schema(f'{schema_class_name}NoID', exclude=['created_at', 'id', 'modified', 'single_user_group']) | For each model, install a marshmallow schema generator as
`model.__schema__()`, and add an entry to the `schema`
module. | skyportal/schema.py | setup_schema | rcthomas/skyportal | 0 | python | def setup_schema():
'For each model, install a marshmallow schema generator as\n `model.__schema__()`, and add an entry to the `schema`\n module.\n\n '
for class_ in _Base._decl_class_registry.values():
if hasattr(class_, '__tablename__'):
if class_.__name__.endswith('Schema'):
raise _ModelConversionError("For safety, setup_schema can not be used when aModel class ends with 'Schema'")
def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class())
schema_class_name = class_.__name__
add_schema(schema_class_name, exclude=['created_at', 'modified'], add_to_model=True)
add_schema(f'{schema_class_name}NoID', exclude=['created_at', 'id', 'modified', 'single_user_group']) | def setup_schema():
'For each model, install a marshmallow schema generator as\n `model.__schema__()`, and add an entry to the `schema`\n module.\n\n '
for class_ in _Base._decl_class_registry.values():
if hasattr(class_, '__tablename__'):
if class_.__name__.endswith('Schema'):
raise _ModelConversionError("For safety, setup_schema can not be used when aModel class ends with 'Schema'")
def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class())
schema_class_name = class_.__name__
add_schema(schema_class_name, exclude=['created_at', 'modified'], add_to_model=True)
add_schema(f'{schema_class_name}NoID', exclude=['created_at', 'id', 'modified', 'single_user_group'])<|docstring|>For each model, install a marshmallow schema generator as
`model.__schema__()`, and add an entry to the `schema`
module.<|endoftext|> |
42406b7d4e4a4bcff0df8cbbe6ac51f5074bb20dbf4d01bec8a4734aa4146116 | @post_load
def parse_flux(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryFlux` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryFlux schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryFlux object.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
table = Table([data])
if (data['flux'] is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (data['flux'] is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p | Return a `Photometry` object from a `PhotometryFlux` marshmallow
schema.
Parameters
----------
data : dict
The instance of the PhotometryFlux schema to convert to Photometry.
Returns
-------
Photometry
The Photometry object generated from the PhotometryFlux object. | skyportal/schema.py | parse_flux | rcthomas/skyportal | 0 | python | @post_load
def parse_flux(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryFlux` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryFlux schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryFlux object.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
table = Table([data])
if (data['flux'] is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (data['flux'] is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p | @post_load
def parse_flux(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryFlux` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryFlux schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryFlux object.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
table = Table([data])
if (data['flux'] is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (data['flux'] is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p<|docstring|>Return a `Photometry` object from a `PhotometryFlux` marshmallow
schema.
Parameters
----------
data : dict
The instance of the PhotometryFlux schema to convert to Photometry.
Returns
-------
Photometry
The Photometry object generated from the PhotometryFlux object.<|endoftext|> |
b8776549501169f401deebc7915e46d51b856cdc0b92047b8b5e13155d304d41 | @post_load
def parse_mag(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryMag` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryMag schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryMag dict.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
ok = any([all([op(field, None) for field in [data['mag'], data['magerr']]]) for op in [operator.is_, operator.is_not]])
if (not ok):
raise ValidationError(f'Error parsing packet "{data}": mag and magerr must both be null, or both be not null.')
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
hasmag = (data['mag'] is not None)
if hasmag:
flux = (10 ** ((- 0.4) * (data['mag'] - PHOT_ZP)))
fluxerr = ((data['magerr'] / (2.5 / np.log(10))) * flux)
else:
fivesigflux = (10 ** ((- 0.4) * (data['limiting_mag'] - PHOT_ZP)))
flux = None
fluxerr = (fivesigflux / 5)
table = Table([{'flux': flux, 'fluxerr': fluxerr, 'magsys': data['magsys'], 'zp': PHOT_ZP, 'filter': data['filter'], 'mjd': data['mjd']}])
if (flux is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (flux is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p | Return a `Photometry` object from a `PhotometryMag` marshmallow
schema.
Parameters
----------
data : dict
The instance of the PhotometryMag schema to convert to Photometry.
Returns
-------
Photometry
The Photometry object generated from the PhotometryMag dict. | skyportal/schema.py | parse_mag | rcthomas/skyportal | 0 | python | @post_load
def parse_mag(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryMag` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryMag schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryMag dict.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
ok = any([all([op(field, None) for field in [data['mag'], data['magerr']]]) for op in [operator.is_, operator.is_not]])
if (not ok):
raise ValidationError(f'Error parsing packet "{data}": mag and magerr must both be null, or both be not null.')
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
hasmag = (data['mag'] is not None)
if hasmag:
flux = (10 ** ((- 0.4) * (data['mag'] - PHOT_ZP)))
fluxerr = ((data['magerr'] / (2.5 / np.log(10))) * flux)
else:
fivesigflux = (10 ** ((- 0.4) * (data['limiting_mag'] - PHOT_ZP)))
flux = None
fluxerr = (fivesigflux / 5)
table = Table([{'flux': flux, 'fluxerr': fluxerr, 'magsys': data['magsys'], 'zp': PHOT_ZP, 'filter': data['filter'], 'mjd': data['mjd']}])
if (flux is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (flux is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p | @post_load
def parse_mag(self, data, **kwargs):
'Return a `Photometry` object from a `PhotometryMag` marshmallow\n schema.\n\n Parameters\n ----------\n data : dict\n The instance of the PhotometryMag schema to convert to Photometry.\n\n Returns\n -------\n Photometry\n The Photometry object generated from the PhotometryMag dict.\n '
from skyportal.models import Instrument, Obj, PHOT_SYS, PHOT_ZP, Photometry
from sncosmo.photdata import PhotometricData
ok = any([all([op(field, None) for field in [data['mag'], data['magerr']]]) for op in [operator.is_, operator.is_not]])
if (not ok):
raise ValidationError(f'Error parsing packet "{data}": mag and magerr must both be null, or both be not null.')
instrument = Instrument.query.get(data['instrument_id'])
if (not instrument):
raise ValidationError(f"Invalid instrument ID: {data['instrument_id']}")
obj = Obj.query.get(data['obj_id'])
if (not obj):
raise ValidationError(f"Invalid object ID: {data['obj_id']}")
if (data['filter'] not in instrument.filters):
raise ValidationError(f"Instrument {instrument.name} has no filter {data['filter']}.")
hasmag = (data['mag'] is not None)
if hasmag:
flux = (10 ** ((- 0.4) * (data['mag'] - PHOT_ZP)))
fluxerr = ((data['magerr'] / (2.5 / np.log(10))) * flux)
else:
fivesigflux = (10 ** ((- 0.4) * (data['limiting_mag'] - PHOT_ZP)))
flux = None
fluxerr = (fivesigflux / 5)
table = Table([{'flux': flux, 'fluxerr': fluxerr, 'magsys': data['magsys'], 'zp': PHOT_ZP, 'filter': data['filter'], 'mjd': data['mjd']}])
if (flux is None):
table['flux'] = 0.0
photdata = PhotometricData(table).normalized(zp=PHOT_ZP, zpsys=PHOT_SYS)
final_flux = (None if (flux is None) else photdata.flux[0])
p = Photometry(obj_id=data['obj_id'], mjd=data['mjd'], flux=final_flux, fluxerr=photdata.fluxerr[0], instrument_id=data['instrument_id'], assignment_id=data['assignment_id'], filter=data['filter'], ra=data['ra'], dec=data['dec'], ra_unc=data['ra_unc'], dec_unc=data['dec_unc'])
if (('alert_id' in data) and (data['alert_id'] is not None)):
p.alert_id = data['alert_id']
return p<|docstring|>Return a `Photometry` object from a `PhotometryMag` marshmallow
schema.
Parameters
----------
data : dict
The instance of the PhotometryMag schema to convert to Photometry.
Returns
-------
Photometry
The Photometry object generated from the PhotometryMag dict.<|endoftext|> |
2961124153c0e9cfdb6b8c07efd32e2cf4d80ba176fba89059093b9f5d6d3df3 | def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class()) | Add schema to module namespace, and, optionally, to model object.
Parameters
----------
schema_class_name : str
Name of schema.
exclude : list of str, optional
List of model attributes to exclude from schema. Defaults to `[]`.
add_to_model : bool, optional
Boolean indicating whether to install this schema generator
on the model as `model.__schema__`. Defaults to `False`. | skyportal/schema.py | add_schema | rcthomas/skyportal | 0 | python | def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class()) | def add_schema(schema_class_name, exclude=[], add_to_model=False):
'Add schema to module namespace, and, optionally, to model object.\n\n Parameters\n ----------\n schema_class_name : str\n Name of schema.\n exclude : list of str, optional\n List of model attributes to exclude from schema. Defaults to `[]`.\n add_to_model : bool, optional\n Boolean indicating whether to install this schema generator\n on the model as `model.__schema__`. Defaults to `False`.\n '
schema_class_meta = type(f'{schema_class_name}_meta', (), {'model': class_, 'sqla_session': _DBSession, 'ordered': True, 'exclude': [], 'include_fk': True, 'include_relationships': False})
for exclude_attr in exclude:
if (hasattr(class_, exclude_attr) and (getattr(class_, exclude_attr) is not None)):
schema_class_meta.exclude.append(exclude_attr)
schema_class = type(schema_class_name, (_ModelSchema,), {'Meta': schema_class_meta})
if add_to_model:
setattr(class_, '__schema__', schema_class)
setattr(sys.modules[__name__], schema_class_name, schema_class())<|docstring|>Add schema to module namespace, and, optionally, to model object.
Parameters
----------
schema_class_name : str
Name of schema.
exclude : list of str, optional
List of model attributes to exclude from schema. Defaults to `[]`.
add_to_model : bool, optional
Boolean indicating whether to install this schema generator
on the model as `model.__schema__`. Defaults to `False`.<|endoftext|> |
2271ee6f33394c5c8569d9740910f153105b105eeefb005129884352175f30f8 | @staticmethod
def decode_event(event_code):
' decode an xterm mouse event character not shifted by 32 '
button_code = (event_code & 3)
moved = (event_code & 32)
released = (button_code == 3)
left = False
middle = False
right = False
scroll = 0
if moved:
action = 'moved'
elif released:
action = 'up'
else:
action = 'down'
if (event_code & 64):
if (not moved):
if (button_code == 0):
scroll = (- 1)
elif (button_code == 1):
scroll = 1
elif (button_code == 0):
left = True
elif (button_code == 1):
middle = True
elif (button_code == 2):
right = True
else:
left = True
middle = True
right = True
mod_code = ((event_code >> 2) & 7)
if (mod_code & 1):
pass
if (mod_code & 2):
pass
if (mod_code & 4):
pass
return {'action': action, 'left': left, 'middle': middle, 'right': right, 'scroll': scroll} | decode an xterm mouse event character not shifted by 32 | termpixels/unix_keys.py | decode_event | loganzartman/termpixels | 17 | python | @staticmethod
def decode_event(event_code):
' '
button_code = (event_code & 3)
moved = (event_code & 32)
released = (button_code == 3)
left = False
middle = False
right = False
scroll = 0
if moved:
action = 'moved'
elif released:
action = 'up'
else:
action = 'down'
if (event_code & 64):
if (not moved):
if (button_code == 0):
scroll = (- 1)
elif (button_code == 1):
scroll = 1
elif (button_code == 0):
left = True
elif (button_code == 1):
middle = True
elif (button_code == 2):
right = True
else:
left = True
middle = True
right = True
mod_code = ((event_code >> 2) & 7)
if (mod_code & 1):
pass
if (mod_code & 2):
pass
if (mod_code & 4):
pass
return {'action': action, 'left': left, 'middle': middle, 'right': right, 'scroll': scroll} | @staticmethod
def decode_event(event_code):
' '
button_code = (event_code & 3)
moved = (event_code & 32)
released = (button_code == 3)
left = False
middle = False
right = False
scroll = 0
if moved:
action = 'moved'
elif released:
action = 'up'
else:
action = 'down'
if (event_code & 64):
if (not moved):
if (button_code == 0):
scroll = (- 1)
elif (button_code == 1):
scroll = 1
elif (button_code == 0):
left = True
elif (button_code == 1):
middle = True
elif (button_code == 2):
right = True
else:
left = True
middle = True
right = True
mod_code = ((event_code >> 2) & 7)
if (mod_code & 1):
pass
if (mod_code & 2):
pass
if (mod_code & 4):
pass
return {'action': action, 'left': left, 'middle': middle, 'right': right, 'scroll': scroll}<|docstring|>decode an xterm mouse event character not shifted by 32<|endoftext|> |
9f3add46e6a4a91698175e76b9c39c74b8e38141e4fa4b3283140e431cc3d756 | def rss(v, xdata, ydata, beta):
'Function to be minimized in the Nelder Mead algorithm'
fitted_curve = ((xdata * v[0]) * np.exp((v[1] * beta)))
return np.sum((np.abs((ydata - fitted_curve)) ** 2)) | Function to be minimized in the Nelder Mead algorithm | dpcmaps/dpc.py | rss | dmgav/dpcmaps | 0 | python | def rss(v, xdata, ydata, beta):
fitted_curve = ((xdata * v[0]) * np.exp((v[1] * beta)))
return np.sum((np.abs((ydata - fitted_curve)) ** 2)) | def rss(v, xdata, ydata, beta):
fitted_curve = ((xdata * v[0]) * np.exp((v[1] * beta)))
return np.sum((np.abs((ydata - fitted_curve)) ** 2))<|docstring|>Function to be minimized in the Nelder Mead algorithm<|endoftext|> |
e1c0f9b8dbcde4004f3b450ef8a14b9d5a624d1e7de8e0219fa0879d12d1104f | def load_file(fn, roi=None, bad_pixels=[], zip_file=None):
'\n Load an image file\n '
if os.path.exists(fn):
im = load_timepix.load(fn)
elif (zip_file is not None):
raise NotImplementedError
f = zip_file.open(fn)
stream = StringIO.StringIO()
stream.write(f.read())
f.close()
stream.seek(0)
im = plt.imread(stream, format='tif')
else:
raise Exception(('File not found: %s' % fn))
if (bad_pixels is not None):
for (x, y) in bad_pixels:
im[(x, y)] = 0
if (roi is not None):
(x1, y1, x2, y2) = roi
im = im[(x1:(x2 + 1), y1:(y2 + 1))]
xline = np.sum(im, axis=1)
yline = np.sum(im, axis=0)
fx = np.fft.fftshift(np.fft.ifft(xline))
fy = np.fft.fftshift(np.fft.ifft(yline))
return (im, fx, fy) | Load an image file | dpcmaps/dpc.py | load_file | dmgav/dpcmaps | 0 | python | def load_file(fn, roi=None, bad_pixels=[], zip_file=None):
'\n \n '
if os.path.exists(fn):
im = load_timepix.load(fn)
elif (zip_file is not None):
raise NotImplementedError
f = zip_file.open(fn)
stream = StringIO.StringIO()
stream.write(f.read())
f.close()
stream.seek(0)
im = plt.imread(stream, format='tif')
else:
raise Exception(('File not found: %s' % fn))
if (bad_pixels is not None):
for (x, y) in bad_pixels:
im[(x, y)] = 0
if (roi is not None):
(x1, y1, x2, y2) = roi
im = im[(x1:(x2 + 1), y1:(y2 + 1))]
xline = np.sum(im, axis=1)
yline = np.sum(im, axis=0)
fx = np.fft.fftshift(np.fft.ifft(xline))
fy = np.fft.fftshift(np.fft.ifft(yline))
return (im, fx, fy) | def load_file(fn, roi=None, bad_pixels=[], zip_file=None):
'\n \n '
if os.path.exists(fn):
im = load_timepix.load(fn)
elif (zip_file is not None):
raise NotImplementedError
f = zip_file.open(fn)
stream = StringIO.StringIO()
stream.write(f.read())
f.close()
stream.seek(0)
im = plt.imread(stream, format='tif')
else:
raise Exception(('File not found: %s' % fn))
if (bad_pixels is not None):
for (x, y) in bad_pixels:
im[(x, y)] = 0
if (roi is not None):
(x1, y1, x2, y2) = roi
im = im[(x1:(x2 + 1), y1:(y2 + 1))]
xline = np.sum(im, axis=1)
yline = np.sum(im, axis=0)
fx = np.fft.fftshift(np.fft.ifft(xline))
fy = np.fft.fftshift(np.fft.ifft(yline))
return (im, fx, fy)<|docstring|>Load an image file<|endoftext|> |
5f1765198167b7e1445202c724f025f571370bdd9104d79c0d46b54dcbda114d | def run_dpc(filename, i, j, ref_fx=None, ref_fy=None, start_point=[1, 0], pixel_size=55, focus_to_det=1.46, dx=0.1, dy=0.1, energy=19.5, zip_file=None, roi=None, bad_pixels=[], max_iters=1000, solver='Nelder-Mead', invers=False):
'\n All units in micron\n\n pixel_size\n focus_to_det: focus to detector distance\n dx: scan step size x\n dy: scan step size y\n energy: in keV\n '
try:
(img, fx, fy) = load_file(filename, zip_file=zip_file, roi=roi, bad_pixels=bad_pixels)
except Exception as ex:
print(('Failed to load file %s: %s' % (filename, ex)))
return (0.0, 0.0, 0.0)
res = minimize(rss, start_point, args=(ref_fx, fx, get_beta(ref_fx)), method=solver, tol=0.0001, options=dict(maxiter=max_iters))
vx = res.x
a = vx[0]
if invers:
gx = (- vx[1])
else:
gx = vx[1]
res = minimize(rss, start_point, args=(ref_fy, fy, get_beta(ref_fy)), method=solver, tol=1e-06, options=dict(maxiter=max_iters))
vy = res.x
gy = vy[1]
return (a, gx, gy) | All units in micron
pixel_size
focus_to_det: focus to detector distance
dx: scan step size x
dy: scan step size y
energy: in keV | dpcmaps/dpc.py | run_dpc | dmgav/dpcmaps | 0 | python | def run_dpc(filename, i, j, ref_fx=None, ref_fy=None, start_point=[1, 0], pixel_size=55, focus_to_det=1.46, dx=0.1, dy=0.1, energy=19.5, zip_file=None, roi=None, bad_pixels=[], max_iters=1000, solver='Nelder-Mead', invers=False):
'\n All units in micron\n\n pixel_size\n focus_to_det: focus to detector distance\n dx: scan step size x\n dy: scan step size y\n energy: in keV\n '
try:
(img, fx, fy) = load_file(filename, zip_file=zip_file, roi=roi, bad_pixels=bad_pixels)
except Exception as ex:
print(('Failed to load file %s: %s' % (filename, ex)))
return (0.0, 0.0, 0.0)
res = minimize(rss, start_point, args=(ref_fx, fx, get_beta(ref_fx)), method=solver, tol=0.0001, options=dict(maxiter=max_iters))
vx = res.x
a = vx[0]
if invers:
gx = (- vx[1])
else:
gx = vx[1]
res = minimize(rss, start_point, args=(ref_fy, fy, get_beta(ref_fy)), method=solver, tol=1e-06, options=dict(maxiter=max_iters))
vy = res.x
gy = vy[1]
return (a, gx, gy) | def run_dpc(filename, i, j, ref_fx=None, ref_fy=None, start_point=[1, 0], pixel_size=55, focus_to_det=1.46, dx=0.1, dy=0.1, energy=19.5, zip_file=None, roi=None, bad_pixels=[], max_iters=1000, solver='Nelder-Mead', invers=False):
'\n All units in micron\n\n pixel_size\n focus_to_det: focus to detector distance\n dx: scan step size x\n dy: scan step size y\n energy: in keV\n '
try:
(img, fx, fy) = load_file(filename, zip_file=zip_file, roi=roi, bad_pixels=bad_pixels)
except Exception as ex:
print(('Failed to load file %s: %s' % (filename, ex)))
return (0.0, 0.0, 0.0)
res = minimize(rss, start_point, args=(ref_fx, fx, get_beta(ref_fx)), method=solver, tol=0.0001, options=dict(maxiter=max_iters))
vx = res.x
a = vx[0]
if invers:
gx = (- vx[1])
else:
gx = vx[1]
res = minimize(rss, start_point, args=(ref_fy, fy, get_beta(ref_fy)), method=solver, tol=1e-06, options=dict(maxiter=max_iters))
vy = res.x
gy = vy[1]
return (a, gx, gy)<|docstring|>All units in micron
pixel_size
focus_to_det: focus to detector distance
dx: scan step size x
dy: scan step size y
energy: in keV<|endoftext|> |
352c15d089356ddcc02593c7f98f78d12ea6fe9d3378fdb152e55269cd005067 | def recon(gx, gy, dx=0.1, dy=0.1, pad=1, w=1.0):
'\n Reconstruct the final phase image\n Parameters\n ----------\n gx : 2-D numpy array\n phase gradient along x direction\n\n gy : 2-D numpy array\n phase gradient along y direction\n\n dx : float\n scanning step size in x direction (in micro-meter)\n\n dy : float\n scanning step size in y direction (in micro-meter)\n\n pad : float\n padding parameter\n default value, pad = 1 --> no padding\n p p p\n pad = 3 --> p v p\n p p p\n\n w : float\n weighting parameter for the phase gradient along x and y direction when\n constructing the final phase image\n\n Returns\n ----------\n phi : 2-D numpy array\n final phase image\n\n References\n ----------\n [1] Yan, Hanfei, Yong S. Chu, Jorg Maser, Evgeny Nazaretski, Jungdae Kim,\n Hyon Chol Kang, Jeffrey J. Lombardo, and Wilson KS Chiu, "Quantitative\n x-ray phase imaging at the nanoscale by multilayer Laue lenses," Scientific\n reports 3 (2013).\n\n '
(rows, cols) = gx.shape
gx_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gy_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gx_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gx
gy_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gy
tx = np.fft.fftshift(np.fft.fft2(gx_padding))
ty = np.fft.fftshift(np.fft.fft2(gy_padding))
c = np.zeros(((pad * rows), (pad * cols)), dtype=complex)
mid_col = (((pad * cols) // 2.0) + 1)
mid_row = (((pad * rows) // 2.0) + 1)
ax = (((2 * np.pi) * ((np.arange((pad * cols)) + 1) - mid_col)) / ((pad * cols) * dx))
ay = (((2 * np.pi) * ((np.arange((pad * rows)) + 1) - mid_row)) / ((pad * rows) * dy))
(kappax, kappay) = np.meshgrid(ax, ay)
c = ((- 1j) * ((kappax * tx) + ((w * kappay) * ty)))
c = np.ma.masked_values(c, 0)
c /= ((kappax ** 2) + (w * (kappay ** 2)))
c = np.ma.filled(c, 0)
c = np.fft.ifftshift(c)
phi_padding = np.fft.ifft2(c)
phi_padding = (- phi_padding.real)
phi = phi_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))]
return phi | Reconstruct the final phase image
Parameters
----------
gx : 2-D numpy array
phase gradient along x direction
gy : 2-D numpy array
phase gradient along y direction
dx : float
scanning step size in x direction (in micro-meter)
dy : float
scanning step size in y direction (in micro-meter)
pad : float
padding parameter
default value, pad = 1 --> no padding
p p p
pad = 3 --> p v p
p p p
w : float
weighting parameter for the phase gradient along x and y direction when
constructing the final phase image
Returns
----------
phi : 2-D numpy array
final phase image
References
----------
[1] Yan, Hanfei, Yong S. Chu, Jorg Maser, Evgeny Nazaretski, Jungdae Kim,
Hyon Chol Kang, Jeffrey J. Lombardo, and Wilson KS Chiu, "Quantitative
x-ray phase imaging at the nanoscale by multilayer Laue lenses," Scientific
reports 3 (2013). | dpcmaps/dpc.py | recon | dmgav/dpcmaps | 0 | python | def recon(gx, gy, dx=0.1, dy=0.1, pad=1, w=1.0):
'\n Reconstruct the final phase image\n Parameters\n ----------\n gx : 2-D numpy array\n phase gradient along x direction\n\n gy : 2-D numpy array\n phase gradient along y direction\n\n dx : float\n scanning step size in x direction (in micro-meter)\n\n dy : float\n scanning step size in y direction (in micro-meter)\n\n pad : float\n padding parameter\n default value, pad = 1 --> no padding\n p p p\n pad = 3 --> p v p\n p p p\n\n w : float\n weighting parameter for the phase gradient along x and y direction when\n constructing the final phase image\n\n Returns\n ----------\n phi : 2-D numpy array\n final phase image\n\n References\n ----------\n [1] Yan, Hanfei, Yong S. Chu, Jorg Maser, Evgeny Nazaretski, Jungdae Kim,\n Hyon Chol Kang, Jeffrey J. Lombardo, and Wilson KS Chiu, "Quantitative\n x-ray phase imaging at the nanoscale by multilayer Laue lenses," Scientific\n reports 3 (2013).\n\n '
(rows, cols) = gx.shape
gx_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gy_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gx_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gx
gy_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gy
tx = np.fft.fftshift(np.fft.fft2(gx_padding))
ty = np.fft.fftshift(np.fft.fft2(gy_padding))
c = np.zeros(((pad * rows), (pad * cols)), dtype=complex)
mid_col = (((pad * cols) // 2.0) + 1)
mid_row = (((pad * rows) // 2.0) + 1)
ax = (((2 * np.pi) * ((np.arange((pad * cols)) + 1) - mid_col)) / ((pad * cols) * dx))
ay = (((2 * np.pi) * ((np.arange((pad * rows)) + 1) - mid_row)) / ((pad * rows) * dy))
(kappax, kappay) = np.meshgrid(ax, ay)
c = ((- 1j) * ((kappax * tx) + ((w * kappay) * ty)))
c = np.ma.masked_values(c, 0)
c /= ((kappax ** 2) + (w * (kappay ** 2)))
c = np.ma.filled(c, 0)
c = np.fft.ifftshift(c)
phi_padding = np.fft.ifft2(c)
phi_padding = (- phi_padding.real)
phi = phi_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))]
return phi | def recon(gx, gy, dx=0.1, dy=0.1, pad=1, w=1.0):
'\n Reconstruct the final phase image\n Parameters\n ----------\n gx : 2-D numpy array\n phase gradient along x direction\n\n gy : 2-D numpy array\n phase gradient along y direction\n\n dx : float\n scanning step size in x direction (in micro-meter)\n\n dy : float\n scanning step size in y direction (in micro-meter)\n\n pad : float\n padding parameter\n default value, pad = 1 --> no padding\n p p p\n pad = 3 --> p v p\n p p p\n\n w : float\n weighting parameter for the phase gradient along x and y direction when\n constructing the final phase image\n\n Returns\n ----------\n phi : 2-D numpy array\n final phase image\n\n References\n ----------\n [1] Yan, Hanfei, Yong S. Chu, Jorg Maser, Evgeny Nazaretski, Jungdae Kim,\n Hyon Chol Kang, Jeffrey J. Lombardo, and Wilson KS Chiu, "Quantitative\n x-ray phase imaging at the nanoscale by multilayer Laue lenses," Scientific\n reports 3 (2013).\n\n '
(rows, cols) = gx.shape
gx_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gy_padding = np.zeros(((pad * rows), (pad * cols)), dtype='d')
gx_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gx
gy_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))] = gy
tx = np.fft.fftshift(np.fft.fft2(gx_padding))
ty = np.fft.fftshift(np.fft.fft2(gy_padding))
c = np.zeros(((pad * rows), (pad * cols)), dtype=complex)
mid_col = (((pad * cols) // 2.0) + 1)
mid_row = (((pad * rows) // 2.0) + 1)
ax = (((2 * np.pi) * ((np.arange((pad * cols)) + 1) - mid_col)) / ((pad * cols) * dx))
ay = (((2 * np.pi) * ((np.arange((pad * rows)) + 1) - mid_row)) / ((pad * rows) * dy))
(kappax, kappay) = np.meshgrid(ax, ay)
c = ((- 1j) * ((kappax * tx) + ((w * kappay) * ty)))
c = np.ma.masked_values(c, 0)
c /= ((kappax ** 2) + (w * (kappay ** 2)))
c = np.ma.filled(c, 0)
c = np.fft.ifftshift(c)
phi_padding = np.fft.ifft2(c)
phi_padding = (- phi_padding.real)
phi = phi_padding[(((pad // 2) * rows):(((pad // 2) + 1) * rows), ((pad // 2) * cols):(((pad // 2) + 1) * cols))]
return phi<|docstring|>Reconstruct the final phase image
Parameters
----------
gx : 2-D numpy array
phase gradient along x direction
gy : 2-D numpy array
phase gradient along y direction
dx : float
scanning step size in x direction (in micro-meter)
dy : float
scanning step size in y direction (in micro-meter)
pad : float
padding parameter
default value, pad = 1 --> no padding
p p p
pad = 3 --> p v p
p p p
w : float
weighting parameter for the phase gradient along x and y direction when
constructing the final phase image
Returns
----------
phi : 2-D numpy array
final phase image
References
----------
[1] Yan, Hanfei, Yong S. Chu, Jorg Maser, Evgeny Nazaretski, Jungdae Kim,
Hyon Chol Kang, Jeffrey J. Lombardo, and Wilson KS Chiu, "Quantitative
x-ray phase imaging at the nanoscale by multilayer Laue lenses," Scientific
reports 3 (2013).<|endoftext|> |
7d151e1c0d00c4aa47097e46789f32610701d18306831b6d56504b29ca4a0b75 | def set_zero(var_name, scope=fluid.global_scope(), place=fluid.CPUPlace(), param_type='int64'):
'\n Set tensor of a Variable to zero.\n Args:\n var_name(str): name of Variable\n scope(Scope): Scope object, default is fluid.global_scope()\n place(Place): Place object, default is fluid.CPUPlace()\n param_type(str): param data type, default is int64\n '
param = scope.var(var_name).get_tensor()
param_array = np.zeros(param._get_dims()).astype(param_type)
param.set(param_array, place) | Set tensor of a Variable to zero.
Args:
var_name(str): name of Variable
scope(Scope): Scope object, default is fluid.global_scope()
place(Place): Place object, default is fluid.CPUPlace()
param_type(str): param data type, default is int64 | PaddleRec/ctr/xdeepfm/infer.py | set_zero | LDOUBLEV/models | 5 | python | def set_zero(var_name, scope=fluid.global_scope(), place=fluid.CPUPlace(), param_type='int64'):
'\n Set tensor of a Variable to zero.\n Args:\n var_name(str): name of Variable\n scope(Scope): Scope object, default is fluid.global_scope()\n place(Place): Place object, default is fluid.CPUPlace()\n param_type(str): param data type, default is int64\n '
param = scope.var(var_name).get_tensor()
param_array = np.zeros(param._get_dims()).astype(param_type)
param.set(param_array, place) | def set_zero(var_name, scope=fluid.global_scope(), place=fluid.CPUPlace(), param_type='int64'):
'\n Set tensor of a Variable to zero.\n Args:\n var_name(str): name of Variable\n scope(Scope): Scope object, default is fluid.global_scope()\n place(Place): Place object, default is fluid.CPUPlace()\n param_type(str): param data type, default is int64\n '
param = scope.var(var_name).get_tensor()
param_array = np.zeros(param._get_dims()).astype(param_type)
param.set(param_array, place)<|docstring|>Set tensor of a Variable to zero.
Args:
var_name(str): name of Variable
scope(Scope): Scope object, default is fluid.global_scope()
place(Place): Place object, default is fluid.CPUPlace()
param_type(str): param data type, default is int64<|endoftext|> |
9c0b93fdc4f29b0ae6431d0971a17c5816df0bd0aa7f9a1f207ee6901f739fc5 | def patch_sys_path() -> None:
'Modify sys.path to include all paths from the\n current environment.\n '
syspath = _get_external_sys_path()
for each in reversed(syspath):
if (each not in sys.path):
sys.path.insert(0, each) | Modify sys.path to include all paths from the
current environment. | ped/pypath.py | patch_sys_path | anordin95/ped | 45 | python | def patch_sys_path() -> None:
'Modify sys.path to include all paths from the\n current environment.\n '
syspath = _get_external_sys_path()
for each in reversed(syspath):
if (each not in sys.path):
sys.path.insert(0, each) | def patch_sys_path() -> None:
'Modify sys.path to include all paths from the\n current environment.\n '
syspath = _get_external_sys_path()
for each in reversed(syspath):
if (each not in sys.path):
sys.path.insert(0, each)<|docstring|>Modify sys.path to include all paths from the
current environment.<|endoftext|> |
6338fbf38251639089134639ff2d63503fc663c59ac1337585a815b67166748c | def __init__(self, lock_file=None, log_file=None, lock_details=None):
'\n Register lock and log file.\n\n :param lock_file: path to the lock file, does not have to exist\n :param log_file: path to the log file, does not have to exist\n :return:\n '
self.lock_file = (lock_file or ('/tmp/lock-' + str(uuid.uuid1())))
self.log_file = (log_file or (self.lock_file + '.log'))
if (lock_details is None):
self.lock_details = ''
elif isinstance(lock_details, basestring):
self.lock_details = lock_details
elif isinstance(lock_details, dict):
self.lock_details = '\n'.join(('{} {}'.format(k, v) for (k, v) in lock_details.items()))
elif isinstance(lock_details, list):
self.lock_details = '\n'.join(map(str, lock_details))
else:
self.lock_details = str(lock_details)
self.acquired = False | Register lock and log file.
:param lock_file: path to the lock file, does not have to exist
:param log_file: path to the log file, does not have to exist
:return: | relationships/config_on_worker.py | __init__ | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def __init__(self, lock_file=None, log_file=None, lock_details=None):
'\n Register lock and log file.\n\n :param lock_file: path to the lock file, does not have to exist\n :param log_file: path to the log file, does not have to exist\n :return:\n '
self.lock_file = (lock_file or ('/tmp/lock-' + str(uuid.uuid1())))
self.log_file = (log_file or (self.lock_file + '.log'))
if (lock_details is None):
self.lock_details =
elif isinstance(lock_details, basestring):
self.lock_details = lock_details
elif isinstance(lock_details, dict):
self.lock_details = '\n'.join(('{} {}'.format(k, v) for (k, v) in lock_details.items()))
elif isinstance(lock_details, list):
self.lock_details = '\n'.join(map(str, lock_details))
else:
self.lock_details = str(lock_details)
self.acquired = False | def __init__(self, lock_file=None, log_file=None, lock_details=None):
'\n Register lock and log file.\n\n :param lock_file: path to the lock file, does not have to exist\n :param log_file: path to the log file, does not have to exist\n :return:\n '
self.lock_file = (lock_file or ('/tmp/lock-' + str(uuid.uuid1())))
self.log_file = (log_file or (self.lock_file + '.log'))
if (lock_details is None):
self.lock_details =
elif isinstance(lock_details, basestring):
self.lock_details = lock_details
elif isinstance(lock_details, dict):
self.lock_details = '\n'.join(('{} {}'.format(k, v) for (k, v) in lock_details.items()))
elif isinstance(lock_details, list):
self.lock_details = '\n'.join(map(str, lock_details))
else:
self.lock_details = str(lock_details)
self.acquired = False<|docstring|>Register lock and log file.
:param lock_file: path to the lock file, does not have to exist
:param log_file: path to the log file, does not have to exist
:return:<|endoftext|> |
bdb433e4f700c4316cd38bde3531a97ecd315f600db77fc245ab9b1ff9a65b3e | def __enter__(self):
'\n Open lock and log files, write\n\n :return: reference to instantiated lock\n '
self._acquire()
return self | Open lock and log files, write
:return: reference to instantiated lock | relationships/config_on_worker.py | __enter__ | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def __enter__(self):
'\n Open lock and log files, write\n\n :return: reference to instantiated lock\n '
self._acquire()
return self | def __enter__(self):
'\n Open lock and log files, write\n\n :return: reference to instantiated lock\n '
self._acquire()
return self<|docstring|>Open lock and log files, write
:return: reference to instantiated lock<|endoftext|> |
9eb5735e7a25a33535324873856cf820b74247686579bf0e6cb881cd85c23d8f | def __exit__(self, exc_type, _v, _tb):
'\n Clean up and release any locks, close open files.\n\n :param exc_type: part of generic signature\n :param _v: part of generic signature\n :param _tb: part of generic signature\n :return:\n '
self._release() | Clean up and release any locks, close open files.
:param exc_type: part of generic signature
:param _v: part of generic signature
:param _tb: part of generic signature
:return: | relationships/config_on_worker.py | __exit__ | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def __exit__(self, exc_type, _v, _tb):
'\n Clean up and release any locks, close open files.\n\n :param exc_type: part of generic signature\n :param _v: part of generic signature\n :param _tb: part of generic signature\n :return:\n '
self._release() | def __exit__(self, exc_type, _v, _tb):
'\n Clean up and release any locks, close open files.\n\n :param exc_type: part of generic signature\n :param _v: part of generic signature\n :param _tb: part of generic signature\n :return:\n '
self._release()<|docstring|>Clean up and release any locks, close open files.
:param exc_type: part of generic signature
:param _v: part of generic signature
:param _tb: part of generic signature
:return:<|endoftext|> |
b2d0b8bb26e5017345a5f9eda4c6d3d2bae0e355e5404b45e8ed7317e7bf27ea | def _acquire(self):
'\n Open lock and log files, write identification details into lock.\n\n :return:\n '
self.lock_fd = open(self.lock_file, 'w+')
self.log_fd = open(self.log_file, 'a')
lockf(self.lock_fd, LOCK_EX)
self.lock_fd.truncate()
self.lock_fd.write('lock_pid {}\nlock_status locked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
self.acquired = True | Open lock and log files, write identification details into lock.
:return: | relationships/config_on_worker.py | _acquire | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def _acquire(self):
'\n Open lock and log files, write identification details into lock.\n\n :return:\n '
self.lock_fd = open(self.lock_file, 'w+')
self.log_fd = open(self.log_file, 'a')
lockf(self.lock_fd, LOCK_EX)
self.lock_fd.truncate()
self.lock_fd.write('lock_pid {}\nlock_status locked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
self.acquired = True | def _acquire(self):
'\n Open lock and log files, write identification details into lock.\n\n :return:\n '
self.lock_fd = open(self.lock_file, 'w+')
self.log_fd = open(self.log_file, 'a')
lockf(self.lock_fd, LOCK_EX)
self.lock_fd.truncate()
self.lock_fd.write('lock_pid {}\nlock_status locked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
self.acquired = True<|docstring|>Open lock and log files, write identification details into lock.
:return:<|endoftext|> |
1f67d896a67976568195e5277099443b167752ce5473953646c94e5c0610908a | def _release(self):
'\n Update lock file, release lock and clean up..\n\n :return:\n '
if self.acquired:
self.log_fd.seek(0)
self.lock_fd.write('lock_pid {}\nlock_status unlocked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
lockf(self.lock_fd, LOCK_UN)
self.lock_fd.close()
self.log_fd.close() | Update lock file, release lock and clean up..
:return: | relationships/config_on_worker.py | _release | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def _release(self):
'\n Update lock file, release lock and clean up..\n\n :return:\n '
if self.acquired:
self.log_fd.seek(0)
self.lock_fd.write('lock_pid {}\nlock_status unlocked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
lockf(self.lock_fd, LOCK_UN)
self.lock_fd.close()
self.log_fd.close() | def _release(self):
'\n Update lock file, release lock and clean up..\n\n :return:\n '
if self.acquired:
self.log_fd.seek(0)
self.lock_fd.write('lock_pid {}\nlock_status unlocked\n{}'.format(getpid(), self.lock_details))
self.lock_fd.flush()
lockf(self.lock_fd, LOCK_UN)
self.lock_fd.close()
self.log_fd.close()<|docstring|>Update lock file, release lock and clean up..
:return:<|endoftext|> |
63efc954355f40cd1b40b9662c7411610e193a8ccfb214f82dde011ee8f71cf3 | def log(self, text):
'\n Non-fancily log text to log file by writing out a line.\n\n :param text: message to log\n :return:\n '
if self.acquired:
self.log_fd.write((text + '\n'))
else:
raise Exception('trying to write when unlocked') | Non-fancily log text to log file by writing out a line.
:param text: message to log
:return: | relationships/config_on_worker.py | log | buhanec/cloudify-oryx2-flexiant-blueprint | 0 | python | def log(self, text):
'\n Non-fancily log text to log file by writing out a line.\n\n :param text: message to log\n :return:\n '
if self.acquired:
self.log_fd.write((text + '\n'))
else:
raise Exception('trying to write when unlocked') | def log(self, text):
'\n Non-fancily log text to log file by writing out a line.\n\n :param text: message to log\n :return:\n '
if self.acquired:
self.log_fd.write((text + '\n'))
else:
raise Exception('trying to write when unlocked')<|docstring|>Non-fancily log text to log file by writing out a line.
:param text: message to log
:return:<|endoftext|> |
a723685016a9efb9315c85ee3f064e15b2e136abaac00e3cf93f24908cff6d7a | @classmethod
def generate(cls, config: object):
'\n :param config:\n :return:\n ' | :param config:
:return: | manga_py/libs/modules/html/__init__.py | generate | gromenauer/manga-py | 4 | python | @classmethod
def generate(cls, config: object):
'\n :param config:\n :return:\n ' | @classmethod
def generate(cls, config: object):
'\n :param config:\n :return:\n '<|docstring|>:param config:
:return:<|endoftext|> |
986b88fedc89b00506abb8e97d2d9f80236e81227dc7270261335d7698bd0420 | def min_file_sector_coverage(prods):
' Selects a number of files that cover all available sectors.\n Result might not be the minimal set of files that covers all available sectors in general\n (to avoid computational and implementation overhead).\n But given the process of data validation file creation it is highly likely that the minimal set is found.'
file_names = list(prods['dataURI'])
coverages = ([None] * len(file_names))
all_sectors = set()
for (f, file_name) in enumerate(file_names):
result = re.findall('-s\\d+', file_name)
if (len(result) != 2):
raise RuntimeError(('Invalid file name encountered!\n' + f"File name '{file_name}' is not valid according to convention fot dvt.fits files."))
start_sector = int(result[0][2:])
end_sector = int(result[1][2:])
coverages[f] = set(range(start_sector, (end_sector + 1)))
all_sectors = all_sectors.union(coverages[f])
selected_file_mask = np.zeros(len(file_names), dtype=bool)
cover_lens = [len(i) for i in coverages]
for i in np.asarray(cover_lens).argsort()[::(- 1)]:
if (len(all_sectors) == 0):
return prods[selected_file_mask]
set_diff = all_sectors.difference(coverages[i])
if (len(set_diff) == len(all_sectors)):
continue
else:
selected_file_mask[i] = True
all_sectors = all_sectors.difference(coverages[i])
return prods[selected_file_mask] | Selects a number of files that cover all available sectors.
Result might not be the minimal set of files that covers all available sectors in general
(to avoid computational and implementation overhead).
But given the process of data validation file creation it is highly likely that the minimal set is found. | data_preparation/data_retrieval.py | min_file_sector_coverage | ChristophHoenes/AnomalousExoplanetTransits | 1 | python | def min_file_sector_coverage(prods):
' Selects a number of files that cover all available sectors.\n Result might not be the minimal set of files that covers all available sectors in general\n (to avoid computational and implementation overhead).\n But given the process of data validation file creation it is highly likely that the minimal set is found.'
file_names = list(prods['dataURI'])
coverages = ([None] * len(file_names))
all_sectors = set()
for (f, file_name) in enumerate(file_names):
result = re.findall('-s\\d+', file_name)
if (len(result) != 2):
raise RuntimeError(('Invalid file name encountered!\n' + f"File name '{file_name}' is not valid according to convention fot dvt.fits files."))
start_sector = int(result[0][2:])
end_sector = int(result[1][2:])
coverages[f] = set(range(start_sector, (end_sector + 1)))
all_sectors = all_sectors.union(coverages[f])
selected_file_mask = np.zeros(len(file_names), dtype=bool)
cover_lens = [len(i) for i in coverages]
for i in np.asarray(cover_lens).argsort()[::(- 1)]:
if (len(all_sectors) == 0):
return prods[selected_file_mask]
set_diff = all_sectors.difference(coverages[i])
if (len(set_diff) == len(all_sectors)):
continue
else:
selected_file_mask[i] = True
all_sectors = all_sectors.difference(coverages[i])
return prods[selected_file_mask] | def min_file_sector_coverage(prods):
' Selects a number of files that cover all available sectors.\n Result might not be the minimal set of files that covers all available sectors in general\n (to avoid computational and implementation overhead).\n But given the process of data validation file creation it is highly likely that the minimal set is found.'
file_names = list(prods['dataURI'])
coverages = ([None] * len(file_names))
all_sectors = set()
for (f, file_name) in enumerate(file_names):
result = re.findall('-s\\d+', file_name)
if (len(result) != 2):
raise RuntimeError(('Invalid file name encountered!\n' + f"File name '{file_name}' is not valid according to convention fot dvt.fits files."))
start_sector = int(result[0][2:])
end_sector = int(result[1][2:])
coverages[f] = set(range(start_sector, (end_sector + 1)))
all_sectors = all_sectors.union(coverages[f])
selected_file_mask = np.zeros(len(file_names), dtype=bool)
cover_lens = [len(i) for i in coverages]
for i in np.asarray(cover_lens).argsort()[::(- 1)]:
if (len(all_sectors) == 0):
return prods[selected_file_mask]
set_diff = all_sectors.difference(coverages[i])
if (len(set_diff) == len(all_sectors)):
continue
else:
selected_file_mask[i] = True
all_sectors = all_sectors.difference(coverages[i])
return prods[selected_file_mask]<|docstring|>Selects a number of files that cover all available sectors.
Result might not be the minimal set of files that covers all available sectors in general
(to avoid computational and implementation overhead).
But given the process of data validation file creation it is highly likely that the minimal set is found.<|endoftext|> |
a1004d767be1b19ded7268a0e402c2b332a176c110e7cc3840383363a258100a | def register(linter):
'Register the reporter classes with the linter.'
linter.register_reporter(GithubReporter) | Register the reporter classes with the linter. | tools/pylint/githubreporter.py | register | Asnanon/ts-gw2-verifyBot | 2 | python | def register(linter):
linter.register_reporter(GithubReporter) | def register(linter):
linter.register_reporter(GithubReporter)<|docstring|>Register the reporter classes with the linter.<|endoftext|> |
c0d8f7aa2bea1e6b240ae24db5233037c83fdf6fc5b2aee9cb20e395c58fd51a | def __init__(self, folder):
'Create an acoustic model reader-writer.\n\n :param folder: (str) Name of the folder with the acoustic model files\n\n '
self.__folder = u(folder) | Create an acoustic model reader-writer.
:param folder: (str) Name of the folder with the acoustic model files | sppas/sppas/src/models/acm/readwrite.py | __init__ | mirfan899/MTTS | 0 | python | def __init__(self, folder):
'Create an acoustic model reader-writer.\n\n :param folder: (str) Name of the folder with the acoustic model files\n\n '
self.__folder = u(folder) | def __init__(self, folder):
'Create an acoustic model reader-writer.\n\n :param folder: (str) Name of the folder with the acoustic model files\n\n '
self.__folder = u(folder)<|docstring|>Create an acoustic model reader-writer.
:param folder: (str) Name of the folder with the acoustic model files<|endoftext|> |
a438dfae5803b7edf69d8b63cb3679d142292114615d22f62631339bf519d267 | @staticmethod
def get_formats():
'Return the list of accepted formats for acoustic models.'
return sppasACMRW.ACM_TYPES.keys() | Return the list of accepted formats for acoustic models. | sppas/sppas/src/models/acm/readwrite.py | get_formats | mirfan899/MTTS | 0 | python | @staticmethod
def get_formats():
return sppasACMRW.ACM_TYPES.keys() | @staticmethod
def get_formats():
return sppasACMRW.ACM_TYPES.keys()<|docstring|>Return the list of accepted formats for acoustic models.<|endoftext|> |
84439cea32c5f725d4adc43f103b9b08ffe5564326f706de37d2ecc77980e2c2 | def get_folder(self):
'Return the name of the folder of the acoustic model.'
return self.__folder | Return the name of the folder of the acoustic model. | sppas/sppas/src/models/acm/readwrite.py | get_folder | mirfan899/MTTS | 0 | python | def get_folder(self):
return self.__folder | def get_folder(self):
return self.__folder<|docstring|>Return the name of the folder of the acoustic model.<|endoftext|> |
911bc7c07fc79c5cf64ccfe384baa67bd157ba4c6fcf043cdc7379147f32730e | def set_folder(self, folder):
'Set a new folder to store files of the acoustic model.\n\n :param folder: (str) New name of the folder of the acoustic model.\n\n '
self.__folder = u(folder) | Set a new folder to store files of the acoustic model.
:param folder: (str) New name of the folder of the acoustic model. | sppas/sppas/src/models/acm/readwrite.py | set_folder | mirfan899/MTTS | 0 | python | def set_folder(self, folder):
'Set a new folder to store files of the acoustic model.\n\n :param folder: (str) New name of the folder of the acoustic model.\n\n '
self.__folder = u(folder) | def set_folder(self, folder):
'Set a new folder to store files of the acoustic model.\n\n :param folder: (str) New name of the folder of the acoustic model.\n\n '
self.__folder = u(folder)<|docstring|>Set a new folder to store files of the acoustic model.
:param folder: (str) New name of the folder of the acoustic model.<|endoftext|> |
d6d8c26706a92a06574a4c77ab269e2297c9ed4222b02168c3e0d29fd466e7c1 | def read(self):
'Read an acoustic model from the folder.\n\n :returns: sppasAcModel()\n\n '
try:
acm = self.get_reader()
acm.read(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise
return acm | Read an acoustic model from the folder.
:returns: sppasAcModel() | sppas/sppas/src/models/acm/readwrite.py | read | mirfan899/MTTS | 0 | python | def read(self):
'Read an acoustic model from the folder.\n\n :returns: sppasAcModel()\n\n '
try:
acm = self.get_reader()
acm.read(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise
return acm | def read(self):
'Read an acoustic model from the folder.\n\n :returns: sppasAcModel()\n\n '
try:
acm = self.get_reader()
acm.read(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise
return acm<|docstring|>Read an acoustic model from the folder.
:returns: sppasAcModel()<|endoftext|> |
cd7163eb9ab459a391476a4043a376379e13e30faf1fc9f28b0710a40edeff8d | def get_reader(self):
'Return an acoustic model according to the given folder.\n\n :returns: sppasAcModel()\n\n '
for file_reader in sppasACMRW.ACM_TYPES.values():
try:
if (file_reader.detect(self.__folder) is True):
return file_reader()
except:
continue
raise MioFolderError(self.__folder) | Return an acoustic model according to the given folder.
:returns: sppasAcModel() | sppas/sppas/src/models/acm/readwrite.py | get_reader | mirfan899/MTTS | 0 | python | def get_reader(self):
'Return an acoustic model according to the given folder.\n\n :returns: sppasAcModel()\n\n '
for file_reader in sppasACMRW.ACM_TYPES.values():
try:
if (file_reader.detect(self.__folder) is True):
return file_reader()
except:
continue
raise MioFolderError(self.__folder) | def get_reader(self):
'Return an acoustic model according to the given folder.\n\n :returns: sppasAcModel()\n\n '
for file_reader in sppasACMRW.ACM_TYPES.values():
try:
if (file_reader.detect(self.__folder) is True):
return file_reader()
except:
continue
raise MioFolderError(self.__folder)<|docstring|>Return an acoustic model according to the given folder.
:returns: sppasAcModel()<|endoftext|> |
379df18c428e80d59b86b1b0c467a2a27e12b0268da046894589db9bb2fc4b25 | def write(self, acmodel, format='hmmdefs'):
'Write an acoustic model into a folder.\n\n :param acmodel: (str)\n :param format: The format to save the acoustic model\n\n '
if (format not in sppasACMRW.ACM_TYPES):
raise MioFileFormatError(format)
acm_rw = sppasACMRW.ACM_TYPES[format]()
acm_rw.set(acmodel)
try:
acm_rw.write(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise | Write an acoustic model into a folder.
:param acmodel: (str)
:param format: The format to save the acoustic model | sppas/sppas/src/models/acm/readwrite.py | write | mirfan899/MTTS | 0 | python | def write(self, acmodel, format='hmmdefs'):
'Write an acoustic model into a folder.\n\n :param acmodel: (str)\n :param format: The format to save the acoustic model\n\n '
if (format not in sppasACMRW.ACM_TYPES):
raise MioFileFormatError(format)
acm_rw = sppasACMRW.ACM_TYPES[format]()
acm_rw.set(acmodel)
try:
acm_rw.write(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise | def write(self, acmodel, format='hmmdefs'):
'Write an acoustic model into a folder.\n\n :param acmodel: (str)\n :param format: The format to save the acoustic model\n\n '
if (format not in sppasACMRW.ACM_TYPES):
raise MioFileFormatError(format)
acm_rw = sppasACMRW.ACM_TYPES[format]()
acm_rw.set(acmodel)
try:
acm_rw.write(self.__folder)
except UnicodeError as e:
raise MioEncodingError(self.__folder, str(e))
except Exception:
raise<|docstring|>Write an acoustic model into a folder.
:param acmodel: (str)
:param format: The format to save the acoustic model<|endoftext|> |
a401f50ce91c91b764b48a7a62690d91d57edb263eb477511e58da1d59f0f53f | def initialize_logging(log_file=None):
'\n Setup logging\n :param log_file: (string) The file to log to\n :return: (Logger) a logging instance\n '
formatter = logging.Formatter('[%(asctime)s] [%(filename)30s:%(lineno)4s - %(funcName)30s()][%(threadName)5s] [%(name)10.10s] [%(levelname)8s] %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
if log_file:
rh = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=10485760)
rh.setFormatter(formatter)
rh.setLevel(logging.DEBUG)
logger.addHandler(rh)
logger.addHandler(sh)
return logger | Setup logging
:param log_file: (string) The file to log to
:return: (Logger) a logging instance | scripts/delete_assignment_types.py | initialize_logging | STEFANIHUNT/workforce-scripts | 74 | python | def initialize_logging(log_file=None):
'\n Setup logging\n :param log_file: (string) The file to log to\n :return: (Logger) a logging instance\n '
formatter = logging.Formatter('[%(asctime)s] [%(filename)30s:%(lineno)4s - %(funcName)30s()][%(threadName)5s] [%(name)10.10s] [%(levelname)8s] %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
if log_file:
rh = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=10485760)
rh.setFormatter(formatter)
rh.setLevel(logging.DEBUG)
logger.addHandler(rh)
logger.addHandler(sh)
return logger | def initialize_logging(log_file=None):
'\n Setup logging\n :param log_file: (string) The file to log to\n :return: (Logger) a logging instance\n '
formatter = logging.Formatter('[%(asctime)s] [%(filename)30s:%(lineno)4s - %(funcName)30s()][%(threadName)5s] [%(name)10.10s] [%(levelname)8s] %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
sh.setLevel(logging.INFO)
if log_file:
rh = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=10485760)
rh.setFormatter(formatter)
rh.setLevel(logging.DEBUG)
logger.addHandler(rh)
logger.addHandler(sh)
return logger<|docstring|>Setup logging
:param log_file: (string) The file to log to
:return: (Logger) a logging instance<|endoftext|> |
2d03c5b61fa4855de63ae6a6f12c7e205a02d49d788c7c1f0bfc8b3931030690 | def gaussian_noise(x, sigmas):
'Apply normally-distributed noise to the N,D array x.\n Parameters:\n -----------\n x : array\n (N,D) array of values\n sigmas : array\n D-element vector of std. dev. for each column of x\n '
n = np.random.normal(np.zeros(len(sigmas)), sigmas, size=(x.shape[0], len(sigmas)))
return (x + n) | Apply normally-distributed noise to the N,D array x.
Parameters:
-----------
x : array
(N,D) array of values
sigmas : array
D-element vector of std. dev. for each column of x | dynamic/pfilter.py | gaussian_noise | johnhw/summerschool2017 | 6 | python | def gaussian_noise(x, sigmas):
'Apply normally-distributed noise to the N,D array x.\n Parameters:\n -----------\n x : array\n (N,D) array of values\n sigmas : array\n D-element vector of std. dev. for each column of x\n '
n = np.random.normal(np.zeros(len(sigmas)), sigmas, size=(x.shape[0], len(sigmas)))
return (x + n) | def gaussian_noise(x, sigmas):
'Apply normally-distributed noise to the N,D array x.\n Parameters:\n -----------\n x : array\n (N,D) array of values\n sigmas : array\n D-element vector of std. dev. for each column of x\n '
n = np.random.normal(np.zeros(len(sigmas)), sigmas, size=(x.shape[0], len(sigmas)))
return (x + n)<|docstring|>Apply normally-distributed noise to the N,D array x.
Parameters:
-----------
x : array
(N,D) array of values
sigmas : array
D-element vector of std. dev. for each column of x<|endoftext|> |
f739e8f4444521d68b0b3dd0279c53287300a7701c12bf9fd3452fbe44ff05c5 | def __init__(self, initial, observe_fn, n_particles=200, dynamics_fn=None, noise_fn=None, weight_fn=None, resample_proportion=0.05, column_names=None, internal_weight_fn=None):
'\n \n Parameters:\n -----------\n \n initial : list\n sequence of prior distributions; should be a frozen distribution from scipy.stats; \n e.g. scipy.stats.norm(loc=0,scale=1) for unit normal\n observe_fn : function(states) => observations\n transformation function from the internal state to the sensor state. Takes an (N,D) array of states \n and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).\n n_particles : int \n number of particles in the filter\n dynamics_fn : function(states) => states\n dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.\n noise_fn : function(states) => states\n noise function, takes a state vector and returns a new one with noise added.\n weight_fn : function(real, hypothesized) => weights\n computes the distance from the real sensed variable and that returned by observe_fn. Takes\n a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and \n returns a strictly positive weight for the each hypothesis as an N-element vector. \n This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.\n internal_weight_fn : function(states, observed) => weights\n Reweights the particles based on their *internal* state. This is function which takes\n an (N,D) array of internal states and the observation and \n returns a strictly positive weight for the each state as an N-element vector. \n Typically used to force particles inside of bounds, etc. \n resample_proportion : float\n proportion of samples to draw from the initial on each iteration.\n column_names : list of strings\n names of each the columns of the state vector\n \n '
self.column_names = column_names
self.prior = initial
self.d = self.prior(n_particles).shape[1]
self.n_particles = n_particles
self.observe_fn = observe_fn
self.dynamics_fn = (dynamics_fn or no_dynamics)
self.noise_fn = (noise_fn or no_noise)
self.weight_fn = (weight_fn or squared_error)
self.resample_proportion = resample_proportion
self.particles = np.zeros((self.n_particles, self.d))
self.internal_weight_fn = internal_weight_fn
self.init_filter()
self.original_particles = np.array(self.particles) | Parameters:
-----------
initial : list
sequence of prior distributions; should be a frozen distribution from scipy.stats;
e.g. scipy.stats.norm(loc=0,scale=1) for unit normal
observe_fn : function(states) => observations
transformation function from the internal state to the sensor state. Takes an (N,D) array of states
and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).
n_particles : int
number of particles in the filter
dynamics_fn : function(states) => states
dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.
noise_fn : function(states) => states
noise function, takes a state vector and returns a new one with noise added.
weight_fn : function(real, hypothesized) => weights
computes the distance from the real sensed variable and that returned by observe_fn. Takes
a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and
returns a strictly positive weight for the each hypothesis as an N-element vector.
This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.
internal_weight_fn : function(states, observed) => weights
Reweights the particles based on their *internal* state. This is function which takes
an (N,D) array of internal states and the observation and
returns a strictly positive weight for the each state as an N-element vector.
Typically used to force particles inside of bounds, etc.
resample_proportion : float
proportion of samples to draw from the initial on each iteration.
column_names : list of strings
names of each the columns of the state vector | dynamic/pfilter.py | __init__ | johnhw/summerschool2017 | 6 | python | def __init__(self, initial, observe_fn, n_particles=200, dynamics_fn=None, noise_fn=None, weight_fn=None, resample_proportion=0.05, column_names=None, internal_weight_fn=None):
'\n \n Parameters:\n -----------\n \n initial : list\n sequence of prior distributions; should be a frozen distribution from scipy.stats; \n e.g. scipy.stats.norm(loc=0,scale=1) for unit normal\n observe_fn : function(states) => observations\n transformation function from the internal state to the sensor state. Takes an (N,D) array of states \n and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).\n n_particles : int \n number of particles in the filter\n dynamics_fn : function(states) => states\n dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.\n noise_fn : function(states) => states\n noise function, takes a state vector and returns a new one with noise added.\n weight_fn : function(real, hypothesized) => weights\n computes the distance from the real sensed variable and that returned by observe_fn. Takes\n a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and \n returns a strictly positive weight for the each hypothesis as an N-element vector. \n This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.\n internal_weight_fn : function(states, observed) => weights\n Reweights the particles based on their *internal* state. This is function which takes\n an (N,D) array of internal states and the observation and \n returns a strictly positive weight for the each state as an N-element vector. \n Typically used to force particles inside of bounds, etc. \n resample_proportion : float\n proportion of samples to draw from the initial on each iteration.\n column_names : list of strings\n names of each the columns of the state vector\n \n '
self.column_names = column_names
self.prior = initial
self.d = self.prior(n_particles).shape[1]
self.n_particles = n_particles
self.observe_fn = observe_fn
self.dynamics_fn = (dynamics_fn or no_dynamics)
self.noise_fn = (noise_fn or no_noise)
self.weight_fn = (weight_fn or squared_error)
self.resample_proportion = resample_proportion
self.particles = np.zeros((self.n_particles, self.d))
self.internal_weight_fn = internal_weight_fn
self.init_filter()
self.original_particles = np.array(self.particles) | def __init__(self, initial, observe_fn, n_particles=200, dynamics_fn=None, noise_fn=None, weight_fn=None, resample_proportion=0.05, column_names=None, internal_weight_fn=None):
'\n \n Parameters:\n -----------\n \n initial : list\n sequence of prior distributions; should be a frozen distribution from scipy.stats; \n e.g. scipy.stats.norm(loc=0,scale=1) for unit normal\n observe_fn : function(states) => observations\n transformation function from the internal state to the sensor state. Takes an (N,D) array of states \n and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).\n n_particles : int \n number of particles in the filter\n dynamics_fn : function(states) => states\n dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.\n noise_fn : function(states) => states\n noise function, takes a state vector and returns a new one with noise added.\n weight_fn : function(real, hypothesized) => weights\n computes the distance from the real sensed variable and that returned by observe_fn. Takes\n a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and \n returns a strictly positive weight for the each hypothesis as an N-element vector. \n This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.\n internal_weight_fn : function(states, observed) => weights\n Reweights the particles based on their *internal* state. This is function which takes\n an (N,D) array of internal states and the observation and \n returns a strictly positive weight for the each state as an N-element vector. \n Typically used to force particles inside of bounds, etc. \n resample_proportion : float\n proportion of samples to draw from the initial on each iteration.\n column_names : list of strings\n names of each the columns of the state vector\n \n '
self.column_names = column_names
self.prior = initial
self.d = self.prior(n_particles).shape[1]
self.n_particles = n_particles
self.observe_fn = observe_fn
self.dynamics_fn = (dynamics_fn or no_dynamics)
self.noise_fn = (noise_fn or no_noise)
self.weight_fn = (weight_fn or squared_error)
self.resample_proportion = resample_proportion
self.particles = np.zeros((self.n_particles, self.d))
self.internal_weight_fn = internal_weight_fn
self.init_filter()
self.original_particles = np.array(self.particles)<|docstring|>Parameters:
-----------
initial : list
sequence of prior distributions; should be a frozen distribution from scipy.stats;
e.g. scipy.stats.norm(loc=0,scale=1) for unit normal
observe_fn : function(states) => observations
transformation function from the internal state to the sensor state. Takes an (N,D) array of states
and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).
n_particles : int
number of particles in the filter
dynamics_fn : function(states) => states
dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.
noise_fn : function(states) => states
noise function, takes a state vector and returns a new one with noise added.
weight_fn : function(real, hypothesized) => weights
computes the distance from the real sensed variable and that returned by observe_fn. Takes
a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and
returns a strictly positive weight for the each hypothesis as an N-element vector.
This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.
internal_weight_fn : function(states, observed) => weights
Reweights the particles based on their *internal* state. This is function which takes
an (N,D) array of internal states and the observation and
returns a strictly positive weight for the each state as an N-element vector.
Typically used to force particles inside of bounds, etc.
resample_proportion : float
proportion of samples to draw from the initial on each iteration.
column_names : list of strings
names of each the columns of the state vector<|endoftext|> |
54eb3b655784e5fe54da596ddd4fb1632a59f4c21d860e6baf99d189a3aa113d | def init_filter(self, mask=None):
'Initialise the filter by drawing samples from the prior.\n \n Parameters:\n -----------\n mask : array, optional\n boolean mask specifying the elements of the particle array to draw from the prior. None (default)\n implies all particles will be resampled (i.e. a complete reset)\n '
new_sample = self.prior(self.n_particles)
if (mask is None):
self.particles = new_sample
else:
self.particles[(mask, :)] = new_sample[(mask, :)] | Initialise the filter by drawing samples from the prior.
Parameters:
-----------
mask : array, optional
boolean mask specifying the elements of the particle array to draw from the prior. None (default)
implies all particles will be resampled (i.e. a complete reset) | dynamic/pfilter.py | init_filter | johnhw/summerschool2017 | 6 | python | def init_filter(self, mask=None):
'Initialise the filter by drawing samples from the prior.\n \n Parameters:\n -----------\n mask : array, optional\n boolean mask specifying the elements of the particle array to draw from the prior. None (default)\n implies all particles will be resampled (i.e. a complete reset)\n '
new_sample = self.prior(self.n_particles)
if (mask is None):
self.particles = new_sample
else:
self.particles[(mask, :)] = new_sample[(mask, :)] | def init_filter(self, mask=None):
'Initialise the filter by drawing samples from the prior.\n \n Parameters:\n -----------\n mask : array, optional\n boolean mask specifying the elements of the particle array to draw from the prior. None (default)\n implies all particles will be resampled (i.e. a complete reset)\n '
new_sample = self.prior(self.n_particles)
if (mask is None):
self.particles = new_sample
else:
self.particles[(mask, :)] = new_sample[(mask, :)]<|docstring|>Initialise the filter by drawing samples from the prior.
Parameters:
-----------
mask : array, optional
boolean mask specifying the elements of the particle array to draw from the prior. None (default)
implies all particles will be resampled (i.e. a complete reset)<|endoftext|> |
07484aea1ae9ec4b85f73787682e0be1fd76739d2a14cab08a1682629223c69d | def update(self, observed=None):
'Update the state of the particle filter given an observation.\n \n Parameters:\n ----------\n \n observed: array\n The observed output, in the same format as observe_fn() will produce. This is typically the\n input from the sensor observing the process (e.g. a camera image in optical tracking).\n If None, then the observation step is skipped, and the filter will run one step in prediction-only mode.\n '
self.particles = self.dynamics_fn(self.particles)
self.particles = self.noise_fn(self.particles)
self.hypotheses = self.observe_fn(self.particles)
if ((observed is not None) and (not np.any(np.isnan(observed)))):
weights = np.clip(np.array(self.weight_fn(self.hypotheses, observed)), 0, np.inf)
else:
weights = np.ones((self.n_particles,))
if (self.internal_weight_fn is not None):
internal_weights = self.internal_weight_fn(self.particles, observed)
internal_weights = np.clip(internal_weights, 0, np.inf)
internal_weights = (internal_weights / np.sum(internal_weights))
weights *= internal_weights
self.weights = (weights / np.sum(weights))
indices = resample(self.weights)
self.particles = self.particles[(indices, :)]
self.mean_hypothesis = np.sum((self.hypotheses.T * self.weights), axis=(- 1)).T
self.mean_state = np.sum((self.particles.T * self.weights), axis=(- 1)).T
self.map = self.particles[np.argmax(self.weights)]
self.original_particles = np.array(self.particles)
random_mask = (np.random.random(size=(self.n_particles,)) < self.resample_proportion)
self.resampled_particles = random_mask
self.init_filter(mask=random_mask) | Update the state of the particle filter given an observation.
Parameters:
----------
observed: array
The observed output, in the same format as observe_fn() will produce. This is typically the
input from the sensor observing the process (e.g. a camera image in optical tracking).
If None, then the observation step is skipped, and the filter will run one step in prediction-only mode. | dynamic/pfilter.py | update | johnhw/summerschool2017 | 6 | python | def update(self, observed=None):
'Update the state of the particle filter given an observation.\n \n Parameters:\n ----------\n \n observed: array\n The observed output, in the same format as observe_fn() will produce. This is typically the\n input from the sensor observing the process (e.g. a camera image in optical tracking).\n If None, then the observation step is skipped, and the filter will run one step in prediction-only mode.\n '
self.particles = self.dynamics_fn(self.particles)
self.particles = self.noise_fn(self.particles)
self.hypotheses = self.observe_fn(self.particles)
if ((observed is not None) and (not np.any(np.isnan(observed)))):
weights = np.clip(np.array(self.weight_fn(self.hypotheses, observed)), 0, np.inf)
else:
weights = np.ones((self.n_particles,))
if (self.internal_weight_fn is not None):
internal_weights = self.internal_weight_fn(self.particles, observed)
internal_weights = np.clip(internal_weights, 0, np.inf)
internal_weights = (internal_weights / np.sum(internal_weights))
weights *= internal_weights
self.weights = (weights / np.sum(weights))
indices = resample(self.weights)
self.particles = self.particles[(indices, :)]
self.mean_hypothesis = np.sum((self.hypotheses.T * self.weights), axis=(- 1)).T
self.mean_state = np.sum((self.particles.T * self.weights), axis=(- 1)).T
self.map = self.particles[np.argmax(self.weights)]
self.original_particles = np.array(self.particles)
random_mask = (np.random.random(size=(self.n_particles,)) < self.resample_proportion)
self.resampled_particles = random_mask
self.init_filter(mask=random_mask) | def update(self, observed=None):
'Update the state of the particle filter given an observation.\n \n Parameters:\n ----------\n \n observed: array\n The observed output, in the same format as observe_fn() will produce. This is typically the\n input from the sensor observing the process (e.g. a camera image in optical tracking).\n If None, then the observation step is skipped, and the filter will run one step in prediction-only mode.\n '
self.particles = self.dynamics_fn(self.particles)
self.particles = self.noise_fn(self.particles)
self.hypotheses = self.observe_fn(self.particles)
if ((observed is not None) and (not np.any(np.isnan(observed)))):
weights = np.clip(np.array(self.weight_fn(self.hypotheses, observed)), 0, np.inf)
else:
weights = np.ones((self.n_particles,))
if (self.internal_weight_fn is not None):
internal_weights = self.internal_weight_fn(self.particles, observed)
internal_weights = np.clip(internal_weights, 0, np.inf)
internal_weights = (internal_weights / np.sum(internal_weights))
weights *= internal_weights
self.weights = (weights / np.sum(weights))
indices = resample(self.weights)
self.particles = self.particles[(indices, :)]
self.mean_hypothesis = np.sum((self.hypotheses.T * self.weights), axis=(- 1)).T
self.mean_state = np.sum((self.particles.T * self.weights), axis=(- 1)).T
self.map = self.particles[np.argmax(self.weights)]
self.original_particles = np.array(self.particles)
random_mask = (np.random.random(size=(self.n_particles,)) < self.resample_proportion)
self.resampled_particles = random_mask
self.init_filter(mask=random_mask)<|docstring|>Update the state of the particle filter given an observation.
Parameters:
----------
observed: array
The observed output, in the same format as observe_fn() will produce. This is typically the
input from the sensor observing the process (e.g. a camera image in optical tracking).
If None, then the observation step is skipped, and the filter will run one step in prediction-only mode.<|endoftext|> |
3a0369e65361c1dee30911237c31ca1ebb6d3669b9cf74f0f6349a741f8858cf | def run_sanity_test(test_case: TestCase, path: str) -> NoReturn:
'\n Run test case for sanity test that uses a broken print("foo" line in the otherwise correct sample to get past the\n compilation test. Expected outcome is is one missing_brackets.normal.closing message on line 1\n :param test_case: self of TestCase\n :param path: path to file\n '
expected_message = get_formatted_message('missing_brackets.normal.closing', count=1, line_start=1, line_end=1)
run_test_scenario(test_case, path, 1, expected_message) | Run test case for sanity test that uses a broken print("foo" line in the otherwise correct sample to get past the
compilation test. Expected outcome is is one missing_brackets.normal.closing message on line 1
:param test_case: self of TestCase
:param path: path to file | test/test_utils.py | run_sanity_test | K44rel/error-explainer | 3 | python | def run_sanity_test(test_case: TestCase, path: str) -> NoReturn:
'\n Run test case for sanity test that uses a broken print("foo" line in the otherwise correct sample to get past the\n compilation test. Expected outcome is is one missing_brackets.normal.closing message on line 1\n :param test_case: self of TestCase\n :param path: path to file\n '
expected_message = get_formatted_message('missing_brackets.normal.closing', count=1, line_start=1, line_end=1)
run_test_scenario(test_case, path, 1, expected_message) | def run_sanity_test(test_case: TestCase, path: str) -> NoReturn:
'\n Run test case for sanity test that uses a broken print("foo" line in the otherwise correct sample to get past the\n compilation test. Expected outcome is is one missing_brackets.normal.closing message on line 1\n :param test_case: self of TestCase\n :param path: path to file\n '
expected_message = get_formatted_message('missing_brackets.normal.closing', count=1, line_start=1, line_end=1)
run_test_scenario(test_case, path, 1, expected_message)<|docstring|>Run test case for sanity test that uses a broken print("foo" line in the otherwise correct sample to get past the
compilation test. Expected outcome is is one missing_brackets.normal.closing message on line 1
:param test_case: self of TestCase
:param path: path to file<|endoftext|> |
cb8e3c8e689b95bef648d0a0cc0c383f57adb2c6b3e4498b569c28c1890de849 | def run_test_scenario(test_case: TestCase, path: str, expected_messages_count: int, expected_messages: Union[(List[str], str, None)]) -> NoReturn:
'\n Default test scenario for checking errors in a python file.\n :param test_case: self of TestCase\n :param path: path to file\n :param expected_messages_count: number of expected messages\n :param expected_messages: 1 or more expected messages can be string or list\n '
messages = run_checks(path)
print('---------------------------------------------')
print(f'expected {expected_messages}')
print(f'actual {messages}')
print('')
print('---------------------------------------------')
test_case.assertEqual(expected_messages_count, len(messages))
if (expected_messages is not None):
if (type(expected_messages) == str):
expected_messages = [expected_messages]
test_case.assertTrue((set(expected_messages) == set(messages))) | Default test scenario for checking errors in a python file.
:param test_case: self of TestCase
:param path: path to file
:param expected_messages_count: number of expected messages
:param expected_messages: 1 or more expected messages can be string or list | test/test_utils.py | run_test_scenario | K44rel/error-explainer | 3 | python | def run_test_scenario(test_case: TestCase, path: str, expected_messages_count: int, expected_messages: Union[(List[str], str, None)]) -> NoReturn:
'\n Default test scenario for checking errors in a python file.\n :param test_case: self of TestCase\n :param path: path to file\n :param expected_messages_count: number of expected messages\n :param expected_messages: 1 or more expected messages can be string or list\n '
messages = run_checks(path)
print('---------------------------------------------')
print(f'expected {expected_messages}')
print(f'actual {messages}')
print()
print('---------------------------------------------')
test_case.assertEqual(expected_messages_count, len(messages))
if (expected_messages is not None):
if (type(expected_messages) == str):
expected_messages = [expected_messages]
test_case.assertTrue((set(expected_messages) == set(messages))) | def run_test_scenario(test_case: TestCase, path: str, expected_messages_count: int, expected_messages: Union[(List[str], str, None)]) -> NoReturn:
'\n Default test scenario for checking errors in a python file.\n :param test_case: self of TestCase\n :param path: path to file\n :param expected_messages_count: number of expected messages\n :param expected_messages: 1 or more expected messages can be string or list\n '
messages = run_checks(path)
print('---------------------------------------------')
print(f'expected {expected_messages}')
print(f'actual {messages}')
print()
print('---------------------------------------------')
test_case.assertEqual(expected_messages_count, len(messages))
if (expected_messages is not None):
if (type(expected_messages) == str):
expected_messages = [expected_messages]
test_case.assertTrue((set(expected_messages) == set(messages)))<|docstring|>Default test scenario for checking errors in a python file.
:param test_case: self of TestCase
:param path: path to file
:param expected_messages_count: number of expected messages
:param expected_messages: 1 or more expected messages can be string or list<|endoftext|> |
3c4945c8d22da196c228c3df9126fe07d8649af2c62ab01c7bd9ac4a54424752 | def get(self):
' Retrieve the page content. '
self.render('index.html') | Retrieve the page content. | server.py | get | vmlaker/hello-websocket | 89 | python | def get(self):
' '
self.render('index.html') | def get(self):
' '
self.render('index.html')<|docstring|>Retrieve the page content.<|endoftext|> |
d82afdec38233332bca2c38562d66c09ec09aa2a33be8a1d25302a0200241b21 | def __init__(self, *args, **kwargs):
' Initialize the Redis store and framerate monitor. '
super().__init__(*args, **kwargs)
self._store = redis.Redis()
self._prev_image_id = None | Initialize the Redis store and framerate monitor. | server.py | __init__ | vmlaker/hello-websocket | 89 | python | def __init__(self, *args, **kwargs):
' '
super().__init__(*args, **kwargs)
self._store = redis.Redis()
self._prev_image_id = None | def __init__(self, *args, **kwargs):
' '
super().__init__(*args, **kwargs)
self._store = redis.Redis()
self._prev_image_id = None<|docstring|>Initialize the Redis store and framerate monitor.<|endoftext|> |
15845c415078cdccd8f5e84eb87246b10316c82feb9be882e8279ea1569ca4ba | def on_message(self, message):
' Retrieve image ID from database until different from last ID,\n then retrieve image, de-serialize, encode and send to client. '
while True:
time.sleep((1.0 / MAX_FPS))
image_id = self._store.get('image_id')
if (image_id != self._prev_image_id):
break
self._prev_image_id = image_id
image = self._store.get('image')
image = base64.b64encode(image)
self.write_message(image)
print(image_id) | Retrieve image ID from database until different from last ID,
then retrieve image, de-serialize, encode and send to client. | server.py | on_message | vmlaker/hello-websocket | 89 | python | def on_message(self, message):
' Retrieve image ID from database until different from last ID,\n then retrieve image, de-serialize, encode and send to client. '
while True:
time.sleep((1.0 / MAX_FPS))
image_id = self._store.get('image_id')
if (image_id != self._prev_image_id):
break
self._prev_image_id = image_id
image = self._store.get('image')
image = base64.b64encode(image)
self.write_message(image)
print(image_id) | def on_message(self, message):
' Retrieve image ID from database until different from last ID,\n then retrieve image, de-serialize, encode and send to client. '
while True:
time.sleep((1.0 / MAX_FPS))
image_id = self._store.get('image_id')
if (image_id != self._prev_image_id):
break
self._prev_image_id = image_id
image = self._store.get('image')
image = base64.b64encode(image)
self.write_message(image)
print(image_id)<|docstring|>Retrieve image ID from database until different from last ID,
then retrieve image, de-serialize, encode and send to client.<|endoftext|> |
0bc672c347abc6a61fd073aa0dbdaa306a896bd78dcac0e168ffefa781aef3f5 | def return_inf_imb_two_selected_coords(self, coords1, coords2, k=1, dtype='mean'):
"Returns the imbalances between distances taken as the i and the j component of the coordinate matrix X.\n\n Args:\n coords1 (list(int)): components for the first distance\n coords2 (list(int)): components for the second distance\n k (int): order of nearest neighbour considered for the calculation of the imbalance, default is 1\n dtype (str): type of information imbalance computation, default is 'mean'\n\n Returns:\n (float, float): the information imbalance from distance i to distance j and vice versa\n "
X_ = self.X[(:, coords1)]
(_, dist_indices_i) = compute_nn_distances(X_, self.maxk, self.metric, self.period)
X_ = self.X[(:, coords2)]
(_, dist_indices_j) = compute_nn_distances(X_, self.maxk, self.metric, self.period)
imb_ij = ut._return_imbalance(dist_indices_i, dist_indices_j, k=k, dtype=dtype)
imb_ji = ut._return_imbalance(dist_indices_j, dist_indices_i, k=k, dtype=dtype)
return (imb_ij, imb_ji) | Returns the imbalances between distances taken as the i and the j component of the coordinate matrix X.
Args:
coords1 (list(int)): components for the first distance
def return_inf_imb_two_selected_coords(self, coords1, coords2, k=1, dtype='mean'):
    """Return both information imbalances between two coordinate subsets of X.

    Args:
        coords1 (list(int)): components defining the first distance
        coords2 (list(int)): components defining the second distance
        k (int): order of nearest neighbour considered for the calculation of the imbalance, default is 1
        dtype (str): type of information imbalance computation, default is 'mean'

    Returns:
        (float, float): the information imbalance from distance 1 to distance 2, and vice versa
    """
    # Neighbour ranks computed independently in each coordinate subspace.
    _, ranks_a = compute_nn_distances(self.X[:, coords1], self.maxk, self.metric, self.period)
    _, ranks_b = compute_nn_distances(self.X[:, coords2], self.maxk, self.metric, self.period)
    # The imbalance is directional, so evaluate both orientations.
    imb_ab = ut._return_imbalance(ranks_a, ranks_b, k=k, dtype=dtype)
    imb_ba = ut._return_imbalance(ranks_b, ranks_a, k=k, dtype=dtype)
    return imb_ab, imb_ba
def return_inf_imb_matrix_of_coords(self, k=1, dtype='mean'):
    """Compute the information imbalances between all pairs of D features of the data.

    Args:
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        n_mat (np.array(float)): a DxD matrix containing all the information imbalances

    Raises:
        ValueError: if njobs is not a positive integer.
    """
    assert self.X is not None
    ncoords = self.dims
    n_mat = np.zeros((ncoords, ncoords))
    if self.njobs == 1:
        for i in range(ncoords):
            for j in range(i):
                if self.verb:
                    print('computing loss between coords ', i, j)
                nij, nji = self.return_inf_imb_two_selected_coords([i], [j], k, dtype)
                n_mat[i, j] = nij
                n_mat[j, i] = nji
    elif self.njobs > 1:
        if self.verb:
            print('computing imbalances with coord number on {} processors'.format(self.njobs))
        nmats = Parallel(n_jobs=self.njobs)(
            delayed(self.return_inf_imb_two_selected_coords)([i], [j], k, dtype)
            for i in range(ncoords)
            for j in range(i)
        )
        indices = [(i, j) for i in range(ncoords) for j in range(i)]
        # BUG FIX: a leftover debug print of the full index/result lists ran on
        # every iteration of this merge loop; it has been removed.
        for (i, j), (nij, nji) in zip(indices, nmats):
            n_mat[i, j] = nij
            n_mat[j, i] = nji
    else:
        # BUG FIX: njobs <= 0 previously fell through silently and returned an
        # all-zero matrix; raise instead, consistent with
        # return_inf_imb_target_selected_coords.
        raise ValueError('njobs cannot be negative')
    return n_mat
def return_inf_imb_full_all_coords(self, k=1, dtype='mean'):
    """Compute the information imbalances between the 'full' space and each one of its D features.

    Args:
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        (np.array(float)): a 2xD matrix containing the information imbalances between
        the original space and each of its D features.
    """
    assert self.X is not None
    # One singleton subspace per feature of X.
    singletons = [[coord] for coord in range(self.X.shape[1])]
    return self.return_inf_imb_full_selected_coords(singletons, k=k, dtype=dtype)
def return_inf_imb_full_selected_coords(self, coord_list, k=1, dtype='mean'):
    """Compute the information imbalances between the 'full' space and a selection of features.

    Args:
        coord_list (list(list(int))): a list of the type [[1, 2], [8, 3, 5], ...] where each
            sub-list defines a set of coordinates for which the information imbalance should be
            computed.
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        (np.array(float)): a 2xL matrix containing the information imbalances between
        the original space and each one of the L subspaces defined in coord_list
    """
    assert self.X is not None
    print('total number of computations is: ', len(coord_list))
    # The full space plays the role of the target: reuse its precomputed neighbour ranks.
    return self.return_inf_imb_target_selected_coords(self.dist_indices, coord_list, k=k, dtype=dtype)
def return_inf_imb_target_all_coords(self, target_ranks, k=1, dtype='mean'):
    """Compute the information imbalances between the 'target' space and all single-feature spaces in X.

    Args:
        target_ranks (np.array(int)): an array containing the ranks in the target space
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        (np.array(float)): a 2xD matrix containing the information imbalances between
        the target space and every single feature of X
    """
    assert self.X is not None
    # One singleton subspace per feature; delegate to the generic routine.
    singletons = [[coord] for coord in range(self.dims)]
    return self.return_inf_imb_target_selected_coords(target_ranks, singletons, k=k, dtype=dtype)
def return_inf_imb_target_selected_coords(self, target_ranks, coord_list, k=1, dtype='mean'):
    """Compute the information imbalances between the 'target' space and a selection of features.

    Args:
        target_ranks (np.array(int)): an array containing the ranks in the target space
        coord_list (list(list(int))): a list of the type [[1, 2], [8, 3, 5], ...] where each
            sub-list defines a set of coordinates for which the information imbalance should be
            computed.
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        (np.array(float)): a 2xL matrix containing the information imbalances between
        the target space and each one of the L subspaces defined in coord_list

    Raises:
        ValueError: if njobs is not a positive integer.
    """
    assert self.X is not None
    assert target_ranks.shape[0] == self.X.shape[0]
    print('total number of computations is: ', len(coord_list))
    if self.njobs == 1:
        # Serial path: one imbalance pair per coordinate subset.
        results = []
        for coords in coord_list:
            if self.verb:
                print('computing loss with coord selection')
            results.append(self._return_imb_with_coords(self.X, coords, target_ranks, k, dtype))
    elif self.njobs > 1:
        if self.verb:
            print('computing loss with coord number on {} processors'.format(self.njobs))
        results = Parallel(n_jobs=self.njobs)(
            delayed(self._return_imb_with_coords)(self.X, coords, target_ranks, k, dtype)
            for coords in coord_list
        )
    else:
        raise ValueError('njobs cannot be negative')
    # Transpose so that row 0 holds target->subset and row 1 subset->target.
    return np.array(results).T
def greedy_feature_selection_full(self, n_coords, k=1, dtype='mean', symm=True):
    """Greedy selection of the set of coordinates most informative about the full distance measure.

    Args:
        n_coords: number of coordinates after which the algorithm is stopped
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution
        symm (bool): whether to use the symmetrised information imbalance

    Returns:
        selected_coords: coordinates selected at each step
        all_imbalances: imbalance records produced at each step
    """
    print('taking full space as the complete representation')
    assert self.X is not None
    # The full-space neighbour ranks serve as the target representation.
    return self.greedy_feature_selection_target(self.dist_indices, n_coords, k, dtype=dtype, symm=symm)
def greedy_feature_selection_target(self, target_ranks, n_coords, k, dtype='mean', symm=True):
    """Greedy selection of the set of coordinates most informative about a target distance.

    NOTE(review): as a checkpointing side effect this writes 'selected_coords.txt'
    and 'all_losses.npy' to the current working directory after every step.

    Args:
        target_ranks (np.array(int)): an array containing the ranks in the target space
        n_coords: number of coordinates after which the algorithm is stopped
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution
        symm (bool): whether to use the symmetrised information imbalance

    Returns:
        selected_coords: coordinates selected at each step
        all_imbalances: imbalance records produced at each step
    """
    print('taking labels as the reference representation')
    assert self.X is not None
    dims = self.dims

    def _pick(imb_pair):
        # Symmetrised criterion: project the (fwd, bwd) imbalances onto the
        # diagonal direction; otherwise minimise only the second direction.
        if symm:
            return np.argmin(np.dot(imb_pair.T, np.array([np.sqrt(0.5), np.sqrt(0.5)])))
        return np.argmin(imb_pair[1])

    # First coordinate: best single feature against the target.
    imbalances = self.return_inf_imb_target_all_coords(target_ranks, k=k, dtype=dtype)
    selected_coord = _pick(imbalances)
    print('1 coordinate selected: ', selected_coord)
    other_coords = list(np.arange(dims).astype(int))
    other_coords.remove(selected_coord)
    selected_coords = [selected_coord]
    all_imbalances = [imbalances]
    np.savetxt('selected_coords.txt', selected_coords, fmt='%i')
    np.save('all_losses.npy', all_imbalances)
    for step in range(n_coords):
        # Grow the selected set by every remaining candidate, one at a time.
        candidate_sets = [selected_coords + [oc] for oc in other_coords]
        imbalances_ = self.return_inf_imb_target_selected_coords(
            target_ranks, candidate_sets, k=k, dtype=dtype
        )
        # Full-width record, NaN-padded at already-selected positions.
        imbalances = np.full((2, dims), np.nan)
        imbalances[:, other_coords] = imbalances_
        selected_coord = other_coords[_pick(imbalances_)]
        print('{} coordinate selected: '.format(step + 2), selected_coord)
        other_coords.remove(selected_coord)
        selected_coords.append(selected_coord)
        all_imbalances.append(imbalances)
        np.savetxt('selected_coords.txt', selected_coords, fmt='%i')
        np.save('all_losses.npy', all_imbalances)
    return selected_coords, all_imbalances
def return_inf_imb_target_all_dplets(self, target_ranks, d, k=1, dtype='mean'):
    """Compute the information imbalances between a target distance and every
    combination of d coordinates of X.

    Args:
        target_ranks (np.array(int)): an array containing the ranks in the target space
        d: size of the coordinate tuples to enumerate
        k (int): number of neighbours considered in the computation of the imbalances
        dtype (str): specific way to characterise the deviation from a delta distribution

    Returns:
        (np.array(int), np.array(float)): the enumerated d-plets and the 2xL
        matrix of imbalances against the target.
    """
    assert self.X is not None
    import itertools

    print("WARNING: computational cost grows combinatorially! Don't forget to save the results.")
    if self.verb:
        print('computing loss between all {}-plets and the target label'.format(d))
    n_features = self.X.shape[1]
    # Every d-element subset of the feature indices, in lexicographic order.
    dplets = list(itertools.combinations(list(np.arange(n_features).astype(int)), d))
    imbalances = self.return_inf_imb_target_selected_coords(target_ranks, dplets, k=k, dtype=dtype)
    return np.array(dplets), np.array(imbalances)
'Compute the information imbalances between a target distance and all possible combinations of d coordinates\n contained of X.\n\n Args:\n target_ranks (np.array(int)): an array containing the ranks in the target space\n d:\n k (int): number of neighbours considered in the computation of the imbalances\n dtype (str): specific way to characterise the deviation from a delta distribution\n\n Returns:\n\n '
assert (self.X is not None)
import itertools
print("WARNING: computational cost grows combinatorially! Don't forget to save the results.")
if self.verb:
print('computing loss between all {}-plets and the target label'.format(d))
D = self.X.shape[1]
all_coords = list(np.arange(D).astype(int))
coord_list = list(itertools.combinations(all_coords, d))
imbalances = self.return_inf_imb_target_selected_coords(target_ranks, coord_list, k=k, dtype=dtype)
return (np.array(coord_list), np.array(imbalances))<|docstring|>Compute the information imbalances between a target distance and all possible combinations of d coordinates
contained of X.
Args:
target_ranks (np.array(int)): an array containing the ranks in the target space
d:
k (int): number of neighbours considered in the computation of the imbalances
dtype (str): specific way to characterise the deviation from a delta distribution
Returns:<|endoftext|> |
87f707196741eec76fe4872864f41a50eb6ea583dab39118ee697f4cb57ca239 | def _return_imb_with_coords(self, X, coords, dist_indices, k, dtype='mean'):
"Returns the imbalances between a 'full' distance computed using all coordinates, and an alternative distance\n built using a subset of coordinates.\n\n Args:\n X: coordinate matrix\n coords: subset of coordinates to be used when building the alternative distance\n dist_indices (int[:,:]): nearest neighbours according to full distance\n k (int): order of nearest neighbour considered, default is 1\n dtype (str): type of information imbalance computation, default is 'mean'\n\n Returns:\n (float, float): the information imbalance from 'full' to 'alternative' and vice versa\n "
X_ = X[(:, coords)]
(_, dist_indices_coords) = compute_nn_distances(X_, self.maxk, self.metric, self.period)
imb_coords_full = ut._return_imbalance(dist_indices_coords, dist_indices, k=k, dtype=dtype)
imb_full_coords = ut._return_imbalance(dist_indices, dist_indices_coords, k=k, dtype=dtype)
print('computing imbalances with coords ', coords)
return (imb_full_coords, imb_coords_full) | Returns the imbalances between a 'full' distance computed using all coordinates, and an alternative distance
built using a subset of coordinates.
Args:
X: coordinate matrix
coords: subset of coordinates to be used when building the alternative distance
dist_indices (int[:,:]): nearest neighbours according to full distance
k (int): order of nearest neighbour considered, default is 1
dtype (str): type of information imbalance computation, default is 'mean'
Returns:
(float, float): the information imbalance from 'full' to 'alternative' and vice versa | dadapy/metric_comparisons.py | _return_imb_with_coords | sissa-data-science/DULY | 3 | python | def _return_imb_with_coords(self, X, coords, dist_indices, k, dtype='mean'):
"Returns the imbalances between a 'full' distance computed using all coordinates, and an alternative distance\n built using a subset of coordinates.\n\n Args:\n X: coordinate matrix\n coords: subset of coordinates to be used when building the alternative distance\n dist_indices (int[:,:]): nearest neighbours according to full distance\n k (int): order of nearest neighbour considered, default is 1\n dtype (str): type of information imbalance computation, default is 'mean'\n\n Returns:\n (float, float): the information imbalance from 'full' to 'alternative' and vice versa\n "
X_ = X[(:, coords)]
(_, dist_indices_coords) = compute_nn_distances(X_, self.maxk, self.metric, self.period)
imb_coords_full = ut._return_imbalance(dist_indices_coords, dist_indices, k=k, dtype=dtype)
imb_full_coords = ut._return_imbalance(dist_indices, dist_indices_coords, k=k, dtype=dtype)
print('computing imbalances with coords ', coords)
return (imb_full_coords, imb_coords_full) | def _return_imb_with_coords(self, X, coords, dist_indices, k, dtype='mean'):
"Returns the imbalances between a 'full' distance computed using all coordinates, and an alternative distance\n built using a subset of coordinates.\n\n Args:\n X: coordinate matrix\n coords: subset of coordinates to be used when building the alternative distance\n dist_indices (int[:,:]): nearest neighbours according to full distance\n k (int): order of nearest neighbour considered, default is 1\n dtype (str): type of information imbalance computation, default is 'mean'\n\n Returns:\n (float, float): the information imbalance from 'full' to 'alternative' and vice versa\n "
X_ = X[(:, coords)]
(_, dist_indices_coords) = compute_nn_distances(X_, self.maxk, self.metric, self.period)
imb_coords_full = ut._return_imbalance(dist_indices_coords, dist_indices, k=k, dtype=dtype)
imb_full_coords = ut._return_imbalance(dist_indices, dist_indices_coords, k=k, dtype=dtype)
print('computing imbalances with coords ', coords)
return (imb_full_coords, imb_coords_full)<|docstring|>Returns the imbalances between a 'full' distance computed using all coordinates, and an alternative distance
built using a subset of coordinates.
Args:
X: coordinate matrix
coords: subset of coordinates to be used when building the alternative distance
dist_indices (int[:,:]): nearest neighbours according to full distance
k (int): order of nearest neighbour considered, default is 1
dtype (str): type of information imbalance computation, default is 'mean'
Returns:
(float, float): the information imbalance from 'full' to 'alternative' and vice versa<|endoftext|> |
544639063a0948ecc177aacba94aad2a513864fe38a0b50e18219221d6495535 | def random(self):
'Get the next random number in the range [0.0, 1.0).'
return ((int.from_bytes(_random(7), 'big') >> 3) * RECIP_BPF) | Get the next random number in the range [0.0, 1.0). | passgen.py | random | Zenexer/passgen | 1 | python | def random(self):
return ((int.from_bytes(_random(7), 'big') >> 3) * RECIP_BPF) | def random(self):
return ((int.from_bytes(_random(7), 'big') >> 3) * RECIP_BPF)<|docstring|>Get the next random number in the range [0.0, 1.0).<|endoftext|> |
579f816cb08c8bd7076f42153f9116bc9b3d4fc273ee1263e6a38bedea4674a6 | def getrandbits(self, k):
'getrandbits(k) -> x. Generates an int with k random bits.'
if (k <= 0):
raise ValueError('number of bits must be greater than zero')
if (k != int(k)):
raise TypeError('number of bits should be an integer')
numbytes = ((k + 7) // 8)
x = int.from_bytes(_random(numbytes), 'big')
return (x >> ((numbytes * 8) - k)) | getrandbits(k) -> x. Generates an int with k random bits. | passgen.py | getrandbits | Zenexer/passgen | 1 | python | def getrandbits(self, k):
if (k <= 0):
raise ValueError('number of bits must be greater than zero')
if (k != int(k)):
raise TypeError('number of bits should be an integer')
numbytes = ((k + 7) // 8)
x = int.from_bytes(_random(numbytes), 'big')
return (x >> ((numbytes * 8) - k)) | def getrandbits(self, k):
if (k <= 0):
raise ValueError('number of bits must be greater than zero')
if (k != int(k)):
raise TypeError('number of bits should be an integer')
numbytes = ((k + 7) // 8)
x = int.from_bytes(_random(numbytes), 'big')
return (x >> ((numbytes * 8) - k))<|docstring|>getrandbits(k) -> x. Generates an int with k random bits.<|endoftext|> |
054f37431079fc101e842407244958941f270dea949f33671908dd58eb43adf7 | def test_book_has_been_created(book):
'Count Books must be 1.'
assert (Book.objects.count() == 1) | Count Books must be 1. | library/core/tests.py | test_book_has_been_created | gabrielloliveira/library-ql | 2 | python | def test_book_has_been_created(book):
assert (Book.objects.count() == 1) | def test_book_has_been_created(book):
assert (Book.objects.count() == 1)<|docstring|>Count Books must be 1.<|endoftext|> |
8a740d128cb6d17877b3485d8b8e2e988d33d51a23343c9a920f6dd39f889d63 | def test_book_has_fields_from_abstract_model(book):
'Book should inherit fields from the abstract model.'
assert hasattr(book, 'uuid')
assert hasattr(book, 'created_at')
assert hasattr(book, 'updated_at') | Book should inherit fields from the abstract model. | library/core/tests.py | test_book_has_fields_from_abstract_model | gabrielloliveira/library-ql | 2 | python | def test_book_has_fields_from_abstract_model(book):
assert hasattr(book, 'uuid')
assert hasattr(book, 'created_at')
assert hasattr(book, 'updated_at') | def test_book_has_fields_from_abstract_model(book):
assert hasattr(book, 'uuid')
assert hasattr(book, 'created_at')
assert hasattr(book, 'updated_at')<|docstring|>Book should inherit fields from the abstract model.<|endoftext|> |
8e49b60eabc15f4f73a496906fa7e5cb59d85d7141da36a59a3ef5446eda5cc5 | @staticmethod
def make_server(processor):
'\n Creates a TCppServer given a processor. This is the function used\n internally, but it may be of interest separately as well.\n '
server = TCppServer(processor)
server.setPort(0)
server.setNumCPUWorkerThreads(1)
server.setNumIOWorkerThreads(1)
server.setNewSimpleThreadManager(count=1, pendingTaskCountMax=5)
return server | Creates a TCppServer given a processor. This is the function used
internally, but it may be of interest separately as well. | thrift/lib/py/util/TCppServerTestManager.py | make_server | killight98/fbthrift | 2,112 | python | @staticmethod
def make_server(processor):
'\n Creates a TCppServer given a processor. This is the function used\n internally, but it may be of interest separately as well.\n '
server = TCppServer(processor)
server.setPort(0)
server.setNumCPUWorkerThreads(1)
server.setNumIOWorkerThreads(1)
server.setNewSimpleThreadManager(count=1, pendingTaskCountMax=5)
return server | @staticmethod
def make_server(processor):
'\n Creates a TCppServer given a processor. This is the function used\n internally, but it may be of interest separately as well.\n '
server = TCppServer(processor)
server.setPort(0)
server.setNumCPUWorkerThreads(1)
server.setNumIOWorkerThreads(1)
server.setNewSimpleThreadManager(count=1, pendingTaskCountMax=5)
return server<|docstring|>Creates a TCppServer given a processor. This is the function used
internally, but it may be of interest separately as well.<|endoftext|> |
1550ac3c4dd368e58478b236bf2428c24154e4abea74f1dfe8070b9756dffc37 | def start(self):
'\n Starts the server in another thread.\n\n Returns after the server has bound to and listened on its port. Callers\n may immediately open connections without needing to wait or poll.\n '
if self.__is_handler(self.__obj):
self.__handler = self.__obj
self.__processor = self.__make_processor(self.__handler)
self.__server = self.__make_server(self.__processor)
elif self.__is_processor(self.__obj):
self.__processor = self.__obj
self.__server = self.__make_server(self.__processor)
elif self.__is_server(self.__obj):
self.__server = self.__obj
else:
raise Exception('Not a handler, a processor, or a server.')
self.__server_started_ev = threading.Event()
self.__thread = threading.Thread(target=self.__serve)
self.__thread.start()
self.__server_started_ev.wait()
self.__server_started_ev = None | Starts the server in another thread.
Returns after the server has bound to and listened on its port. Callers
may immediately open connections without needing to wait or poll. | thrift/lib/py/util/TCppServerTestManager.py | start | killight98/fbthrift | 2,112 | python | def start(self):
'\n Starts the server in another thread.\n\n Returns after the server has bound to and listened on its port. Callers\n may immediately open connections without needing to wait or poll.\n '
if self.__is_handler(self.__obj):
self.__handler = self.__obj
self.__processor = self.__make_processor(self.__handler)
self.__server = self.__make_server(self.__processor)
elif self.__is_processor(self.__obj):
self.__processor = self.__obj
self.__server = self.__make_server(self.__processor)
elif self.__is_server(self.__obj):
self.__server = self.__obj
else:
raise Exception('Not a handler, a processor, or a server.')
self.__server_started_ev = threading.Event()
self.__thread = threading.Thread(target=self.__serve)
self.__thread.start()
self.__server_started_ev.wait()
self.__server_started_ev = None | def start(self):
'\n Starts the server in another thread.\n\n Returns after the server has bound to and listened on its port. Callers\n may immediately open connections without needing to wait or poll.\n '
if self.__is_handler(self.__obj):
self.__handler = self.__obj
self.__processor = self.__make_processor(self.__handler)
self.__server = self.__make_server(self.__processor)
elif self.__is_processor(self.__obj):
self.__processor = self.__obj
self.__server = self.__make_server(self.__processor)
elif self.__is_server(self.__obj):
self.__server = self.__obj
else:
raise Exception('Not a handler, a processor, or a server.')
self.__server_started_ev = threading.Event()
self.__thread = threading.Thread(target=self.__serve)
self.__thread.start()
self.__server_started_ev.wait()
self.__server_started_ev = None<|docstring|>Starts the server in another thread.
Returns after the server has bound to and listened on its port. Callers
may immediately open connections without needing to wait or poll.<|endoftext|> |
867745c795b31c554881832cb23adef9e141c222ff77b6a9a7bdc6dbeae325d3 | def stop(self):
'\n Stops the server.\n\n Returns after the server has been stopped and all resources have been\n cleaned up.\n '
self.__server.stop()
self.__thread.join()
self.__thread = None
self.__server = None
self.__processor = None
self.__handler = None | Stops the server.
Returns after the server has been stopped and all resources have been
cleaned up. | thrift/lib/py/util/TCppServerTestManager.py | stop | killight98/fbthrift | 2,112 | python | def stop(self):
'\n Stops the server.\n\n Returns after the server has been stopped and all resources have been\n cleaned up.\n '
self.__server.stop()
self.__thread.join()
self.__thread = None
self.__server = None
self.__processor = None
self.__handler = None | def stop(self):
'\n Stops the server.\n\n Returns after the server has been stopped and all resources have been\n cleaned up.\n '
self.__server.stop()
self.__thread.join()
self.__thread = None
self.__server = None
self.__processor = None
self.__handler = None<|docstring|>Stops the server.
Returns after the server has been stopped and all resources have been
cleaned up.<|endoftext|> |
787fd2d2b15f8e8f50e0dcceb1ce4da1706b24beed960150b83ebac8009a7829 | def addr(self):
'\n Returns a pair of host-addr and port on which the running server is\n listening.\n\n If constructed with a handler or a processor, addr is * or :: and port\n is ephemeral.\n '
addr = self.__server.getAddress()
return (addr[0], addr[1]) | Returns a pair of host-addr and port on which the running server is
listening.
If constructed with a handler or a processor, addr is * or :: and port
is ephemeral. | thrift/lib/py/util/TCppServerTestManager.py | addr | killight98/fbthrift | 2,112 | python | def addr(self):
'\n Returns a pair of host-addr and port on which the running server is\n listening.\n\n If constructed with a handler or a processor, addr is * or :: and port\n is ephemeral.\n '
addr = self.__server.getAddress()
return (addr[0], addr[1]) | def addr(self):
'\n Returns a pair of host-addr and port on which the running server is\n listening.\n\n If constructed with a handler or a processor, addr is * or :: and port\n is ephemeral.\n '
addr = self.__server.getAddress()
return (addr[0], addr[1])<|docstring|>Returns a pair of host-addr and port on which the running server is
listening.
If constructed with a handler or a processor, addr is * or :: and port
is ephemeral.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.