body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def unstack(df, level=(- 1), reset_index=True):
'pd.DataFrame.unstack adapter.\n\n Call the `df.unstack` method using the indicated level and afterwards\n join the column names using an underscore.\n\n Args:\n df (pandas.DataFrame): DataFrame to unstack.\n level (str, int or list): Level(s) of index to unstack, can pass level name\n reset_index (bool): Whether to reset the index after unstacking\n\n Returns:\n pandas.Dataframe: unstacked dataframe\n '
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
| 6,275,348,741,341,324,000
|
pd.DataFrame.unstack adapter.
Call the `df.unstack` method using the indicated level and afterwards
join the column names using an underscore.
Args:
df (pandas.DataFrame): DataFrame to unstack.
level (str, int or list): Level(s) of index to unstack, can pass level name
reset_index (bool): Whether to reset the index after unstacking
Returns:
pandas.Dataframe: unstacked dataframe
|
mlprimitives/adapters/pandas.py
|
unstack
|
AlexanderGeiger/MLPrimitives
|
python
|
def unstack(df, level=(- 1), reset_index=True):
'pd.DataFrame.unstack adapter.\n\n Call the `df.unstack` method using the indicated level and afterwards\n join the column names using an underscore.\n\n Args:\n df (pandas.DataFrame): DataFrame to unstack.\n level (str, int or list): Level(s) of index to unstack, can pass level name\n reset_index (bool): Whether to reset the index after unstacking\n\n Returns:\n pandas.Dataframe: unstacked dataframe\n '
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
|
@pytest.mark.usefixtures('data_config', 'nepc_connect')
def test_states_table_has_species_metadata(data_config, nepc_connect):
'\n check that the states table has a species_id column\n '
NEPC_DATA = data_config[0]
number_of_states = (util.wc_fxn((NEPC_DATA + 'states.tsv')) - 1)
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert (len(df_states) == number_of_states)
assert ('species_id' in list(df_states.columns))
| -812,163,596,748,494,200
|
check that the states table has a species_id column
|
tests/test_mysql_build.py
|
test_states_table_has_species_metadata
|
USNavalResearchLaboratory/nepc
|
python
|
@pytest.mark.usefixtures('data_config', 'nepc_connect')
def test_states_table_has_species_metadata(data_config, nepc_connect):
'\n \n '
NEPC_DATA = data_config[0]
number_of_states = (util.wc_fxn((NEPC_DATA + 'states.tsv')) - 1)
df_states = nepc.table_as_df(nepc_connect[1], 'states')
assert (len(df_states) == number_of_states)
assert ('species_id' in list(df_states.columns))
|
def getProperties(configFile):
'\n\tdictionary getProperties(str)\n\t\n\tThis funciton reads the entire config file and builds a dictionary from the config file\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to read from\n\t\t\n\tReturns:\n\t\tdictionary: A list of key value pairs from the config file\n\t\n\t'
dict = {}
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
loc = line.find('=')
if (loc == (- 1)):
continue
key = line[:loc]
value = line[(loc + 1):]
dict[key] = value
return dict
| -7,746,971,963,087,934,000
|
dictionary getProperties(str)
This funciton reads the entire config file and builds a dictionary from the config file
Args:
configFile: The configuration file to read from
Returns:
dictionary: A list of key value pairs from the config file
|
HackPSUconfig.py
|
getProperties
|
hackpsu-tech/hackPSUS2018-rfid
|
python
|
def getProperties(configFile):
'\n\tdictionary getProperties(str)\n\t\n\tThis funciton reads the entire config file and builds a dictionary from the config file\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to read from\n\t\t\n\tReturns:\n\t\tdictionary: A list of key value pairs from the config file\n\t\n\t'
dict = {}
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
loc = line.find('=')
if (loc == (- 1)):
continue
key = line[:loc]
value = line[(loc + 1):]
dict[key] = value
return dict
|
def setProperties(configFile, dict):
'\n\tvoid setProperties (str, dictionary)\n\t\n\tThis function iterates over the entire dictionary and saves each dictionary entry to the specified config file\n\t\n\tArgs:\n\t\tconfigFile: The file to overwrite with the new configuration\n\t\tdict: The dictionary to write\n\t'
with open(configFile, 'w') as file:
for key in dict:
file.write((((key + '=') + dict[key]) + '\n'))
| -8,388,814,040,620,741,000
|
void setProperties (str, dictionary)
This function iterates over the entire dictionary and saves each dictionary entry to the specified config file
Args:
configFile: The file to overwrite with the new configuration
dict: The dictionary to write
|
HackPSUconfig.py
|
setProperties
|
hackpsu-tech/hackPSUS2018-rfid
|
python
|
def setProperties(configFile, dict):
'\n\tvoid setProperties (str, dictionary)\n\t\n\tThis function iterates over the entire dictionary and saves each dictionary entry to the specified config file\n\t\n\tArgs:\n\t\tconfigFile: The file to overwrite with the new configuration\n\t\tdict: The dictionary to write\n\t'
with open(configFile, 'w') as file:
for key in dict:
file.write((((key + '=') + dict[key]) + '\n'))
|
def getProperty(configFile, prop):
'\n\tstr getProperty(str, str)\n\t\n\tThis function searches a configFile for a specific property and returns its value\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to open\n\t\tprop: The property to search for\n\t\t\n\tReturns:\n\t\tstring: The property value if found or None for no value found\n\t\n\t'
retVal = None
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
if line.startswith(prop):
retVal = line.replace(prop, '')
retVal = retVal.strip()
retVal = retVal[1:]
retVal = retVal.lstrip()
break
return retVal
| 478,470,915,018,306,560
|
str getProperty(str, str)
This function searches a configFile for a specific property and returns its value
Args:
configFile: The configuration file to open
prop: The property to search for
Returns:
string: The property value if found or None for no value found
|
HackPSUconfig.py
|
getProperty
|
hackpsu-tech/hackPSUS2018-rfid
|
python
|
def getProperty(configFile, prop):
'\n\tstr getProperty(str, str)\n\t\n\tThis function searches a configFile for a specific property and returns its value\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to open\n\t\tprop: The property to search for\n\t\t\n\tReturns:\n\t\tstring: The property value if found or None for no value found\n\t\n\t'
retVal = None
with open(configFile) as file:
for line in file:
line = line.strip()
if line.startswith('#'):
continue
if line.startswith(prop):
retVal = line.replace(prop, )
retVal = retVal.strip()
retVal = retVal[1:]
retVal = retVal.lstrip()
break
return retVal
|
def setProperty(configFile, prop, value):
'\n\tvoid setProperty(str, str, str)\n\t\n\tThis function searches a config file for the specified propery and updates its value if found.\n\tIf the specified property is not found, then a new line for the property will be created\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to open and update\n\t\tprop: The property key to update \n\t\tvalue: The new value for the property\n\t\n\t'
written = False
with open(configFile) as inFile:
(tmpHandle, outPath) = mkstemp()
with fdopen(tmpHandle, 'w') as outFile:
for line in inFile:
if line.startswith(prop):
outFile.write((((prop + '=') + value) + '\n'))
written = True
else:
outFile.write(line)
if (not written):
outFile.write((((prop + ':') + value) + '\n'))
remove(configFile)
move(outPath, configFile)
| -7,819,968,205,918,925,000
|
void setProperty(str, str, str)
This function searches a config file for the specified propery and updates its value if found.
If the specified property is not found, then a new line for the property will be created
Args:
configFile: The configuration file to open and update
prop: The property key to update
value: The new value for the property
|
HackPSUconfig.py
|
setProperty
|
hackpsu-tech/hackPSUS2018-rfid
|
python
|
def setProperty(configFile, prop, value):
'\n\tvoid setProperty(str, str, str)\n\t\n\tThis function searches a config file for the specified propery and updates its value if found.\n\tIf the specified property is not found, then a new line for the property will be created\n\t\n\tArgs:\n\t\tconfigFile: The configuration file to open and update\n\t\tprop: The property key to update \n\t\tvalue: The new value for the property\n\t\n\t'
written = False
with open(configFile) as inFile:
(tmpHandle, outPath) = mkstemp()
with fdopen(tmpHandle, 'w') as outFile:
for line in inFile:
if line.startswith(prop):
outFile.write((((prop + '=') + value) + '\n'))
written = True
else:
outFile.write(line)
if (not written):
outFile.write((((prop + ':') + value) + '\n'))
remove(configFile)
move(outPath, configFile)
|
def sample_user(email='example@example.com', password='testpass'):
'Creating sample user'
return get_user_model().objects.create_user(email, password)
| 4,007,906,150,354,790,000
|
Creating sample user
|
app/core/tests/test_models.py
|
sample_user
|
Rish1711/recipe-app-api
|
python
|
def sample_user(email='example@example.com', password='testpass'):
return get_user_model().objects.create_user(email, password)
|
def test_create_user_with_email_successful(self):
'Test creating a new user with an email is successful'
email = 'example@example.com'
password = 'Password123'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
| -1,354,818,704,170,135,600
|
Test creating a new user with an email is successful
|
app/core/tests/test_models.py
|
test_create_user_with_email_successful
|
Rish1711/recipe-app-api
|
python
|
def test_create_user_with_email_successful(self):
email = 'example@example.com'
password = 'Password123'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
|
def test_email_normalize(self):
'Testing weather email is in normalize form or not'
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
| -3,077,353,306,868,135,000
|
Testing weather email is in normalize form or not
|
app/core/tests/test_models.py
|
test_email_normalize
|
Rish1711/recipe-app-api
|
python
|
def test_email_normalize(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
|
def test_create_superuser(self):
'Test for creating super user'
email = 'example@example.com'
password = 'Password123'
user = get_user_model().objects.create_superuser(email=email, password=password)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
| 4,411,465,961,249,509,400
|
Test for creating super user
|
app/core/tests/test_models.py
|
test_create_superuser
|
Rish1711/recipe-app-api
|
python
|
def test_create_superuser(self):
email = 'example@example.com'
password = 'Password123'
user = get_user_model().objects.create_superuser(email=email, password=password)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
|
def write_transposed_dataset(reader: Reader, outfname: Union[(Path, str)], start: datetime.datetime=None, end: datetime.datetime=None, chunks: dict=None, memory: float=2, n_threads: int=4, zlib: bool=True, complevel: int=4, distributed: Union[(bool, Client)]=False, use_dask: bool=True):
'\n Creates a stacked and transposed netCDF file from a given reader.\n\n WARNING: very experimental!\n\n Parameters\n ----------\n reader : XarrayImageReaderBase\n Reader for the dataset.\n outfname : str or Path\n Output filename. Must end with ".nc" for netCDF output or with ".zarr"\n for zarr output.\n start : datetime.datetime, optional\n If not given, start at first timestamp in dataset.\n end : datetime.datetime, optional\n If not given, end at last timestamp in dataset.\n chunks : dictionary, optional\n The chunk sizes that are used for the transposed file. If none are\n given, chunks with a size of 1MB are used for netCDF, and chunks with a\n size of 50MB are used for zarr output.\n memory : float, optional\n The amount of memory to be used for buffering in GB. Default is 2.\n Higher is faster.\n n_threads : int, optional\n The amount of threads to use. Default is 4.\n zlib : bool, optional\n Whether to use compression when storing the files. Reduces file size,\n but strongly increases write time, and maybe also access time. Default\n is ``False``.\n complevel : int, optional\n Compression level to use. Default is 4. Range is from 1 (low) to 9\n (high).\n distributed : bool or Client, optional\n Whether to use the local or the distributed dask scheduler. If a client\n for a distributed scheduler is used, this is used instead.\n use_dask : bool, optional\n Whether to use dask for the transposing. Default is True, but sometimes\n (especially with large datasets) this fails. If set to False, the data\n is written to an intermediate zarr store.\n '
dask_config = {'array.slicing.split_large_chunks': False}
args = (reader, outfname)
kwargs = {'start': start, 'end': end, 'memory': memory, 'zlib': zlib, 'complevel': complevel, 'chunks': chunks}
if (not use_dask):
_transpose_no_dask(*args, **kwargs)
elif (isinstance(distributed, Client) or (not distributed)):
if (not distributed):
dask_config.update({'scheduler': 'threads', 'pool': ThreadPool(n_threads)})
with dask.config.set(**dask_config):
_transpose(*args, **kwargs)
elif distributed:
with dask.config.set(**dask_config), Client(n_workers=1, threads_per_worker=n_threads, memory_limit=f'{memory}GB') as client:
print('Dask dashboard accessible at:', client.dashboard_link)
_transpose(*args, **kwargs)
| 8,170,015,279,336,511,000
|
Creates a stacked and transposed netCDF file from a given reader.
WARNING: very experimental!
Parameters
----------
reader : XarrayImageReaderBase
Reader for the dataset.
outfname : str or Path
Output filename. Must end with ".nc" for netCDF output or with ".zarr"
for zarr output.
start : datetime.datetime, optional
If not given, start at first timestamp in dataset.
end : datetime.datetime, optional
If not given, end at last timestamp in dataset.
chunks : dictionary, optional
The chunk sizes that are used for the transposed file. If none are
given, chunks with a size of 1MB are used for netCDF, and chunks with a
size of 50MB are used for zarr output.
memory : float, optional
The amount of memory to be used for buffering in GB. Default is 2.
Higher is faster.
n_threads : int, optional
The amount of threads to use. Default is 4.
zlib : bool, optional
Whether to use compression when storing the files. Reduces file size,
but strongly increases write time, and maybe also access time. Default
is ``False``.
complevel : int, optional
Compression level to use. Default is 4. Range is from 1 (low) to 9
(high).
distributed : bool or Client, optional
Whether to use the local or the distributed dask scheduler. If a client
for a distributed scheduler is used, this is used instead.
use_dask : bool, optional
Whether to use dask for the transposing. Default is True, but sometimes
(especially with large datasets) this fails. If set to False, the data
is written to an intermediate zarr store.
|
src/qa4sm_preprocessing/nc_image_reader/transpose.py
|
write_transposed_dataset
|
awst-austria/qa4sm-preprocessing
|
python
|
def write_transposed_dataset(reader: Reader, outfname: Union[(Path, str)], start: datetime.datetime=None, end: datetime.datetime=None, chunks: dict=None, memory: float=2, n_threads: int=4, zlib: bool=True, complevel: int=4, distributed: Union[(bool, Client)]=False, use_dask: bool=True):
'\n Creates a stacked and transposed netCDF file from a given reader.\n\n WARNING: very experimental!\n\n Parameters\n ----------\n reader : XarrayImageReaderBase\n Reader for the dataset.\n outfname : str or Path\n Output filename. Must end with ".nc" for netCDF output or with ".zarr"\n for zarr output.\n start : datetime.datetime, optional\n If not given, start at first timestamp in dataset.\n end : datetime.datetime, optional\n If not given, end at last timestamp in dataset.\n chunks : dictionary, optional\n The chunk sizes that are used for the transposed file. If none are\n given, chunks with a size of 1MB are used for netCDF, and chunks with a\n size of 50MB are used for zarr output.\n memory : float, optional\n The amount of memory to be used for buffering in GB. Default is 2.\n Higher is faster.\n n_threads : int, optional\n The amount of threads to use. Default is 4.\n zlib : bool, optional\n Whether to use compression when storing the files. Reduces file size,\n but strongly increases write time, and maybe also access time. Default\n is ``False``.\n complevel : int, optional\n Compression level to use. Default is 4. Range is from 1 (low) to 9\n (high).\n distributed : bool or Client, optional\n Whether to use the local or the distributed dask scheduler. If a client\n for a distributed scheduler is used, this is used instead.\n use_dask : bool, optional\n Whether to use dask for the transposing. Default is True, but sometimes\n (especially with large datasets) this fails. If set to False, the data\n is written to an intermediate zarr store.\n '
dask_config = {'array.slicing.split_large_chunks': False}
args = (reader, outfname)
kwargs = {'start': start, 'end': end, 'memory': memory, 'zlib': zlib, 'complevel': complevel, 'chunks': chunks}
if (not use_dask):
_transpose_no_dask(*args, **kwargs)
elif (isinstance(distributed, Client) or (not distributed)):
if (not distributed):
dask_config.update({'scheduler': 'threads', 'pool': ThreadPool(n_threads)})
with dask.config.set(**dask_config):
_transpose(*args, **kwargs)
elif distributed:
with dask.config.set(**dask_config), Client(n_workers=1, threads_per_worker=n_threads, memory_limit=f'{memory}GB') as client:
print('Dask dashboard accessible at:', client.dashboard_link)
_transpose(*args, **kwargs)
|
def _get_intermediate_chunks(array, chunks, new_last_dim, zarr_output, memory):
'\n Calculates chunk sizes for the given array for the intermediate output\n files.\n\n Parameters\n ----------\n array : xr.DataArray\n Array to rechunk and transpose\n chunks : dict or None\n Chunks passed to write_transposed_dataset, None if none were given.\n new_last_dim : str\n Name of the new last dimension, normally "time".\n zarr_output : bool\n Whether the final file will be a zarr file (True) or a netCDf (False).\n memory : float\n The amount of memory to be used for buffering in GB.\n\n Returns\n -------\n tmp_chunks : dict\n Chunks to be used for rechunking the array to a temporary file. The\n order of keys corresponds to the order of dimensions in the transposed\n array.\n '
dtype = array.dtype
dims = dict(zip(array.dims, array.shape))
transposed_shape = [length for (dim, length) in dims.items() if (dim != new_last_dim)]
transposed_shape.append(dims[new_last_dim])
if (chunks is None):
if zarr_output:
chunksizes = infer_chunks(transposed_shape, 100, dtype)[:(- 1)]
else:
chunksizes = infer_chunks(transposed_shape, 1, dtype)[:(- 1)]
chunks = dict(zip([dim for dim in dims if (dim != new_last_dim)], chunksizes))
chunks[new_last_dim] = (- 1)
else:
chunks = copy.copy(chunks)
tmp_chunks = {dim: chunks[dim] for dim in dims if (dim != new_last_dim)}
size = dtype.itemsize
chunksizes = [(size if (size != (- 1)) else dims[dim]) for (dim, size) in chunks.items()]
chunksize_MB = ((np.prod(chunksizes) * size) / (1024 ** 2))
img_shape = transposed_shape[:(- 1)]
len_time = transposed_shape[(- 1)]
imagesize_GB = ((np.prod(img_shape) * size) / (1024 ** 3))
stepsize = (int(math.floor((memory / imagesize_GB))) // 2)
stepsize = min(stepsize, len_time)
tmp_chunks[new_last_dim] = stepsize
tmp_chunks_str = str(tuple(tmp_chunks.values()))
logging.info(f'write_transposed_dataset: Creating chunks {tmp_chunks_str} with chunksize {chunksize_MB:.2f} MB')
return tmp_chunks
| 3,076,908,307,733,543,000
|
Calculates chunk sizes for the given array for the intermediate output
files.
Parameters
----------
array : xr.DataArray
Array to rechunk and transpose
chunks : dict or None
Chunks passed to write_transposed_dataset, None if none were given.
new_last_dim : str
Name of the new last dimension, normally "time".
zarr_output : bool
Whether the final file will be a zarr file (True) or a netCDf (False).
memory : float
The amount of memory to be used for buffering in GB.
Returns
-------
tmp_chunks : dict
Chunks to be used for rechunking the array to a temporary file. The
order of keys corresponds to the order of dimensions in the transposed
array.
|
src/qa4sm_preprocessing/nc_image_reader/transpose.py
|
_get_intermediate_chunks
|
awst-austria/qa4sm-preprocessing
|
python
|
def _get_intermediate_chunks(array, chunks, new_last_dim, zarr_output, memory):
'\n Calculates chunk sizes for the given array for the intermediate output\n files.\n\n Parameters\n ----------\n array : xr.DataArray\n Array to rechunk and transpose\n chunks : dict or None\n Chunks passed to write_transposed_dataset, None if none were given.\n new_last_dim : str\n Name of the new last dimension, normally "time".\n zarr_output : bool\n Whether the final file will be a zarr file (True) or a netCDf (False).\n memory : float\n The amount of memory to be used for buffering in GB.\n\n Returns\n -------\n tmp_chunks : dict\n Chunks to be used for rechunking the array to a temporary file. The\n order of keys corresponds to the order of dimensions in the transposed\n array.\n '
dtype = array.dtype
dims = dict(zip(array.dims, array.shape))
transposed_shape = [length for (dim, length) in dims.items() if (dim != new_last_dim)]
transposed_shape.append(dims[new_last_dim])
if (chunks is None):
if zarr_output:
chunksizes = infer_chunks(transposed_shape, 100, dtype)[:(- 1)]
else:
chunksizes = infer_chunks(transposed_shape, 1, dtype)[:(- 1)]
chunks = dict(zip([dim for dim in dims if (dim != new_last_dim)], chunksizes))
chunks[new_last_dim] = (- 1)
else:
chunks = copy.copy(chunks)
tmp_chunks = {dim: chunks[dim] for dim in dims if (dim != new_last_dim)}
size = dtype.itemsize
chunksizes = [(size if (size != (- 1)) else dims[dim]) for (dim, size) in chunks.items()]
chunksize_MB = ((np.prod(chunksizes) * size) / (1024 ** 2))
img_shape = transposed_shape[:(- 1)]
len_time = transposed_shape[(- 1)]
imagesize_GB = ((np.prod(img_shape) * size) / (1024 ** 3))
stepsize = (int(math.floor((memory / imagesize_GB))) // 2)
stepsize = min(stepsize, len_time)
tmp_chunks[new_last_dim] = stepsize
tmp_chunks_str = str(tuple(tmp_chunks.values()))
logging.info(f'write_transposed_dataset: Creating chunks {tmp_chunks_str} with chunksize {chunksize_MB:.2f} MB')
return tmp_chunks
|
def __init__(self, key_id=None, key_state=None):
'KeyStatusInfo - a model defined in huaweicloud sdk'
self._key_id = None
self._key_state = None
self.discriminator = None
if (key_id is not None):
self.key_id = key_id
if (key_state is not None):
self.key_state = key_state
| -7,973,678,530,902,859,000
|
KeyStatusInfo - a model defined in huaweicloud sdk
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
__init__
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def __init__(self, key_id=None, key_state=None):
self._key_id = None
self._key_state = None
self.discriminator = None
if (key_id is not None):
self.key_id = key_id
if (key_state is not None):
self.key_state = key_state
|
@property
def key_id(self):
'Gets the key_id of this KeyStatusInfo.\n\n 密钥ID\n\n :return: The key_id of this KeyStatusInfo.\n :rtype: str\n '
return self._key_id
| 2,992,302,185,481,682,000
|
Gets the key_id of this KeyStatusInfo.
密钥ID
:return: The key_id of this KeyStatusInfo.
:rtype: str
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
key_id
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
@property
def key_id(self):
'Gets the key_id of this KeyStatusInfo.\n\n 密钥ID\n\n :return: The key_id of this KeyStatusInfo.\n :rtype: str\n '
return self._key_id
|
@key_id.setter
def key_id(self, key_id):
'Sets the key_id of this KeyStatusInfo.\n\n 密钥ID\n\n :param key_id: The key_id of this KeyStatusInfo.\n :type: str\n '
self._key_id = key_id
| -7,281,734,985,210,797,000
|
Sets the key_id of this KeyStatusInfo.
密钥ID
:param key_id: The key_id of this KeyStatusInfo.
:type: str
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
key_id
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
@key_id.setter
def key_id(self, key_id):
'Sets the key_id of this KeyStatusInfo.\n\n 密钥ID\n\n :param key_id: The key_id of this KeyStatusInfo.\n :type: str\n '
self._key_id = key_id
|
@property
def key_state(self):
'Gets the key_state of this KeyStatusInfo.\n\n 密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态\n\n :return: The key_state of this KeyStatusInfo.\n :rtype: str\n '
return self._key_state
| -3,301,752,105,416,907,300
|
Gets the key_state of this KeyStatusInfo.
密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态
:return: The key_state of this KeyStatusInfo.
:rtype: str
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
key_state
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
@property
def key_state(self):
'Gets the key_state of this KeyStatusInfo.\n\n 密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态\n\n :return: The key_state of this KeyStatusInfo.\n :rtype: str\n '
return self._key_state
|
@key_state.setter
def key_state(self, key_state):
'Sets the key_state of this KeyStatusInfo.\n\n 密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态\n\n :param key_state: The key_state of this KeyStatusInfo.\n :type: str\n '
self._key_state = key_state
| 348,206,808,607,359,740
|
Sets the key_state of this KeyStatusInfo.
密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态
:param key_state: The key_state of this KeyStatusInfo.
:type: str
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
key_state
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
@key_state.setter
def key_state(self, key_state):
'Sets the key_state of this KeyStatusInfo.\n\n 密钥状态: - 2为启用状态 - 3为禁用状态 - 4为计划删除状态 - 5为等待导入状态 - 7为冻结状态\n\n :param key_state: The key_state of this KeyStatusInfo.\n :type: str\n '
self._key_state = key_state
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
| 2,594,216,033,120,720,000
|
Returns the model properties as a dict
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
to_dict
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
| 5,849,158,643,760,736,000
|
Returns the string representation of the model
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
to_str
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
| -8,960,031,694,814,905,000
|
For `print` and `pprint`
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
__repr__
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def __repr__(self):
return self.to_str()
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, KeyStatusInfo)):
return False
return (self.__dict__ == other.__dict__)
| -8,891,837,020,552,251,000
|
Returns true if both objects are equal
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
__eq__
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def __eq__(self, other):
if (not isinstance(other, KeyStatusInfo)):
return False
return (self.__dict__ == other.__dict__)
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
| 7,764,124,047,908,058,000
|
Returns true if both objects are not equal
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py
|
__ne__
|
Adek06/huaweicloud-sdk-python-v3
|
python
|
def __ne__(self, other):
return (not (self == other))
|
def sample(self, size, random_state=None):
'Generate random samples from the model.\n Returns\n -------\n X : array_like, shape (n_samples, n_features)\n List of samples\n '
if (random_state is None):
random_state = numpy.random
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
X = numpy.empty(size, ('f8', (self.means.shape[1],)))
comps = random_state.choice(len(self.weights), p=self.weights, size=size)
for comp in range(len(self.weights)):
comp_in_X = (comp == comps)
num_comp_in_X = comp_in_X.sum()
if (num_comp_in_X > 0):
cv = self.covs[comp]
g = sample_gaussian2(self.means[comp], cv, num_comp_in_X, random_state, mins, maxes).T
X[comp_in_X] = g
return X
| 4,209,959,704,689,310,700
|
Generate random samples from the model.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
|
bananas/model.py
|
sample
|
bccp/bananaplots
|
python
|
def sample(self, size, random_state=None):
'Generate random samples from the model.\n Returns\n -------\n X : array_like, shape (n_samples, n_features)\n List of samples\n '
if (random_state is None):
random_state = numpy.random
mins = self.lims[:, 0]
maxes = self.lims[:, 1]
X = numpy.empty(size, ('f8', (self.means.shape[1],)))
comps = random_state.choice(len(self.weights), p=self.weights, size=size)
for comp in range(len(self.weights)):
comp_in_X = (comp == comps)
num_comp_in_X = comp_in_X.sum()
if (num_comp_in_X > 0):
cv = self.covs[comp]
g = sample_gaussian2(self.means[comp], cv, num_comp_in_X, random_state, mins, maxes).T
X[comp_in_X] = g
return X
|
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
'Compute declaration for tensorcore'
assert (isinstance(stride, int) or (len(stride) == 2))
assert (isinstance(dilation, int) or (len(dilation) == 2))
if isinstance(stride, int):
stride_h = stride_w = stride
else:
(stride_h, stride_w) = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
(dilation_h, dilation_w) = dilation
(batch, in_height, in_width, in_channel) = get_const_tuple(Input.shape)
(kernel_h, kernel_w, _, num_filter) = get_const_tuple(Filter.shape)
assert ((((batch % 16) == 0) and ((in_channel % 16) == 0) and ((num_filter % 16) == 0)) or (((batch % 8) == 0) and ((in_channel % 16) == 0) and ((num_filter % 32) == 0)) or (((batch % 32) == 0) and ((in_channel % 16) == 0) and ((num_filter % 8) == 0))), 'The shape of (batch, in_channel, num_filter) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now'
dilated_kernel_h = (((kernel_h - 1) * dilation_h) + 1)
dilated_kernel_w = (((kernel_w - 1) * dilation_w) + 1)
(pad_top, pad_left, pad_down, pad_right) = get_pad_tuple(padding, (dilated_kernel_h, dilated_kernel_w))
out_channel = num_filter
out_height = simplify((((((in_height - dilated_kernel_h) + pad_top) + pad_down) // stride_h) + 1))
out_width = simplify((((((in_width - dilated_kernel_w) + pad_left) + pad_right) // stride_w) + 1))
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name='PaddedInput')
rc = te.reduce_axis((0, in_channel), name='rc')
ry = te.reduce_axis((0, kernel_h), name='ry')
rx = te.reduce_axis((0, kernel_w), name='rx')
TransPaddedInput = te.compute(PaddedInput.shape, (lambda n, h, w, c: PaddedInput[(n, h, w, c)].astype('float16')))
TransFilter = te.compute(Filter.shape, (lambda h, w, i, o: Filter[(h, w, i, o)].astype('float16')))
Output = te.compute((batch, out_height, out_width, out_channel), (lambda nn, yy, xx, ff: te.sum((TransPaddedInput[(nn, ((yy * stride_h) + (ry * dilation_h)), ((xx * stride_w) + (rx * dilation_w)), rc)].astype(out_dtype) * TransFilter[(ry, rx, rc, ff)].astype(out_dtype)), axis=[ry, rx, rc])), name='Conv2dOutput', tag='conv2d_nhwc_tensorcore')
return Output
| 8,498,808,538,734,013,000
|
Compute declaration for tensorcore
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
nhwc_tensorcore_cuda
|
HatsuneMiku4/incubator-tvm
|
python
|
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
assert (isinstance(stride, int) or (len(stride) == 2))
assert (isinstance(dilation, int) or (len(dilation) == 2))
if isinstance(stride, int):
stride_h = stride_w = stride
else:
(stride_h, stride_w) = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
(dilation_h, dilation_w) = dilation
(batch, in_height, in_width, in_channel) = get_const_tuple(Input.shape)
(kernel_h, kernel_w, _, num_filter) = get_const_tuple(Filter.shape)
assert ((((batch % 16) == 0) and ((in_channel % 16) == 0) and ((num_filter % 16) == 0)) or (((batch % 8) == 0) and ((in_channel % 16) == 0) and ((num_filter % 32) == 0)) or (((batch % 32) == 0) and ((in_channel % 16) == 0) and ((num_filter % 8) == 0))), 'The shape of (batch, in_channel, num_filter) must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now'
dilated_kernel_h = (((kernel_h - 1) * dilation_h) + 1)
dilated_kernel_w = (((kernel_w - 1) * dilation_w) + 1)
(pad_top, pad_left, pad_down, pad_right) = get_pad_tuple(padding, (dilated_kernel_h, dilated_kernel_w))
out_channel = num_filter
out_height = simplify((((((in_height - dilated_kernel_h) + pad_top) + pad_down) // stride_h) + 1))
out_width = simplify((((((in_width - dilated_kernel_w) + pad_left) + pad_right) // stride_w) + 1))
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name='PaddedInput')
rc = te.reduce_axis((0, in_channel), name='rc')
ry = te.reduce_axis((0, kernel_h), name='ry')
rx = te.reduce_axis((0, kernel_w), name='rx')
TransPaddedInput = te.compute(PaddedInput.shape, (lambda n, h, w, c: PaddedInput[(n, h, w, c)].astype('float16')))
TransFilter = te.compute(Filter.shape, (lambda h, w, i, o: Filter[(h, w, i, o)].astype('float16')))
Output = te.compute((batch, out_height, out_width, out_channel), (lambda nn, yy, xx, ff: te.sum((TransPaddedInput[(nn, ((yy * stride_h) + (ry * dilation_h)), ((xx * stride_w) + (rx * dilation_w)), rc)].astype(out_dtype) * TransFilter[(ry, rx, rc, ff)].astype(out_dtype)), axis=[ry, rx, rc])), name='Conv2dOutput', tag='conv2d_nhwc_tensorcore')
return Output
|
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
'Schedule tensorcore template'
(kh, kw, ic) = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
(trans_paddata, kernel) = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
(batch, _, _, _) = get_const_tuple(Conv.shape)
(_, _, _, out_channels) = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
AS = s.cache_read(trans_paddata, 'shared', [Conv])
WS = s.cache_read(kernel, 'shared', [Conv])
AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
ConvF = s.cache_write(Conv, 'wmma.accumulator')
if (Conv.op in s.outputs):
output = Conv
ConvS = s.cache_read(ConvF, 'shared', [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope('shared')
OL = Conv
cfg.define_knob('block_row_warps', [1, 2, 4])
cfg.define_knob('block_col_warps', [1, 2, 4])
cfg.define_knob('warp_row_tiles', [1, 2, 4])
cfg.define_knob('warp_col_tiles', [1, 2, 4])
cfg.define_knob('chunk', [1, 2, 4, 8])
cfg.define_knob('offset', [0, 8])
cfg.define_knob('vector_width', [1, 2, 4, 8])
if (((batch % 16) == 0) and ((out_channels % 16) == 0)):
cfg.define_knob('wmma_m', [16, 8, 32])
elif (((batch % 8) == 0) and ((out_channels % 32) == 0)):
cfg.define_knob('wmma_m', [8, 16, 32])
elif (((batch % 32) == 0) and ((out_channels % 8) == 0)):
cfg.define_knob('wmma_m', [32, 16, 8])
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg['block_row_warps'].val
block_col_warps = cfg['block_col_warps'].val
warp_row_tiles = cfg['warp_row_tiles'].val
warp_col_tiles = cfg['warp_col_tiles'].val
chunk = cfg['chunk'].val
offset = cfg['offset'].val
wmma_m = cfg['wmma_m'].val
vector_width = cfg['vector_width'].val
wmma_k = 16
if (wmma_m == 16):
wmma_n = 16
elif (wmma_m == 8):
wmma_n = 32
elif (wmma_m == 32):
wmma_n = 8
warp_size = 32
block_x = te.thread_axis('blockIdx.x')
block_y = te.thread_axis('blockIdx.y')
block_z = te.thread_axis('blockIdx.z')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
AS_align = ((chunk * wmma_k) + offset)
WS_align = (((warp_col_tiles * block_col_warps) * wmma_n) + offset)
block_factor_n = ((wmma_m * warp_row_tiles) * block_row_warps)
block_factor_o = ((wmma_n * warp_col_tiles) * block_col_warps)
CS_align = (block_factor_o + offset)
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([(wmma_n * warp_col_tiles), 1])
CL_strides = get_strides([1, 1, (wmma_n * warp_col_tiles), 1])
CS_strides = get_strides([1, 1, CS_align, 1])
(nc, hc, wc, oc) = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
(block_i, nc) = s[output].split(nc, factor=block_factor_n)
(block_j, oc) = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
(t, ti) = s[output].split(t, factor=vector_width)
(t, tx) = s[output].split(t, factor=warp_size)
(t, ty) = s[output].split(t, factor=block_row_warps)
(t, tz) = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
s[OL].compute_at(s[output], block_j)
(nc, hc, wc, oc) = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, (CS_align - 1), CS_align)
(oc, ooc) = s[OL].split(oc, factor=wmma_n)
(oc, oci) = s[OL].split(oc, factor=warp_col_tiles)
(_, oc) = s[OL].split(oc, factor=block_col_warps)
(nc, nnc) = s[OL].split(nc, factor=wmma_m)
(nc, nci) = s[OL].split(nc, factor=warp_row_tiles)
(_, nc) = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
s[ConvF].compute_at(s[OL], oc)
(n, h, w, o) = ConvF.op.axis
(n, nnf) = s[ConvF].split(n, factor=wmma_m)
(o, oof) = s[ConvF].split(o, factor=wmma_n)
(ic, ii) = s[ConvF].split(ic, factor=wmma_k)
(ko, ki) = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
(n, h, w, i) = AF.op.axis
(n, nn) = s[AF].split(n, factor=wmma_m)
(i, ii) = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
(kh, kw, i, o) = WF.op.axis
(i, ii) = s[WF].split(i, factor=wmma_k)
(o, oo) = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
(n, h, w, i) = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, (AS_align - 1), AS_align)
t = s[AS].fuse(n, i)
(t, ti) = s[AS].split(t, factor=vector_width)
(t, tx) = s[AS].split(t, factor=warp_size)
(t, ty) = s[AS].split(t, factor=block_row_warps)
(_, tz) = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
(kh, kw, ic, o) = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, (WS_align - 1), WS_align)
(t, ti) = s[WS].split(t, factor=vector_width)
(t, tx) = s[WS].split(t, factor=warp_size)
(t, ty) = s[WS].split(t, factor=block_row_warps)
(_, tz) = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name='k')
CL_compute = te.compute(CL_shape, (lambda ii, t0, t1, jj: te.sum((AL_gemm[(ii, t0, t1, k_gemm)].astype(out_dtype) * WL_gemm[(k_gemm, jj)].astype(out_dtype)), axis=k_gemm)), name='C')
s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape, 'row_major', AS_shape, AL_shape, in_dtype))
s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape, 'row_major', WS_shape, WL_shape, in_dtype))
s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape))
s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape))
(N, OH, OW, CO) = get_const_tuple(output.shape)
(KH, KW, CI, _) = get_const_tuple(kernel.shape)
cfg.add_flop((((((((2 * N) * OH) * OW) * CO) * CI) * KH) * KW))
| -3,248,361,691,220,659,700
|
Schedule tensorcore template
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
schedule_nhwc_tensorcore_cuda
|
HatsuneMiku4/incubator-tvm
|
python
|
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
(kh, kw, ic) = s[Conv].op.reduce_axis
out_dtype = Conv.dtype
(trans_paddata, kernel) = s[Conv].op.input_tensors
in_dtype = trans_paddata.dtype
(batch, _, _, _) = get_const_tuple(Conv.shape)
(_, _, _, out_channels) = get_const_tuple(kernel.shape)
paddata = s[trans_paddata].op.input_tensors
s[trans_paddata].compute_inline()
s[kernel].compute_inline()
s[paddata[0]].compute_inline()
AS = s.cache_read(trans_paddata, 'shared', [Conv])
WS = s.cache_read(kernel, 'shared', [Conv])
AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
ConvF = s.cache_write(Conv, 'wmma.accumulator')
if (Conv.op in s.outputs):
output = Conv
ConvS = s.cache_read(ConvF, 'shared', [Conv])
OL = ConvS
else:
output = s.outputs[0].output(0)
s[Conv].set_scope('shared')
OL = Conv
cfg.define_knob('block_row_warps', [1, 2, 4])
cfg.define_knob('block_col_warps', [1, 2, 4])
cfg.define_knob('warp_row_tiles', [1, 2, 4])
cfg.define_knob('warp_col_tiles', [1, 2, 4])
cfg.define_knob('chunk', [1, 2, 4, 8])
cfg.define_knob('offset', [0, 8])
cfg.define_knob('vector_width', [1, 2, 4, 8])
if (((batch % 16) == 0) and ((out_channels % 16) == 0)):
cfg.define_knob('wmma_m', [16, 8, 32])
elif (((batch % 8) == 0) and ((out_channels % 32) == 0)):
cfg.define_knob('wmma_m', [8, 16, 32])
elif (((batch % 32) == 0) and ((out_channels % 8) == 0)):
cfg.define_knob('wmma_m', [32, 16, 8])
target = tvm.target.Target.current()
if cfg.is_fallback:
ref_log = autotvm.tophub.load_reference_log(target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
cfg.fallback_with_reference_log(ref_log)
block_row_warps = cfg['block_row_warps'].val
block_col_warps = cfg['block_col_warps'].val
warp_row_tiles = cfg['warp_row_tiles'].val
warp_col_tiles = cfg['warp_col_tiles'].val
chunk = cfg['chunk'].val
offset = cfg['offset'].val
wmma_m = cfg['wmma_m'].val
vector_width = cfg['vector_width'].val
wmma_k = 16
if (wmma_m == 16):
wmma_n = 16
elif (wmma_m == 8):
wmma_n = 32
elif (wmma_m == 32):
wmma_n = 8
warp_size = 32
block_x = te.thread_axis('blockIdx.x')
block_y = te.thread_axis('blockIdx.y')
block_z = te.thread_axis('blockIdx.z')
thread_x = te.thread_axis('threadIdx.x')
thread_y = te.thread_axis('threadIdx.y')
thread_z = te.thread_axis('threadIdx.z')
def get_strides(extents):
return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
AS_align = ((chunk * wmma_k) + offset)
WS_align = (((warp_col_tiles * block_col_warps) * wmma_n) + offset)
block_factor_n = ((wmma_m * warp_row_tiles) * block_row_warps)
block_factor_o = ((wmma_n * warp_col_tiles) * block_col_warps)
CS_align = (block_factor_o + offset)
AS_strides = get_strides([1, 1, AS_align, 1])
AL_strides = get_strides([1, 1, wmma_k, 1])
WS_strides = get_strides([WS_align, 1])
WL_strides = get_strides([(wmma_n * warp_col_tiles), 1])
CL_strides = get_strides([1, 1, (wmma_n * warp_col_tiles), 1])
CS_strides = get_strides([1, 1, CS_align, 1])
(nc, hc, wc, oc) = output.op.axis
block_k = s[output].fuse(hc, wc)
s[output].bind(block_k, block_z)
(block_i, nc) = s[output].split(nc, factor=block_factor_n)
(block_j, oc) = s[output].split(oc, factor=block_factor_o)
s[output].reorder(block_k, block_i, block_j, nc, oc)
t = s[output].fuse(nc, oc)
(t, ti) = s[output].split(t, factor=vector_width)
(t, tx) = s[output].split(t, factor=warp_size)
(t, ty) = s[output].split(t, factor=block_row_warps)
(t, tz) = s[output].split(t, factor=block_col_warps)
s[output].bind(block_i, block_x)
s[output].bind(block_j, block_y)
s[output].bind(tz, thread_z)
s[output].bind(ty, thread_y)
s[output].bind(tx, thread_x)
s[output].vectorize(ti)
s[OL].compute_at(s[output], block_j)
(nc, hc, wc, oc) = OL.op.axis
s[OL].reorder(hc, wc, nc, oc)
s[OL].storage_align(wc, (CS_align - 1), CS_align)
(oc, ooc) = s[OL].split(oc, factor=wmma_n)
(oc, oci) = s[OL].split(oc, factor=warp_col_tiles)
(_, oc) = s[OL].split(oc, factor=block_col_warps)
(nc, nnc) = s[OL].split(nc, factor=wmma_m)
(nc, nci) = s[OL].split(nc, factor=warp_row_tiles)
(_, nc) = s[OL].split(nc, factor=block_row_warps)
s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
s[OL].bind(nc, thread_y)
s[OL].bind(oc, thread_z)
s[ConvF].compute_at(s[OL], oc)
(n, h, w, o) = ConvF.op.axis
(n, nnf) = s[ConvF].split(n, factor=wmma_m)
(o, oof) = s[ConvF].split(o, factor=wmma_n)
(ic, ii) = s[ConvF].split(ic, factor=wmma_k)
(ko, ki) = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], ki)
s[WF].compute_at(s[ConvF], ki)
(n, h, w, i) = AF.op.axis
(n, nn) = s[AF].split(n, factor=wmma_m)
(i, ii) = s[AF].split(i, factor=wmma_k)
s[AF].reorder(n, i, nn, ii)
(kh, kw, i, o) = WF.op.axis
(i, ii) = s[WF].split(i, factor=wmma_k)
(o, oo) = s[WF].split(o, factor=wmma_n)
s[WF].reorder(o, i, oo)
s[WF].reorder(i, o, ii, oo)
s[WS].compute_at(s[ConvF], ko)
s[AS].compute_at(s[ConvF], ko)
(n, h, w, i) = AS.op.axis
s[AS].reorder(h, w, n, i)
s[AS].storage_align(w, (AS_align - 1), AS_align)
t = s[AS].fuse(n, i)
(t, ti) = s[AS].split(t, factor=vector_width)
(t, tx) = s[AS].split(t, factor=warp_size)
(t, ty) = s[AS].split(t, factor=block_row_warps)
(_, tz) = s[AS].split(t, factor=block_col_warps)
s[AS].bind(ty, thread_y)
s[AS].bind(tz, thread_z)
s[AS].bind(tx, thread_x)
s[AS].vectorize(ti)
(kh, kw, ic, o) = WS.op.axis
t = s[WS].fuse(ic, o)
s[WS].storage_align(ic, (WS_align - 1), WS_align)
(t, ti) = s[WS].split(t, factor=vector_width)
(t, tx) = s[WS].split(t, factor=warp_size)
(t, ty) = s[WS].split(t, factor=block_row_warps)
(_, tz) = s[WS].split(t, factor=block_col_warps)
s[WS].bind(ty, thread_y)
s[WS].bind(tz, thread_z)
s[WS].bind(tx, thread_x)
s[WS].vectorize(ti)
shape = (wmma_m, wmma_n, wmma_k)
AS_shape = (wmma_m, 1, 1, wmma_k)
AL_shape = (wmma_m, 1, 1, wmma_k)
WS_shape = (wmma_k, wmma_n)
WL_shape = (wmma_k, wmma_n)
CL_shape = (wmma_m, 1, 1, wmma_n)
CS_shape = (wmma_m, 1, 1, wmma_n)
AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
k_gemm = te.reduce_axis((0, wmma_k), name='k')
CL_compute = te.compute(CL_shape, (lambda ii, t0, t1, jj: te.sum((AL_gemm[(ii, t0, t1, k_gemm)].astype(out_dtype) * WL_gemm[(k_gemm, jj)].astype(out_dtype)), axis=k_gemm)), name='C')
s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape, 'row_major', AS_shape, AL_shape, in_dtype))
s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape, 'row_major', WS_shape, WL_shape, in_dtype))
s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides, shape, out_dtype, CL_shape, CS_shape))
s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides, WL_strides, CL_strides, shape))
(N, OH, OW, CO) = get_const_tuple(output.shape)
(KH, KW, CI, _) = get_const_tuple(kernel.shape)
cfg.add_flop((((((((2 * N) * OH) * OW) * CO) * CI) * KH) * KW))
|
@autotvm.register_topi_compute('conv2d_nhwc_tensorcore.cuda')
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
'Compute conv2d with tensorcore for NCHW layout'
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
| -5,249,468,009,470,257,000
|
Compute conv2d with tensorcore for NCHW layout
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
conv2d_nhwc_tensorcore
|
HatsuneMiku4/incubator-tvm
|
python
|
@autotvm.register_topi_compute('conv2d_nhwc_tensorcore.cuda')
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
|
@autotvm.register_topi_schedule('conv2d_nhwc_tensorcore.cuda')
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
'TOPI schedule callback'
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if ('conv2d_nhwc_tensorcore' in op.tag):
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
| -8,333,864,018,856,409,000
|
TOPI schedule callback
|
topi/python/topi/cuda/conv2d_nhwc_tensorcore.py
|
schedule_conv2d_nhwc_tensorcore
|
HatsuneMiku4/incubator-tvm
|
python
|
@autotvm.register_topi_schedule('conv2d_nhwc_tensorcore.cuda')
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if ('conv2d_nhwc_tensorcore' in op.tag):
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s
|
@property
def path(self):
'str: Path added to client base.'
return 'cgi-bin/browse-edgar'
| -5,470,262,742,582,340,000
|
str: Path added to client base.
|
secedgar/core/company.py
|
path
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def path(self):
return 'cgi-bin/browse-edgar'
|
@property
def params(self):
':obj:`dict`: Parameters to include in requests.'
return self._params
| -2,608,281,908,461,058,000
|
:obj:`dict`: Parameters to include in requests.
|
secedgar/core/company.py
|
params
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def params(self):
return self._params
|
@property
def client(self):
'``secedgar.client._base``: Client to use to make requests.'
return self._client
| 3,123,311,791,598,018,000
|
``secedgar.client._base``: Client to use to make requests.
|
secedgar/core/company.py
|
client
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def client(self):
return self._client
|
@property
def start_date(self):
'Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched.'
return self._start_date
| 4,825,291,131,827,527,000
|
Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched.
|
secedgar/core/company.py
|
start_date
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def start_date(self):
return self._start_date
|
@property
def match_format(self):
'The match format to use when searching for filings.'
return self._match_format
| -6,210,562,446,347,363,000
|
The match format to use when searching for filings.
|
secedgar/core/company.py
|
match_format
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def match_format(self):
return self._match_format
|
@property
def end_date(self):
'Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched.'
return self._end_date
| -4,085,546,527,455,213,000
|
Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched.
|
secedgar/core/company.py
|
end_date
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def end_date(self):
return self._end_date
|
@property
def filing_type(self):
'``secedgar.core.FilingType``: FilingType enum of filing.'
return self._filing_type
| -7,117,107,396,546,987,000
|
``secedgar.core.FilingType``: FilingType enum of filing.
|
secedgar/core/company.py
|
filing_type
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def filing_type(self):
return self._filing_type
|
@property
def count(self):
'Number of filings to fetch.'
return self._count
| 1,136,969,923,357,941,900
|
Number of filings to fetch.
|
secedgar/core/company.py
|
count
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def count(self):
return self._count
|
@property
def cik_lookup(self):
'``secedgar.cik_lookup.CIKLookup``: CIKLookup object.'
return self._cik_lookup
| -6,005,614,127,776,856,000
|
``secedgar.cik_lookup.CIKLookup``: CIKLookup object.
|
secedgar/core/company.py
|
cik_lookup
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
@property
def cik_lookup(self):
return self._cik_lookup
|
def get_urls(self, **kwargs):
'Get urls for all CIKs given to Filing object.\n\n Args:\n **kwargs: Anything to be passed to requests when making get request.\n See keyword arguments accepted for\n ``secedgar.client._base.AbstractClient.get_soup``.\n\n Returns:\n urls (list): List of urls for txt files to download.\n '
return {key: self._get_urls_for_cik(cik, **kwargs) for (key, cik) in self.cik_lookup.lookup_dict.items()}
| 5,007,671,513,216,153,000
|
Get urls for all CIKs given to Filing object.
Args:
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
urls (list): List of urls for txt files to download.
|
secedgar/core/company.py
|
get_urls
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
def get_urls(self, **kwargs):
'Get urls for all CIKs given to Filing object.\n\n Args:\n **kwargs: Anything to be passed to requests when making get request.\n See keyword arguments accepted for\n ``secedgar.client._base.AbstractClient.get_soup``.\n\n Returns:\n urls (list): List of urls for txt files to download.\n '
return {key: self._get_urls_for_cik(cik, **kwargs) for (key, cik) in self.cik_lookup.lookup_dict.items()}
|
def _get_urls_for_cik(self, cik, **kwargs):
'Get all urls for specific company according to CIK.\n\n Must match start date, end date, filing_type, and count parameters.\n\n Args:\n cik (str): CIK for company.\n **kwargs: Anything to be passed to requests when making get request.\n See keyword arguments accepted for\n ``secedgar.client._base.AbstractClient.get_soup``.\n\n Returns:\n txt_urls (list of str): Up to the desired number of URLs for that specific company\n if available.\n '
self.params['CIK'] = cik
links = []
self.params['start'] = 0
while ((self.count is None) or (len(links) < self.count)):
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all('filinghref')])
self.params['start'] += self.client.batch_size
if (len(data.find_all('filinghref')) == 0):
break
txt_urls = [(link[:link.rfind('-')].strip() + '.txt') for link in links]
if (isinstance(self.count, int) and (len(txt_urls) < self.count)):
warnings.warn('Only {num} of {count} filings were found for {cik}.'.format(num=len(txt_urls), count=self.count, cik=cik))
return txt_urls[:self.count]
| -2,237,234,858,518,616,000
|
Get all urls for specific company according to CIK.
Must match start date, end date, filing_type, and count parameters.
Args:
cik (str): CIK for company.
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
txt_urls (list of str): Up to the desired number of URLs for that specific company
if available.
|
secedgar/core/company.py
|
_get_urls_for_cik
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
def _get_urls_for_cik(self, cik, **kwargs):
'Get all urls for specific company according to CIK.\n\n Must match start date, end date, filing_type, and count parameters.\n\n Args:\n cik (str): CIK for company.\n **kwargs: Anything to be passed to requests when making get request.\n See keyword arguments accepted for\n ``secedgar.client._base.AbstractClient.get_soup``.\n\n Returns:\n txt_urls (list of str): Up to the desired number of URLs for that specific company\n if available.\n '
self.params['CIK'] = cik
links = []
self.params['start'] = 0
while ((self.count is None) or (len(links) < self.count)):
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all('filinghref')])
self.params['start'] += self.client.batch_size
if (len(data.find_all('filinghref')) == 0):
break
txt_urls = [(link[:link.rfind('-')].strip() + '.txt') for link in links]
if (isinstance(self.count, int) and (len(txt_urls) < self.count)):
warnings.warn('Only {num} of {count} filings were found for {cik}.'.format(num=len(txt_urls), count=self.count, cik=cik))
return txt_urls[:self.count]
|
def save(self, directory, dir_pattern=None, file_pattern=None):
'Save files in specified directory.\n\n Each txt url looks something like:\n https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt\n\n Args:\n directory (str): Path to directory where files should be saved.\n dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".\n Valid options are {cik} and/or {type}.\n file_pattern (str): Format string for files. Default is "{accession_number}".\n Valid options are {accession_number}.\n\n Returns:\n None\n\n Raises:\n ValueError: If no text urls are available for given filing object.\n '
urls = self.get_urls_safely()
if (dir_pattern is None):
dir_pattern = os.path.join('{cik}', '{type}')
if (file_pattern is None):
file_pattern = '{accession_number}'
inputs = []
for (cik, links) in urls.items():
formatted_dir = dir_pattern.format(cik=cik, type=self.filing_type.value)
for link in links:
formatted_file = file_pattern.format(accession_number=self.get_accession_number(link))
path = os.path.join(directory, formatted_dir, formatted_file)
inputs.append((link, path))
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.wait_for_download_async(inputs))
| -3,339,035,536,558,945,300
|
Save files in specified directory.
Each txt url looks something like:
https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt
Args:
directory (str): Path to directory where files should be saved.
dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".
Valid options are {cik} and/or {type}.
file_pattern (str): Format string for files. Default is "{accession_number}".
Valid options are {accession_number}.
Returns:
None
Raises:
ValueError: If no text urls are available for given filing object.
|
secedgar/core/company.py
|
save
|
Ahrvo-Trading-Systems/sec-edgar
|
python
|
def save(self, directory, dir_pattern=None, file_pattern=None):
'Save files in specified directory.\n\n Each txt url looks something like:\n https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt\n\n Args:\n directory (str): Path to directory where files should be saved.\n dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".\n Valid options are {cik} and/or {type}.\n file_pattern (str): Format string for files. Default is "{accession_number}".\n Valid options are {accession_number}.\n\n Returns:\n None\n\n Raises:\n ValueError: If no text urls are available for given filing object.\n '
urls = self.get_urls_safely()
if (dir_pattern is None):
dir_pattern = os.path.join('{cik}', '{type}')
if (file_pattern is None):
file_pattern = '{accession_number}'
inputs = []
for (cik, links) in urls.items():
formatted_dir = dir_pattern.format(cik=cik, type=self.filing_type.value)
for link in links:
formatted_file = file_pattern.format(accession_number=self.get_accession_number(link))
path = os.path.join(directory, formatted_dir, formatted_file)
inputs.append((link, path))
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.wait_for_download_async(inputs))
|
def __init__(self, host='localhost', port=8125, prefix=None, maxudpsize=512, ipv6=False):
'Create a new client.'
fam = (socket.AF_INET6 if ipv6 else socket.AF_INET)
(family, _, _, _, addr) = socket.getaddrinfo(host, port, fam, socket.SOCK_DGRAM)[0]
self._addr = addr
self._sock = socket.socket(family, socket.SOCK_DGRAM)
self._prefix = prefix
self._maxudpsize = maxudpsize
| 3,399,727,971,886,863,400
|
Create a new client.
|
statsd/client/udp.py
|
__init__
|
alanhamlett/pystatsd
|
python
|
def __init__(self, host='localhost', port=8125, prefix=None, maxudpsize=512, ipv6=False):
fam = (socket.AF_INET6 if ipv6 else socket.AF_INET)
(family, _, _, _, addr) = socket.getaddrinfo(host, port, fam, socket.SOCK_DGRAM)[0]
self._addr = addr
self._sock = socket.socket(family, socket.SOCK_DGRAM)
self._prefix = prefix
self._maxudpsize = maxudpsize
|
def _send(self, data):
'Send data to statsd.'
try:
self._sock.sendto(data.encode('ascii'), self._addr)
except (socket.error, RuntimeError):
pass
| -785,161,261,134,684,800
|
Send data to statsd.
|
statsd/client/udp.py
|
_send
|
alanhamlett/pystatsd
|
python
|
def _send(self, data):
try:
self._sock.sendto(data.encode('ascii'), self._addr)
except (socket.error, RuntimeError):
pass
|
def _grpc_launch_server(self) -> Optional[int]:
'Launch grpc server and return port.'
kwargs: Dict[(str, Any)] = dict(close_fds=True)
pid = os.getpid()
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, f'port-{pid}.txt')
pid_str = str(os.getpid())
exec_cmd_list = [sys.executable, '-m']
if os.environ.get('COVERAGE_RCFILE'):
exec_cmd_list += ['coverage', 'run', '-m']
internal_proc = subprocess.Popen((exec_cmd_list + ['wandb', 'service', '--port-filename', fname, '--pid', pid_str, '--debug', 'true']), env=os.environ, **kwargs)
port = self._grpc_wait_for_port(fname, proc=internal_proc)
return port
| 100,848,241,870,260,340
|
Launch grpc server and return port.
|
wandb/sdk/service/service.py
|
_grpc_launch_server
|
KnightZhang625/client
|
python
|
def _grpc_launch_server(self) -> Optional[int]:
kwargs: Dict[(str, Any)] = dict(close_fds=True)
pid = os.getpid()
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, f'port-{pid}.txt')
pid_str = str(os.getpid())
exec_cmd_list = [sys.executable, '-m']
if os.environ.get('COVERAGE_RCFILE'):
exec_cmd_list += ['coverage', 'run', '-m']
internal_proc = subprocess.Popen((exec_cmd_list + ['wandb', 'service', '--port-filename', fname, '--pid', pid_str, '--debug', 'true']), env=os.environ, **kwargs)
port = self._grpc_wait_for_port(fname, proc=internal_proc)
return port
|
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100, output_dir=None, summary_writer=None):
'Returns a default set of typically-used monitors.\n\n Args:\n loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`\n at the default interval.\n summary_op: See `SummarySaver`.\n save_summary_steps: See `SummarySaver`.\n output_dir: See `SummarySaver`.\n summary_writer: See `SummarySaver`.\n Returns:\n `list` of monitors.\n '
monitors = []
if (loss_op is not None):
monitors.append(PrintTensor(tensor_names={'loss': loss_op.name}))
if (summary_op is not None):
monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps, output_dir=output_dir, summary_writer=summary_writer))
return monitors
| 6,604,171,130,763,241,000
|
Returns a default set of typically-used monitors.
Args:
loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
at the default interval.
summary_op: See `SummarySaver`.
save_summary_steps: See `SummarySaver`.
output_dir: See `SummarySaver`.
summary_writer: See `SummarySaver`.
Returns:
`list` of monitors.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
get_default_monitors
|
Najah-lshanableh/tensorflow
|
python
|
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100, output_dir=None, summary_writer=None):
'Returns a default set of typically-used monitors.\n\n Args:\n loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`\n at the default interval.\n summary_op: See `SummarySaver`.\n save_summary_steps: See `SummarySaver`.\n output_dir: See `SummarySaver`.\n summary_writer: See `SummarySaver`.\n Returns:\n `list` of monitors.\n '
monitors = []
if (loss_op is not None):
monitors.append(PrintTensor(tensor_names={'loss': loss_op.name}))
if (summary_op is not None):
monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps, output_dir=output_dir, summary_writer=summary_writer))
return monitors
|
def _as_graph_element(obj):
'Retrieves Graph element.'
graph = ops.get_default_graph()
if (not isinstance(obj, six.string_types)):
if ((not hasattr(obj, 'graph')) or (obj.graph != graph)):
raise ValueError(('Passed %s should have graph attribute that is equal to current graph %s.' % (obj, graph)))
return obj
if (':' in obj):
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element((obj + ':0'))
try:
graph.as_graph_element((obj + ':1'))
except (KeyError, ValueError):
pass
else:
raise ValueError(('Name %s is ambiguous, as this `Operation` has multiple outputs (at least 2).' % obj))
return element
| -4,531,516,043,276,649,000
|
Retrieves Graph element.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
_as_graph_element
|
Najah-lshanableh/tensorflow
|
python
|
def _as_graph_element(obj):
graph = ops.get_default_graph()
if (not isinstance(obj, six.string_types)):
if ((not hasattr(obj, 'graph')) or (obj.graph != graph)):
raise ValueError(('Passed %s should have graph attribute that is equal to current graph %s.' % (obj, graph)))
return obj
if (':' in obj):
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element((obj + ':0'))
try:
graph.as_graph_element((obj + ':1'))
except (KeyError, ValueError):
pass
else:
raise ValueError(('Name %s is ambiguous, as this `Operation` has multiple outputs (at least 2).' % obj))
return element
|
def set_estimator(self, estimator):
'A setter called automatically by the target estimator.\n\n If the estimator is locked, this method does nothing.\n\n Args:\n estimator: the estimator that this monitor monitors.\n\n Raises:\n ValueError: if the estimator is None.\n '
if self._estimator_locked:
return
if (estimator is None):
raise ValueError('Missing estimator.')
self._estimator = estimator
| -7,733,641,930,113,615,000
|
A setter called automatically by the target estimator.
If the estimator is locked, this method does nothing.
Args:
estimator: the estimator that this monitor monitors.
Raises:
ValueError: if the estimator is None.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
set_estimator
|
Najah-lshanableh/tensorflow
|
python
|
def set_estimator(self, estimator):
'A setter called automatically by the target estimator.\n\n If the estimator is locked, this method does nothing.\n\n Args:\n estimator: the estimator that this monitor monitors.\n\n Raises:\n ValueError: if the estimator is None.\n '
if self._estimator_locked:
return
if (estimator is None):
raise ValueError('Missing estimator.')
self._estimator = estimator
|
def _lock_estimator(self):
'Locks the estimator until _unlock_estimator is called.'
self._estimator_locked = True
| 3,023,368,246,751,459,000
|
Locks the estimator until _unlock_estimator is called.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
_lock_estimator
|
Najah-lshanableh/tensorflow
|
python
|
def _lock_estimator(self):
self._estimator_locked = True
|
def _unlock_estimator(self):
'Unlocks the estimator.'
self._estimator_locked = False
| 8,796,999,282,623,188,000
|
Unlocks the estimator.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
_unlock_estimator
|
Najah-lshanableh/tensorflow
|
python
|
def _unlock_estimator(self):
self._estimator_locked = False
|
def begin(self, max_steps=None):
"Called at the beginning of training.\n\n When called, the default graph is the one we are executing.\n\n Args:\n max_steps: `int`, the maximum global step this training will run until.\n\n Raises:\n ValueError: if we've already begun a run.\n "
if self._begun:
raise ValueError('begin called twice without end.')
self._max_steps = max_steps
self._begun = True
| -249,357,529,644,142,240
|
Called at the beginning of training.
When called, the default graph is the one we are executing.
Args:
max_steps: `int`, the maximum global step this training will run until.
Raises:
ValueError: if we've already begun a run.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
begin
|
Najah-lshanableh/tensorflow
|
python
|
def begin(self, max_steps=None):
"Called at the beginning of training.\n\n When called, the default graph is the one we are executing.\n\n Args:\n max_steps: `int`, the maximum global step this training will run until.\n\n Raises:\n ValueError: if we've already begun a run.\n "
if self._begun:
raise ValueError('begin called twice without end.')
self._max_steps = max_steps
self._begun = True
|
def end(self, session=None):
"Callback at the end of training/evaluation.\n\n Args:\n session: A `tf.Session` object that can be used to run ops.\n\n Raises:\n ValueError: if we've not begun a run.\n "
_ = session
if (not self._begun):
raise ValueError('end called without begin.')
self._max_steps = None
self._begun = False
| 3,358,963,026,610,282,500
|
Callback at the end of training/evaluation.
Args:
session: A `tf.Session` object that can be used to run ops.
Raises:
ValueError: if we've not begun a run.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
end
|
Najah-lshanableh/tensorflow
|
python
|
def end(self, session=None):
"Callback at the end of training/evaluation.\n\n Args:\n session: A `tf.Session` object that can be used to run ops.\n\n Raises:\n ValueError: if we've not begun a run.\n "
_ = session
if (not self._begun):
raise ValueError('end called without begin.')
self._max_steps = None
self._begun = False
|
def epoch_begin(self, epoch):
"Begin epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if we've already begun an epoch, or `epoch` < 0.\n "
if (self._current_epoch is not None):
raise ValueError('epoch_begin called twice without epoch_end.')
if (epoch < 0):
raise ValueError(('Invalid epoch %s.' % epoch))
self._current_epoch = epoch
| -6,977,125,567,667,057,000
|
Begin epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've already begun an epoch, or `epoch` < 0.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
epoch_begin
|
Najah-lshanableh/tensorflow
|
python
|
def epoch_begin(self, epoch):
"Begin epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if we've already begun an epoch, or `epoch` < 0.\n "
if (self._current_epoch is not None):
raise ValueError('epoch_begin called twice without epoch_end.')
if (epoch < 0):
raise ValueError(('Invalid epoch %s.' % epoch))
self._current_epoch = epoch
|
def epoch_end(self, epoch):
"End epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if we've not begun an epoch, or `epoch` number does not match.\n "
if (self._current_epoch != epoch):
raise ValueError('epoch_end expected %s but got %s.', self._current_epoch, epoch)
self._current_epoch = None
| 7,613,804,702,632,359,000
|
End epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've not begun an epoch, or `epoch` number does not match.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
epoch_end
|
Najah-lshanableh/tensorflow
|
python
|
def epoch_end(self, epoch):
"End epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if we've not begun an epoch, or `epoch` number does not match.\n "
if (self._current_epoch != epoch):
raise ValueError('epoch_end expected %s but got %s.', self._current_epoch, epoch)
self._current_epoch = None
|
def step_begin(self, step):
"Callback before training step begins.\n\n You may use this callback to request evaluation of additional tensors\n in the graph.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n List of `Tensor` objects or string tensor names to be run.\n\n Raises:\n ValueError: if we've already begun a step, or `step` < 0, or\n `step` > `max_steps`.\n "
if ((step < 0) or ((self._max_steps is not None) and (step > self._max_steps))):
raise ValueError(('Invalid step %s.' % step))
self._current_step = step
return []
| -5,978,711,741,628,458,000
|
Callback before training step begins.
You may use this callback to request evaluation of additional tensors
in the graph.
Args:
step: `int`, the current value of the global step.
Returns:
List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
`step` > `max_steps`.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
step_begin
|
Najah-lshanableh/tensorflow
|
python
|
def step_begin(self, step):
"Callback before training step begins.\n\n You may use this callback to request evaluation of additional tensors\n in the graph.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n List of `Tensor` objects or string tensor names to be run.\n\n Raises:\n ValueError: if we've already begun a step, or `step` < 0, or\n `step` > `max_steps`.\n "
if ((step < 0) or ((self._max_steps is not None) and (step > self._max_steps))):
raise ValueError(('Invalid step %s.' % step))
self._current_step = step
return []
|
def step_end(self, step, output):
"Callback after training step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Note that this method is not called if the call to `Session.run()` that\n followed the last call to `step_begin()` failed.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. True if training should stop.\n\n Raises:\n ValueError: if we've not begun a step, or `step` number does not match.\n "
if (self._current_step != step):
raise ValueError('step_end expected %s but got %s.', self._current_step, step)
self._current_step = None
return False
| 8,606,716,016,347,808,000
|
Callback after training step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Note that this method is not called if the call to `Session.run()` that
followed the last call to `step_begin()` failed.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
Raises:
ValueError: if we've not begun a step, or `step` number does not match.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
step_end
|
Najah-lshanableh/tensorflow
|
python
|
def step_end(self, step, output):
"Callback after training step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Note that this method is not called if the call to `Session.run()` that\n followed the last call to `step_begin()` failed.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. True if training should stop.\n\n Raises:\n ValueError: if we've not begun a step, or `step` number does not match.\n "
if (self._current_step != step):
raise ValueError('step_end expected %s but got %s.', self._current_step, step)
self._current_step = None
return False
|
def post_step(self, step, session):
'Callback after the step is finished.\n\n Called after step_end and receives session to perform extra session.run\n calls. If failure occurred in the process, will be called as well.\n\n Args:\n step: `int`, global step of the model.\n session: `Session` object.\n '
_ = (step, session)
| -6,104,330,041,348,578,000
|
Callback after the step is finished.
Called after step_end and receives session to perform extra session.run
calls. If failure occurred in the process, will be called as well.
Args:
step: `int`, global step of the model.
session: `Session` object.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
post_step
|
Najah-lshanableh/tensorflow
|
python
|
def post_step(self, step, session):
'Callback after the step is finished.\n\n Called after step_end and receives session to perform extra session.run\n calls. If failure occurred in the process, will be called as well.\n\n Args:\n step: `int`, global step of the model.\n session: `Session` object.\n '
_ = (step, session)
|
def __init__(self, every_n_steps=100, first_n_steps=1):
'Initializes an `EveryN` monitor.\n\n Args:\n every_n_steps: `int`, the number of steps to allow between callbacks.\n first_n_steps: `int`, specifying the number of initial steps during\n which the callbacks will always be executed, regardless of the value\n of `every_n_steps`. Note that this value is relative to the global step\n '
super(EveryN, self).__init__()
self._every_n_steps = every_n_steps
self._first_n_steps = first_n_steps
self._last_successful_step = None
self._last_active_step = 0
self._every_n_step_begin_called = False
| 4,045,912,460,634,086,400
|
Initializes an `EveryN` monitor.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
first_n_steps: `int`, specifying the number of initial steps during
which the callbacks will always be executed, regardless of the value
of `every_n_steps`. Note that this value is relative to the global step
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, every_n_steps=100, first_n_steps=1):
'Initializes an `EveryN` monitor.\n\n Args:\n every_n_steps: `int`, the number of steps to allow between callbacks.\n first_n_steps: `int`, specifying the number of initial steps during\n which the callbacks will always be executed, regardless of the value\n of `every_n_steps`. Note that this value is relative to the global step\n '
super(EveryN, self).__init__()
self._every_n_steps = every_n_steps
self._first_n_steps = first_n_steps
self._last_successful_step = None
self._last_active_step = 0
self._every_n_step_begin_called = False
|
def every_n_step_begin(self, step):
"Callback before every n'th step begins.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n A `list` of tensors that will be evaluated at this step.\n "
return []
| 864,840,899,116,364,400
|
Callback before every n'th step begins.
Args:
step: `int`, the current value of the global step.
Returns:
A `list` of tensors that will be evaluated at this step.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
every_n_step_begin
|
Najah-lshanableh/tensorflow
|
python
|
def every_n_step_begin(self, step):
"Callback before every n'th step begins.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n A `list` of tensors that will be evaluated at this step.\n "
return []
|
def every_n_step_end(self, step, outputs):
"Callback after every n'th step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Args:\n step: `int`, the current value of the global step.\n outputs: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. True if training should stop.\n "
return False
| 3,292,307,751,707,634,000
|
Callback after every n'th step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Args:
step: `int`, the current value of the global step.
outputs: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
every_n_step_end
|
Najah-lshanableh/tensorflow
|
python
|
def every_n_step_end(self, step, outputs):
"Callback after every n'th step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Args:\n step: `int`, the current value of the global step.\n outputs: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. True if training should stop.\n "
return False
|
def every_n_post_step(self, step, session):
'Callback after a step is finished or `end()` is called.\n\n Args:\n step: `int`, the current value of the global step.\n session: `Session` object.\n '
pass
| -5,656,285,919,200,097,000
|
Callback after a step is finished or `end()` is called.
Args:
step: `int`, the current value of the global step.
session: `Session` object.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
every_n_post_step
|
Najah-lshanableh/tensorflow
|
python
|
def every_n_post_step(self, step, session):
'Callback after a step is finished or `end()` is called.\n\n Args:\n step: `int`, the current value of the global step.\n session: `Session` object.\n '
pass
|
def step_begin(self, step):
'Overrides `BaseMonitor.step_begin`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n Returns:\n A `list`, the result of every_n_step_begin, if that was called this step,\n or an empty list otherwise.\n\n Raises:\n ValueError: if called more than once during a step.\n '
super(EveryN, self).step_begin(step)
if ((step <= self._first_n_steps) or (step >= (self._every_n_steps + self._last_active_step)) or (step == self._max_steps)):
self._every_n_step_begin_called = True
return self.every_n_step_begin(step)
self._every_n_step_begin_called = False
return []
| -3,994,016,857,027,051,000
|
Overrides `BaseMonitor.step_begin`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
Returns:
A `list`, the result of every_n_step_begin, if that was called this step,
or an empty list otherwise.
Raises:
ValueError: if called more than once during a step.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
step_begin
|
Najah-lshanableh/tensorflow
|
python
|
def step_begin(self, step):
'Overrides `BaseMonitor.step_begin`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n Returns:\n A `list`, the result of every_n_step_begin, if that was called this step,\n or an empty list otherwise.\n\n Raises:\n ValueError: if called more than once during a step.\n '
super(EveryN, self).step_begin(step)
if ((step <= self._first_n_steps) or (step >= (self._every_n_steps + self._last_active_step)) or (step == self._max_steps)):
self._every_n_step_begin_called = True
return self.every_n_step_begin(step)
self._every_n_step_begin_called = False
return []
|
def step_end(self, step, output):
'Overrides `BaseMonitor.step_end`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n Returns:\n `bool`, the result of every_n_step_end, if that was called this step,\n or `False` otherwise.\n '
super(EveryN, self).step_end(step, output)
if self._every_n_step_begin_called:
return self.every_n_step_end(step, output)
return False
| 7,474,241,267,920,073,000
|
Overrides `BaseMonitor.step_end`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`, the result of every_n_step_end, if that was called this step,
or `False` otherwise.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
step_end
|
Najah-lshanableh/tensorflow
|
python
|
def step_end(self, step, output):
'Overrides `BaseMonitor.step_end`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n Returns:\n `bool`, the result of every_n_step_end, if that was called this step,\n or `False` otherwise.\n '
super(EveryN, self).step_end(step, output)
if self._every_n_step_begin_called:
return self.every_n_step_end(step, output)
return False
|
def __init__(self, num_steps=None, last_step=None):
'Create a StopAtStep monitor.\n\n This monitor requests stop after either a number of steps have been\n executed or a last step has been reached. Only of the two options can be\n specified.\n\n if `num_steps` is specified, it indicates the number of steps to execute\n after `begin()` is called. If instead `last_step` is specified, it\n indicates the last step we want to execute, as passed to the `step_begin()`\n call.\n\n Args:\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n '
super(StopAtStep, self).__init__()
if ((num_steps is None) and (last_step is None)):
raise ValueError('One of num_steps or last_step must be specified.')
if ((num_steps is not None) and (last_step is not None)):
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
| 8,602,064,052,097,256,000
|
Create a StopAtStep monitor.
This monitor requests stop after either a number of steps have been
executed or a last step has been reached. Only of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `step_begin()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, num_steps=None, last_step=None):
'Create a StopAtStep monitor.\n\n This monitor requests stop after either a number of steps have been\n executed or a last step has been reached. Only of the two options can be\n specified.\n\n if `num_steps` is specified, it indicates the number of steps to execute\n after `begin()` is called. If instead `last_step` is specified, it\n indicates the last step we want to execute, as passed to the `step_begin()`\n call.\n\n Args:\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n '
super(StopAtStep, self).__init__()
if ((num_steps is None) and (last_step is None)):
raise ValueError('One of num_steps or last_step must be specified.')
if ((num_steps is not None) and (last_step is not None)):
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
|
def __init__(self, tensor_names, every_n=100, first_n=1):
'Initializes a PrintTensor monitor.\n\n Args:\n tensor_names: `dict` of tag to tensor names or\n `iterable` of tensor names (strings).\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n '
super(PrintTensor, self).__init__(every_n, first_n)
if (not isinstance(tensor_names, dict)):
tensor_names = {item: item for item in tensor_names}
self._tensor_names = tensor_names
| 8,793,686,181,907,383,000
|
Initializes a PrintTensor monitor.
Args:
tensor_names: `dict` of tag to tensor names or
`iterable` of tensor names (strings).
every_n: `int`, print every N steps. See `PrintN.`
first_n: `int`, also print the first N steps. See `PrintN.`
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, tensor_names, every_n=100, first_n=1):
'Initializes a PrintTensor monitor.\n\n Args:\n tensor_names: `dict` of tag to tensor names or\n `iterable` of tensor names (strings).\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n '
super(PrintTensor, self).__init__(every_n, first_n)
if (not isinstance(tensor_names, dict)):
tensor_names = {item: item for item in tensor_names}
self._tensor_names = tensor_names
|
def __init__(self, scope=None, every_n=100, first_n=1):
'Initializes LoggingTrainable monitor.\n\n Args:\n scope: An optional string to match variable names using re.match.\n every_n: Print every N steps.\n first_n: Print first N steps.\n '
super(LoggingTrainable, self).__init__(every_n, first_n)
self._scope = scope
| -239,949,726,753,383,100
|
Initializes LoggingTrainable monitor.
Args:
scope: An optional string to match variable names using re.match.
every_n: Print every N steps.
first_n: Print first N steps.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, scope=None, every_n=100, first_n=1):
'Initializes LoggingTrainable monitor.\n\n Args:\n scope: An optional string to match variable names using re.match.\n every_n: Print every N steps.\n first_n: Print first N steps.\n '
super(LoggingTrainable, self).__init__(every_n, first_n)
self._scope = scope
|
def __init__(self, summary_op, save_steps=100, output_dir=None, summary_writer=None, scaffold=None):
"Initializes a `SummarySaver` monitor.\n\n Args:\n summary_op: `Tensor` of type `string`. A serialized `Summary` protocol\n buffer, as output by TF summary methods like `scalar_summary` or\n `merge_all_summaries`.\n save_steps: `int`, save summaries every N steps. See `EveryN`.\n output_dir: `string`, the directory to save the summaries to. Only used\n if no `summary_writer` is supplied.\n summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\n one will be created accordingly.\n scaffold: `Scaffold` to get summary_op if it's not provided.\n "
super(SummarySaver, self).__init__(every_n_steps=save_steps)
self._summary_op = summary_op
self._summary_writer = summary_writer
if ((summary_writer is None) and output_dir):
self._summary_writer = summary_io.SummaryWriter(output_dir)
self._scaffold = scaffold
| 7,110,141,303,646,149,000
|
Initializes a `SummarySaver` monitor.
Args:
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, summary_op, save_steps=100, output_dir=None, summary_writer=None, scaffold=None):
"Initializes a `SummarySaver` monitor.\n\n Args:\n summary_op: `Tensor` of type `string`. A serialized `Summary` protocol\n buffer, as output by TF summary methods like `scalar_summary` or\n `merge_all_summaries`.\n save_steps: `int`, save summaries every N steps. See `EveryN`.\n output_dir: `string`, the directory to save the summaries to. Only used\n if no `summary_writer` is supplied.\n summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\n one will be created accordingly.\n scaffold: `Scaffold` to get summary_op if it's not provided.\n "
super(SummarySaver, self).__init__(every_n_steps=save_steps)
self._summary_op = summary_op
self._summary_writer = summary_writer
if ((summary_writer is None) and output_dir):
self._summary_writer = summary_io.SummaryWriter(output_dir)
self._scaffold = scaffold
|
def __init__(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric='loss', early_stopping_metric_minimize=True, name=None):
'Initializes a ValidationMonitor.\n\n Args:\n x: See `BaseEstimator.evaluate`.\n y: See `BaseEstimator.evaluate`.\n input_fn: See `BaseEstimator.evaluate`.\n batch_size: See `BaseEstimator.evaluate`.\n eval_steps: See `BaseEstimator.evaluate`.\n every_n_steps: Check for new checkpoints to evaluate every N steps. If a\n new checkpoint is found, it is evaluated. See `EveryN`.\n metrics: See `BaseEstimator.evaluate`.\n early_stopping_rounds: `int`. If the metric indicated by\n `early_stopping_metric` does not change according to\n `early_stopping_metric_minimize` for this many steps, then training\n will be stopped.\n early_stopping_metric: `string`, name of the metric to check for early\n stopping.\n early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is\n expected to decrease (thus early stopping occurs when this metric\n stops decreasing), False if `early_stopping_metric` is expected to\n increase. Typically, `early_stopping_metric_minimize` is True for\n loss metrics like mean squared error, and False for performance\n metrics like accuracy.\n name: See `BaseEstimator.evaluate`.\n\n Raises:\n ValueError: If both x and input_fn are provided.\n '
super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps, first_n_steps=(- 1))
if ((x is None) and (input_fn is None)):
raise ValueError('Either x or input_fn should be provided.')
self.x = x
self.y = y
self.input_fn = input_fn
self.batch_size = batch_size
self.eval_steps = eval_steps
self.metrics = metrics
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_metric = early_stopping_metric
self.early_stopping_metric_minimize = early_stopping_metric_minimize
self.name = name
self._best_value_step = None
self._best_value = None
self._early_stopped = False
self._latest_path = None
self._latest_path_step = None
| 6,542,023,680,113,299,000
|
Initializes a ValidationMonitor.
Args:
x: See `BaseEstimator.evaluate`.
y: See `BaseEstimator.evaluate`.
input_fn: See `BaseEstimator.evaluate`.
batch_size: See `BaseEstimator.evaluate`.
eval_steps: See `BaseEstimator.evaluate`.
every_n_steps: Check for new checkpoints to evaluate every N steps. If a
new checkpoint is found, it is evaluated. See `EveryN`.
metrics: See `BaseEstimator.evaluate`.
early_stopping_rounds: `int`. If the metric indicated by
`early_stopping_metric` does not change according to
`early_stopping_metric_minimize` for this many steps, then training
will be stopped.
early_stopping_metric: `string`, name of the metric to check for early
stopping.
early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
expected to decrease (thus early stopping occurs when this metric
stops decreasing), False if `early_stopping_metric` is expected to
increase. Typically, `early_stopping_metric_minimize` is True for
loss metrics like mean squared error, and False for performance
metrics like accuracy.
name: See `BaseEstimator.evaluate`.
Raises:
ValueError: If both x and input_fn are provided.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric='loss', early_stopping_metric_minimize=True, name=None):
'Initializes a ValidationMonitor.\n\n Args:\n x: See `BaseEstimator.evaluate`.\n y: See `BaseEstimator.evaluate`.\n input_fn: See `BaseEstimator.evaluate`.\n batch_size: See `BaseEstimator.evaluate`.\n eval_steps: See `BaseEstimator.evaluate`.\n every_n_steps: Check for new checkpoints to evaluate every N steps. If a\n new checkpoint is found, it is evaluated. See `EveryN`.\n metrics: See `BaseEstimator.evaluate`.\n early_stopping_rounds: `int`. If the metric indicated by\n `early_stopping_metric` does not change according to\n `early_stopping_metric_minimize` for this many steps, then training\n will be stopped.\n early_stopping_metric: `string`, name of the metric to check for early\n stopping.\n early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is\n expected to decrease (thus early stopping occurs when this metric\n stops decreasing), False if `early_stopping_metric` is expected to\n increase. Typically, `early_stopping_metric_minimize` is True for\n loss metrics like mean squared error, and False for performance\n metrics like accuracy.\n name: See `BaseEstimator.evaluate`.\n\n Raises:\n ValueError: If both x and input_fn are provided.\n '
super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps, first_n_steps=(- 1))
if ((x is None) and (input_fn is None)):
raise ValueError('Either x or input_fn should be provided.')
self.x = x
self.y = y
self.input_fn = input_fn
self.batch_size = batch_size
self.eval_steps = eval_steps
self.metrics = metrics
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_metric = early_stopping_metric
self.early_stopping_metric_minimize = early_stopping_metric_minimize
self.name = name
self._best_value_step = None
self._best_value = None
self._early_stopped = False
self._latest_path = None
self._latest_path_step = None
|
@property
def early_stopped(self):
'Returns True if this monitor caused an early stop.'
return self._early_stopped
| -4,954,369,659,552,044,000
|
Returns True if this monitor caused an early stop.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
early_stopped
|
Najah-lshanableh/tensorflow
|
python
|
@property
def early_stopped(self):
return self._early_stopped
|
@property
def best_step(self):
'Returns the step at which the best early stopping metric was found.'
return self._best_value_step
| -5,269,908,394,140,271,000
|
Returns the step at which the best early stopping metric was found.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
best_step
|
Najah-lshanableh/tensorflow
|
python
|
@property
def best_step(self):
return self._best_value_step
|
@property
def best_value(self):
'Returns the best early stopping metric value found so far.'
return self._best_value
| 1,251,436,279,331,486,200
|
Returns the best early stopping metric value found so far.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
best_value
|
Najah-lshanableh/tensorflow
|
python
|
@property
def best_value(self):
return self._best_value
|
def __init__(self, var_name, every_n=100, first_n=1):
'Initializes a CaptureVariable monitor.\n\n Args:\n var_name: `string`. The variable name, including suffix (typically ":0").\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n '
super(CaptureVariable, self).__init__(every_n, first_n)
self._var_name = var_name
self._var_values = {}
| -715,672,332,563,627,600
|
Initializes a CaptureVariable monitor.
Args:
var_name: `string`. The variable name, including suffix (typically ":0").
every_n: `int`, print every N steps. See `PrintN.`
first_n: `int`, also print the first N steps. See `PrintN.`
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, var_name, every_n=100, first_n=1):
'Initializes a CaptureVariable monitor.\n\n Args:\n var_name: `string`. The variable name, including suffix (typically ":0").\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n '
super(CaptureVariable, self).__init__(every_n, first_n)
self._var_name = var_name
self._var_values = {}
|
@property
def values(self):
'Returns the values captured so far.\n\n Returns:\n `dict` mapping `int` step numbers to that values of the variable at the\n respective step.\n '
return self._var_values
| -2,056,683,639,811,959,000
|
Returns the values captured so far.
Returns:
`dict` mapping `int` step numbers to that values of the variable at the
respective step.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
values
|
Najah-lshanableh/tensorflow
|
python
|
@property
def values(self):
'Returns the values captured so far.\n\n Returns:\n `dict` mapping `int` step numbers to that values of the variable at the\n respective step.\n '
return self._var_values
|
def __init__(self, ignore_ops=None):
'Initializes GraphDump monitor.\n\n Args:\n ignore_ops: `list` of `string`. Names of ops to ignore.\n If None, `GraphDump.IGNORE_OPS` is used.\n '
super(GraphDump, self).__init__()
self._ignore_ops = (ignore_ops or GraphDump.IGNORE_OPS)
self._data = {}
| -7,470,051,855,634,178,000
|
Initializes GraphDump monitor.
Args:
ignore_ops: `list` of `string`. Names of ops to ignore.
If None, `GraphDump.IGNORE_OPS` is used.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, ignore_ops=None):
'Initializes GraphDump monitor.\n\n Args:\n ignore_ops: `list` of `string`. Names of ops to ignore.\n If None, `GraphDump.IGNORE_OPS` is used.\n '
super(GraphDump, self).__init__()
self._ignore_ops = (ignore_ops or GraphDump.IGNORE_OPS)
self._data = {}
|
def compare(self, other_dump, step, atol=1e-06):
'Compares two `GraphDump` monitors and returns differences.\n\n Args:\n other_dump: Another `GraphDump` monitor.\n step: `int`, step to compare on.\n atol: `float`, absolute tolerance in comparison of floating arrays.\n\n Returns:\n Returns tuple:\n matched: `list` of keys that matched.\n non_matched: `dict` of keys to tuple of 2 mismatched values.\n\n Raises:\n ValueError: if a key in `data` is missing from `other_dump` at `step`.\n '
non_matched = {}
matched = []
this_output = (self.data[step] if (step in self.data) else {})
other_output = (other_dump.data[step] if (step in other_dump.data) else {})
for key in this_output:
if ((not isinstance(key, str)) and (not isinstance(key, unicode))):
continue
if (key not in other_output):
raise ValueError('%s missing at step %s.', (key, step))
value1 = _extract_output(this_output, key)
value2 = _extract_output(other_output, key)
if isinstance(value1, str):
continue
if isinstance(value1, np.ndarray):
if (not np.allclose(value1, value2, atol=atol)):
non_matched[key] = (value1 - value2)
else:
matched.append(key)
elif (value1 != value2):
non_matched[key] = (value1, value2)
else:
matched.append(key)
return (matched, non_matched)
| 3,847,063,021,059,542,500
|
Compares two `GraphDump` monitors and returns differences.
Args:
other_dump: Another `GraphDump` monitor.
step: `int`, step to compare on.
atol: `float`, absolute tolerance in comparison of floating arrays.
Returns:
Returns tuple:
matched: `list` of keys that matched.
non_matched: `dict` of keys to tuple of 2 mismatched values.
Raises:
ValueError: if a key in `data` is missing from `other_dump` at `step`.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
compare
|
Najah-lshanableh/tensorflow
|
python
|
def compare(self, other_dump, step, atol=1e-06):
'Compares two `GraphDump` monitors and returns differences.\n\n Args:\n other_dump: Another `GraphDump` monitor.\n step: `int`, step to compare on.\n atol: `float`, absolute tolerance in comparison of floating arrays.\n\n Returns:\n Returns tuple:\n matched: `list` of keys that matched.\n non_matched: `dict` of keys to tuple of 2 mismatched values.\n\n Raises:\n ValueError: if a key in `data` is missing from `other_dump` at `step`.\n '
non_matched = {}
matched = []
this_output = (self.data[step] if (step in self.data) else {})
other_output = (other_dump.data[step] if (step in other_dump.data) else {})
for key in this_output:
if ((not isinstance(key, str)) and (not isinstance(key, unicode))):
continue
if (key not in other_output):
raise ValueError('%s missing at step %s.', (key, step))
value1 = _extract_output(this_output, key)
value2 = _extract_output(other_output, key)
if isinstance(value1, str):
continue
if isinstance(value1, np.ndarray):
if (not np.allclose(value1, value2, atol=atol)):
non_matched[key] = (value1 - value2)
else:
matched.append(key)
elif (value1 != value2):
non_matched[key] = (value1, value2)
else:
matched.append(key)
return (matched, non_matched)
|
@deprecated_arg_values('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate. input_fn (and in most cases, input_feature_key) will both become required args.", input_fn=None)
def __init__(self, every_n_steps, export_dir, input_fn=None, input_feature_key=None, exports_to_keep=5, signature_fn=None, default_batch_size=1):
"Initializes ExportMonitor.\n\n Args:\n every_n_steps: Run monitor every N steps.\n export_dir: str, folder to export.\n input_fn: A function that takes no argument and returns a tuple of\n (features, targets), where features is a dict of string key to `Tensor`\n and targets is a `Tensor` that's currently not used (and so can be\n `None`).\n input_feature_key: String key into the features dict returned by\n `input_fn` that corresponds to the raw `Example` strings `Tensor` that\n the exported model will take as input. Can only be `None` if you're\n using a custom `signature_fn` that does not use the first arg\n (examples).\n exports_to_keep: int, number of exports to keep.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `dict` of `Tensor`s for predictions.\n default_batch_size: Default batch size of the `Example` placeholder.\n\n Raises:\n ValueError: If `input_fn` and `input_feature_key` are not both defined or\n are not both `None`.\n "
super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
self._export_dir = export_dir
self._input_fn = input_fn
self._input_feature_key = input_feature_key
self._use_deprecated_input_fn = (input_fn is None)
self._exports_to_keep = exports_to_keep
self._signature_fn = signature_fn
self._default_batch_size = default_batch_size
self._last_export_dir = None
| 2,032,589,773,287,838,200
|
Initializes ExportMonitor.
Args:
every_n_steps: Run monitor every N steps.
export_dir: str, folder to export.
input_fn: A function that takes no argument and returns a tuple of
(features, targets), where features is a dict of string key to `Tensor`
and targets is a `Tensor` that's currently not used (and so can be
`None`).
input_feature_key: String key into the features dict returned by
`input_fn` that corresponds to the raw `Example` strings `Tensor` that
the exported model will take as input. Can only be `None` if you're
using a custom `signature_fn` that does not use the first arg
(examples).
exports_to_keep: int, number of exports to keep.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `dict` of `Tensor`s for predictions.
default_batch_size: Default batch size of the `Example` placeholder.
Raises:
ValueError: If `input_fn` and `input_feature_key` are not both defined or
are not both `None`.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
@deprecated_arg_values('2016-09-23', "The signature of the input_fn accepted by export is changing to be consistent with what's used by tf.Learn Estimator's train/evaluate. input_fn (and in most cases, input_feature_key) will both become required args.", input_fn=None)
def __init__(self, every_n_steps, export_dir, input_fn=None, input_feature_key=None, exports_to_keep=5, signature_fn=None, default_batch_size=1):
"Initializes ExportMonitor.\n\n Args:\n every_n_steps: Run monitor every N steps.\n export_dir: str, folder to export.\n input_fn: A function that takes no argument and returns a tuple of\n (features, targets), where features is a dict of string key to `Tensor`\n and targets is a `Tensor` that's currently not used (and so can be\n `None`).\n input_feature_key: String key into the features dict returned by\n `input_fn` that corresponds to the raw `Example` strings `Tensor` that\n the exported model will take as input. Can only be `None` if you're\n using a custom `signature_fn` that does not use the first arg\n (examples).\n exports_to_keep: int, number of exports to keep.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `dict` of `Tensor`s for predictions.\n default_batch_size: Default batch size of the `Example` placeholder.\n\n Raises:\n ValueError: If `input_fn` and `input_feature_key` are not both defined or\n are not both `None`.\n "
super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
self._export_dir = export_dir
self._input_fn = input_fn
self._input_feature_key = input_feature_key
self._use_deprecated_input_fn = (input_fn is None)
self._exports_to_keep = exports_to_keep
self._signature_fn = signature_fn
self._default_batch_size = default_batch_size
self._last_export_dir = None
|
@property
def last_export_dir(self):
'Returns the directory containing the last completed export.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added on 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because the\n estimator being fitted does not yet return a value during export.\n '
return self._last_export_dir
| -7,370,270,321,409,989,000
|
Returns the directory containing the last completed export.
Returns:
The string path to the exported directory. NB: this functionality was
added on 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because the
estimator being fitted does not yet return a value during export.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
last_export_dir
|
Najah-lshanableh/tensorflow
|
python
|
@property
def last_export_dir(self):
'Returns the directory containing the last completed export.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added on 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because the\n estimator being fitted does not yet return a value during export.\n '
return self._last_export_dir
|
def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None):
'Initialize CheckpointSaver monitor.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n\n Raises:\n ValueError: If both `save_steps` and `save_secs` are not `None`.\n ValueError: If both `save_steps` and `save_secs` are `None`.\n '
logging.info('Create CheckpointSaver.')
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
if ((save_steps is None) and (save_secs is None)):
raise ValueError('Either save_steps or save_secs should be provided')
if ((save_steps is not None) and (save_secs is not None)):
raise ValueError('Can not provide both save_steps and save_secs.')
| -8,126,788,662,230,020,000
|
Initialize CheckpointSaver monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: If both `save_steps` and `save_secs` are not `None`.
ValueError: If both `save_steps` and `save_secs` are `None`.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename='model.ckpt', scaffold=None):
'Initialize CheckpointSaver monitor.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n\n Raises:\n ValueError: If both `save_steps` and `save_secs` are not `None`.\n ValueError: If both `save_steps` and `save_secs` are `None`.\n '
logging.info('Create CheckpointSaver.')
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
if ((save_steps is None) and (save_secs is None)):
raise ValueError('Either save_steps or save_secs should be provided')
if ((save_steps is not None) and (save_secs is not None)):
raise ValueError('Can not provide both save_steps and save_secs.')
|
def _save(self, step, session):
'Saves the latest checkpoint.'
if (step == self._last_saved_step):
return
logging.info('Saving checkpoints for %d into %s.', step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if (self._saver is None):
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(SessionLog(status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step)
| 9,139,546,512,531,420,000
|
Saves the latest checkpoint.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
_save
|
Najah-lshanableh/tensorflow
|
python
|
def _save(self, step, session):
if (step == self._last_saved_step):
return
logging.info('Saving checkpoints for %d into %s.', step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if (self._saver is None):
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(SessionLog(status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step)
|
def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
'Initializes NanLoss monitor.\n\n Args:\n loss_tensor: `Tensor`, the loss tensor.\n every_n_steps: `int`, run check every this many steps.\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.\n '
super(NanLoss, self).__init__(every_n_steps=every_n_steps)
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
| -1,714,058,577,219,217,000
|
Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
every_n_steps: `int`, run check every this many steps.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
|
tensorflow/contrib/learn/python/learn/monitors.py
|
__init__
|
Najah-lshanableh/tensorflow
|
python
|
def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
'Initializes NanLoss monitor.\n\n Args:\n loss_tensor: `Tensor`, the loss tensor.\n every_n_steps: `int`, run check every this many steps.\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.\n '
super(NanLoss, self).__init__(every_n_steps=every_n_steps)
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
|
def reload(self):
"Reload image from disk. This facilitates re-loading of\n images from disk in case the image content changes.\n\n .. versionadded:: 1.3.0\n\n Usage::\n\n im = Image(source = '1.jpg')\n # -- do something --\n im.reload()\n # image will be re-loaded from disk\n\n "
self.remove_from_cache()
old_source = self.source
self.source = ''
self.source = old_source
| 4,258,685,529,131,743,700
|
Reload image from disk. This facilitates re-loading of
images from disk in case the image content changes.
.. versionadded:: 1.3.0
Usage::
im = Image(source = '1.jpg')
# -- do something --
im.reload()
# image will be re-loaded from disk
|
kivy/uix/image.py
|
reload
|
eman1can/kivy
|
python
|
def reload(self):
"Reload image from disk. This facilitates re-loading of\n images from disk in case the image content changes.\n\n .. versionadded:: 1.3.0\n\n Usage::\n\n im = Image(source = '1.jpg')\n # -- do something --\n im.reload()\n # image will be re-loaded from disk\n\n "
self.remove_from_cache()
old_source = self.source
self.source =
self.source = old_source
|
def remove_from_cache(self):
'Remove image from cache.\n\n .. versionadded:: 2.0.0\n '
if self._coreimage:
self._coreimage.remove_from_cache()
| 5,514,899,281,433,177,000
|
Remove image from cache.
.. versionadded:: 2.0.0
|
kivy/uix/image.py
|
remove_from_cache
|
eman1can/kivy
|
python
|
def remove_from_cache(self):
'Remove image from cache.\n\n .. versionadded:: 2.0.0\n '
if self._coreimage:
self._coreimage.remove_from_cache()
|
def load_collada(file_obj, resolver=None, **kwargs):
'\n Load a COLLADA (.dae) file into a list of trimesh kwargs.\n\n Parameters\n ----------\n file_obj : file object\n Containing a COLLADA file\n resolver : trimesh.visual.Resolver or None\n For loading referenced files, like texture images\n kwargs : **\n Passed to trimesh.Trimesh.__init__\n\n Returns\n -------\n loaded : list of dict\n kwargs for Trimesh constructor\n '
c = collada.Collada(file_obj)
material_map = {}
for m in c.materials:
effect = m.effect
material_map[m.id] = _parse_material(effect, resolver)
meshes = {}
graph = []
for node in c.scene.nodes:
_parse_node(node=node, parent_matrix=np.eye(4), material_map=material_map, meshes=meshes, graph=graph, resolver=resolver)
result = {'class': 'Scene', 'graph': graph, 'geometry': meshes}
return result
| 6,059,635,877,470,790,000
|
Load a COLLADA (.dae) file into a list of trimesh kwargs.
Parameters
----------
file_obj : file object
Containing a COLLADA file
resolver : trimesh.visual.Resolver or None
For loading referenced files, like texture images
kwargs : **
Passed to trimesh.Trimesh.__init__
Returns
-------
loaded : list of dict
kwargs for Trimesh constructor
|
trimesh/exchange/dae.py
|
load_collada
|
BerkeleyAutomation/trimesh
|
python
|
def load_collada(file_obj, resolver=None, **kwargs):
'\n Load a COLLADA (.dae) file into a list of trimesh kwargs.\n\n Parameters\n ----------\n file_obj : file object\n Containing a COLLADA file\n resolver : trimesh.visual.Resolver or None\n For loading referenced files, like texture images\n kwargs : **\n Passed to trimesh.Trimesh.__init__\n\n Returns\n -------\n loaded : list of dict\n kwargs for Trimesh constructor\n '
c = collada.Collada(file_obj)
material_map = {}
for m in c.materials:
effect = m.effect
material_map[m.id] = _parse_material(effect, resolver)
meshes = {}
graph = []
for node in c.scene.nodes:
_parse_node(node=node, parent_matrix=np.eye(4), material_map=material_map, meshes=meshes, graph=graph, resolver=resolver)
result = {'class': 'Scene', 'graph': graph, 'geometry': meshes}
return result
|
def export_collada(mesh, **kwargs):
'\n Export a mesh or a list of meshes as a COLLADA .dae file.\n\n Parameters\n -----------\n mesh: Trimesh object or list of Trimesh objects\n The mesh(es) to export.\n\n Returns\n -----------\n export: str, string of COLLADA format output\n '
meshes = mesh
if (not isinstance(mesh, (list, tuple, set, np.ndarray))):
meshes = [mesh]
c = collada.Collada()
nodes = []
for (i, m) in enumerate(meshes):
uv = None
colors = None
mat = _unparse_material(None)
if m.visual.defined:
if (m.visual.kind == 'texture'):
mat = _unparse_material(m.visual.material)
uv = m.visual.uv
elif (m.visual.kind == 'vertex'):
colors = (m.visual.vertex_colors / 255.0)[:, :3]
c.effects.append(mat.effect)
c.materials.append(mat)
vertices = collada.source.FloatSource('verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
normals = collada.source.FloatSource('normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', '#verts-array')
input_list.addInput(1, 'NORMAL', '#normals-array')
arrays = [vertices, normals]
if (uv is not None):
texcoords = collada.source.FloatSource('texcoords-array', uv.flatten(), ('U', 'V'))
input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
arrays.append(texcoords)
if (colors is not None):
idx = 2
if uv:
idx = 3
colors = collada.source.FloatSource('colors-array', colors.flatten(), ('R', 'G', 'B'))
input_list.addInput(idx, 'COLOR', '#colors-array')
arrays.append(colors)
geom = collada.geometry.Geometry(c, uuid.uuid4().hex, uuid.uuid4().hex, arrays)
indices = np.repeat(m.faces.flatten(), len(arrays))
matref = u'material{}'.format(i)
triset = geom.createTriangleSet(indices, input_list, matref)
geom.primitives.append(triset)
c.geometries.append(geom)
matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
geomnode = collada.scene.GeometryNode(geom, [matnode])
node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
nodes.append(node)
scene = collada.scene.Scene('scene', nodes)
c.scenes.append(scene)
c.scene = scene
b = io.BytesIO()
c.write(b)
b.seek(0)
return b.read()
| -1,312,581,392,268,734,700
|
Export a mesh or a list of meshes as a COLLADA .dae file.
Parameters
-----------
mesh: Trimesh object or list of Trimesh objects
The mesh(es) to export.
Returns
-----------
export: str, string of COLLADA format output
|
trimesh/exchange/dae.py
|
export_collada
|
BerkeleyAutomation/trimesh
|
python
|
def export_collada(mesh, **kwargs):
'\n Export a mesh or a list of meshes as a COLLADA .dae file.\n\n Parameters\n -----------\n mesh: Trimesh object or list of Trimesh objects\n The mesh(es) to export.\n\n Returns\n -----------\n export: str, string of COLLADA format output\n '
meshes = mesh
if (not isinstance(mesh, (list, tuple, set, np.ndarray))):
meshes = [mesh]
c = collada.Collada()
nodes = []
for (i, m) in enumerate(meshes):
uv = None
colors = None
mat = _unparse_material(None)
if m.visual.defined:
if (m.visual.kind == 'texture'):
mat = _unparse_material(m.visual.material)
uv = m.visual.uv
elif (m.visual.kind == 'vertex'):
colors = (m.visual.vertex_colors / 255.0)[:, :3]
c.effects.append(mat.effect)
c.materials.append(mat)
vertices = collada.source.FloatSource('verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
normals = collada.source.FloatSource('normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', '#verts-array')
input_list.addInput(1, 'NORMAL', '#normals-array')
arrays = [vertices, normals]
if (uv is not None):
texcoords = collada.source.FloatSource('texcoords-array', uv.flatten(), ('U', 'V'))
input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
arrays.append(texcoords)
if (colors is not None):
idx = 2
if uv:
idx = 3
colors = collada.source.FloatSource('colors-array', colors.flatten(), ('R', 'G', 'B'))
input_list.addInput(idx, 'COLOR', '#colors-array')
arrays.append(colors)
geom = collada.geometry.Geometry(c, uuid.uuid4().hex, uuid.uuid4().hex, arrays)
indices = np.repeat(m.faces.flatten(), len(arrays))
matref = u'material{}'.format(i)
triset = geom.createTriangleSet(indices, input_list, matref)
geom.primitives.append(triset)
c.geometries.append(geom)
matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
geomnode = collada.scene.GeometryNode(geom, [matnode])
node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
nodes.append(node)
scene = collada.scene.Scene('scene', nodes)
c.scenes.append(scene)
c.scene = scene
b = io.BytesIO()
c.write(b)
b.seek(0)
return b.read()
|
def _parse_node(node, parent_matrix, material_map, meshes, graph, resolver=None):
'\n Recursively parse COLLADA scene nodes.\n '
if isinstance(node, collada.scene.GeometryNode):
geometry = node.geometry
local_material_map = {}
for mn in node.materials:
symbol = mn.symbol
m = mn.target
if (m.id in material_map):
local_material_map[symbol] = material_map[m.id]
else:
local_material_map[symbol] = _parse_material(m, resolver)
for (i, primitive) in enumerate(geometry.primitives):
if isinstance(primitive, collada.polylist.Polylist):
primitive = primitive.triangleset()
if isinstance(primitive, collada.triangleset.TriangleSet):
vertex = primitive.vertex
vertex_index = primitive.vertex_index
vertices = vertex[vertex_index].reshape((len(vertex_index) * 3), 3)
normals = None
if (primitive.normal is not None):
normal = primitive.normal
normal_index = primitive.normal_index
normals = normal[normal_index].reshape((len(normal_index) * 3), 3)
colors = None
s = primitive.sources
if (('COLOR' in s) and (len(s['COLOR']) > 0) and (len(primitive.index) > 0)):
color = s['COLOR'][0][4].data
color_index = primitive.index[:, :, s['COLOR'][0][0]]
colors = color[color_index].reshape((len(color_index) * 3), 3)
faces = np.arange(vertices.shape[0]).reshape((vertices.shape[0] // 3), 3)
vis = None
if (primitive.material in local_material_map):
material = copy.copy(local_material_map[primitive.material])
uv = None
if (len(primitive.texcoordset) > 0):
texcoord = primitive.texcoordset[0]
texcoord_index = primitive.texcoord_indexset[0]
uv = texcoord[texcoord_index].reshape(((len(texcoord_index) * 3), 2))
vis = visual.texture.TextureVisuals(uv=uv, material=material)
primid = u'{}.{}'.format(geometry.id, i)
meshes[primid] = {'vertices': vertices, 'faces': faces, 'vertex_normals': normals, 'vertex_colors': colors, 'visual': vis}
graph.append({'frame_to': primid, 'matrix': parent_matrix, 'geometry': primid})
elif isinstance(node, collada.scene.Node):
if (node.children is not None):
for child in node.children:
matrix = np.dot(parent_matrix, node.matrix)
_parse_node(node=child, parent_matrix=matrix, material_map=material_map, meshes=meshes, graph=graph, resolver=resolver)
elif isinstance(node, collada.scene.CameraNode):
pass
elif isinstance(node, collada.scene.LightNode):
pass
| -3,186,675,801,806,256,000
|
Recursively parse COLLADA scene nodes.
|
trimesh/exchange/dae.py
|
_parse_node
|
BerkeleyAutomation/trimesh
|
python
|
def _parse_node(node, parent_matrix, material_map, meshes, graph, resolver=None):
'\n \n '
if isinstance(node, collada.scene.GeometryNode):
geometry = node.geometry
local_material_map = {}
for mn in node.materials:
symbol = mn.symbol
m = mn.target
if (m.id in material_map):
local_material_map[symbol] = material_map[m.id]
else:
local_material_map[symbol] = _parse_material(m, resolver)
for (i, primitive) in enumerate(geometry.primitives):
if isinstance(primitive, collada.polylist.Polylist):
primitive = primitive.triangleset()
if isinstance(primitive, collada.triangleset.TriangleSet):
vertex = primitive.vertex
vertex_index = primitive.vertex_index
vertices = vertex[vertex_index].reshape((len(vertex_index) * 3), 3)
normals = None
if (primitive.normal is not None):
normal = primitive.normal
normal_index = primitive.normal_index
normals = normal[normal_index].reshape((len(normal_index) * 3), 3)
colors = None
s = primitive.sources
if (('COLOR' in s) and (len(s['COLOR']) > 0) and (len(primitive.index) > 0)):
color = s['COLOR'][0][4].data
color_index = primitive.index[:, :, s['COLOR'][0][0]]
colors = color[color_index].reshape((len(color_index) * 3), 3)
faces = np.arange(vertices.shape[0]).reshape((vertices.shape[0] // 3), 3)
vis = None
if (primitive.material in local_material_map):
material = copy.copy(local_material_map[primitive.material])
uv = None
if (len(primitive.texcoordset) > 0):
texcoord = primitive.texcoordset[0]
texcoord_index = primitive.texcoord_indexset[0]
uv = texcoord[texcoord_index].reshape(((len(texcoord_index) * 3), 2))
vis = visual.texture.TextureVisuals(uv=uv, material=material)
primid = u'{}.{}'.format(geometry.id, i)
meshes[primid] = {'vertices': vertices, 'faces': faces, 'vertex_normals': normals, 'vertex_colors': colors, 'visual': vis}
graph.append({'frame_to': primid, 'matrix': parent_matrix, 'geometry': primid})
elif isinstance(node, collada.scene.Node):
if (node.children is not None):
for child in node.children:
matrix = np.dot(parent_matrix, node.matrix)
_parse_node(node=child, parent_matrix=matrix, material_map=material_map, meshes=meshes, graph=graph, resolver=resolver)
elif isinstance(node, collada.scene.CameraNode):
pass
elif isinstance(node, collada.scene.LightNode):
pass
|
def _load_texture(file_name, resolver):
'\n Load a texture from a file into a PIL image.\n '
file_data = resolver.get(file_name)
image = PIL.Image.open(util.wrap_as_stream(file_data))
return image
| 5,463,406,226,342,628,000
|
Load a texture from a file into a PIL image.
|
trimesh/exchange/dae.py
|
_load_texture
|
BerkeleyAutomation/trimesh
|
python
|
def _load_texture(file_name, resolver):
'\n \n '
file_data = resolver.get(file_name)
image = PIL.Image.open(util.wrap_as_stream(file_data))
return image
|
def _parse_material(effect, resolver):
'\n Turn a COLLADA effect into a trimesh material.\n '
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture', exc_info=True)
elif (effect.diffuse is not None):
baseColorFactor = effect.diffuse
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
emissiveTexture = _load_texture(effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture', exc_info=True)
elif (effect.emission is not None):
emissiveFactor = effect.emission[:3]
roughnessFactor = 1.0
if ((not isinstance(effect.shininess, collada.material.Map)) and (effect.shininess is not None)):
roughnessFactor = np.sqrt((2.0 / (2.0 + effect.shininess)))
metallicFactor = 0.0
normalTexture = None
if (effect.bumpmap is not None):
try:
normalTexture = _load_texture(effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap', exc_info=True)
if ((effect.transparent is not None) and (not isinstance(effect.transparent, collada.material.Map))):
baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))
return visual.material.PBRMaterial(emissiveFactor=emissiveFactor, emissiveTexture=emissiveTexture, normalTexture=normalTexture, baseColorTexture=baseColorTexture, baseColorFactor=baseColorFactor, metallicFactor=metallicFactor, roughnessFactor=roughnessFactor)
| -8,106,719,459,313,488,000
|
Turn a COLLADA effect into a trimesh material.
|
trimesh/exchange/dae.py
|
_parse_material
|
BerkeleyAutomation/trimesh
|
python
|
def _parse_material(effect, resolver):
'\n \n '
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture', exc_info=True)
elif (effect.diffuse is not None):
baseColorFactor = effect.diffuse
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
emissiveTexture = _load_texture(effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture', exc_info=True)
elif (effect.emission is not None):
emissiveFactor = effect.emission[:3]
roughnessFactor = 1.0
if ((not isinstance(effect.shininess, collada.material.Map)) and (effect.shininess is not None)):
roughnessFactor = np.sqrt((2.0 / (2.0 + effect.shininess)))
metallicFactor = 0.0
normalTexture = None
if (effect.bumpmap is not None):
try:
normalTexture = _load_texture(effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap', exc_info=True)
if ((effect.transparent is not None) and (not isinstance(effect.transparent, collada.material.Map))):
baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))
return visual.material.PBRMaterial(emissiveFactor=emissiveFactor, emissiveTexture=emissiveTexture, normalTexture=normalTexture, baseColorTexture=baseColorTexture, baseColorFactor=baseColorFactor, metallicFactor=metallicFactor, roughnessFactor=roughnessFactor)
|
def _unparse_material(material):
'\n Turn a trimesh material into a COLLADA material.\n '
if isinstance(material, visual.material.PBRMaterial):
diffuse = material.baseColorFactor
if (diffuse is not None):
diffuse = list(diffuse)
emission = material.emissiveFactor
if (emission is not None):
emission = [float(emission[0]), float(emission[1]), float(emission[2]), 1.0]
shininess = material.roughnessFactor
if (shininess is not None):
shininess = ((2.0 / (shininess ** 2)) - 2.0)
effect = collada.material.Effect(uuid.uuid4().hex, params=[], shadingtype='phong', diffuse=diffuse, emission=emission, specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess))
material = collada.material.Material(uuid.uuid4().hex, 'pbrmaterial', effect)
else:
effect = collada.material.Effect(uuid.uuid4().hex, params=[], shadingtype='phong')
material = collada.material.Material(uuid.uuid4().hex, 'defaultmaterial', effect)
return material
| -5,805,063,635,426,141,000
|
Turn a trimesh material into a COLLADA material.
|
trimesh/exchange/dae.py
|
_unparse_material
|
BerkeleyAutomation/trimesh
|
python
|
def _unparse_material(material):
'\n \n '
if isinstance(material, visual.material.PBRMaterial):
diffuse = material.baseColorFactor
if (diffuse is not None):
diffuse = list(diffuse)
emission = material.emissiveFactor
if (emission is not None):
emission = [float(emission[0]), float(emission[1]), float(emission[2]), 1.0]
shininess = material.roughnessFactor
if (shininess is not None):
shininess = ((2.0 / (shininess ** 2)) - 2.0)
effect = collada.material.Effect(uuid.uuid4().hex, params=[], shadingtype='phong', diffuse=diffuse, emission=emission, specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess))
material = collada.material.Material(uuid.uuid4().hex, 'pbrmaterial', effect)
else:
effect = collada.material.Effect(uuid.uuid4().hex, params=[], shadingtype='phong')
material = collada.material.Material(uuid.uuid4().hex, 'defaultmaterial', effect)
return material
|
def load_zae(file_obj, resolver=None, **kwargs):
'\n Load a ZAE file, which is just a zipped DAE file.\n\n Parameters\n -------------\n file_obj : file object\n Contains ZAE data\n resolver : trimesh.visual.Resolver\n Resolver to load additional assets\n kwargs : dict\n Passed to load_collada\n\n Returns\n ------------\n loaded : dict\n Results of loading\n '
archive = util.decompress(file_obj, file_type='zip')
file_name = next((i for i in archive.keys() if i.lower().endswith('.dae')))
resolver = visual.resolvers.ZipResolver(archive)
loaded = load_collada(archive[file_name], resolver=resolver, **kwargs)
return loaded
| -1,790,349,105,444,850,700
|
Load a ZAE file, which is just a zipped DAE file.
Parameters
-------------
file_obj : file object
Contains ZAE data
resolver : trimesh.visual.Resolver
Resolver to load additional assets
kwargs : dict
Passed to load_collada
Returns
------------
loaded : dict
Results of loading
|
trimesh/exchange/dae.py
|
load_zae
|
BerkeleyAutomation/trimesh
|
python
|
def load_zae(file_obj, resolver=None, **kwargs):
'\n Load a ZAE file, which is just a zipped DAE file.\n\n Parameters\n -------------\n file_obj : file object\n Contains ZAE data\n resolver : trimesh.visual.Resolver\n Resolver to load additional assets\n kwargs : dict\n Passed to load_collada\n\n Returns\n ------------\n loaded : dict\n Results of loading\n '
archive = util.decompress(file_obj, file_type='zip')
file_name = next((i for i in archive.keys() if i.lower().endswith('.dae')))
resolver = visual.resolvers.ZipResolver(archive)
loaded = load_collada(archive[file_name], resolver=resolver, **kwargs)
return loaded
|
def tamper(payload, **kwargs):
"\n Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> SELECT)\n\n Notes:\n * Useful to bypass weak filtering and/or WAFs in JSON contexes\n\n >>> tamper('SELECT FIELD FROM TABLE')\n '\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'\n "
retVal = payload
if payload:
retVal = ''
i = 0
while (i < len(payload)):
if ((payload[i] == '%') and (i < (len(payload) - 2)) and (payload[(i + 1):(i + 2)] in string.hexdigits) and (payload[(i + 2):(i + 3)] in string.hexdigits)):
retVal += ('\\u00%s' % payload[(i + 1):(i + 3)])
i += 3
else:
retVal += ('\\u%.4X' % ord(payload[i]))
i += 1
return retVal
| -7,932,019,435,247,317,000
|
Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> SELECT)
Notes:
* Useful to bypass weak filtering and/or WAFs in JSON contexes
>>> tamper('SELECT FIELD FROM TABLE')
'\\u0053\\u0045\\u004C\\u0045\\u0043\\u0054\\u0020\\u0046\\u0049\\u0045\\u004C\\u0044\\u0020\\u0046\\u0052\\u004F\\u004D\\u0020\\u0054\\u0041\\u0042\\u004C\\u0045'
|
Toolz/sqlmap/tamper/charunicodeescape.py
|
tamper
|
6un9-h0-Dan/CTF-Heaven
|
python
|
def tamper(payload, **kwargs):
"\n Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> SELECT)\n\n Notes:\n * Useful to bypass weak filtering and/or WAFs in JSON contexes\n\n >>> tamper('SELECT FIELD FROM TABLE')\n '\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'\n "
retVal = payload
if payload:
retVal =
i = 0
while (i < len(payload)):
if ((payload[i] == '%') and (i < (len(payload) - 2)) and (payload[(i + 1):(i + 2)] in string.hexdigits) and (payload[(i + 2):(i + 3)] in string.hexdigits)):
retVal += ('\\u00%s' % payload[(i + 1):(i + 3)])
i += 3
else:
retVal += ('\\u%.4X' % ord(payload[i]))
i += 1
return retVal
|
def split_on_numbers(s):
'\n Splits the string into a list where the numbers and the characters between numbers are each element\n Copied from spt3g_software to fix dependencies (sorry)\n '
prevDig = False
outList = []
for char in s:
if char.isdigit():
if prevDig:
outList[(- 1)] += char
else:
prevDig = True
outList.append(char)
elif ((not prevDig) and (len(outList) > 0)):
outList[(- 1)] += char
else:
prevDig = False
outList.append(char)
return outList
| -8,418,773,352,372,961,000
|
Splits the string into a list where the numbers and the characters between numbers are each element
Copied from spt3g_software to fix dependencies (sorry)
|
bin/kookaburra.py
|
split_on_numbers
|
simonsobs/lyrebird
|
python
|
def split_on_numbers(s):
'\n Splits the string into a list where the numbers and the characters between numbers are each element\n Copied from spt3g_software to fix dependencies (sorry)\n '
prevDig = False
outList = []
for char in s:
if char.isdigit():
if prevDig:
outList[(- 1)] += char
else:
prevDig = True
outList.append(char)
elif ((not prevDig) and (len(outList) > 0)):
outList[(- 1)] += char
else:
prevDig = False
outList.append(char)
return outList
|
def str_cmp_with_numbers_sorted(str1, str2):
'\n Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function\n Copied from spt3g_software to fix dependencies (sorry)\n '
if (str1 == str2):
return 0
split1 = split_on_numbers(str1)
split2 = split_on_numbers(str2)
largestStr = 0
for l in [split1, split2]:
for s in l:
if s[0].isdigit():
largestStr = (len(s) if (len(s) > largestStr) else largestStr)
for l in [split1, split2]:
for i in range(len(l)):
if l[i][0].isdigit():
l[i] = (('0' * (largestStr - len(l[i]))) + l[i])
p1 = reduce((lambda x, y: (x + y)), split1)
p2 = reduce((lambda x, y: (x + y)), split2)
return ((- 1) if (p1 < p2) else 1)
| 2,616,566,904,823,767,600
|
Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function
Copied from spt3g_software to fix dependencies (sorry)
|
bin/kookaburra.py
|
str_cmp_with_numbers_sorted
|
simonsobs/lyrebird
|
python
|
def str_cmp_with_numbers_sorted(str1, str2):
'\n Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function\n Copied from spt3g_software to fix dependencies (sorry)\n '
if (str1 == str2):
return 0
split1 = split_on_numbers(str1)
split2 = split_on_numbers(str2)
largestStr = 0
for l in [split1, split2]:
for s in l:
if s[0].isdigit():
largestStr = (len(s) if (len(s) > largestStr) else largestStr)
for l in [split1, split2]:
for i in range(len(l)):
if l[i][0].isdigit():
l[i] = (('0' * (largestStr - len(l[i]))) + l[i])
p1 = reduce((lambda x, y: (x + y)), split1)
p2 = reduce((lambda x, y: (x + y)), split2)
return ((- 1) if (p1 < p2) else 1)
|
def _set_permissions(self):
'\n Make sure all xml files are readable by the world so that anyone can grab them\n '
for (remote, _) in self.artifacts:
self.transport.open_session().exec_command('sudo chmod -R +r {}'.format(remote))
| 5,895,479,352,179,789,000
|
Make sure all xml files are readable by the world so that anyone can grab them
|
tests/support/copyartifacts.py
|
_set_permissions
|
0x416e746f6e/salt
|
python
|
def _set_permissions(self):
'\n \n '
for (remote, _) in self.artifacts:
self.transport.open_session().exec_command('sudo chmod -R +r {}'.format(remote))
|
def run(filepath):
'Create a wallpaper image from a PNG file.'
src = Image.open(filepath)
target = swap_quadrants(src)
paste_with_alpha(target, src, (0, 0), 16)
return target
| 817,709,833,370,899,200
|
Create a wallpaper image from a PNG file.
|
source/_sample/pillow/pattern.py
|
run
|
showa-yojyo/note
|
python
|
def run(filepath):
src = Image.open(filepath)
target = swap_quadrants(src)
paste_with_alpha(target, src, (0, 0), 16)
return target
|
def swap_quadrants(img):
'Quarter the image and swap two diagonal quadrant pairs.'
boxes = quarter_bbox(img)
regions = [img.crop(box) for box in boxes]
target = img.copy()
paste_with_alpha(target, regions[3], (0, 0), 128)
paste_with_alpha(target, regions[2], (regions[3].size[0], 0), 128)
paste_with_alpha(target, regions[1], (0, regions[3].size[1]), 128)
paste_with_alpha(target, regions[0], regions[3].size, 128)
return target
| -6,387,083,641,273,382,000
|
Quarter the image and swap two diagonal quadrant pairs.
|
source/_sample/pillow/pattern.py
|
swap_quadrants
|
showa-yojyo/note
|
python
|
def swap_quadrants(img):
boxes = quarter_bbox(img)
regions = [img.crop(box) for box in boxes]
target = img.copy()
paste_with_alpha(target, regions[3], (0, 0), 128)
paste_with_alpha(target, regions[2], (regions[3].size[0], 0), 128)
paste_with_alpha(target, regions[1], (0, regions[3].size[1]), 128)
paste_with_alpha(target, regions[0], regions[3].size, 128)
return target
|
def paste_with_alpha(target, source, left_upper, opacity):
'An alpha_composite-like operation.'
mask = Image.new('L', source.size, opacity)
target.paste(source, left_upper, mask=mask)
| -1,079,140,637,357,208,300
|
An alpha_composite-like operation.
|
source/_sample/pillow/pattern.py
|
paste_with_alpha
|
showa-yojyo/note
|
python
|
def paste_with_alpha(target, source, left_upper, opacity):
mask = Image.new('L', source.size, opacity)
target.paste(source, left_upper, mask=mask)
|
def quarter_bbox(img):
'Quarter the bounding box of an image.'
(left, upper, right, bottom) = img.getbbox()
xmid = (((left + right) - 1) // 2)
ymid = (((upper + bottom) - 1) // 2)
return [(left, upper, xmid, ymid), ((xmid + 1), upper, right, ymid), (left, (ymid + 1), xmid, bottom), ((xmid + 1), (ymid + 1), right, bottom)]
| 4,406,968,220,061,805,000
|
Quarter the bounding box of an image.
|
source/_sample/pillow/pattern.py
|
quarter_bbox
|
showa-yojyo/note
|
python
|
def quarter_bbox(img):
(left, upper, right, bottom) = img.getbbox()
xmid = (((left + right) - 1) // 2)
ymid = (((upper + bottom) - 1) // 2)
return [(left, upper, xmid, ymid), ((xmid + 1), upper, right, ymid), (left, (ymid + 1), xmid, bottom), ((xmid + 1), (ymid + 1), right, bottom)]
|
@step('ActiveDocsDetailView')
def detail(self, active_doc):
'Navigate to active doc detail/preview page'
self.active_docs_table.row(name=active_doc['name']).name.click()
| -694,965,796,845,580,200
|
Navigate to active doc detail/preview page
|
testsuite/ui/views/admin/product/active_docs.py
|
detail
|
3scale-qe/3scale-tests
|
python
|
@step('ActiveDocsDetailView')
def detail(self, active_doc):
self.active_docs_table.row(name=active_doc['name']).name.click()
|
def make_request(self, endpoint):
'\n Make request on preview page\n :param endpoint: string of endpoint which should be tried\n :return:\n '
self.expand_operations_link.click()
self.active_docs_section.try_it_out(endpoint)
| 8,992,228,005,639,734,000
|
Make request on preview page
:param endpoint: string of endpoint which should be tried
:return:
|
testsuite/ui/views/admin/product/active_docs.py
|
make_request
|
3scale-qe/3scale-tests
|
python
|
def make_request(self, endpoint):
'\n Make request on preview page\n :param endpoint: string of endpoint which should be tried\n :return:\n '
self.expand_operations_link.click()
self.active_docs_section.try_it_out(endpoint)
|
def make_request(self, method, path, key):
'\n Make request on preview page\n :param path string eg. /post, /get\n :param method string eg. GET, POST\n :param key string name of application\n :return:\n '
self.active_docs_section.try_it_out(method, path, key)
| -849,955,223,380,817,300
|
Make request on preview page
:param path string eg. /post, /get
:param method string eg. GET, POST
:param key string name of application
:return:
|
testsuite/ui/views/admin/product/active_docs.py
|
make_request
|
3scale-qe/3scale-tests
|
python
|
def make_request(self, method, path, key):
'\n Make request on preview page\n :param path string eg. /post, /get\n :param method string eg. GET, POST\n :param key string name of application\n :return:\n '
self.active_docs_section.try_it_out(method, path, key)
|
def get_olfa_config(config_filename=''):
'\n Find and parse olfactometer configuration JSON.\n\n :param config_filename: string with path to configuration.\n :return: returns a tuple with (config_fn, config_dict)\n :rtype: tuple\n '
if (not config_filename):
logging.info('No olfa config file specified, looking for default in OLFA_CONFIG os variable')
config_filename = os.environ.get('OLFA_CONFIG')
if (not config_filename):
config_filename = CONFIG_FILENAME_DEFAULT
logging.info(('No OLFA_CONFIG os variable, trying with legacy default ' + CONFIG_FILENAME_DEFAULT))
if os.path.exists(config_filename):
with open(config_filename) as f:
config = json.load(f)
else:
raise Exception('No olfactometer configuration file found at {0}'.format(config_filename))
return (config_filename, config)
| 5,484,870,733,860,152,000
|
Find and parse olfactometer configuration JSON.
:param config_filename: string with path to configuration.
:return: returns a tuple with (config_fn, config_dict)
:rtype: tuple
|
olfactometry/utils.py
|
get_olfa_config
|
mohamedelgohary1/PyBpodGUI
|
python
|
def get_olfa_config(config_filename=):
'\n Find and parse olfactometer configuration JSON.\n\n :param config_filename: string with path to configuration.\n :return: returns a tuple with (config_fn, config_dict)\n :rtype: tuple\n '
if (not config_filename):
logging.info('No olfa config file specified, looking for default in OLFA_CONFIG os variable')
config_filename = os.environ.get('OLFA_CONFIG')
if (not config_filename):
config_filename = CONFIG_FILENAME_DEFAULT
logging.info(('No OLFA_CONFIG os variable, trying with legacy default ' + CONFIG_FILENAME_DEFAULT))
if os.path.exists(config_filename):
with open(config_filename) as f:
config = json.load(f)
else:
raise Exception('No olfactometer configuration file found at {0}'.format(config_filename))
return (config_filename, config)
|
def flatten_dictionary(dictionary, separator=':', flattened_dict=None, parent_string=''):
"\n Flattens nested dictionary into a single dictionary:\n {'hello': {'world': 1,\n 'moon': 2}}\n becomes:\n {'hello:world': 1,\n 'hello:moon': 2}\n\n Uses recursion to flatten as many layers as exist in your dictionary.\n\n :param dictionary: nested dictionary you wish to flatten.\n :param flattened_dict: (used for recursion) current flattened dictionary to add to\n :param parent_string: (used for recursion) current key string to use as prefix for\n :return: flattened dictionary\n :type dictionary: dict\n :type flattened_dict: dict\n :type parent_string: str\n :rtype: dict\n "
if (flattened_dict is None):
flattened_dict = {}
for (k, v) in dictionary.items():
if parent_string:
full_key = '{0}{1}{2}'.format(parent_string, separator, k)
else:
full_key = k
if isinstance(v, dict):
_ = flatten_dictionary(v, flattened_dict=flattened_dict, parent_string=full_key)
else:
flattened_dict[full_key] = v
return flattened_dict
| -1,371,423,723,073,061,000
|
Flattens nested dictionary into a single dictionary:
{'hello': {'world': 1,
'moon': 2}}
becomes:
{'hello:world': 1,
'hello:moon': 2}
Uses recursion to flatten as many layers as exist in your dictionary.
:param dictionary: nested dictionary you wish to flatten.
:param flattened_dict: (used for recursion) current flattened dictionary to add to
:param parent_string: (used for recursion) current key string to use as prefix for
:return: flattened dictionary
:type dictionary: dict
:type flattened_dict: dict
:type parent_string: str
:rtype: dict
|
olfactometry/utils.py
|
flatten_dictionary
|
mohamedelgohary1/PyBpodGUI
|
python
|
def flatten_dictionary(dictionary, separator=':', flattened_dict=None, parent_string=):
"\n Flattens nested dictionary into a single dictionary:\n {'hello': {'world': 1,\n 'moon': 2}}\n becomes:\n {'hello:world': 1,\n 'hello:moon': 2}\n\n Uses recursion to flatten as many layers as exist in your dictionary.\n\n :param dictionary: nested dictionary you wish to flatten.\n :param flattened_dict: (used for recursion) current flattened dictionary to add to\n :param parent_string: (used for recursion) current key string to use as prefix for\n :return: flattened dictionary\n :type dictionary: dict\n :type flattened_dict: dict\n :type parent_string: str\n :rtype: dict\n "
if (flattened_dict is None):
flattened_dict = {}
for (k, v) in dictionary.items():
if parent_string:
full_key = '{0}{1}{2}'.format(parent_string, separator, k)
else:
full_key = k
if isinstance(v, dict):
_ = flatten_dictionary(v, flattened_dict=flattened_dict, parent_string=full_key)
else:
flattened_dict[full_key] = v
return flattened_dict
|
def connect_serial(port, baudrate=115200, timeout=1, writeTimeout=1):
'\n Return Serial object after making sure that the port is accessible and that the port is expressed as a string.\n\n :param port: str or int (ie "COM4" or 4 for Windows).\n :param baudrate: baudrate.\n :param timeout: read timeout in seconds, default 1 sec.\n :param writeTimeout: write timeout in seconds, default 1 sec.\n :return: serial port object.\n :rtype: serial.Serial\n '
if isinstance(port, int):
port = 'COM{0}'.format(port)
names_list = list()
for i in list_ports.comports():
names_list.append(i[0])
if (port not in names_list):
print('Serial not found on {0}.'.format(port))
print('Listing current serial ports with devices:')
for ser in list_ports.comports():
ser_str = '\t{0}: {1}'.format(ser[0], ser[1])
print(ser_str)
time.sleep(0.01)
raise serial.SerialException('Requested COM port: {0} is not listed as connected.'.format(port))
else:
return serial.Serial(port, baudrate=baudrate, timeout=timeout, writeTimeout=writeTimeout)
| 7,971,361,087,577,091,000
|
Return Serial object after making sure that the port is accessible and that the port is expressed as a string.
:param port: str or int (ie "COM4" or 4 for Windows).
:param baudrate: baudrate.
:param timeout: read timeout in seconds, default 1 sec.
:param writeTimeout: write timeout in seconds, default 1 sec.
:return: serial port object.
:rtype: serial.Serial
|
olfactometry/utils.py
|
connect_serial
|
mohamedelgohary1/PyBpodGUI
|
python
|
def connect_serial(port, baudrate=115200, timeout=1, writeTimeout=1):
'\n Return Serial object after making sure that the port is accessible and that the port is expressed as a string.\n\n :param port: str or int (ie "COM4" or 4 for Windows).\n :param baudrate: baudrate.\n :param timeout: read timeout in seconds, default 1 sec.\n :param writeTimeout: write timeout in seconds, default 1 sec.\n :return: serial port object.\n :rtype: serial.Serial\n '
if isinstance(port, int):
port = 'COM{0}'.format(port)
names_list = list()
for i in list_ports.comports():
names_list.append(i[0])
if (port not in names_list):
print('Serial not found on {0}.'.format(port))
print('Listing current serial ports with devices:')
for ser in list_ports.comports():
ser_str = '\t{0}: {1}'.format(ser[0], ser[1])
print(ser_str)
time.sleep(0.01)
raise serial.SerialException('Requested COM port: {0} is not listed as connected.'.format(port))
else:
return serial.Serial(port, baudrate=baudrate, timeout=timeout, writeTimeout=writeTimeout)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.