| body (string, lengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1 to 16.8k) | path (string, lengths 5 to 230) | name (string, lengths 1 to 96) | repository_name (string, lengths 7 to 89) | lang (string, 1 class) | body_without_docstring (string, lengths 20 to 98.2k) |
|---|---|---|---|---|---|---|---|
def process_file(filename: str) -> DateDict:
'\n Method that take path to crawled file and outputs date dictionary:\n Date dictionary is a dictionary where keys are dates in format YYYY-mm-dd-hh (2018-04-08-15)\n and value is dictionary where keys are devices (specified in configuration file)\n and value is CSVDataLine.csv_data_line with device,date and occurrence\n\n Args:\n filename: name of processed file\n\n Returns:\n None if not implemented\n date_dict when implemented\n '
date_dict = {}
with open(filename, 'r') as file:
YEAR_START = 1
YEAR_END = 11
for line in file:
array = line.split(';')
time_ = max(array[2][1:(- 1)], array[3][1:(- 1)], key=(lambda x: time.mktime(datetime.datetime.strptime(x, '%H:%M').timetuple())))
date = date_formating.date_time_formatter(((array[14][YEAR_START:YEAR_END] + ' ') + time_))
name = array[10][1:(- 1)]
if (name == ''):
continue
if (date not in date_dict):
date_dict[date] = {}
if (name in date_dict[date]):
date_dict[date][name].occurrence = int(array[12])
else:
date_dict[date][name] = csv_data_line.CSVDataLine(name, date, int(array[12]))
return date_dict
| -5,522,921,133,619,537,000
|
Method that take path to crawled file and outputs date dictionary:
Date dictionary is a dictionary where keys are dates in format YYYY-mm-dd-hh (2018-04-08-15)
and value is dictionary where keys are devices (specified in configuration file)
and value is CSVDataLine.csv_data_line with device,date and occurrence
Args:
filename: name of processed file
Returns:
None if not implemented
date_dict when implemented
|
modules/crawler/DatasetProcessing/OBSAZENIMISTNOSTI_processor.py
|
process_file
|
kivzcu/heatmap.zcu
|
python
|
def process_file(filename: str) -> DateDict:
'\n Method that take path to crawled file and outputs date dictionary:\n Date dictionary is a dictionary where keys are dates in format YYYY-mm-dd-hh (2018-04-08-15)\n and value is dictionary where keys are devices (specified in configuration file)\n and value is CSVDataLine.csv_data_line with device,date and occurrence\n\n Args:\n filename: name of processed file\n\n Returns:\n None if not implemented\n date_dict when implemented\n '
date_dict = {}
with open(filename, 'r') as file:
YEAR_START = 1
YEAR_END = 11
for line in file:
array = line.split(';')
time_ = max(array[2][1:(- 1)], array[3][1:(- 1)], key=(lambda x: time.mktime(datetime.datetime.strptime(x, '%H:%M').timetuple())))
date = date_formating.date_time_formatter(((array[14][YEAR_START:YEAR_END] + ' ') + time_))
name = array[10][1:(- 1)]
if (name == ''):
continue
if (date not in date_dict):
date_dict[date] = {}
if (name in date_dict[date]):
date_dict[date][name].occurrence = int(array[12])
else:
date_dict[date][name] = csv_data_line.CSVDataLine(name, date, int(array[12]))
return date_dict
|
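For readers skimming the `process_file` row above, here is a minimal sketch of the nested structure its docstring describes. The `CSVDataLine` stand-in and the device names are hypothetical; the real class lives in the repository's `csv_data_line` module.

```python
from dataclasses import dataclass

@dataclass
class CSVDataLine:  # stand-in for csv_data_line.CSVDataLine(name, date, occurrence)
    name: str
    date: str
    occurrence: int

# Shape described in the docstring: date key "YYYY-mm-dd-hh" -> device name -> CSVDataLine
date_dict = {
    "2018-04-08-15": {
        "PC-01": CSVDataLine("PC-01", "2018-04-08-15", 3),
        "PC-02": CSVDataLine("PC-02", "2018-04-08-15", 1),
    },
}
```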
def user_input_handling_function_ninth():
' a parser '
print()
user_input = input('Enter: ')
print()
term = ''
lict = []
for element in user_input:
if (element != ' '):
term = (term + element)
else:
lict.append(term)
term = ''
lict.append(term)
return lict
| 8,264,336,917,387,615,000
|
a parser
|
lists_of_terms/shodule_for_lists_of_terms.py
|
user_input_handling_function_ninth
|
ShawnJSavoie2/ToBeRedone
|
python
|
def user_input_handling_function_ninth():
' '
print()
user_input = input('Enter: ')
print()
term = ''
lict = []
for element in user_input:
if (element != ' '):
term = (term + element)
else:
lict.append(term)
term = ''
lict.append(term)
return lict
|
def user_input_handling_function_tenth(dictionary):
' a dictionary checker '
user_input = user_input_handling_function_ninth()
good_to_go = 'no'
errors = []
while (good_to_go == 'no'):
string = ''
lict = []
for element in user_input:
string = (string + element)
for key in dictionary:
for element in dictionary[key]:
lict.append(element)
for element in string:
if (element not in lict):
print('One of your unwanted characters or combination of characters does not match the characters you')
print('entered earlier.')
errors.append('yes')
break
if ('yes' in errors):
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
| -2,635,322,934,253,187,600
|
a dictionary checker
|
lists_of_terms/shodule_for_lists_of_terms.py
|
user_input_handling_function_tenth
|
ShawnJSavoie2/ToBeRedone
|
python
|
def user_input_handling_function_tenth(dictionary):
' '
user_input = user_input_handling_function_ninth()
good_to_go = 'no'
errors = []
while (good_to_go == 'no'):
string = ''
lict = []
for element in user_input:
string = (string + element)
for key in dictionary:
for element in dictionary[key]:
lict.append(element)
for element in string:
if (element not in lict):
print('One of your unwanted characters or combination of characters does not match the characters you')
print('entered earlier.')
errors.append('yes')
break
if ('yes' in errors):
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
|
@login_required
def contact_list(request, pk):
'\n Displays a list of :model:`rr.Contact` linked to\n :model:`rr.ServiceProvider`.\n\n Includes a ModelForm for adding :model:`rr.Contact` to\n :model:`rr.ServiceProvider`.\n\n **Context**\n\n ``object_list``\n List of :model:`rr.Contact`.\n\n ``form``\n ModelForm for creating a :model:`rr.Contact`\n\n ``object``\n An instance of :model:`rr.ServiceProvider`.\n\n **Template:**\n\n :template:`rr/contact.html`\n '
sp = get_service_provider(pk, request.user)
form = ContactForm(sp=sp)
if (request.method == 'POST'):
if ('add_contact' in request.POST):
form = _add_contact(request, sp)
elif ('remove_contact' in request.POST):
_remove_contacts(request, sp)
contacts = Contact.objects.filter(sp=sp, end_at=None)
return render(request, 'rr/contact.html', {'object_list': contacts, 'form': form, 'object': sp})
| 7,067,613,641,276,462,000
|
Displays a list of :model:`rr.Contact` linked to
:model:`rr.ServiceProvider`.
Includes a ModelForm for adding :model:`rr.Contact` to
:model:`rr.ServiceProvider`.
**Context**
``object_list``
List of :model:`rr.Contact`.
``form``
ModelForm for creating a :model:`rr.Contact`
``object``
An instance of :model:`rr.ServiceProvider`.
**Template:**
:template:`rr/contact.html`
|
rr/views/contact.py
|
contact_list
|
UniversityofHelsinki/sp-registry
|
python
|
@login_required
def contact_list(request, pk):
'\n Displays a list of :model:`rr.Contact` linked to\n :model:`rr.ServiceProvider`.\n\n Includes a ModelForm for adding :model:`rr.Contact` to\n :model:`rr.ServiceProvider`.\n\n **Context**\n\n ``object_list``\n List of :model:`rr.Contact`.\n\n ``form``\n ModelForm for creating a :model:`rr.Contact`\n\n ``object``\n An instance of :model:`rr.ServiceProvider`.\n\n **Template:**\n\n :template:`rr/contact.html`\n '
sp = get_service_provider(pk, request.user)
form = ContactForm(sp=sp)
if (request.method == 'POST'):
if ('add_contact' in request.POST):
form = _add_contact(request, sp)
elif ('remove_contact' in request.POST):
_remove_contacts(request, sp)
contacts = Contact.objects.filter(sp=sp, end_at=None)
return render(request, 'rr/contact.html', {'object_list': contacts, 'form': form, 'object': sp})
|
def preprocess_input(x):
'Preprocesses a numpy array encoding a batch of images.\n\n Arguments:\n x: a 4D numpy array consists of RGB values within [0, 255].\n\n Returns:\n Preprocessed array.\n '
return imagenet_utils.preprocess_input(x, mode='tf')
| -231,657,472,479,496,000
|
Preprocesses a numpy array encoding a batch of images.
Arguments:
x: a 4D numpy array consists of RGB values within [0, 255].
Returns:
Preprocessed array.
|
tensorflow/python/keras/_impl/keras/applications/mobilenet.py
|
preprocess_input
|
DylanDmitri/tensorflow
|
python
|
def preprocess_input(x):
'Preprocesses a numpy array encoding a batch of images.\n\n Arguments:\n x: a 4D numpy array consists of RGB values within [0, 255].\n\n Returns:\n Preprocessed array.\n '
return imagenet_utils.preprocess_input(x, mode='tf')
|
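The `preprocess_input` row above delegates to `imagenet_utils.preprocess_input(x, mode='tf')`, which rescales pixels from [0, 255] to [-1, 1]. A quick sanity check of that behaviour, assuming the public `tensorflow.keras` build exposes the same helper as this `_impl` module:

```python
import numpy as np
from tensorflow.keras.applications import imagenet_utils

batch = np.array([[[[0.0, 127.5, 255.0]]]])  # a single 1x1 "RGB" image, shape (1, 1, 1, 3)
print(imagenet_utils.preprocess_input(batch, mode='tf'))  # -> [[[[-1., 0., 1.]]]]
```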
def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000):
"Instantiates the MobileNet architecture.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n To load a MobileNet model via `load_model`, import the custom\n objects `relu6` and `DepthwiseConv2D` and pass them to the\n `custom_objects` parameter.\n E.g.\n model = load_model('mobilenet.h5', custom_objects={\n 'relu6': mobilenet.relu6,\n 'DepthwiseConv2D': mobilenet.DepthwiseConv2D})\n\n Arguments:\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or (3, 224, 224) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: depth multiplier for depthwise convolution\n (also called the resolution multiplier)\n dropout: dropout rate\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n "
if (K.backend() != 'tensorflow'):
raise RuntimeError('Only TensorFlow backend is currently supported, as other backends do not support depthwise convolution.')
if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
if ((weights == 'imagenet') and include_top and (classes != 1000)):
raise ValueError('If using `weights` as ImageNet with `include_top` as true, `classes` should be 1000')
if (input_shape is None):
default_size = 224
else:
if (K.image_data_format() == 'channels_first'):
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if ((rows == cols) and (rows in [128, 160, 192, 224])):
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=K.image_data_format(), require_flatten=include_top, weights=weights)
if (K.image_data_format() == 'channels_last'):
(row_axis, col_axis) = (0, 1)
else:
(row_axis, col_axis) = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if (weights == 'imagenet'):
if (depth_multiplier != 1):
raise ValueError('If imagenet weights are being loaded, depth multiplier must be 1')
if (alpha not in [0.25, 0.5, 0.75, 1.0]):
raise ValueError('If imagenet weights are being loaded, alpha can be one of`0.25`, `0.50`, `0.75` or `1.0` only.')
if ((rows != cols) or (rows not in [128, 160, 192, 224])):
raise ValueError(('If imagenet weights are being loaded, input must have a static square shape (one of (128,128), (160,160), (192,192), or (224, 224)). Input shape provided = %s' % (input_shape,)))
if (K.image_data_format() != 'channels_last'):
logging.warning('The MobileNet family of models is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if (input_tensor is None):
img_input = Input(shape=input_shape)
elif (not K.is_keras_tensor(input_tensor)):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if (K.image_data_format() == 'channels_first'):
shape = (int((1024 * alpha)), 1, 1)
else:
shape = (1, 1, int((1024 * alpha)))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
elif (pooling == 'avg'):
x = GlobalAveragePooling2D()(x)
elif (pooling == 'max'):
x = GlobalMaxPooling2D()(x)
if (input_tensor is not None):
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name=('mobilenet_%0.2f_%s' % (alpha, rows)))
if (weights == 'imagenet'):
if (K.image_data_format() == 'channels_first'):
raise ValueError('Weights for "channels_last" format are not available.')
if (alpha == 1.0):
alpha_text = '1_0'
elif (alpha == 0.75):
alpha_text = '7_5'
elif (alpha == 0.5):
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = ('mobilenet_%s_%d_tf.h5' % (alpha_text, rows))
weigh_path = (BASE_WEIGHT_PATH + model_name)
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
else:
model_name = ('mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows))
weigh_path = (BASE_WEIGHT_PATH + model_name)
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
model.load_weights(weights_path)
elif (weights is not None):
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
| -3,885,951,292,928,177,000
|
Instantiates the MobileNet architecture.
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
Arguments:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
|
tensorflow/python/keras/_impl/keras/applications/mobilenet.py
|
MobileNet
|
DylanDmitri/tensorflow
|
python
|
def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000):
"Instantiates the MobileNet architecture.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n To load a MobileNet model via `load_model`, import the custom\n objects `relu6` and `DepthwiseConv2D` and pass them to the\n `custom_objects` parameter.\n E.g.\n model = load_model('mobilenet.h5', custom_objects={\n 'relu6': mobilenet.relu6,\n 'DepthwiseConv2D': mobilenet.DepthwiseConv2D})\n\n Arguments:\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or (3, 224, 224) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(200, 200, 3)` would be one valid value.\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: depth multiplier for depthwise convolution\n (also called the resolution multiplier)\n dropout: dropout rate\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n "
if (K.backend() != 'tensorflow'):
raise RuntimeError('Only TensorFlow backend is currently supported, as other backends do not support depthwise convolution.')
if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
if ((weights == 'imagenet') and include_top and (classes != 1000)):
raise ValueError('If using `weights` as ImageNet with `include_top` as true, `classes` should be 1000')
if (input_shape is None):
default_size = 224
else:
if (K.image_data_format() == 'channels_first'):
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if ((rows == cols) and (rows in [128, 160, 192, 224])):
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=K.image_data_format(), require_flatten=include_top, weights=weights)
if (K.image_data_format() == 'channels_last'):
(row_axis, col_axis) = (0, 1)
else:
(row_axis, col_axis) = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if (weights == 'imagenet'):
if (depth_multiplier != 1):
raise ValueError('If imagenet weights are being loaded, depth multiplier must be 1')
if (alpha not in [0.25, 0.5, 0.75, 1.0]):
raise ValueError('If imagenet weights are being loaded, alpha can be one of`0.25`, `0.50`, `0.75` or `1.0` only.')
if ((rows != cols) or (rows not in [128, 160, 192, 224])):
raise ValueError(('If imagenet weights are being loaded, input must have a static square shape (one of (128,128), (160,160), (192,192), or (224, 224)). Input shape provided = %s' % (input_shape,)))
if (K.image_data_format() != 'channels_last'):
logging.warning('The MobileNet family of models is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if (input_tensor is None):
img_input = Input(shape=input_shape)
elif (not K.is_keras_tensor(input_tensor)):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if (K.image_data_format() == 'channels_first'):
shape = (int((1024 * alpha)), 1, 1)
else:
shape = (1, 1, int((1024 * alpha)))
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
elif (pooling == 'avg'):
x = GlobalAveragePooling2D()(x)
elif (pooling == 'max'):
x = GlobalMaxPooling2D()(x)
if (input_tensor is not None):
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name=('mobilenet_%0.2f_%s' % (alpha, rows)))
if (weights == 'imagenet'):
if (K.image_data_format() == 'channels_first'):
raise ValueError('Weights for "channels_last" format are not available.')
if (alpha == 1.0):
alpha_text = '1_0'
elif (alpha == 0.75):
alpha_text = '7_5'
elif (alpha == 0.5):
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = ('mobilenet_%s_%d_tf.h5' % (alpha_text, rows))
weigh_path = (BASE_WEIGHT_PATH + model_name)
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
else:
model_name = ('mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows))
weigh_path = (BASE_WEIGHT_PATH + model_name)
weights_path = get_file(model_name, weigh_path, cache_subdir='models')
model.load_weights(weights_path)
elif (weights is not None):
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
|
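A minimal usage sketch of the `MobileNet` constructor shown above, assuming the public `tensorflow.keras.applications` entry point mirrors this `_impl` signature; `weights=None` sidesteps the ImageNet-only checks on `alpha`, input size and `classes`:

```python
from tensorflow.keras.applications import MobileNet

# Random initialization, half-width network, 10-class head.
model = MobileNet(input_shape=(160, 160, 3), alpha=0.5,
                  include_top=True, weights=None, classes=10)
model.summary()
```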
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"Adds an initial convolution layer (with batch normalization and relu6).\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, 3)`\n (with `channels_last` data format) or\n (3, rows, cols) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n "
channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1))
filters = int((filters * alpha))
x = Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides, name='conv1')(inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x)
| 321,854,916,225,204,350
|
Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
|
tensorflow/python/keras/_impl/keras/applications/mobilenet.py
|
_conv_block
|
DylanDmitri/tensorflow
|
python
|
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"Adds an initial convolution layer (with batch normalization and relu6).\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, 3)`\n (with `channels_last` data format) or\n (3, rows, cols) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n "
channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1))
filters = int((filters * alpha))
x = Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides, name='conv1')(inputs)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return Activation(relu6, name='conv1_relu')(x)
|
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1):
"Adds a depthwise convolution block.\n\n A depthwise convolution block consists of a depthwise conv,\n batch normalization, relu6, pointwise convolution,\n batch normalization and relu6 activation.\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, channels)`\n (with `channels_last` data format) or\n (channels, rows, cols) (with `channels_first` data format).\n pointwise_conv_filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the pointwise convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n block_id: Integer, a unique identification designating the block number.\n\n Input shape:\n 4D tensor with shape:\n `(batch, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n "
channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1))
pointwise_conv_filters = int((pointwise_conv_filters * alpha))
x = DepthwiseConv2D((3, 3), padding='same', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name=('conv_dw_%d' % block_id))(inputs)
x = BatchNormalization(axis=channel_axis, name=('conv_dw_%d_bn' % block_id))(x)
x = Activation(relu6, name=('conv_dw_%d_relu' % block_id))(x)
x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=('conv_pw_%d' % block_id))(x)
x = BatchNormalization(axis=channel_axis, name=('conv_pw_%d_bn' % block_id))(x)
return Activation(relu6, name=('conv_pw_%d_relu' % block_id))(x)
| 4,224,460,810,474,824,700
|
Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating the block number.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
Returns:
Output tensor of block.
|
tensorflow/python/keras/_impl/keras/applications/mobilenet.py
|
_depthwise_conv_block
|
DylanDmitri/tensorflow
|
python
|
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1):
"Adds a depthwise convolution block.\n\n A depthwise convolution block consists of a depthwise conv,\n batch normalization, relu6, pointwise convolution,\n batch normalization and relu6 activation.\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, channels)`\n (with `channels_last` data format) or\n (channels, rows, cols) (with `channels_first` data format).\n pointwise_conv_filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the pointwise convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n block_id: Integer, a unique identification designating the block number.\n\n Input shape:\n 4D tensor with shape:\n `(batch, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n "
channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1))
pointwise_conv_filters = int((pointwise_conv_filters * alpha))
x = DepthwiseConv2D((3, 3), padding='same', depth_multiplier=depth_multiplier, strides=strides, use_bias=False, name=('conv_dw_%d' % block_id))(inputs)
x = BatchNormalization(axis=channel_axis, name=('conv_dw_%d_bn' % block_id))(x)
x = Activation(relu6, name=('conv_dw_%d_relu' % block_id))(x)
x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=('conv_pw_%d' % block_id))(x)
x = BatchNormalization(axis=channel_axis, name=('conv_pw_%d_bn' % block_id))(x)
return Activation(relu6, name=('conv_pw_%d_relu' % block_id))(x)
|
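The `_conv_block` and `_depthwise_conv_block` rows both scale their filter counts by `alpha` and pick the batch-normalization axis from the Keras image data format. A small worked example of that arithmetic, with the import path assumed to be the public `tensorflow.keras` backend:

```python
from tensorflow.keras import backend as K

alpha = 0.75
pointwise_conv_filters = int(512 * alpha)  # a block requesting 512 filters ends up with 384
channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
print(pointwise_conv_filters, channel_axis)  # e.g. "384 -1" with the default channels_last config
```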
def __call__(self, image, crop_region=False, return_result=False, output_path=None):
'\n Input: path to image\n Output: boxes (coordinates of 4 points)\n '
if (output_path is None):
assert crop_region, 'Please specify output_path'
else:
output_path = os.path.join(output_path, 'crops')
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
(_, boxes_list, _) = self.model.predict(image, output_path, crop_region=crop_region)
if return_result:
img = detection.draw_bbox(image, boxes_list)
if return_result:
return (boxes_list, img)
else:
return boxes_list
| -4,561,561,648,685,451,000
|
Input: path to image
Output: boxes (coordinates of 4 points)
|
modules/__init__.py
|
__call__
|
kaylode/vietnamese-ocr-toolbox
|
python
|
def __call__(self, image, crop_region=False, return_result=False, output_path=None):
'\n Input: path to image\n Output: boxes (coordinates of 4 points)\n '
if (output_path is None):
assert crop_region, 'Please specify output_path'
else:
output_path = os.path.join(output_path, 'crops')
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
(_, boxes_list, _) = self.model.predict(image, output_path, crop_region=crop_region)
if return_result:
img = detection.draw_bbox(image, boxes_list)
if return_result:
return (boxes_list, img)
else:
return boxes_list
|
def successful_signing_test(self):
'Create and sign a valid raw transaction with one input.\n\n Expected results:\n\n 1) The transaction has a complete set of signatures\n 2) No script verification error occurred'
privKeys = ['EXAMPLE_KEY']
inputs = [{'txid': 'EXAMPLE_KEY', 'vout': 0, 'scriptPubKey': 'EXAMPLE_KEY'}]
outputs = {'EXAMPLE_KEY': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
assert ('complete' in rawTxSigned)
assert_equal(rawTxSigned['complete'], True)
assert ('errors' not in rawTxSigned)
| 1,666,485,727,681,946,600
|
Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred
|
test/functional/rpc_signrawtransaction.py
|
successful_signing_test
|
anandsinha095/JDCION
|
python
|
def successful_signing_test(self):
'Create and sign a valid raw transaction with one input.\n\n Expected results:\n\n 1) The transaction has a complete set of signatures\n 2) No script verification error occurred'
privKeys = ['EXAMPLE_KEY']
inputs = [{'txid': 'EXAMPLE_KEY', 'vout': 0, 'scriptPubKey': 'EXAMPLE_KEY'}]
outputs = {'EXAMPLE_KEY': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
assert ('complete' in rawTxSigned)
assert_equal(rawTxSigned['complete'], True)
assert ('errors' not in rawTxSigned)
|
def script_verification_error_test(self):
'Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.\n\n Expected results:\n\n 3) The transaction has no complete set of signatures\n 4) Two script verification errors occurred\n 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")\n 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)'
privKeys = ['EXAMPLE_KEY']
inputs = [{'txid': 'EXAMPLE_KEY', 'vout': 0}, {'txid': 'EXAMPLE_KEY', 'vout': 7}, {'txid': 'EXAMPLE_KEY', 'vout': 1}]
scripts = [{'txid': 'EXAMPLE_KEY', 'vout': 0, 'scriptPubKey': 'EXAMPLE_KEY'}, {'txid': 'EXAMPLE_KEY', 'vout': 7, 'scriptPubKey': 'badbadbadbad'}]
outputs = {'EXAMPLE_KEY': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
assert ('complete' in rawTxSigned)
assert_equal(rawTxSigned['complete'], False)
assert ('errors' in rawTxSigned)
assert_equal(len(rawTxSigned['errors']), 2)
assert ('txid' in rawTxSigned['errors'][0])
assert ('vout' in rawTxSigned['errors'][0])
assert ('scriptSig' in rawTxSigned['errors'][0])
assert ('sequence' in rawTxSigned['errors'][0])
assert ('error' in rawTxSigned['errors'][0])
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
| -7,263,459,259,928,583,000
|
Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
|
test/functional/rpc_signrawtransaction.py
|
script_verification_error_test
|
anandsinha095/JDCION
|
python
|
def script_verification_error_test(self):
'Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.\n\n Expected results:\n\n 3) The transaction has no complete set of signatures\n 4) Two script verification errors occurred\n 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")\n 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)'
privKeys = ['EXAMPLE_KEY']
inputs = [{'txid': 'EXAMPLE_KEY', 'vout': 0}, {'txid': 'EXAMPLE_KEY', 'vout': 7}, {'txid': 'EXAMPLE_KEY', 'vout': 1}]
scripts = [{'txid': 'EXAMPLE_KEY', 'vout': 0, 'scriptPubKey': 'EXAMPLE_KEY'}, {'txid': 'EXAMPLE_KEY', 'vout': 7, 'scriptPubKey': 'badbadbadbad'}]
outputs = {'EXAMPLE_KEY': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
assert ('complete' in rawTxSigned)
assert_equal(rawTxSigned['complete'], False)
assert ('errors' in rawTxSigned)
assert_equal(len(rawTxSigned['errors']), 2)
assert ('txid' in rawTxSigned['errors'][0])
assert ('vout' in rawTxSigned['errors'][0])
assert ('scriptSig' in rawTxSigned['errors'][0])
assert ('sequence' in rawTxSigned['errors'][0])
assert ('error' in rawTxSigned['errors'][0])
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
|
def assertTypedEquals(self, expected, actual):
'Asserts that both the types and values are the same.'
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
| -3,258,969,304,466,242,600
|
Asserts that both the types and values are the same.
|
Mark_attandance_py_selenium/py/App/Python/Lib/test/test_fractions.py
|
assertTypedEquals
|
4nkitd/pyAutomation
|
python
|
def assertTypedEquals(self, expected, actual):
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
|
def assertRaisesMessage(self, exc_type, message, callable, *args, **kwargs):
'Asserts that callable(*args, **kwargs) raises exc_type(message).'
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEqual(message, str(e))
else:
self.fail(('%s not raised' % exc_type.__name__))
| -4,005,090,857,821,129,000
|
Asserts that callable(*args, **kwargs) raises exc_type(message).
|
Mark_attandance_py_selenium/py/App/Python/Lib/test/test_fractions.py
|
assertRaisesMessage
|
4nkitd/pyAutomation
|
python
|
def assertRaisesMessage(self, exc_type, message, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEqual(message, str(e))
else:
self.fail(('%s not raised' % exc_type.__name__))
|
def __init__(self, parametrization):
'Initializes the CirqOperation\n\n Args:\n parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the\n PennyLane gate parameters to an ordered list of gates that are to be applied.\n '
self.parametrization = parametrization
self.parametrized_cirq_gates = None
self.is_inverse = False
| -9,183,222,663,540,377,000
|
Initializes the CirqOperation
Args:
parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the
PennyLane gate parameters to an ordered list of gates that are to be applied.
|
pennylane_cirq/cirq_operation.py
|
__init__
|
PennyLaneAI/pennylane-cirq
|
python
|
def __init__(self, parametrization):
'Initializes the CirqOperation\n\n Args:\n parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the\n PennyLane gate parameters to an ordered list of gates that are to be applied.\n '
self.parametrization = parametrization
self.parametrized_cirq_gates = None
self.is_inverse = False
|
def parametrize(self, *args):
'Parametrizes the CirqOperation.\n\n Args:\n *args (float): the parameters for the operations\n '
self.parametrized_cirq_gates = self.parametrization(*args)
if (not isinstance(self.parametrized_cirq_gates, Sequence)):
self.parametrized_cirq_gates = [self.parametrized_cirq_gates]
if self.is_inverse:
self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
| -2,579,435,966,490,960,400
|
Parametrizes the CirqOperation.
Args:
*args (float): the parameters for the operations
|
pennylane_cirq/cirq_operation.py
|
parametrize
|
PennyLaneAI/pennylane-cirq
|
python
|
def parametrize(self, *args):
'Parametrizes the CirqOperation.\n\n Args:\n *args (float): the parameters for the operations\n '
self.parametrized_cirq_gates = self.parametrization(*args)
if (not isinstance(self.parametrized_cirq_gates, Sequence)):
self.parametrized_cirq_gates = [self.parametrized_cirq_gates]
if self.is_inverse:
self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
|
def apply(self, *qubits):
'Applies the CirqOperation.\n\n Args:\n *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.\n '
if (not self.parametrized_cirq_gates):
raise qml.DeviceError('CirqOperation must be parametrized before it can be applied.')
return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
| 7,338,081,575,973,967,000
|
Applies the CirqOperation.
Args:
*qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.
|
pennylane_cirq/cirq_operation.py
|
apply
|
PennyLaneAI/pennylane-cirq
|
python
|
def apply(self, *qubits):
'Applies the CirqOperation.\n\n Args:\n *qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.\n '
if (not self.parametrized_cirq_gates):
raise qml.DeviceError('CirqOperation must be parametrized before it can be applied.')
return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
|
def inv(self):
'Inverses the CirqOperation.'
if self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.")
self.is_inverse = (not self.is_inverse)
| 4,247,073,013,365,585,000
|
Inverses the CirqOperation.
|
pennylane_cirq/cirq_operation.py
|
inv
|
PennyLaneAI/pennylane-cirq
|
python
|
def inv(self):
if self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.")
self.is_inverse = (not self.is_inverse)
|
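The four `CirqOperation` rows above (`__init__`, `parametrize`, `apply`, `inv`) compose as follows; a hedged sketch assuming the module path shown in the rows, with a hypothetical single-gate parametrization:

```python
import cirq
from pennylane_cirq.cirq_operation import CirqOperation

# Hypothetical parametrization: one PennyLane angle mapped to a single Cirq rotation gate.
op = CirqOperation(lambda phi: cirq.rz(phi))

op.inv()                       # per inv() above, inversion must happen before parametrize()
op.parametrize(0.5)            # builds the (inverted) gate list
qubit = cirq.LineQubit(0)
gates = list(op.apply(qubit))  # apply() yields the parametrized gates acting on the given qubits
```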
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
'The event triggered when an error is raised while invoking a command.'
if hasattr(ctx.command, 'on_error'):
return
error = getattr(error, 'original', error)
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.MissingRequiredArgument):
return (await util.send_command_help(ctx))
command_logger.error(f'{type(error).__name__:25} > {ctx.guild} ? {ctx.author} "{ctx.message.content}" > {error}')
if isinstance(error, util.ErrorMessage):
return (await ctx.send(str(error)))
if isinstance(error, commands.MissingPermissions):
perms = ', '.join((f'`{x}`' for x in error.missing_perms))
return (await ctx.send(f':warning: You require {perms} permission to use this command!'))
elif isinstance(error, commands.BotMissingPermissions):
perms = ', '.join((f'`{x}`' for x in error.missing_perms))
return (await ctx.send(f':warning: Cannot execute command! Bot is missing permission {perms}'))
elif isinstance(error, commands.CommandOnCooldown):
if db.is_patron(ctx.author.id, (2, 3)):
return (await ctx.reinvoke())
else:
return (await ctx.send(f':hourglass: This command is on a cooldown! (`{error.retry_after:.2f}s` remaining)'))
elif isinstance(error, commands.DisabledCommand):
(await ctx.send(f':warning: `{ctx.command}` has been disabled!'))
elif isinstance(error, commands.NoPrivateMessage):
(await ctx.author.send(':warning: You cannot use this command in private messages'))
elif isinstance(error, util.PatronCheckFailure):
(await ctx.send(':no_entry: Support me on patreon to use this command! <https://patreon.com/joinemm>'))
elif isinstance(error, (commands.NotOwner, commands.CheckFailure)):
(await ctx.send(':warning: Sorry, you are not authorized to use this command!'))
elif isinstance(error, exceptions.BlacklistTrigger):
if (error.blacklist_type == 'command'):
message = 'This command has been blacklisted by the server moderators'
elif (error.blacklist_type == 'channel'):
message = 'Command usage on this channel has been blacklisted by the server moderators'
elif (error.blacklist_type == 'user'):
message = 'You have been blacklisted from using commands by the server moderators'
elif (error.blacklist_type == 'global'):
message = 'You have been blacklisted from using Miso Bot'
delete = error.do_delete
(await ctx.send(f':no_entry_sign: `{message}`', delete_after=(5 if delete else None)))
if delete:
(await asyncio.sleep(5))
(await ctx.message.delete())
elif isinstance(error, (commands.BadArgument, flags._parser.ArgumentParsingError)):
(await ctx.send(f'```{str(error)}```'))
elif isinstance(error, discord.errors.Forbidden):
try:
(await ctx.send(f'```{str(error)}```'))
except discord.errors.Forbidden:
try:
(await ctx.message.add_reaction('🙊'))
except discord.errors.Forbidden:
logger.error(str(error))
elif isinstance(error, exceptions.LastFMError):
(await ctx.send(f'```{str(error)}```'))
else:
traceback.print_exception(type(error), error, error.__traceback__)
(await ctx.send(f'''```
{type(error).__name__}: {str(error)}```'''))
| 6,489,049,612,741,809,000
|
The event triggered when an error is raised while invoking a command.
|
cogs/errorhandler.py
|
on_command_error
|
ZackHart2400/miso-bot
|
python
|
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, 'on_error'):
return
error = getattr(error, 'original', error)
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, commands.MissingRequiredArgument):
return (await util.send_command_help(ctx))
command_logger.error(f'{type(error).__name__:25} > {ctx.guild} ? {ctx.author} "{ctx.message.content}" > {error}')
if isinstance(error, util.ErrorMessage):
return (await ctx.send(str(error)))
if isinstance(error, commands.MissingPermissions):
perms = ', '.join((f'`{x}`' for x in error.missing_perms))
return (await ctx.send(f':warning: You require {perms} permission to use this command!'))
elif isinstance(error, commands.BotMissingPermissions):
perms = ', '.join((f'`{x}`' for x in error.missing_perms))
return (await ctx.send(f':warning: Cannot execute command! Bot is missing permission {perms}'))
elif isinstance(error, commands.CommandOnCooldown):
if db.is_patron(ctx.author.id, (2, 3)):
return (await ctx.reinvoke())
else:
return (await ctx.send(f':hourglass: This command is on a cooldown! (`{error.retry_after:.2f}s` remaining)'))
elif isinstance(error, commands.DisabledCommand):
(await ctx.send(f':warning: `{ctx.command}` has been disabled!'))
elif isinstance(error, commands.NoPrivateMessage):
(await ctx.author.send(':warning: You cannot use this command in private messages'))
elif isinstance(error, util.PatronCheckFailure):
(await ctx.send(':no_entry: Support me on patreon to use this command! <https://patreon.com/joinemm>'))
elif isinstance(error, (commands.NotOwner, commands.CheckFailure)):
(await ctx.send(':warning: Sorry, you are not authorized to use this command!'))
elif isinstance(error, exceptions.BlacklistTrigger):
if (error.blacklist_type == 'command'):
message = 'This command has been blacklisted by the server moderators'
elif (error.blacklist_type == 'channel'):
message = 'Command usage on this channel has been blacklisted by the server moderators'
elif (error.blacklist_type == 'user'):
message = 'You have been blacklisted from using commands by the server moderators'
elif (error.blacklist_type == 'global'):
message = 'You have been blacklisted from using Miso Bot'
delete = error.do_delete
(await ctx.send(f':no_entry_sign: `{message}`', delete_after=(5 if delete else None)))
if delete:
(await asyncio.sleep(5))
(await ctx.message.delete())
elif isinstance(error, (commands.BadArgument, flags._parser.ArgumentParsingError)):
(await ctx.send(f'```{str(error)}```'))
elif isinstance(error, discord.errors.Forbidden):
try:
(await ctx.send(f'```{str(error)}```'))
except discord.errors.Forbidden:
try:
(await ctx.message.add_reaction('🙊'))
except discord.errors.Forbidden:
logger.error(str(error))
elif isinstance(error, exceptions.LastFMError):
(await ctx.send(f'```{str(error)}```'))
else:
traceback.print_exception(type(error), error, error.__traceback__)
(await ctx.send(f'''```
{type(error).__name__}: {str(error)}```'''))
|
def sigmoid_rampup(current, rampup_length):
'Exponential rampup from https://arxiv.org/abs/1610.02242'
if (rampup_length == 0):
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = (1.0 - (current / rampup_length))
w = float(np.exp((((- 2.0) * phase) * phase)))
return min(w, 0.5)
| -5,048,476,405,849,566,000
|
Exponential rampup from https://arxiv.org/abs/1610.02242
|
PNet/train_pnet.py
|
sigmoid_rampup
|
mangye16/ReID-Label-Noise
|
python
|
def sigmoid_rampup(current, rampup_length):
if (rampup_length == 0):
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = (1.0 - (current / rampup_length))
w = float(np.exp((((- 2.0) * phase) * phase)))
return min(w, 0.5)
|
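A quick numeric illustration of the rampup schedule in the sigmoid_rampup record above: the weight starts at exp(-2) ≈ 0.135, rises smoothly over rampup_length steps, and this variant caps it at 0.5. The sketch below only assumes numpy.

import numpy as np

def sigmoid_rampup(current, rampup_length):
    # Same schedule as above: sigmoid-shaped rampup, capped at 0.5.
    if rampup_length == 0:
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - current / rampup_length
    return min(float(np.exp(-2.0 * phase * phase)), 0.5)

for step in (0, 20, 40, 80):
    print(step, round(sigmoid_rampup(step, 80), 3))
# prints 0.135, 0.325, 0.5, 0.5 (rounded); the 0.5 cap takes over partway through the ramp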
@abstractmethod
def apply(self, board):
'\n Apply a move to a board and retrieve the board produced by the move.\n\n Parameters\n ----------\n board\n The board to apply the move to.\n\n Returns\n -------\n Board\n A new board that will be produced after applying this move.\n '
return board
| -6,482,721,186,070,927,000
|
Apply a move to a board and retrieve the board produced by the move.
Parameters
----------
board
The board to apply the move to.
Returns
-------
Board
A new board that will be produced after applying this move.
|
libcheckers/movement.py
|
apply
|
YuriyGuts/libcheckers
|
python
|
@abstractmethod
def apply(self, board):
'\n Apply a move to a board and retrieve the board produced by the move.\n\n Parameters\n ----------\n board\n The board to apply the move to.\n\n Returns\n -------\n Board\n A new board that will be produced after applying this move.\n '
return board
|
def find_opponent_square(self, board):
'\n Retrieve the index of the square that contains the enemy piece to be captured.\n '
path_indexes = get_indexes_between(self.start_index, self.end_index)
own_color = board.owner[self.start_index]
own_path_squares = [index for index in path_indexes if (board.owner[index] == own_color)]
opponent_path_squares = [index for index in path_indexes if (board.owner[index] and (board.owner[index] != own_color))]
if (len(own_path_squares) > 0):
msg = 'Cannot capture when own pieces are in the way: {0}'
raise InvalidMoveException(msg.format(', '.join((str(index) for index in own_path_squares))))
if (len(opponent_path_squares) != 1):
msg = 'Cannot capture: must have exactly one opponent piece along the way'
raise InvalidMoveException(msg)
if (not board.owner[self.start_index]):
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
return opponent_path_squares[0]
| 2,956,729,355,891,111,000
|
Retrieve the index of the square that contains the enemy piece to be captured.
|
libcheckers/movement.py
|
find_opponent_square
|
YuriyGuts/libcheckers
|
python
|
def find_opponent_square(self, board):
'\n \n '
path_indexes = get_indexes_between(self.start_index, self.end_index)
own_color = board.owner[self.start_index]
own_path_squares = [index for index in path_indexes if (board.owner[index] == own_color)]
opponent_path_squares = [index for index in path_indexes if (board.owner[index] and (board.owner[index] != own_color))]
if (len(own_path_squares) > 0):
msg = 'Cannot capture when own pieces are in the way: {0}'
raise InvalidMoveException(msg.format(', '.join((str(index) for index in own_path_squares))))
if (len(opponent_path_squares) != 1):
msg = 'Cannot capture: must have exactly one opponent piece along the way'
raise InvalidMoveException(msg)
if (not board.owner[self.start_index]):
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
return opponent_path_squares[0]
|
def move_piece(self, start_index, end_index):
'\n Move an existing game piece from point A to point B.\n '
self.owner[end_index] = self.owner[start_index]
self.owner[start_index] = None
self.piece_class[end_index] = self.piece_class[start_index]
self.piece_class[start_index] = None
if ((self.owner[end_index] == Player.WHITE) and is_black_home_row(end_index)):
self.piece_class[end_index] = PieceClass.KING
if ((self.owner[end_index] == Player.BLACK) and is_white_home_row(end_index)):
self.piece_class[end_index] = PieceClass.KING
| -6,216,466,120,835,834,000
|
Move an existing game piece from point A to point B.
|
libcheckers/movement.py
|
move_piece
|
YuriyGuts/libcheckers
|
python
|
def move_piece(self, start_index, end_index):
'\n \n '
self.owner[end_index] = self.owner[start_index]
self.owner[start_index] = None
self.piece_class[end_index] = self.piece_class[start_index]
self.piece_class[start_index] = None
if ((self.owner[end_index] == Player.WHITE) and is_black_home_row(end_index)):
self.piece_class[end_index] = PieceClass.KING
if ((self.owner[end_index] == Player.BLACK) and is_white_home_row(end_index)):
self.piece_class[end_index] = PieceClass.KING
|
def add_piece(self, index, player, piece_class):
'\n Place a new piece on the board with the specified owner and class.\n '
self.owner[index] = player
self.piece_class[index] = piece_class
| 467,345,209,544,386,750
|
Place a new piece on the board with the specified owner and class.
|
libcheckers/movement.py
|
add_piece
|
YuriyGuts/libcheckers
|
python
|
def add_piece(self, index, player, piece_class):
'\n \n '
self.owner[index] = player
self.piece_class[index] = piece_class
|
def remove_piece(self, index):
'\n Clear the specified square from the board.\n '
self.owner[index] = None
self.piece_class[index] = None
| -6,348,893,047,659,651,000
|
Clear the specified square from the board.
|
libcheckers/movement.py
|
remove_piece
|
YuriyGuts/libcheckers
|
python
|
def remove_piece(self, index):
'\n \n '
self.owner[index] = None
self.piece_class[index] = None
|
def get_player_squares(self, player):
'\n Get all squares on the board owned by the specified player.\n '
return [index for index in range(1, (BoardConfig.total_squares + 1)) if (self.owner[index] == player)]
| 1,008,148,386,689,905,000
|
Get all squares on the board owned by the specified player.
|
libcheckers/movement.py
|
get_player_squares
|
YuriyGuts/libcheckers
|
python
|
def get_player_squares(self, player):
'\n \n '
return [index for index in range(1, (BoardConfig.total_squares + 1)) if (self.owner[index] == player)]
|
def get_free_movement_destinations(self, index):
'\n Get all allowed destinations for free movement for the piece at the specified square.\n '
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = (BoardConfig.board_dim if (own_class == PieceClass.KING) else 1)
lines_of_sight = get_lines_of_sight(index, visibility_range)
if ((own_class == PieceClass.MAN) and (own_color == Player.WHITE)):
lines_of_sight = lines_of_sight[:2]
if ((own_class == PieceClass.MAN) and (own_color == Player.BLACK)):
lines_of_sight = lines_of_sight[(- 2):]
result = []
for line in lines_of_sight:
for i in range(0, len(line)):
if self.owner[line[i]]:
break
result.append(line[i])
return result
| -241,082,711,958,797,380
|
Get all allowed destinations for free movement for the piece at the specified square.
|
libcheckers/movement.py
|
get_free_movement_destinations
|
YuriyGuts/libcheckers
|
python
|
def get_free_movement_destinations(self, index):
'\n \n '
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = (BoardConfig.board_dim if (own_class == PieceClass.KING) else 1)
lines_of_sight = get_lines_of_sight(index, visibility_range)
if ((own_class == PieceClass.MAN) and (own_color == Player.WHITE)):
lines_of_sight = lines_of_sight[:2]
if ((own_class == PieceClass.MAN) and (own_color == Player.BLACK)):
lines_of_sight = lines_of_sight[(- 2):]
result = []
for line in lines_of_sight:
for i in range(0, len(line)):
if self.owner[line[i]]:
break
result.append(line[i])
return result
|
def get_capturable_pieces(self, index):
"\n Get all squares that contain opponent's pieces capturable from the specified position.\n "
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = (BoardConfig.board_dim if (own_class == PieceClass.KING) else 2)
lines_of_sight = get_lines_of_sight(index, visibility_range)
result = []
for line in lines_of_sight:
for i in range(0, (len(line) - 1)):
if (self.owner[line[i]] in (own_color, Player.ZOMBIE)):
break
if (self.owner[line[i]] and self.owner[line[(i + 1)]]):
break
if (self.owner[line[i]] and (self.owner[line[i]] != own_color) and (not self.owner[line[(i + 1)]])):
result.append(line[i])
break
return result
| 3,072,183,555,115,830,300
|
Get all squares that contain opponent's pieces capturable from the specified position.
|
libcheckers/movement.py
|
get_capturable_pieces
|
YuriyGuts/libcheckers
|
python
|
def get_capturable_pieces(self, index):
"\n \n "
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = (BoardConfig.board_dim if (own_class == PieceClass.KING) else 2)
lines_of_sight = get_lines_of_sight(index, visibility_range)
result = []
for line in lines_of_sight:
for i in range(0, (len(line) - 1)):
if (self.owner[line[i]] in (own_color, Player.ZOMBIE)):
break
if (self.owner[line[i]] and self.owner[line[(i + 1)]]):
break
if (self.owner[line[i]] and (self.owner[line[i]] != own_color) and (not self.owner[line[(i + 1)]])):
result.append(line[i])
break
return result
|
def get_available_capture_landing_positions(self, attacker_index, capture_index):
'\n If the specified square is captured by the specified attacker,\n get all possible squares the attacker can land on.\n '
own_class = self.piece_class[attacker_index]
(attacker_row, attacker_col) = index_to_coords(attacker_index)
(capture_row, capture_col) = index_to_coords(capture_index)
movement_row = ((capture_row - attacker_row) // abs((capture_row - attacker_row)))
movement_col = ((capture_col - attacker_col) // abs((capture_col - attacker_col)))
result = []
current_row = (capture_row + movement_row)
current_col = (capture_col + movement_col)
if (own_class == PieceClass.MAN):
return [coords_to_index(current_row, current_col)]
while ((1 <= current_row <= BoardConfig.board_dim) and (1 <= current_col <= BoardConfig.board_dim)):
current_index = coords_to_index(current_row, current_col)
if (not self.owner[current_index]):
result.append(current_index)
current_row += movement_row
current_col += movement_col
else:
break
return result
| 9,079,244,142,070,687,000
|
If the specified square is captured by the specified attacker,
get all possible squares the attacker can land on.
|
libcheckers/movement.py
|
get_available_capture_landing_positions
|
YuriyGuts/libcheckers
|
python
|
def get_available_capture_landing_positions(self, attacker_index, capture_index):
'\n If the specified square is captured by the specified attacker,\n get all possible squares the attacker can land on.\n '
own_class = self.piece_class[attacker_index]
(attacker_row, attacker_col) = index_to_coords(attacker_index)
(capture_row, capture_col) = index_to_coords(capture_index)
movement_row = ((capture_row - attacker_row) // abs((capture_row - attacker_row)))
movement_col = ((capture_col - attacker_col) // abs((capture_col - attacker_col)))
result = []
current_row = (capture_row + movement_row)
current_col = (capture_col + movement_col)
if (own_class == PieceClass.MAN):
return [coords_to_index(current_row, current_col)]
while ((1 <= current_row <= BoardConfig.board_dim) and (1 <= current_col <= BoardConfig.board_dim)):
current_index = coords_to_index(current_row, current_col)
if (not self.owner[current_index]):
result.append(current_index)
current_row += movement_row
current_col += movement_col
else:
break
return result
|
def get_capture_sequence_candidates(self, player):
'\n Get all possible capture move sequences (not necessarily maximum ones)\n starting from every piece owned by the specified player.\n '
player_squares = self.get_player_squares(player)
attack_options = []
for attacker in player_squares:
attack_options.extend([(attacker, target) for target in self.get_capturable_pieces(attacker)])
capture_sequences = []
queue = deque()
for (attacker, target) in attack_options:
queue.extend([(self, CaptureMove(attacker, landing), []) for landing in self.get_available_capture_landing_positions(attacker, target)])
while queue:
(board_before, move, prev_moves) = queue.popleft()
class_before = board_before.piece_class[move.start_index]
opponent_square = move.find_opponent_square(board_before)
board_after = move.apply(board_before)
board_after.owner[opponent_square] = Player.ZOMBIE
board_after.piece_class[move.end_index] = class_before
next_attack_options = [(move.end_index, target) for target in board_after.get_capturable_pieces(move.end_index)]
if (not next_attack_options):
capture_sequences.append((prev_moves + [move]))
for (attacker, target) in next_attack_options:
queue.extend([(board_after, CaptureMove(attacker, landing), (prev_moves + [move])) for landing in board_after.get_available_capture_landing_positions(attacker, target)])
return capture_sequences
| -7,375,068,571,418,206,000
|
Get all possible capture move sequences (not necessarily maximum ones)
starting from every piece owned by the specified player.
|
libcheckers/movement.py
|
get_capture_sequence_candidates
|
YuriyGuts/libcheckers
|
python
|
def get_capture_sequence_candidates(self, player):
'\n Get all possible capture move sequences (not necessarily maximum ones)\n starting from every piece owned by the specified player.\n '
player_squares = self.get_player_squares(player)
attack_options = []
for attacker in player_squares:
attack_options.extend([(attacker, target) for target in self.get_capturable_pieces(attacker)])
capture_sequences = []
queue = deque()
for (attacker, target) in attack_options:
queue.extend([(self, CaptureMove(attacker, landing), []) for landing in self.get_available_capture_landing_positions(attacker, target)])
while queue:
(board_before, move, prev_moves) = queue.popleft()
class_before = board_before.piece_class[move.start_index]
opponent_square = move.find_opponent_square(board_before)
board_after = move.apply(board_before)
board_after.owner[opponent_square] = Player.ZOMBIE
board_after.piece_class[move.end_index] = class_before
next_attack_options = [(move.end_index, target) for target in board_after.get_capturable_pieces(move.end_index)]
if (not next_attack_options):
capture_sequences.append((prev_moves + [move]))
for (attacker, target) in next_attack_options:
queue.extend([(board_after, CaptureMove(attacker, landing), (prev_moves + [move])) for landing in board_after.get_available_capture_landing_positions(attacker, target)])
return capture_sequences
|
def get_available_moves(self, player):
'\n For the specified player, get the list of all allowed moves that are applicable\n to this board according to the game rules.\n '
result = []
capture_sequences = self.get_capture_sequence_candidates(player)
if (not capture_sequences):
for source in self.get_player_squares(player):
result.extend([ForwardMove(source, destination) for destination in self.get_free_movement_destinations(source)])
else:
max_seq_length = max((len(seq) for seq in capture_sequences))
result.extend([(ComboCaptureMove(seq) if (len(seq) > 1) else seq[0]) for seq in capture_sequences if (len(seq) == max_seq_length)])
return result
| -5,256,897,743,708,423,000
|
For the specified player, get the list of all allowed moves that are applicable
to this board according to the game rules.
|
libcheckers/movement.py
|
get_available_moves
|
YuriyGuts/libcheckers
|
python
|
def get_available_moves(self, player):
'\n For the specified player, get the list of all allowed moves that are applicable\n to this board according to the game rules.\n '
result = []
capture_sequences = self.get_capture_sequence_candidates(player)
if (not capture_sequences):
for source in self.get_player_squares(player):
result.extend([ForwardMove(source, destination) for destination in self.get_free_movement_destinations(source)])
else:
max_seq_length = max((len(seq) for seq in capture_sequences))
result.extend([(ComboCaptureMove(seq) if (len(seq) > 1) else seq[0]) for seq in capture_sequences if (len(seq) == max_seq_length)])
return result
|
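A minimal usage sketch tying the libcheckers movement records above together. It assumes Board can be constructed with no arguments and that Player and PieceClass live in a libcheckers.enum module; neither assumption is confirmed by these excerpts, and the square numbers are purely illustrative.

from libcheckers.movement import Board            # import path taken from the records above
from libcheckers.enum import Player, PieceClass   # module path assumed

board = Board()                                   # no-arg constructor assumed
board.add_piece(28, Player.WHITE, PieceClass.MAN) # illustrative squares
board.add_piece(23, Player.BLACK, PieceClass.MAN)

# When any capture exists, get_available_moves returns only the longest
# capture sequences; otherwise it lists every free ForwardMove.
for move in board.get_available_moves(Player.WHITE):
    print(type(move).__name__, vars(move))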
def check_game_over(self, player_turn):
"\n Check if the game board is in a terminal state from the specified player's point of view.\n (e.g. a certain player has won or lost, or there is a draw).\n "
white_moves = self.get_available_moves(Player.WHITE)
black_moves = self.get_available_moves(Player.BLACK)
if ((player_turn == Player.WHITE) and (not white_moves)):
return GameOverReason.BLACK_WON
if ((player_turn == Player.BLACK) and (not black_moves)):
return GameOverReason.WHITE_WON
white_squares = self.get_player_squares(Player.WHITE)
black_squares = self.get_player_squares(Player.BLACK)
only_one_king_each = ((len(white_squares) == 1) and (len(black_squares) == 1) and (self.piece_class[white_squares[0]] == PieceClass.KING) and (self.piece_class[black_squares[0]] == PieceClass.KING) and (not self.get_capturable_pieces(white_squares[0])) and (not self.get_capturable_pieces(black_squares[0])))
if only_one_king_each:
return GameOverReason.DRAW
return None
| -3,846,133,922,372,637,700
|
Check if the game board is in a terminal state from the specified player's point of view.
(e.g. a certain player has won or lost, or there is a draw).
|
libcheckers/movement.py
|
check_game_over
|
YuriyGuts/libcheckers
|
python
|
def check_game_over(self, player_turn):
"\n Check if the game board is in a terminal state from the specified player's point of view.\n (e.g. a certain player has won or lost, or there is a draw).\n "
white_moves = self.get_available_moves(Player.WHITE)
black_moves = self.get_available_moves(Player.BLACK)
if ((player_turn == Player.WHITE) and (not white_moves)):
return GameOverReason.BLACK_WON
if ((player_turn == Player.BLACK) and (not black_moves)):
return GameOverReason.WHITE_WON
white_squares = self.get_player_squares(Player.WHITE)
black_squares = self.get_player_squares(Player.BLACK)
only_one_king_each = ((len(white_squares) == 1) and (len(black_squares) == 1) and (self.piece_class[white_squares[0]] == PieceClass.KING) and (self.piece_class[black_squares[0]] == PieceClass.KING) and (not self.get_capturable_pieces(white_squares[0])) and (not self.get_capturable_pieces(black_squares[0])))
if only_one_king_each:
return GameOverReason.DRAW
return None
|
def clone(self):
'\n Create an independent copy of this board.\n '
return deepcopy(self)
| 7,946,742,403,737,371,000
|
Create an independent copy of this board.
|
libcheckers/movement.py
|
clone
|
YuriyGuts/libcheckers
|
python
|
def clone(self):
'\n \n '
return deepcopy(self)
|
def __init__(self):
'\n Inits a new transaction.\n '
import revitron
bundle = script.get_bundle_name().replace('.pushbutton', '')
self.transaction = revitron.DB.Transaction(revitron.DOC, bundle)
self.transaction.Start()
| -3,308,001,345,332,747,300
|
Inits a new transaction.
|
revitron/transaction.py
|
__init__
|
YKato521/revitron-for-RevitPythonShell
|
python
|
def __init__(self):
'\n \n '
import revitron
bundle = script.get_bundle_name().replace('.pushbutton', )
self.transaction = revitron.DB.Transaction(revitron.DOC, bundle)
self.transaction.Start()
|
def commit(self):
'\n Commits the open transaction.\n '
self.transaction.Commit()
| 6,035,886,319,970,189,000
|
Commits the open transaction.
|
revitron/transaction.py
|
commit
|
YKato521/revitron-for-RevitPythonShell
|
python
|
def commit(self):
'\n \n '
self.transaction.Commit()
|
def rollback(self):
'\n Rolls back the open transaction.\n '
self.transaction.RollBack()
| -1,146,533,772,664,958,100
|
Rolls back the open transaction.
|
revitron/transaction.py
|
rollback
|
YKato521/revitron-for-RevitPythonShell
|
python
|
def rollback(self):
'\n \n '
self.transaction.RollBack()
|
@classmethod
def defaults(cls, *args):
'Get default arguments added to a parser by all ``*args``.'
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
| 3,302,161,372,159,137,000
|
Get default arguments added to a parser by all ``*args``.
|
onmt/utils/parse.py
|
defaults
|
ACL2020-Submission/ACL2020
|
python
|
@classmethod
def defaults(cls, *args):
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
|
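The defaults classmethod above leans on a small argparse idiom: parsing an empty argument list yields a namespace that contains nothing but each option's declared default. A standalone, standard-library-only illustration:

import argparse

def add_model_opts(parser):
    parser.add_argument('--layers', type=int, default=6)
    parser.add_argument('--dropout', type=float, default=0.1)

dummy_parser = argparse.ArgumentParser()
add_model_opts(dummy_parser)

# parse_known_args([]) tolerates unknown options and, given no input,
# returns only the declared defaults (plus the list of leftover args).
defaults, _leftover = dummy_parser.parse_known_args([])
print(defaults.layers, defaults.dropout)  # 6 0.1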
def __init__(self, hass: HomeAssistant, entry: ConfigEntry, client: GitHubAPI, repository: str) -> None:
'Initialize GitHub data update coordinator base class.'
self.config_entry = entry
self.repository = repository
self._client = client
super().__init__(hass, LOGGER, name=DOMAIN, update_interval=DEFAULT_UPDATE_INTERVAL)
| 5,124,193,583,015,939,000
|
Initialize GitHub data update coordinator base class.
|
homeassistant/components/github/coordinator.py
|
__init__
|
Arquiteto/core
|
python
|
def __init__(self, hass: HomeAssistant, entry: ConfigEntry, client: GitHubAPI, repository: str) -> None:
self.config_entry = entry
self.repository = repository
self._client = client
super().__init__(hass, LOGGER, name=DOMAIN, update_interval=DEFAULT_UPDATE_INTERVAL)
|
async def fetch_data(self) -> T:
'Fetch data from GitHub API.'
| -5,836,349,995,842,303,000
|
Fetch data from GitHub API.
|
homeassistant/components/github/coordinator.py
|
fetch_data
|
Arquiteto/core
|
python
|
async def fetch_data(self) -> T:
|
async def fetch_data(self) -> GitHubRepositoryModel:
'Get the latest data from GitHub.'
result = (await self._client.repos.get(self.repository))
return result.data
| 4,338,848,196,831,356,400
|
Get the latest data from GitHub.
|
homeassistant/components/github/coordinator.py
|
fetch_data
|
Arquiteto/core
|
python
|
async def fetch_data(self) -> GitHubRepositoryModel:
result = (await self._client.repos.get(self.repository))
return result.data
|
async def fetch_data(self) -> (GitHubReleaseModel | None):
'Get the latest data from GitHub.'
result = (await self._client.repos.releases.list(self.repository, **{'params': {'per_page': 1}}))
if (not result.data):
return None
for release in result.data:
if (not release.prerelease):
return release
return result.data[0]
| 5,219,791,198,494,509,000
|
Get the latest data from GitHub.
|
homeassistant/components/github/coordinator.py
|
fetch_data
|
Arquiteto/core
|
python
|
async def fetch_data(self) -> (GitHubReleaseModel | None):
result = (await self._client.repos.releases.list(self.repository, **{'params': {'per_page': 1}}))
if (not result.data):
return None
for release in result.data:
if (not release.prerelease):
return release
return result.data[0]
|
async def fetch_data(self) -> IssuesPulls:
'Get the latest data from GitHub.'
base_issue_response = (await self._client.repos.issues.list(self.repository, **{'params': {'per_page': 1}}))
pull_response = (await self._client.repos.pulls.list(self.repository, **{'params': {'per_page': 1}}))
pulls_count = (pull_response.last_page_number or 0)
issues_count = ((base_issue_response.last_page_number or 0) - pulls_count)
issue_last = (base_issue_response.data[0] if (issues_count != 0) else None)
if ((issue_last is not None) and issue_last.pull_request):
issue_response = (await self._client.repos.issues.list(self.repository))
for issue in issue_response.data:
if (not issue.pull_request):
issue_last = issue
break
return IssuesPulls(issues_count=issues_count, issue_last=issue_last, pulls_count=pulls_count, pull_last=(pull_response.data[0] if (pulls_count != 0) else None))
| -2,304,525,900,436,716,000
|
Get the latest data from GitHub.
|
homeassistant/components/github/coordinator.py
|
fetch_data
|
Arquiteto/core
|
python
|
async def fetch_data(self) -> IssuesPulls:
base_issue_response = (await self._client.repos.issues.list(self.repository, **{'params': {'per_page': 1}}))
pull_response = (await self._client.repos.pulls.list(self.repository, **{'params': {'per_page': 1}}))
pulls_count = (pull_response.last_page_number or 0)
issues_count = ((base_issue_response.last_page_number or 0) - pulls_count)
issue_last = (base_issue_response.data[0] if (issues_count != 0) else None)
if ((issue_last is not None) and issue_last.pull_request):
issue_response = (await self._client.repos.issues.list(self.repository))
for issue in issue_response.data:
if (not issue.pull_request):
issue_last = issue
break
return IssuesPulls(issues_count=issues_count, issue_last=issue_last, pulls_count=pulls_count, pull_last=(pull_response.data[0] if (pulls_count != 0) else None))
|
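The IssuesPulls coordinator above counts issues and pull requests without paging through them: it requests one item per page, reads the last page number from the pagination metadata (so pages == items), and subtracts the pull count because GitHub's issues endpoint also returns pull requests. A toy rendering of that arithmetic with hypothetical response values:

# Hypothetical stand-ins for the last_page_number fields of the two responses above.
issues_endpoint_last_page = 42   # per_page=1, issues endpoint (includes PRs)
pulls_endpoint_last_page = 15    # per_page=1, pulls endpoint

pulls_count = pulls_endpoint_last_page or 0
issues_count = (issues_endpoint_last_page or 0) - pulls_count
print(issues_count, pulls_count)  # 27 15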
async def fetch_data(self) -> (GitHubCommitModel | None):
'Get the latest data from GitHub.'
result = (await self._client.repos.list_commits(self.repository, **{'params': {'per_page': 1}}))
return (result.data[0] if result.data else None)
| -1,005,978,423,350,740,500
|
Get the latest data from GitHub.
|
homeassistant/components/github/coordinator.py
|
fetch_data
|
Arquiteto/core
|
python
|
async def fetch_data(self) -> (GitHubCommitModel | None):
result = (await self._client.repos.list_commits(self.repository, **{'params': {'per_page': 1}}))
return (result.data[0] if result.data else None)
|
def htCache(factory):
'Output the cache of a servlet factory.'
html = []
wr = html.append
cache = factory._classCache
keys = sorted(cache)
wr(('<p>Uniqueness: %s</p>' % factory.uniqueness()))
wr(('<p>Extensions: %s</p>' % ', '.join(map(repr, factory.extensions()))))
wr(('<p>Unique paths in the servlet cache: <strong>%d</strong> <input type="submit" name="flush_%s" value="Flush"></p>' % (len(keys), factory.name())))
wr('<p>Click any link to jump to the details for that path.</p>')
wr('<h5>Filenames:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>File</th><th>Directory</th></tr>')
paths = []
for key in keys:
(head, tail) = os.path.split(key)
path = dict(dir=head, base=tail, full=key)
paths.append(path)
paths.sort(key=(lambda p: (p['base'].lower(), p['dir'].lower())))
for path in paths:
wr(('<tr><td><a href="#id%s">%s</a></td><td>%s</td></tr>' % (id(path['full']), path['base'], path['dir'])))
wr('</table>')
wr('<h5>Full paths:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>Servlet path</th></tr>')
for key in keys:
wr(('<tr><td><a href="#%s">%s</a></td></tr>' % (id(key), key)))
wr('</table>')
wr('<h5>Details:</h5>')
wr('<table class="NiceTable">')
for path in paths:
wr(('<tr class="NoTable"><td colspan="2"><a id="id%s"></a><strong>%s</strong> - %s</td></tr>' % (id(path['full']), path['base'], path['dir'])))
record = cache[path['full']].copy()
record['path'] = path['full']
if (path['full'] in factory._threadsafeServletCache):
record['instances'] = 'one servlet instance (threadsafe)'
else:
record['instances'] = ('free reusable servlets: %d' % len(factory._servletPool))
wr(htRecord(record))
wr('</table>')
return '\n'.join(html)
| 2,626,865,298,280,618,000
|
Output the cache of a servlet factory.
|
WebKit/Admin/ServletCache.py
|
htCache
|
Cito/w4py
|
python
|
def htCache(factory):
html = []
wr = html.append
cache = factory._classCache
keys = sorted(cache)
wr(('<p>Uniqueness: %s</p>' % factory.uniqueness()))
wr(('<p>Extensions: %s</p>' % ', '.join(map(repr, factory.extensions()))))
wr(('<p>Unique paths in the servlet cache: <strong>%d</strong> <input type="submit" name="flush_%s" value="Flush"></p>' % (len(keys), factory.name())))
wr('<p>Click any link to jump to the details for that path.</p>')
wr('<h5>Filenames:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>File</th><th>Directory</th></tr>')
paths = []
for key in keys:
(head, tail) = os.path.split(key)
path = dict(dir=head, base=tail, full=key)
paths.append(path)
paths.sort(key=(lambda p: (p['base'].lower(), p['dir'].lower())))
for path in paths:
wr(('<tr><td><a href="#id%s">%s</a></td><td>%s</td></tr>' % (id(path['full']), path['base'], path['dir'])))
wr('</table>')
wr('<h5>Full paths:</h5>')
wr('<table class="NiceTable">')
wr('<tr><th>Servlet path</th></tr>')
for key in keys:
wr(('<tr><td><a href="#%s">%s</a></td></tr>' % (id(key), key)))
wr('</table>')
wr('<h5>Details:</h5>')
wr('<table class="NiceTable">')
for path in paths:
wr(('<tr class="NoTable"><td colspan="2"><a id="id%s"></a><strong>%s</strong> - %s</td></tr>' % (id(path['full']), path['base'], path['dir'])))
record = cache[path['full']].copy()
record['path'] = path['full']
if (path['full'] in factory._threadsafeServletCache):
record['instances'] = 'one servlet instance (threadsafe)'
else:
record['instances'] = ('free reusable servlets: %d' % len(factory._servletPool))
wr(htRecord(record))
wr('</table>')
return '\n'.join(html)
|
def register_modules(self, **kwargs):
' Registers modules in current module dictionary.\n '
self.module_dict.update(kwargs)
| -1,205,675,546,938,575,400
|
Registers modules in current module dictionary.
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
register_modules
|
1ucky40nc3/mednerf
|
python
|
def register_modules(self, **kwargs):
' \n '
self.module_dict.update(kwargs)
|
def save(self, filename, **kwargs):
' Saves the current module dictionary.\n\n Args:\n filename (str): name of output file\n '
if (not os.path.isabs(filename)):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for (k, v) in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
| -7,547,054,072,814,241,000
|
Saves the current module dictionary.
Args:
filename (str): name of output file
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
save
|
1ucky40nc3/mednerf
|
python
|
def save(self, filename, **kwargs):
' Saves the current module dictionary.\n\n Args:\n filename (str): name of output file\n '
if (not os.path.isabs(filename)):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for (k, v) in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
|
def load(self, filename):
'Loads a module dictionary from local file or url.\n \n Args:\n filename (str): name of saved module dictionary\n '
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
| -2,890,154,249,354,038,300
|
Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
load
|
1ucky40nc3/mednerf
|
python
|
def load(self, filename):
'Loads a module dictionary from local file or url.\n \n Args:\n filename (str): name of saved module dictionary\n '
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
|
def load_file(self, filename):
'Loads a module dictionary from file.\n \n Args:\n filename (str): name of saved module dictionary\n '
if (not os.path.isabs(filename)):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileNotFoundError
| -6,940,442,115,957,106,000
|
Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
load_file
|
1ucky40nc3/mednerf
|
python
|
def load_file(self, filename):
'Loads a module dictionary from file.\n \n Args:\n filename (str): name of saved module dictionary\n '
if (not os.path.isabs(filename)):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileNotFoundError
|
def load_url(self, url):
'Load a module dictionary from url.\n \n Args:\n url (str): url to saved model\n '
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
| 610,299,147,669,909,500
|
Load a module dictionary from url.
Args:
url (str): url to saved model
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
load_url
|
1ucky40nc3/mednerf
|
python
|
def load_url(self, url):
'Load a module dictionary from url.\n \n Args:\n url (str): url to saved model\n '
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
|
def parse_state_dict(self, state_dict):
'Parse state_dict of model and return scalars.\n \n Args:\n state_dict (dict): State dict of model\n '
for (k, v) in self.module_dict.items():
if (k in state_dict):
v.load_state_dict(state_dict[k])
else:
print(('Warning: Could not find %s in checkpoint!' % k))
scalars = {k: v for (k, v) in state_dict.items() if (k not in self.module_dict)}
return scalars
| -3,472,864,303,789,836,300
|
Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
|
graf-main/submodules/GAN_stability/gan_training/checkpoints.py
|
parse_state_dict
|
1ucky40nc3/mednerf
|
python
|
def parse_state_dict(self, state_dict):
'Parse state_dict of model and return scalars.\n \n Args:\n state_dict (dict): State dict of model\n '
for (k, v) in self.module_dict.items():
if (k in state_dict):
v.load_state_dict(state_dict[k])
else:
print(('Warning: Could not find %s in checkpoint!' % k))
scalars = {k: v for (k, v) in state_dict.items() if (k not in self.module_dict)}
return scalars
|
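The checkpoints records above (register_modules, save, load, parse_state_dict) form a small checkpoint helper: every registered module is saved as its state_dict, extra keyword arguments ride along as scalars, and load returns those scalars after restoring the modules. A usage sketch; the class name CheckpointIO and the checkpoint_dir constructor argument are assumptions, since the constructor is not among these excerpts.

import torch
import torch.nn as nn
from gan_training.checkpoints import CheckpointIO   # class name and import path assumed

checkpoint_io = CheckpointIO(checkpoint_dir='./chkpts')   # constructor signature assumed

generator = nn.Linear(128, 784)
g_optimizer = torch.optim.Adam(generator.parameters())
checkpoint_io.register_modules(generator=generator, g_optimizer=g_optimizer)

# Registered modules are stored via their state_dict(); extra kwargs become scalars.
checkpoint_io.save('model_00010.pt', it=10, epoch=1)

# load() restores the modules in place and returns everything else as a dict.
scalars = checkpoint_io.load('model_00010.pt')
print(scalars.get('it'), scalars.get('epoch'))   # 10 1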
def __init__(self, k: int) -> PolynomialFitting:
'\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n '
super().__init__()
self.deg_ = k
(self.vander_, self.vander_linear_) = (None, LinearRegression(False))
| 2,860,509,653,220,064,000
|
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
|
IMLearn/learners/regressors/polynomial_fitting.py
|
__init__
|
shirlevy007/IML.HUJI
|
python
|
def __init__(self, k: int) -> PolynomialFitting:
'\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n '
super().__init__()
self.deg_ = k
(self.vander_, self.vander_linear_) = (None, LinearRegression(False))
|
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
'\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n '
self.vander_linear_.fit(self.__transform(X), y)
| 1,178,176,510,061,192,200
|
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
|
IMLearn/learners/regressors/polynomial_fitting.py
|
_fit
|
shirlevy007/IML.HUJI
|
python
|
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
'\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n '
self.vander_linear_.fit(self.__transform(X), y)
|
def _predict(self, X: np.ndarray) -> np.ndarray:
'\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n '
return self.vander_linear_.predict(self.__transform(X))
| 3,464,361,381,715,352,000
|
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
|
IMLearn/learners/regressors/polynomial_fitting.py
|
_predict
|
shirlevy007/IML.HUJI
|
python
|
def _predict(self, X: np.ndarray) -> np.ndarray:
'\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n '
return self.vander_linear_.predict(self.__transform(X))
|
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
'\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n '
return self.vander_linear_.loss(self.__transform(X), y)
| -4,140,011,508,131,796,500
|
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
|
IMLearn/learners/regressors/polynomial_fitting.py
|
_loss
|
shirlevy007/IML.HUJI
|
python
|
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
'\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n '
return self.vander_linear_.loss(self.__transform(X), y)
|
def __transform(self, X: np.ndarray) -> np.ndarray:
'\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n '
X_vander = np.vander(X, (self.deg_ + 1), increasing=True)
return X_vander
| -2,425,109,902,432,135,700
|
Transform given input according to the univariate polynomial transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
|
IMLearn/learners/regressors/polynomial_fitting.py
|
__transform
|
shirlevy007/IML.HUJI
|
python
|
def __transform(self, X: np.ndarray) -> np.ndarray:
'\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n '
X_vander = np.vander(X, (self.deg_ + 1), increasing=True)
return X_vander
|
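The __transform step above is a plain Vandermonde expansion: each sample x becomes the row [1, x, x^2, ..., x^k], after which least squares (fitted without an extra intercept, since the first column already is one) recovers the polynomial coefficients. A numpy-only illustration of the same idea:

import numpy as np

k = 2
x = np.array([0.0, 1.0, 2.0, 3.0])
y = 3 + 2 * x - x ** 2                            # true coefficients [3, 2, -1]

X_vander = np.vander(x, k + 1, increasing=True)   # columns: 1, x, x^2
coeffs, *_ = np.linalg.lstsq(X_vander, y, rcond=None)
print(np.round(coeffs, 6))                        # [ 3.  2. -1.]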
def __init__(self, config):
'\n Initialize SGD Env\n\n Parameters\n -------\n config : objdict\n Environment configuration\n '
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.env_seed = config.seed
self.seed(self.env_seed)
self.use_cuda = ((not self.no_cuda) and torch.cuda.is_available())
self.device = torch.device(('cuda' if self.use_cuda else 'cpu'))
self.training_validation_ratio = 0.8
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.model = None
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = torch.nn.NLLLoss(reduction='none')
self.loss_function = extend(self.loss_function)
self.initial_lr = (config.lr * torch.ones(1, device=self.device, requires_grad=False))
self.current_lr = (config.lr * torch.ones(1, device=self.device, requires_grad=False))
self.beta1 = config.beta1
self.beta2 = config.beta2
self.m = 0
self.v = 0
self.epsilon = 1e-08
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_descent = None
self.learning_rate = 0.001
self.predictiveChangeVarDiscountedAverage = torch.zeros(1, device=self.device, requires_grad=False)
self.predictiveChangeVarUncertainty = torch.zeros(1, device=self.device, requires_grad=False)
self.lossVarDiscountedAverage = torch.zeros(1, device=self.device, requires_grad=False)
self.lossVarUncertainty = torch.zeros(1, device=self.device, requires_grad=False)
self.discount_factor = 0.9
self.firstOrderMomentum = torch.zeros(1, device=self.device, requires_grad=False)
self.secondOrderMomentum = torch.zeros(1, device=self.device, requires_grad=False)
self.writer = None
if ('reward_function' in config.keys()):
self.get_reward = config['reward_function']
else:
self.get_reward = self.get_default_reward
if ('state_method' in config.keys()):
self.get_state = config['state_method']
else:
self.get_state = self.get_default_state
| -5,901,484,696,904,247,000
|
Initialize SGD Env
Parameters
-------
config : objdict
Environment configuration
|
dacbench/envs/sgd.py
|
__init__
|
goktug97/DACBench
|
python
|
def __init__(self, config):
'\n Initialize SGD Env\n\n Parameters\n -------\n config : objdict\n Environment configuration\n '
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.env_seed = config.seed
self.seed(self.env_seed)
self.use_cuda = ((not self.no_cuda) and torch.cuda.is_available())
self.device = torch.device(('cuda' if self.use_cuda else 'cpu'))
self.training_validation_ratio = 0.8
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.model = None
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = torch.nn.NLLLoss(reduction='none')
self.loss_function = extend(self.loss_function)
self.initial_lr = (config.lr * torch.ones(1, device=self.device, requires_grad=False))
self.current_lr = (config.lr * torch.ones(1, device=self.device, requires_grad=False))
self.beta1 = config.beta1
self.beta2 = config.beta2
self.m = 0
self.v = 0
self.epsilon = 1e-08
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_descent = None
self.learning_rate = 0.001
self.predictiveChangeVarDiscountedAverage = torch.zeros(1, device=self.device, requires_grad=False)
self.predictiveChangeVarUncertainty = torch.zeros(1, device=self.device, requires_grad=False)
self.lossVarDiscountedAverage = torch.zeros(1, device=self.device, requires_grad=False)
self.lossVarUncertainty = torch.zeros(1, device=self.device, requires_grad=False)
self.discount_factor = 0.9
self.firstOrderMomentum = torch.zeros(1, device=self.device, requires_grad=False)
self.secondOrderMomentum = torch.zeros(1, device=self.device, requires_grad=False)
self.writer = None
if ('reward_function' in config.keys()):
self.get_reward = config['reward_function']
else:
self.get_reward = self.get_default_reward
if ('state_method' in config.keys()):
self.get_state = config['state_method']
else:
self.get_state = self.get_default_state
|
def seed(self, seed=None):
'\n Set rng seed\n\n Parameters\n ----------\n seed:\n seed for rng\n '
(_, seed) = seeding.np_random(seed)
if (seed is not None):
torch.manual_seed(seed)
np.random.seed(seed)
return [seed]
| -1,746,096,113,128,371,000
|
Set rng seed
Parameters
----------
seed:
seed for rng
|
dacbench/envs/sgd.py
|
seed
|
goktug97/DACBench
|
python
|
def seed(self, seed=None):
'\n Set rng seed\n\n Parameters\n ----------\n seed:\n seed for rng\n '
(_, seed) = seeding.np_random(seed)
if (seed is not None):
torch.manual_seed(seed)
np.random.seed(seed)
return [seed]
|
def step(self, action):
'\n Execute environment step\n\n Parameters\n ----------\n action : list\n action to execute\n\n Returns\n -------\n np.array, float, bool, dict\n state, reward, done, info\n '
done = super(SGDEnv, self).step_()
self.step_count += 1
index = 0
if (not isinstance(action, float)):
action = action[0]
action = torch.Tensor([action]).to(self.device)
new_lr = (10 ** (- action))
self.current_lr = new_lr
delta_w = torch.mul(new_lr, (self.firstOrderMomentum / (torch.sqrt(self.secondOrderMomentum) + self.epsilon)))
for (i, p) in enumerate(self.model.parameters()):
layer_size = self.layer_sizes[i]
p.data = (p.data - delta_w[index:(index + layer_size)].reshape(shape=p.data.shape))
index += layer_size
self._set_zero_grad()
reward = self.get_reward(self)
return (self.get_state(self), reward, done, {})
| 483,047,106,780,406,660
|
Execute environment step
Parameters
----------
action : list
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, info
|
dacbench/envs/sgd.py
|
step
|
goktug97/DACBench
|
python
|
def step(self, action):
'\n Execute environment step\n\n Parameters\n ----------\n action : list\n action to execute\n\n Returns\n -------\n np.array, float, bool, dict\n state, reward, done, info\n '
done = super(SGDEnv, self).step_()
self.step_count += 1
index = 0
if (not isinstance(action, float)):
action = action[0]
action = torch.Tensor([action]).to(self.device)
new_lr = (10 ** (- action))
self.current_lr = new_lr
delta_w = torch.mul(new_lr, (self.firstOrderMomentum / (torch.sqrt(self.secondOrderMomentum) + self.epsilon)))
for (i, p) in enumerate(self.model.parameters()):
layer_size = self.layer_sizes[i]
p.data = (p.data - delta_w[index:(index + layer_size)].reshape(shape=p.data.shape))
index += layer_size
self._set_zero_grad()
reward = self.get_reward(self)
return (self.get_state(self), reward, done, {})
|
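The step above turns the agent's action into a learning rate (lr = 10 ** -action) and then applies an Adam-style update direction m / (sqrt(v) + eps) to every parameter tensor. A small numpy sketch of one such update; the beta values and gradients are illustrative, and in the environment the momenta come from _get_momentum rather than being computed inline:

import numpy as np

action = 2.0                               # agent output -> lr = 10 ** -2 = 0.01
lr = 10.0 ** (-action)

grad = np.array([0.3, -0.8, 0.1])          # illustrative per-parameter gradients
m = 0.9 * 0.0 + (1 - 0.9) * grad           # first-order momentum (beta1 = 0.9, previous m = 0)
v = 0.999 * 0.0 + (1 - 0.999) * grad ** 2  # second-order momentum (beta2 = 0.999, previous v = 0)

eps = 1e-08
delta_w = lr * m / (np.sqrt(v) + eps)      # same form as delta_w in step()
params = np.zeros(3) - delta_w             # mirrors p.data = p.data - delta_w
print(params)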
def reset(self):
'\n Reset environment\n\n Returns\n -------\n np.array\n Environment state\n '
super(SGDEnv, self).reset_()
dataset = self.instance[0]
instance_seed = self.instance[1]
construct_model = self.instance[2]
self.seed(instance_seed)
self.model = construct_model().to(self.device)
self.training_validation_ratio = 0.8
train_dataloader_args = {'batch_size': self.batch_size}
validation_dataloader_args = {'batch_size': self.validation_batch_size}
if self.use_cuda:
param = {'num_workers': 1, 'pin_memory': True, 'shuffle': True}
train_dataloader_args.update(param)
validation_dataloader_args.update(param)
if (dataset == 'MNIST'):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST('../data', train=True, download=True, transform=transform)
else:
raise NotImplementedError
training_dataset_limit = math.floor((len(train_dataset) * self.training_validation_ratio))
validation_dataset_limit = len(train_dataset)
self.train_dataset = torch.utils.data.Subset(train_dataset, range(0, (training_dataset_limit - 1)))
self.validation_dataset = torch.utils.data.Subset(train_dataset, range(training_dataset_limit, validation_dataset_limit))
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, **train_dataloader_args)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, **validation_dataloader_args)
self.train_batch_index = 0
self.epoch_index = 0
self.train_loader_it = iter(self.train_loader)
self.validation_loader_it = iter(self.validation_loader)
self.parameter_count = 0
self.layer_sizes = []
for p in self.model.parameters():
layer_size = reduce((lambda x, y: (x * y)), p.shape)
self.layer_sizes.append(layer_size)
self.parameter_count += layer_size
self.model = extend(self.model)
self._set_zero_grad()
self.model.train()
self.current_training_loss = None
self.loss_batch = None
self.m = 0
self.v = 0
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.current_lr = self.initial_lr
self.prev_descent = torch.zeros((self.parameter_count,), device=self.device, requires_grad=False)
self.get_default_reward(self)
return self.get_state(self)
| -2,251,031,008,396,713,500
|
Reset environment
Returns
-------
np.array
Environment state
|
dacbench/envs/sgd.py
|
reset
|
goktug97/DACBench
|
python
|
def reset(self):
'\n Reset environment\n\n Returns\n -------\n np.array\n Environment state\n '
super(SGDEnv, self).reset_()
dataset = self.instance[0]
instance_seed = self.instance[1]
construct_model = self.instance[2]
self.seed(instance_seed)
self.model = construct_model().to(self.device)
self.training_validation_ratio = 0.8
train_dataloader_args = {'batch_size': self.batch_size}
validation_dataloader_args = {'batch_size': self.validation_batch_size}
if self.use_cuda:
param = {'num_workers': 1, 'pin_memory': True, 'shuffle': True}
train_dataloader_args.update(param)
validation_dataloader_args.update(param)
if (dataset == 'MNIST'):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST('../data', train=True, download=True, transform=transform)
else:
raise NotImplementedError
training_dataset_limit = math.floor((len(train_dataset) * self.training_validation_ratio))
validation_dataset_limit = len(train_dataset)
self.train_dataset = torch.utils.data.Subset(train_dataset, range(0, (training_dataset_limit - 1)))
self.validation_dataset = torch.utils.data.Subset(train_dataset, range(training_dataset_limit, validation_dataset_limit))
self.train_loader = torch.utils.data.DataLoader(self.train_dataset, **train_dataloader_args)
self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, **validation_dataloader_args)
self.train_batch_index = 0
self.epoch_index = 0
self.train_loader_it = iter(self.train_loader)
self.validation_loader_it = iter(self.validation_loader)
self.parameter_count = 0
self.layer_sizes = []
for p in self.model.parameters():
layer_size = reduce((lambda x, y: (x * y)), p.shape)
self.layer_sizes.append(layer_size)
self.parameter_count += layer_size
self.model = extend(self.model)
self._set_zero_grad()
self.model.train()
self.current_training_loss = None
self.loss_batch = None
self.m = 0
self.v = 0
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.current_lr = self.initial_lr
self.prev_descent = torch.zeros((self.parameter_count,), device=self.device, requires_grad=False)
self.get_default_reward(self)
return self.get_state(self)
|
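The reset above splits one torchvision dataset into train and validation parts with torch.utils.data.Subset over index ranges. A self-contained illustration of the same split; note that the reset code uses range(0, training_dataset_limit - 1), which silently drops one training sample compared with the plain range(0, limit) below.

import torch
from torch.utils.data import TensorDataset, Subset, DataLoader

full = TensorDataset(torch.arange(100).float().unsqueeze(1), torch.arange(100))
limit = int(len(full) * 0.8)

train = Subset(full, range(0, limit))                # first 80% of the indices
validation = Subset(full, range(limit, len(full)))   # remaining 20%

train_loader = DataLoader(train, batch_size=16, shuffle=True)
print(len(train), len(validation), len(train_loader))   # 80 20 5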
def close(self):
'\n No additional cleanup necessary\n\n Returns\n -------\n bool\n Cleanup flag\n '
return True
| -9,155,946,635,410,039,000
|
No additional cleanup necessary
Returns
-------
bool
Cleanup flag
|
dacbench/envs/sgd.py
|
close
|
goktug97/DACBench
|
python
|
def close(self):
'\n No additional cleanup necessary\n\n Returns\n -------\n bool\n Cleanup flag\n '
return True
|
def render(self, mode: str='human'):
'\n Render env in human mode\n\n Parameters\n ----------\n mode : str\n Execution mode\n '
if (mode != 'human'):
raise NotImplementedError
pass
| -4,692,031,195,429,529,000
|
Render env in human mode
Parameters
----------
mode : str
Execution mode
|
dacbench/envs/sgd.py
|
render
|
goktug97/DACBench
|
python
|
def render(self, mode: str='human'):
'\n Render env in human mode\n\n Parameters\n ----------\n mode : str\n Execution mode\n '
if (mode != 'human'):
raise NotImplementedError
pass
|
def get_default_state(self, _):
'\n Gather state description\n\n Returns\n -------\n dict\n Environment state\n\n '
gradients = self._get_gradients()
(self.firstOrderMomentum, self.secondOrderMomentum) = self._get_momentum(gradients)
(predictiveChangeVarDiscountedAverage, predictiveChangeVarUncertainty) = self._get_predictive_change_features(self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum)
(lossVarDiscountedAverage, lossVarUncertainty) = self._get_loss_features()
state = {'predictiveChangeVarDiscountedAverage': predictiveChangeVarDiscountedAverage, 'predictiveChangeVarUncertainty': predictiveChangeVarUncertainty, 'lossVarDiscountedAverage': lossVarDiscountedAverage, 'lossVarUncertainty': lossVarUncertainty, 'currentLR': self.current_lr, 'trainingLoss': self.current_training_loss, 'validationLoss': self.current_validation_loss}
return state
| 150,674,990,009,349,800
|
Gather state description
Returns
-------
dict
Environment state
|
dacbench/envs/sgd.py
|
get_default_state
|
goktug97/DACBench
|
python
|
def get_default_state(self, _):
'\n Gather state description\n\n Returns\n -------\n dict\n Environment state\n\n '
gradients = self._get_gradients()
(self.firstOrderMomentum, self.secondOrderMomentum) = self._get_momentum(gradients)
(predictiveChangeVarDiscountedAverage, predictiveChangeVarUncertainty) = self._get_predictive_change_features(self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum)
(lossVarDiscountedAverage, lossVarUncertainty) = self._get_loss_features()
state = {'predictiveChangeVarDiscountedAverage': predictiveChangeVarDiscountedAverage, 'predictiveChangeVarUncertainty': predictiveChangeVarUncertainty, 'lossVarDiscountedAverage': lossVarDiscountedAverage, 'lossVarUncertainty': lossVarUncertainty, 'currentLR': self.current_lr, 'trainingLoss': self.current_training_loss, 'validationLoss': self.current_validation_loss}
return state
|
def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=('^http',), capacity=None):
'\n constructor: use a BloomFilter instance if capacity is given, otherwise a plain set\n '
self._re_black_list = ([re.compile(pattern, flags=re.IGNORECASE) for pattern in black_patterns] if black_patterns else [])
self._re_white_list = ([re.compile(pattern, flags=re.IGNORECASE) for pattern in white_patterns] if white_patterns else [])
self._url_set = (set() if (not capacity) else None)
self._bloom_filter = (pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001) if capacity else None)
return
| 390,878,835,707,812,300
|
constructor: use a BloomFilter instance if capacity is given, otherwise a plain set
|
spider/utilities/util_urlfilter.py
|
__init__
|
charlesXu86/PSpider
|
python
|
def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=('^http',), capacity=None):
'\n \n '
self._re_black_list = ([re.compile(pattern, flags=re.IGNORECASE) for pattern in black_patterns] if black_patterns else [])
self._re_white_list = ([re.compile(pattern, flags=re.IGNORECASE) for pattern in white_patterns] if white_patterns else [])
self._url_set = (set() if (not capacity) else None)
self._bloom_filter = (pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001) if capacity else None)
return
|
def update(self, url_list):
'\n update this urlfilter using url_list\n '
if (self._url_set is not None):
self._url_set.update(url_list)
else:
for url in url_list:
self._bloom_filter.add(url)
return
| 6,770,847,336,451,332,000
|
update this urlfilter using url_list
|
spider/utilities/util_urlfilter.py
|
update
|
charlesXu86/PSpider
|
python
|
def update(self, url_list):
'\n \n '
if (self._url_set is not None):
self._url_set.update(url_list)
else:
for url in url_list:
self._bloom_filter.add(url)
return
|
def check(self, url):
'\n check the url based on self._re_black_list and self._re_white_list\n '
for re_black in self._re_black_list:
if re_black.search(url):
return False
for re_white in self._re_white_list:
if re_white.search(url):
return True
return (False if self._re_white_list else True)
| -5,244,254,235,561,446,000
|
check the url based on self._re_black_list and self._re_white_list
|
spider/utilities/util_urlfilter.py
|
check
|
charlesXu86/PSpider
|
python
|
def check(self, url):
'\n \n '
for re_black in self._re_black_list:
if re_black.search(url):
return False
for re_white in self._re_white_list:
if re_white.search(url):
return True
return (False if self._re_white_list else True)
|
def check_and_add(self, url):
"\n check the url to make sure that the url hasn't been fetched, and add url to urlfilter\n "
result = False
if self.check(url):
if (self._url_set is not None):
result = (url not in self._url_set)
self._url_set.add(url)
else:
result = (not self._bloom_filter.add(url))
return result
| 4,728,054,450,603,104,000
|
check the url to make sure that the url hasn't been fetched, and add url to urlfilter
|
spider/utilities/util_urlfilter.py
|
check_and_add
|
charlesXu86/PSpider
|
python
|
def check_and_add(self, url):
"\n \n "
result = False
if self.check(url):
if (self._url_set is not None):
result = (url not in self._url_set)
self._url_set.add(url)
else:
result = (not self._bloom_filter.add(url))
return result
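A minimal usage sketch for the URL filter whose methods appear above; the class name UrlFilter and the regex patterns are assumptions for illustration, since only the individual methods are shown here.

url_filter = UrlFilter(black_patterns=(r'\.(jpg|png|gif)$',), white_patterns=('^http',), capacity=None)
urls = ['http://example.com/page', 'http://example.com/image.jpg', 'http://example.com/page']
for url in urls:
    if url_filter.check_and_add(url):
        print('fetch:', url)   # passes the black/white lists and has not been seen before
    else:
        print('skip:', url)    # blacklisted, not whitelisted, or already added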
|
def get_extra_rules(use_extra: bool, extra_json_path: Path_Str) -> Optional[ActionsDict]:
'Helper to provide custom (project level/user level) anonymization\n rules as a mapping of tags -> action function.\n\n Args:\n use_extra (bool): If use extra rules.\n extra_json_path (Path_Str): Path to extra rules json file.\n It should be flat json with action as a key and list of tags as value.\n\n Returns:\n Optional[ActionsDict]: extra rules mapping (tags -> action function)\n '
extra_rules = None
if use_extra:
with open(extra_json_path, 'r') as fout:
extra_rules = json.load(fout)
for key in extra_rules:
tag_list = extra_rules[key]
tag_list = [tuple(elem) for elem in tag_list]
extra_rules[key] = tag_list
extra_rules = initialize_actions(extra_rules)
return extra_rules
| -9,007,312,869,837,651,000
|
Helper to provide custom (project level/user level) anonymization
rules as a mapping of tags -> action function.
Args:
use_extra (bool): Whether to use extra rules.
extra_json_path (Path_Str): Path to extra rules json file.
It should be a flat JSON file with an action as key and a list of tags as value.
Returns:
Optional[ActionsDict]: extra rules mapping (tags -> action function)
|
dicomanonymizer/batch_anonymizer.py
|
get_extra_rules
|
ademyanchuk/dicom-anonymizer
|
python
|
def get_extra_rules(use_extra: bool, extra_json_path: Path_Str) -> Optional[ActionsDict]:
'Helper to provide custom (project level/user level) anonymization\n rules as a mapping of tags -> action function.\n\n Args:\n use_extra (bool): If use extra rules.\n extra_json_path (Path_Str): Path to extra rules json file.\n It should be flat json with action as a key and list of tags as value.\n\n Returns:\n Optional[ActionsDict]: extra rules mapping (tags -> action function)\n '
extra_rules = None
if use_extra:
with open(extra_json_path, 'r') as fout:
extra_rules = json.load(fout)
for key in extra_rules:
tag_list = extra_rules[key]
tag_list = [tuple(elem) for elem in tag_list]
extra_rules[key] = tag_list
extra_rules = initialize_actions(extra_rules)
return extra_rules
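A hedged usage sketch for the helper above. The JSON layout follows the docstring (action name mapped to a list of tags); the action names, tag values, and file name are illustrative only, and the final mapping depends on what initialize_actions does with them.

# extra_rules.json (illustrative content only):
# {
#     "delete": [[16, 16], [16, 4176]],
#     "keep":   [[8, 96]]
# }
rules = get_extra_rules(use_extra=True, extra_json_path='extra_rules.json')
if rules is not None:
    for tag, action in rules.items():
        print(tag, '->', action)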
|
def anonymize_dicom_folder(in_path: Path_Str, out_path: Path_Str, debug: bool=False, **kwargs):
'Anonymize dicom files in `in_path`, if `in_path` doesn\'t\n contain dicom files, will do nothing. Debug == True will do\n sort of dry run to check if all good for the large data storages\n\n Args:\n in_path (Path_Str): path to the folder containing dicom files\n out_path (Path_Str): path to the folder there anonymized copies\n will be saved\n debuf (bool): if true, will do a "dry" run\n '
in_path = to_Path(in_path)
try_valid_dir(in_path)
out_path = to_Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logger.info(f'Processing: {in_path}')
in_files = [p for p in in_path.iterdir() if p.is_file()]
if (not in_files):
logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
return
if debug:
f_in = random.choice(in_files)
f_out = (out_path / f_in.name)
try:
anonymize_dicom_file(f_in, f_out)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
else:
for f_in in in_files:
f_out = (out_path / f_in.name)
try:
anonymize_dicom_file(f_in, f_out, **kwargs)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
| 8,963,746,500,322,977,000
|
Anonymize dicom files in `in_path`; if `in_path` doesn't
contain dicom files, do nothing. Debug == True will do
a sort of dry run to check that everything is fine for large data storages
Args:
in_path (Path_Str): path to the folder containing dicom files
out_path (Path_Str): path to the folder where anonymized copies
will be saved
debug (bool): if true, will do a "dry" run
|
dicomanonymizer/batch_anonymizer.py
|
anonymize_dicom_folder
|
ademyanchuk/dicom-anonymizer
|
python
|
def anonymize_dicom_folder(in_path: Path_Str, out_path: Path_Str, debug: bool=False, **kwargs):
'Anonymize dicom files in `in_path`, if `in_path` doesn\'t\n contain dicom files, will do nothing. Debug == True will do\n sort of dry run to check if all good for the large data storages\n\n Args:\n in_path (Path_Str): path to the folder containing dicom files\n out_path (Path_Str): path to the folder there anonymized copies\n will be saved\n debuf (bool): if true, will do a "dry" run\n '
in_path = to_Path(in_path)
try_valid_dir(in_path)
out_path = to_Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logger.info(f'Processing: {in_path}')
in_files = [p for p in in_path.iterdir() if p.is_file()]
if (not in_files):
logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
return
if debug:
f_in = random.choice(in_files)
f_out = (out_path / f_in.name)
try:
anonymize_dicom_file(f_in, f_out)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
else:
for f_in in in_files:
f_out = (out_path / f_in.name)
try:
anonymize_dicom_file(f_in, f_out, **kwargs)
except Exception as e:
logger.info(f_in)
logger.exception(e)
raise e
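A short usage sketch; the paths are placeholders. With debug=True only one randomly chosen file is anonymized, which matches the docstring's notion of a dry run for large data storages.

anonymize_dicom_folder('/data/raw/study_001', '/data/anon/study_001', debug=True)   # dry run on one file
anonymize_dicom_folder('/data/raw/study_001', '/data/anon/study_001')               # full folder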
|
def anonymize_root_folder(in_root: Path_Str, out_root: Path_Str, **kwargs):
'The fuction will get all nested folders from `in_root`\n and perform anonymization of all folders containg dicom-files\n Will recreate the `in_root` folders structure in the `out_root`\n\n Args:\n in_root (Path_Str): source root folder (presumably has\n some dicom-files inide, maybe nested)\n out_root (Path_Str): destination root folder, will create\n if not exists\n '
in_root = to_Path(in_root)
try_valid_dir(in_root)
out_root = to_Path(out_root)
out_root.mkdir(parents=True, exist_ok=True)
in_dirs = get_dirs(in_root)
state = AnonState(_STATE_PATH)
state.init_state()
state.load_state()
def get_tags_callback(dataset: pydicom.Dataset):
state.tag_counter.update(dataset.dir())
    logger.info('Processed paths will be added to the cache; if the cache exists and already contains some paths, they will be skipped')
    logger.info(f'If you need to process the data again, please delete the files at {_STATE_PATH}')
try:
for in_d in in_dirs:
rel_path = in_d.relative_to(in_root)
if (str(rel_path) in state.visited_folders):
logger.info(f'{in_d} path is in cache, skipping')
continue
else:
out_d = (out_root / rel_path)
anonymize_dicom_folder(in_d, out_d, ds_callback=get_tags_callback, **kwargs)
state.visited_folders[str(rel_path)] = True
except Exception as e:
raise e
finally:
prev_state = AnonState(_STATE_PATH)
prev_state.init_state()
prev_state.load_state()
new_tags = set(state.tag_counter.keys()).difference(prev_state.tag_counter.keys())
if new_tags:
logger.warning(f'During the anonymization new tags: {new_tags} were present')
else:
            logger.info('No new tags were present')
state.save_state()
| -6,255,924,792,182,228,000
|
The function will get all nested folders from `in_root`
and perform anonymization of all folders containing dicom-files
Will recreate the `in_root` folder structure in the `out_root`
Args:
in_root (Path_Str): source root folder (presumably has
some dicom-files inside, maybe nested)
out_root (Path_Str): destination root folder, will be created
if it does not exist
|
dicomanonymizer/batch_anonymizer.py
|
anonymize_root_folder
|
ademyanchuk/dicom-anonymizer
|
python
|
def anonymize_root_folder(in_root: Path_Str, out_root: Path_Str, **kwargs):
'The fuction will get all nested folders from `in_root`\n and perform anonymization of all folders containg dicom-files\n Will recreate the `in_root` folders structure in the `out_root`\n\n Args:\n in_root (Path_Str): source root folder (presumably has\n some dicom-files inide, maybe nested)\n out_root (Path_Str): destination root folder, will create\n if not exists\n '
in_root = to_Path(in_root)
try_valid_dir(in_root)
out_root = to_Path(out_root)
out_root.mkdir(parents=True, exist_ok=True)
in_dirs = get_dirs(in_root)
state = AnonState(_STATE_PATH)
state.init_state()
state.load_state()
def get_tags_callback(dataset: pydicom.Dataset):
state.tag_counter.update(dataset.dir())
    logger.info('Processed paths will be added to the cache; if the cache exists and already contains some paths, they will be skipped')
    logger.info(f'If you need to process the data again, please delete the files at {_STATE_PATH}')
try:
for in_d in in_dirs:
rel_path = in_d.relative_to(in_root)
if (str(rel_path) in state.visited_folders):
logger.info(f'{in_d} path is in cache, skipping')
continue
else:
out_d = (out_root / rel_path)
anonymize_dicom_folder(in_d, out_d, ds_callback=get_tags_callback, **kwargs)
state.visited_folders[str(rel_path)] = True
except Exception as e:
raise e
finally:
prev_state = AnonState(_STATE_PATH)
prev_state.init_state()
prev_state.load_state()
new_tags = set(state.tag_counter.keys()).difference(prev_state.tag_counter.keys())
if new_tags:
logger.warning(f'During the anonymization new tags: {new_tags} were present')
else:
            logger.info('No new tags were present')
state.save_state()
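A usage sketch for the batch entry point; extra keyword arguments are forwarded to anonymize_dicom_folder, and the cache persisted at _STATE_PATH means a second run skips folders that were already processed.

anonymize_root_folder('/data/raw', '/data/anon')   # re-running later skips cached folders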
|
@cmd.add(_cmdd, 'rules')
async def _cmdf_enable(self, substr, msg, privilege_level):
'`{cmd}` - View game rules.'
(await self._client.send_msg(msg, self._RULES_STRING))
return
| -7,856,957,651,585,113,000
|
`{cmd}` - View game rules.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_enable
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'rules')
async def _cmdf_enable(self, substr, msg, privilege_level):
(await self._client.send_msg(msg, self._RULES_STRING))
return
|
@cmd.add(_cmdd, 'newgame', top=True)
@cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
async def _cmdf_newgame(self, substr, msg, privilege_level):
'`{cmd}` - New game.'
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
(await self._new_game(channel))
(await self._client.send_msg(channel, 'Truth game cleared.'))
return
| 5,801,103,587,893,892,000
|
`{cmd}` - New game.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_newgame
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'newgame', top=True)
@cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
async def _cmdf_newgame(self, substr, msg, privilege_level):
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
(await self._new_game(channel))
(await self._client.send_msg(channel, 'Truth game cleared.'))
return
|
@cmd.add(_cmdd, 'in', top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
'\n `{cmd}` - Adds you to the game.\n\n This command also allows moderators to add other users and arbitrary strings as participants.\n **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.\n '
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
new_participant = None
if ((privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0)):
new_participant = (('<@' + msg.author.id) + '>')
else:
new_participant = substr
if (self._PARTICIPANT_DELIMITER in new_participant):
(await self._client.send_msg(channel, 'Error: Not allowed to use the delimiter characters.'))
raise errors.OperationAborted
if (new_participant in self._get_participants(channel)):
(await self._client.send_msg(channel, 'Error: {} is already a participant.'.format(new_participant)))
else:
(await self._add_participant(channel, new_participant))
(await self._client.send_msg(channel, 'Added {} to the game.'.format(new_participant)))
return
| -4,829,615,238,631,485,000
|
`{cmd}` - Adds you to the game.
This command also allows moderators to add other users and arbitrary strings as participants.
**Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_in
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'in', top=True)
async def _cmdf_in(self, substr, msg, privilege_level):
'\n `{cmd}` - Adds you to the game.\n\n This command also allows moderators to add other users and arbitrary strings as participants.\n **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.\n '
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
new_participant = None
if ((privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0)):
new_participant = (('<@' + msg.author.id) + '>')
else:
new_participant = substr
if (self._PARTICIPANT_DELIMITER in new_participant):
(await self._client.send_msg(channel, 'Error: Not allowed to use the delimiter characters.'))
raise errors.OperationAborted
if (new_participant in self._get_participants(channel)):
(await self._client.send_msg(channel, 'Error: {} is already a participant.'.format(new_participant)))
else:
(await self._add_participant(channel, new_participant))
(await self._client.send_msg(channel, 'Added {} to the game.'.format(new_participant)))
return
|
@cmd.add(_cmdd, 'out', top=True)
async def _cmdf_out(self, substr, msg, privilege_level):
'\n `{cmd}` - Removes you from the game.\n\n This command also allows moderators to remove other users and arbitrary strings.\n **Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.\n '
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
participant = None
if ((privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0)):
participant = (('<@' + msg.author.id) + '>')
else:
participant = substr
if (participant in self._get_participants(channel)):
(await self._remove_participant(channel, participant))
(await self._client.send_msg(channel, 'Removed {} from the game.'.format(participant)))
else:
(await self._client.send_msg(channel, 'Error: {} is not already a participant.'.format(participant)))
return
| 8,620,157,351,105,654,000
|
`{cmd}` - Removes you from the game.
This command also allows moderators to remove other users and arbitrary strings.
**Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_out
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'out', top=True)
async def _cmdf_out(self, substr, msg, privilege_level):
'\n `{cmd}` - Removes you from the game.\n\n This command also allows moderators to remove other users and arbitrary strings.\n **Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.\n '
channel = msg.channel
(await self._abort_if_not_truth_channel(channel))
participant = None
if ((privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0)):
participant = (('<@' + msg.author.id) + '>')
else:
participant = substr
if (participant in self._get_participants(channel)):
(await self._remove_participant(channel, participant))
(await self._client.send_msg(channel, 'Removed {} from the game.'.format(participant)))
else:
(await self._client.send_msg(channel, 'Error: {} is not already a participant.'.format(participant)))
return
|
@cmd.add(_cmdd, 'enablechannel')
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_enable(self, substr, msg, privilege_level):
'`{cmd}` - Enable Truth in this channel.'
channel = msg.channel
if (channel.id in self._enabled_channels):
(await self._client.send_msg(channel, 'This channel is already a Truth game channel.'))
else:
self._enabled_channels.append(channel.id)
self._save_settings()
(await self._client.send_msg(channel, 'This channel is now a Truth game channel.'))
return
| 8,307,452,712,083,425,000
|
`{cmd}` - Enable Truth in this channel.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_enable
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'enablechannel')
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_enable(self, substr, msg, privilege_level):
channel = msg.channel
if (channel.id in self._enabled_channels):
(await self._client.send_msg(channel, 'This channel is already a Truth game channel.'))
else:
self._enabled_channels.append(channel.id)
self._save_settings()
(await self._client.send_msg(channel, 'This channel is now a Truth game channel.'))
return
|
@cmd.add(_cmdd, 'disablechannel')
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_disable(self, substr, msg, privilege_level):
'`{cmd}` - Disable Truth in this channel.'
channel = msg.channel
if (channel.id in self._enabled_channels):
self._enabled_channels.remove(channel.id)
self._save_settings()
(await self._client.send_msg(channel, 'This channel is no longer a Truth game channel.'))
else:
(await self._client.send_msg(channel, 'This channel is not a Truth game channel.'))
return
| 1,501,719,110,497,770,200
|
`{cmd}` - Disable Truth in this channel.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_disable
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'disablechannel')
@cmd.minimum_privilege(PrivilegeLevel.ADMIN)
async def _cmdf_disable(self, substr, msg, privilege_level):
channel = msg.channel
if (channel.id in self._enabled_channels):
self._enabled_channels.remove(channel.id)
self._save_settings()
(await self._client.send_msg(channel, 'This channel is no longer a Truth game channel.'))
else:
(await self._client.send_msg(channel, 'This channel is not a Truth game channel.'))
return
|
@cmd.add(_cmdd, 'viewenabled')
async def _cmdf_viewenabled(self, substr, msg, privilege_level):
'`{cmd}` - View all channels that are enabled as Truth channels.'
buf = None
if (len(self._enabled_channels) == 0):
buf = 'No channels have Truth game enabled.'
else:
buf = '**Truth game enabled channels:**'
for channel_id in self._enabled_channels:
buf += '\n<#{0}> (ID: {0})'.format(channel_id)
(await self._client.send_msg(msg, buf))
return
| 6,277,485,582,689,504,000
|
`{cmd}` - View all channels that are enabled as Truth channels.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_viewenabled
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'viewenabled')
async def _cmdf_viewenabled(self, substr, msg, privilege_level):
buf = None
if (len(self._enabled_channels) == 0):
buf = 'No channels have Truth game enabled.'
else:
buf = '**Truth game enabled channels:**'
for channel_id in self._enabled_channels:
buf += '\n<#{0}> (ID: {0})'.format(channel_id)
(await self._client.send_msg(msg, buf))
return
|
@cmd.add(_cmdd, 'choose', 'random', 'rand')
async def _cmdf_choosetruth(self, substr, msg, privilege_level):
'`{cmd}` - Pick a random participant other than yourself.'
topic = msg.channel.topic
if (topic is None):
(await self._client.send_msg(msg, "There doesn't appear to be a truth game in here."))
raise errors.OperationAborted
mentions = utils.get_all_mentions(topic)
if (len(mentions) == 0):
(await self._client.send_msg(msg, "There doesn't appear to be a truth game in here."))
raise errors.OperationAborted
try:
mentions.remove(msg.author.id)
if (len(mentions) == 0):
(await self._client.send_msg(msg, '<@{}>'.format(msg.author.id)))
raise errors.OperationAborted
except ValueError:
pass
choice = random.choice(mentions)
buf = '<@{}>\n'.format(choice)
buf += 'My choices were: '
for mention in mentions:
user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
if (user is None):
buf += '<@{}>, '.format(mention)
else:
buf += '{}, '.format(user.name)
buf = buf[:(- 2)]
(await self._client.send_msg(msg, buf))
return
| 5,147,312,669,561,219,000
|
`{cmd}` - Pick a random participant other than yourself.
|
mentionbot/servermodules/truthgame.py
|
_cmdf_choosetruth
|
simshadows/Discord-mentionbot
|
python
|
@cmd.add(_cmdd, 'choose', 'random', 'rand')
async def _cmdf_choosetruth(self, substr, msg, privilege_level):
topic = msg.channel.topic
if (topic is None):
(await self._client.send_msg(msg, "There doesn't appear to be a truth game in here."))
raise errors.OperationAborted
mentions = utils.get_all_mentions(topic)
if (len(mentions) == 0):
(await self._client.send_msg(msg, "There doesn't appear to be a truth game in here."))
raise errors.OperationAborted
try:
mentions.remove(msg.author.id)
if (len(mentions) == 0):
(await self._client.send_msg(msg, '<@{}>'.format(msg.author.id)))
raise errors.OperationAborted
except ValueError:
pass
choice = random.choice(mentions)
buf = '<@{}>\n'.format(choice)
buf += 'My choices were: '
for mention in mentions:
user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
if (user is None):
buf += '<@{}>, '.format(mention)
else:
buf += '{}, '.format(user.name)
buf = buf[:(- 2)]
(await self._client.send_msg(msg, buf))
return
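The choice command above boils down to: read the channel topic, collect the <@id> mentions, drop the caller, and pick one at random. A standalone sketch of that pattern, with utils.get_all_mentions approximated by a regex (an assumption, since its implementation is not shown here):

import random
import re

def choose_other_participant(topic, author_id):
    # Approximation of utils.get_all_mentions: pull the numeric ids out of <@...> mentions.
    mentions = re.findall(r'<@!?(\d+)>', topic or '')
    candidates = [m for m in mentions if m != author_id]
    return random.choice(candidates) if candidates else None

print(choose_other_participant('Players: <@111> <@222> <@333>', '222'))  # prints 111 or 333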
|
def test_username_validation_error_msg(self, user: User):
"\n Tests UserCreation Form's unique validator functions correctly by testing:\n 1) A new user with an existing username cannot be added.\n 2) Only 1 error is raised by the UserCreation Form\n 3) The desired error message is raised\n "
form = UserCreationForm({'username': user.username, 'password1': user.password, 'password2': user.password})
assert (not form.is_valid())
assert (len(form.errors) == 1)
assert ('username' in form.errors)
assert (form.errors['username'][0] == _('This username has already been taken.'))
| 1,755,965,056,911,758,800
|
Tests UserCreation Form's unique validator functions correctly by testing:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
|
my_blog/users/tests/test_forms.py
|
test_username_validation_error_msg
|
Tanishk-Sharma/my_blog
|
python
|
def test_username_validation_error_msg(self, user: User):
"\n Tests UserCreation Form's unique validator functions correctly by testing:\n 1) A new user with an existing username cannot be added.\n 2) Only 1 error is raised by the UserCreation Form\n 3) The desired error message is raised\n "
form = UserCreationForm({'username': user.username, 'password1': user.password, 'password2': user.password})
assert (not form.is_valid())
assert (len(form.errors) == 1)
assert ('username' in form.errors)
assert (form.errors['username'][0] == _('This username has already been taken.'))
|
def convert_types(self, schema, col_type_dict, row):
'Convert values from DBAPI to output-friendly formats.'
return [self.convert_type(value, col_type_dict.get(name)) for (name, value) in zip(schema, row)]
| 7,978,365,602,373,941,000
|
Convert values from DBAPI to output-friendly formats.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
convert_types
|
FRI-DAY/airflow
|
python
|
def convert_types(self, schema, col_type_dict, row):
return [self.convert_type(value, col_type_dict.get(name)) for (name, value) in zip(schema, row)]
|
def _write_local_data_files(self, cursor):
'\n Takes a cursor, and writes results to a local file.\n\n :return: A dictionary where keys are filenames to be used as object\n names in GCS, and values are file handles to local files that\n contain the data for the GCS objects.\n '
schema = list(map((lambda schema_tuple: schema_tuple[0]), cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if (self.export_format == 'csv'):
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{'file_name': self.filename.format(file_no), 'file_handle': tmp_file_handle, 'file_mime_type': file_mime_type}]
if (self.export_format == 'csv'):
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
row = self.convert_types(schema, col_type_dict, row)
if (self.export_format == 'csv'):
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
tmp_file_handle.write(b'\n')
if (tmp_file_handle.tell() >= self.approx_max_file_size_bytes):
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({'file_name': self.filename.format(file_no), 'file_handle': tmp_file_handle, 'file_mime_type': file_mime_type})
if (self.export_format == 'csv'):
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
| 6,549,307,246,991,140,000
|
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
_write_local_data_files
|
FRI-DAY/airflow
|
python
|
def _write_local_data_files(self, cursor):
'\n Takes a cursor, and writes results to a local file.\n\n :return: A dictionary where keys are filenames to be used as object\n names in GCS, and values are file handles to local files that\n contain the data for the GCS objects.\n '
schema = list(map((lambda schema_tuple: schema_tuple[0]), cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if (self.export_format == 'csv'):
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{'file_name': self.filename.format(file_no), 'file_handle': tmp_file_handle, 'file_mime_type': file_mime_type}]
if (self.export_format == 'csv'):
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
row = self.convert_types(schema, col_type_dict, row)
if (self.export_format == 'csv'):
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
tmp_file_handle.write(b'\n')
if (tmp_file_handle.tell() >= self.approx_max_file_size_bytes):
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({'file_name': self.filename.format(file_no), 'file_handle': tmp_file_handle, 'file_mime_type': file_mime_type})
if (self.export_format == 'csv'):
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
|
def _configure_csv_file(self, file_handle, schema):
'Configure a csv writer with the file_handle and write schema\n as headers for the new file.\n '
csv_writer = csv.writer(file_handle, encoding='utf-8', delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
| 112,089,612,591,239,120
|
Configure a csv writer with the file_handle and write schema
as headers for the new file.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
_configure_csv_file
|
FRI-DAY/airflow
|
python
|
def _configure_csv_file(self, file_handle, schema):
'Configure a csv writer with the file_handle and write schema\n as headers for the new file.\n '
csv_writer = csv.writer(file_handle, encoding='utf-8', delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
|
@abc.abstractmethod
def query(self):
'Execute DBAPI query.'
| 2,809,730,422,140,370,000
|
Execute DBAPI query.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
query
|
FRI-DAY/airflow
|
python
|
@abc.abstractmethod
def query(self):
|
@abc.abstractmethod
def field_to_bigquery(self, field):
'Convert a DBAPI field to BigQuery schema format.'
| 2,258,237,604,328,877,600
|
Convert a DBAPI field to BigQuery schema format.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
field_to_bigquery
|
FRI-DAY/airflow
|
python
|
@abc.abstractmethod
def field_to_bigquery(self, field):
|
@abc.abstractmethod
def convert_type(self, value, schema_type):
'Convert a value from DBAPI to output-friendly formats.'
| -2,785,577,839,560,248,300
|
Convert a value from DBAPI to output-friendly formats.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
convert_type
|
FRI-DAY/airflow
|
python
|
@abc.abstractmethod
def convert_type(self, value, schema_type):
|
def _get_col_type_dict(self):
'\n Return a dict of column name and column type based on self.schema if not None.\n '
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif (self.schema is not None):
        self.log.warning('Using default schema due to unexpected type. Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please refer to: https://cloud.google.com/bigquery/docs/schemas#specifying_a_json_schema_file')
return col_type_dict
| -6,582,290,293,196,102,000
|
Return a dict of column name and column type based on self.schema if not None.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
_get_col_type_dict
|
FRI-DAY/airflow
|
python
|
def _get_col_type_dict(self):
'\n \n '
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif (self.schema is not None):
        self.log.warning('Using default schema due to unexpected type. Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please refer to: https://cloud.google.com/bigquery/docs/schemas#specifying_a_json_schema_file')
return col_type_dict
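For illustration, a schema in the BigQuery JSON format referenced by the warning message and the mapping this helper builds from it:

import json

schema_json = '[{"name": "id", "type": "INTEGER", "mode": "REQUIRED"}, {"name": "created_at", "type": "TIMESTAMP"}, {"name": "payload", "type": "STRING"}]'
schema = json.loads(schema_json)
col_type_dict = {col['name']: col['type'] for col in schema}
print(col_type_dict)  # {'id': 'INTEGER', 'created_at': 'TIMESTAMP', 'payload': 'STRING'}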
|
def _write_local_schema_file(self, cursor):
'\n Takes a cursor, and writes the BigQuery schema for the results to a\n local file system.\n\n :return: A dictionary where key is a filename to be used as an object\n name in GCS, and values are file handles to local files that\n contains the BigQuery schema fields in .json format.\n '
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {'file_name': self.schema_filename, 'file_handle': tmp_schema_file_handle, 'file_mime_type': 'application/json'}
return schema_file_to_upload
| 5,382,904,820,138,505,000
|
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contain the BigQuery schema fields in .json format.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
_write_local_schema_file
|
FRI-DAY/airflow
|
python
|
def _write_local_schema_file(self, cursor):
'\n Takes a cursor, and writes the BigQuery schema for the results to a\n local file system.\n\n :return: A dictionary where key is a filename to be used as an object\n name in GCS, and values are file handles to local files that\n contains the BigQuery schema fields in .json format.\n '
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {'file_name': self.schema_filename, 'file_handle': tmp_schema_file_handle, 'file_mime_type': 'application/json'}
return schema_file_to_upload
|
def _upload_to_gcs(self, files_to_upload):
'\n Upload all of the file splits (and optionally the schema .json file) to\n Google Cloud Storage.\n '
hook = GCSHook(google_cloud_storage_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'), tmp_file.get('file_handle').name, mime_type=tmp_file.get('file_mime_type'), gzip=(self.gzip if (tmp_file.get('file_name') == self.schema_filename) else False))
| 4,584,763,336,989,765,600
|
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
_upload_to_gcs
|
FRI-DAY/airflow
|
python
|
def _upload_to_gcs(self, files_to_upload):
'\n Upload all of the file splits (and optionally the schema .json file) to\n Google Cloud Storage.\n '
hook = GCSHook(google_cloud_storage_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'), tmp_file.get('file_handle').name, mime_type=tmp_file.get('file_mime_type'), gzip=(self.gzip if (tmp_file.get('file_name') == self.schema_filename) else False))
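The abstract hooks above (query, field_to_bigquery, convert_type) are what a concrete operator fills in. A minimal sketch of such a subclass, assuming the base class defined in this file is named BaseSQLToGCSOperator and that a database hook exposing get_conn() plus a self.sql attribute are available (all three names are assumptions):

class MySQLToGCSOperator(BaseSQLToGCSOperator):  # base class name assumed
    def query(self):
        # Assumed helper: open a DBAPI connection and return a cursor over the query results.
        cursor = self._get_db_hook().get_conn().cursor()
        cursor.execute(self.sql)
        return cursor

    def field_to_bigquery(self, field):
        # field is a DBAPI cursor.description entry: (name, type_code, ...).
        return {'name': field[0], 'type': 'STRING', 'mode': 'NULLABLE'}

    def convert_type(self, value, schema_type):
        # Keep values CSV/JSON friendly; a real operator would convert dates, decimals, bytes, etc.
        if value is None or isinstance(value, (int, float, str)):
            return value
        return str(value)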
|
def description(self):
'\n Returns a description for the actor.\n\n :return: the actor description\n :rtype: str\n '
return 'Outputs an integer from the specified range.'
| 6,808,793,563,313,494,000
|
Returns a description for the actor.
:return: the actor description
:rtype: str
|
base/src/shallowflow/base/sources/_ForLoop.py
|
description
|
waikato-datamining/shallow-flow
|
python
|
def description(self):
'\n Returns a description for the actor.\n\n :return: the actor description\n :rtype: str\n '
return 'Outputs an integer from the specified range.'
|
def _define_options(self):
'\n For configuring the options.\n '
super()._define_options()
self._option_manager.add(Option(name='start', value_type=int, def_value=1, help='The starting value'))
self._option_manager.add(Option(name='end', value_type=int, def_value=10, help='The last value (incl)'))
self._option_manager.add(Option(name='step', value_type=int, def_value=1, help='The increment between values'))
| 1,926,360,857,306,023,200
|
For configuring the options.
|
base/src/shallowflow/base/sources/_ForLoop.py
|
_define_options
|
waikato-datamining/shallow-flow
|
python
|
def _define_options(self):
'\n \n '
super()._define_options()
self._option_manager.add(Option(name='start', value_type=int, def_value=1, help='The starting value'))
self._option_manager.add(Option(name='end', value_type=int, def_value=10, help='The last value (incl)'))
self._option_manager.add(Option(name='step', value_type=int, def_value=1, help='The increment between values'))
|
def _get_item_type(self):
'\n Returns the type of the individual items that get generated, when not outputting a list.\n\n :return: the type that gets generated\n '
return int
| 2,031,864,802,038,898,000
|
Returns the type of the individual items that get generated, when not outputting a list.
:return: the type that gets generated
|
base/src/shallowflow/base/sources/_ForLoop.py
|
_get_item_type
|
waikato-datamining/shallow-flow
|
python
|
def _get_item_type(self):
'\n Returns the type of the individual items that get generated, when not outputting a list.\n\n :return: the type that gets generated\n '
return int
|
def setup(self):
'\n Prepares the actor for use.\n\n :return: None if successful, otherwise error message\n :rtype: str\n '
result = super().setup()
if (result is None):
if (self.get('end') < self.get('start')):
            result = ('End value (%s) must not be smaller than the start value (%d)!' % (self.get('end'), self.get('start')))
return result
| 2,356,912,509,369,107,000
|
Prepares the actor for use.
:return: None if successful, otherwise error message
:rtype: str
|
base/src/shallowflow/base/sources/_ForLoop.py
|
setup
|
waikato-datamining/shallow-flow
|
python
|
def setup(self):
'\n Prepares the actor for use.\n\n :return: None if successful, otherwise error message\n :rtype: str\n '
result = super().setup()
if (result is None):
if (self.get('end') < self.get('start')):
            result = ('End value (%s) must not be smaller than the start value (%d)!' % (self.get('end'), self.get('start')))
return result
|
def _do_execute(self):
'\n Performs the actual execution.\n\n :return: None if successful, otherwise error message\n :rtype: str\n '
i = self.get('start')
step = self.get('step')
end = self.get('end')
while (i <= end):
self._output.append(i)
i += step
return None
| 2,658,049,908,359,687,000
|
Performs the actual execution.
:return: None if successful, otherwise error message
:rtype: str
|
base/src/shallowflow/base/sources/_ForLoop.py
|
_do_execute
|
waikato-datamining/shallow-flow
|
python
|
def _do_execute(self):
'\n Performs the actual execution.\n\n :return: None if successful, otherwise error message\n :rtype: str\n '
i = self.get('start')
step = self.get('step')
end = self.get('end')
while (i <= end):
self._output.append(i)
i += step
return None
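A quick illustration of what _do_execute produces for a given start/end/step; the loop below mirrors the body above outside the actor framework:

start, end, step = 1, 10, 3
output = []
i = start
while i <= end:
    output.append(i)
    i += step
print(output)  # [1, 4, 7, 10]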
|
def __init__(self, *, host: str='vision.googleapis.com', credentials: ga_credentials.Credentials=None, credentials_file: str=None, scopes: Sequence[str]=None, channel: grpc.Channel=None, api_mtls_endpoint: str=None, client_cert_source: Callable[([], Tuple[(bytes, bytes)])]=None, ssl_channel_credentials: grpc.ChannelCredentials=None, client_cert_source_for_mtls: Callable[([], Tuple[(bytes, bytes)])]=None, quota_project_id: Optional[str]=None, client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool]=False) -> None:
"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n This argument is ignored if ``channel`` is provided.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is ignored if ``channel`` is provided.\n scopes (Optional(Sequence[str])): A list of scopes. This argument is\n ignored if ``channel`` is provided.\n channel (Optional[grpc.Channel]): A ``Channel`` instance through\n which to make calls.\n api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.\n If provided, it overrides the ``host`` argument and tries to create\n a mutual TLS channel with client SSL credentials from\n ``client_cert_source`` or application default SSL credentials.\n client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):\n Deprecated. A callback to provide client SSL certificate bytes and\n private key bytes, both in PEM format. It is ignored if\n ``api_mtls_endpoint`` is None.\n ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials\n for the grpc channel. It is ignored if ``channel`` is provided.\n client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):\n A callback to provide client certificate bytes and private key bytes,\n both in PEM format. It is used to configure a mutual TLS channel. It is\n ignored if ``channel`` or ``ssl_channel_credentials`` is provided.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n always_use_jwt_access (Optional[bool]): Whether self signed JWT should\n be used for service account credentials.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport\n creation failed for any reason.\n google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``\n and ``credentials_file`` are passed.\n "
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[(str, Callable)] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn('api_mtls_endpoint is deprecated', DeprecationWarning)
if client_cert_source:
warnings.warn('client_cert_source is deprecated', DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
(cert, key) = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(certificate_chain=cert, private_key=key)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
elif (client_cert_source_for_mtls and (not ssl_channel_credentials)):
(cert, key) = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(certificate_chain=cert, private_key=key)
super().__init__(host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access)
if (not self._grpc_channel):
self._grpc_channel = type(self).create_channel(self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[('grpc.max_send_message_length', (- 1)), ('grpc.max_receive_message_length', (- 1))])
self._prep_wrapped_messages(client_info)
| -5,878,449,379,749,531,000
|
Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
__init__
|
googleapis/googleapis-gen
|
python
|
def __init__(self, *, host: str='vision.googleapis.com', credentials: ga_credentials.Credentials=None, credentials_file: str=None, scopes: Sequence[str]=None, channel: grpc.Channel=None, api_mtls_endpoint: str=None, client_cert_source: Callable[([], Tuple[(bytes, bytes)])]=None, ssl_channel_credentials: grpc.ChannelCredentials=None, client_cert_source_for_mtls: Callable[([], Tuple[(bytes, bytes)])]=None, quota_project_id: Optional[str]=None, client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool]=False) -> None:
"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n This argument is ignored if ``channel`` is provided.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is ignored if ``channel`` is provided.\n scopes (Optional(Sequence[str])): A list of scopes. This argument is\n ignored if ``channel`` is provided.\n channel (Optional[grpc.Channel]): A ``Channel`` instance through\n which to make calls.\n api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.\n If provided, it overrides the ``host`` argument and tries to create\n a mutual TLS channel with client SSL credentials from\n ``client_cert_source`` or application default SSL credentials.\n client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):\n Deprecated. A callback to provide client SSL certificate bytes and\n private key bytes, both in PEM format. It is ignored if\n ``api_mtls_endpoint`` is None.\n ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials\n for the grpc channel. It is ignored if ``channel`` is provided.\n client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):\n A callback to provide client certificate bytes and private key bytes,\n both in PEM format. It is used to configure a mutual TLS channel. It is\n ignored if ``channel`` or ``ssl_channel_credentials`` is provided.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n always_use_jwt_access (Optional[bool]): Whether self signed JWT should\n be used for service account credentials.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport\n creation failed for any reason.\n google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``\n and ``credentials_file`` are passed.\n "
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[(str, Callable)] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn('api_mtls_endpoint is deprecated', DeprecationWarning)
if client_cert_source:
warnings.warn('client_cert_source is deprecated', DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
(cert, key) = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(certificate_chain=cert, private_key=key)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
elif (client_cert_source_for_mtls and (not ssl_channel_credentials)):
(cert, key) = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(certificate_chain=cert, private_key=key)
super().__init__(host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access)
if (not self._grpc_channel):
self._grpc_channel = type(self).create_channel(self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[('grpc.max_send_message_length', (- 1)), ('grpc.max_receive_message_length', (- 1))])
self._prep_wrapped_messages(client_info)
|
@classmethod
def create_channel(cls, host: str='vision.googleapis.com', credentials: ga_credentials.Credentials=None, credentials_file: str=None, scopes: Optional[Sequence[str]]=None, quota_project_id: Optional[str]=None, **kwargs) -> grpc.Channel:
'Create and return a gRPC channel object.\n Args:\n host (Optional[str]): The host for the channel to use.\n credentials (Optional[~.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If\n none are specified, the client will attempt to ascertain\n the credentials from the environment.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is mutually exclusive with credentials.\n scopes (Optional[Sequence[str]]): A optional list of scopes needed for this\n service. These are only used when credentials are not specified and\n are passed to :func:`google.auth.default`.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n kwargs (Optional[dict]): Keyword arguments, which are passed to the\n channel creation.\n Returns:\n grpc.Channel: A gRPC channel object.\n\n Raises:\n google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``\n and ``credentials_file`` are passed.\n '
return grpc_helpers.create_channel(host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs)
| -3,496,580,290,601,304,000
|
Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
create_channel
|
googleapis/googleapis-gen
|
python
|
@classmethod
def create_channel(cls, host: str='vision.googleapis.com', credentials: ga_credentials.Credentials=None, credentials_file: str=None, scopes: Optional[Sequence[str]]=None, quota_project_id: Optional[str]=None, **kwargs) -> grpc.Channel:
'Create and return a gRPC channel object.\n Args:\n host (Optional[str]): The host for the channel to use.\n credentials (Optional[~.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If\n none are specified, the client will attempt to ascertain\n the credentials from the environment.\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is mutually exclusive with credentials.\n scopes (Optional[Sequence[str]]): A optional list of scopes needed for this\n service. These are only used when credentials are not specified and\n are passed to :func:`google.auth.default`.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n kwargs (Optional[dict]): Keyword arguments, which are passed to the\n channel creation.\n Returns:\n grpc.Channel: A gRPC channel object.\n\n Raises:\n google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``\n and ``credentials_file`` are passed.\n '
return grpc_helpers.create_channel(host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs)
|
@property
def grpc_channel(self) -> grpc.Channel:
'Return the channel designed to connect to this service.\n '
return self._grpc_channel
| -1,956,682,971,687,930,400
|
Return the channel designed to connect to this service.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
grpc_channel
|
googleapis/googleapis-gen
|
python
|
@property
def grpc_channel(self) -> grpc.Channel:
'\n '
return self._grpc_channel
|
@property
def operations_client(self) -> operations_v1.OperationsClient:
'Create the client designed to process long-running operations.\n\n This property caches on the instance; repeated calls return the same\n client.\n '
if (self._operations_client is None):
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
return self._operations_client
| -7,084,677,965,328,057,000
|
Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
operations_client
|
googleapis/googleapis-gen
|
python
|
@property
def operations_client(self) -> operations_v1.OperationsClient:
'Create the client designed to process long-running operations.\n\n This property caches on the instance; repeated calls return the same\n client.\n '
if (self._operations_client is None):
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
return self._operations_client
|
@property
def batch_annotate_images(self) -> Callable[([image_annotator.BatchAnnotateImagesRequest], image_annotator.BatchAnnotateImagesResponse)]:
'Return a callable for the batch annotate images method over gRPC.\n\n Run image detection and annotation for a batch of\n images.\n\n Returns:\n Callable[[~.BatchAnnotateImagesRequest],\n ~.BatchAnnotateImagesResponse]:\n A function that, when called, will call the underlying RPC\n on the server.\n '
if ('batch_annotate_images' not in self._stubs):
self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary('/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize, response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize)
return self._stubs['batch_annotate_images']
| -4,998,487,497,053,026,000
|
Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
~.BatchAnnotateImagesResponse]:
A function that, when called, will call the underlying RPC
on the server.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
batch_annotate_images
|
googleapis/googleapis-gen
|
python
|
@property
def batch_annotate_images(self) -> Callable[([image_annotator.BatchAnnotateImagesRequest], image_annotator.BatchAnnotateImagesResponse)]:
'Return a callable for the batch annotate images method over gRPC.\n\n Run image detection and annotation for a batch of\n images.\n\n Returns:\n Callable[[~.BatchAnnotateImagesRequest],\n ~.BatchAnnotateImagesResponse]:\n A function that, when called, will call the underlying RPC\n on the server.\n '
if ('batch_annotate_images' not in self._stubs):
self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary('/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize, response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize)
return self._stubs['batch_annotate_images']
|
@property
def async_batch_annotate_files(self) -> Callable[([image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation)]:
'Return a callable for the async batch annotate files method over gRPC.\n\n Run asynchronous image detection and annotation for a list of\n generic files, such as PDF files, which may contain multiple\n pages and multiple images per page. Progress and results can be\n retrieved through the ``google.longrunning.Operations``\n interface. ``Operation.metadata`` contains ``OperationMetadata``\n (metadata). ``Operation.response`` contains\n ``AsyncBatchAnnotateFilesResponse`` (results).\n\n Returns:\n Callable[[~.AsyncBatchAnnotateFilesRequest],\n ~.Operation]:\n A function that, when called, will call the underlying RPC\n on the server.\n '
if ('async_batch_annotate_files' not in self._stubs):
self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary('/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles', request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString)
return self._stubs['async_batch_annotate_files']
| -3,732,901,292,045,829,600
|
Return a callable for the async batch annotate files method over gRPC.
Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
async_batch_annotate_files
|
googleapis/googleapis-gen
|
python
|
@property
def async_batch_annotate_files(self) -> Callable[([image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation)]:
'Return a callable for the async batch annotate files method over gRPC.\n\n Run asynchronous image detection and annotation for a list of\n generic files, such as PDF files, which may contain multiple\n pages and multiple images per page. Progress and results can be\n retrieved through the ``google.longrunning.Operations``\n interface. ``Operation.metadata`` contains ``OperationMetadata``\n (metadata). ``Operation.response`` contains\n ``AsyncBatchAnnotateFilesResponse`` (results).\n\n Returns:\n Callable[[~.AsyncBatchAnnotateFilesRequest],\n ~.Operation]:\n A function that, when called, will call the underlying RPC\n on the server.\n '
if ('async_batch_annotate_files' not in self._stubs):
self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary('/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles', request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize, response_deserializer=operations_pb2.Operation.FromString)
return self._stubs['async_batch_annotate_files']
|
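The docstring above describes the long-running-operation contract: metadata is readable while the job runs and the response appears once it finishes. A toy, non-gRPC sketch of that contract with hypothetical names:

class ToyOperation:
    # Mimics google.longrunning semantics in miniature: metadata now, response later.
    def __init__(self):
        self.metadata = {'state': 'RUNNING'}
        self._response = None

    def done(self):
        return self._response is not None

    def finish(self, response):
        self.metadata = {'state': 'DONE'}
        self._response = response

    @property
    def response(self):
        if self._response is None:
            raise RuntimeError('operation still running')
        return self._response

op = ToyOperation()
print(op.metadata)                  # {'state': 'RUNNING'} while the server works
op.finish({'responses': []})        # pretend the batch completed
if op.done():
    print(op.metadata, op.response)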
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
    'Truncates or pads a tensor to new_size on a given axis.\n    Truncate or extend tensor such that tensor.shape[axis] == new_size. If the\n    size increases, the padding will be performed at the end, using fill_value.\n    Args:\n      tensor: The tensor to be resized.\n      axis: An integer representing the dimension to be sliced.\n      new_size: An integer or 0d tensor representing the new value for\n        tensor.shape[axis].\n      fill_value: Value to use to fill any new entries in the tensor. Will be\n        cast to the type of tensor.\n    Returns:\n      The resized tensor.\n    '
tensor = torch.Tensor(tensor)
shape = list(tensor.shape)
pad_shape = shape[:]
pad_shape[axis] = max(0, (new_size - shape[axis]))
start = (0 if (shape[axis] <= new_size) else np.random.randint((shape[axis] - new_size)))
old_length = shape[axis]
shape[axis] = min(shape[axis], new_size)
resized = torch.cat([(torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,))) if ((start > 0) and random_sampling) else torch.narrow(tensor, dim=axis, start=start, length=shape[axis])), torch.Tensor(*pad_shape).fill_(fill_value)], dim=axis)
return resized
| 3,647,447,032,106,927,600
|
Truncates or pads a tensor to new_size on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
cast to the type of tensor.
Returns:
The resized tensor.
|
utils.py
|
resize_axis
|
glee1228/segment_temporal_context_aggregation
|
python
|
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
    'Truncates or pads a tensor to new_size on a given axis.\n    Truncate or extend tensor such that tensor.shape[axis] == new_size. If the\n    size increases, the padding will be performed at the end, using fill_value.\n    Args:\n      tensor: The tensor to be resized.\n      axis: An integer representing the dimension to be sliced.\n      new_size: An integer or 0d tensor representing the new value for\n        tensor.shape[axis].\n      fill_value: Value to use to fill any new entries in the tensor. Will be\n        cast to the type of tensor.\n    Returns:\n      The resized tensor.\n    '
tensor = torch.Tensor(tensor)
shape = list(tensor.shape)
pad_shape = shape[:]
pad_shape[axis] = max(0, (new_size - shape[axis]))
start = (0 if (shape[axis] <= new_size) else np.random.randint((shape[axis] - new_size)))
old_length = shape[axis]
shape[axis] = min(shape[axis], new_size)
resized = torch.cat([(torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,))) if ((start > 0) and random_sampling) else torch.narrow(tensor, dim=axis, start=start, length=shape[axis])), torch.Tensor(*pad_shape).fill_(fill_value)], dim=axis)
return resized
|
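A small usage sketch for resize_axis as defined in the record above, assuming torch and numpy are installed and the function is in scope:

import numpy as np
import torch  # resize_axis converts its input with torch.Tensor(...)

features = np.random.rand(7, 128).astype('float32')   # e.g. 7 frames of 128-dim features

padded = resize_axis(features, axis=0, new_size=10)    # pads the frame axis with zeros
print(padded.shape)                                    # torch.Size([10, 128])

cropped = resize_axis(features, axis=0, new_size=4)    # random contiguous crop to 4 frames
print(cropped.shape)                                   # torch.Size([4, 128])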
def __init__(self, value: T, parent: Optional[Any]=None, callback: Optional[Callable[([], None)]]=None):
    ' Initialize the PageProperty to always have the given value.'
self._values: List[Tuple[(int, T)]] = [(0, value)]
self.set_parent(parent)
self._callback = callback
| -7,846,967,357,376,559,000
|
Initialize the PageProperty to always have the given value.
|
chart/chart/python/spectralsequence_chart/page_property.py
|
__init__
|
JoeyBF/sseq
|
python
|
def __init__(self, value: T, parent: Optional[Any]=None, callback: Optional[Callable[([], None)]]=None):
' '
self._values: List[Tuple[(int, T)]] = [(0, value)]
self.set_parent(parent)
self._callback = callback
|
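Only __init__ is captured in the record above, so the following is a hypothetical reconstruction of how that initializer shape might be exercised; ToyPageProperty and its set_parent are stand-ins, not the sseq implementation:

from typing import Any, Callable, Generic, List, Optional, Tuple, TypeVar

T = TypeVar('T')

class ToyPageProperty(Generic[T]):
    def __init__(self, value: T, parent: Optional[Any] = None,
                 callback: Optional[Callable[[], None]] = None):
        # Same shape as the record: (page, value) pairs, starting with page 0.
        self._values: List[Tuple[int, T]] = [(0, value)]
        self.set_parent(parent)
        self._callback = callback

    def set_parent(self, parent: Optional[Any]) -> None:
        self._parent = parent

prop = ToyPageProperty(3, callback=lambda: print('changed'))
print(prop._values)   # [(0, 3)]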
@click.command(name='about')
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
'Print information about osxphotos including license.'
click.echo_via_pager(f'''osxphotos, version {__version__}
Source code available at: {OSXPHOTOS_URL}
{LICENSE}''')
| 63,966,833,063,543,110
|
Print information about osxphotos including license.
|
osxphotos/cli/about.py
|
about
|
oPromessa/osxphotos
|
python
|
@click.command(name='about')
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
    click.echo_via_pager(f'''osxphotos, version {__version__}
Source code available at: {OSXPHOTOS_URL}
{LICENSE}''')
|
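A self-contained sketch of exercising a click command shaped like about above with click's CliRunner; the command body and constants here are stand-ins, not osxphotos code:

import click
from click.testing import CliRunner

TOY_VERSION = '0.0.0'
TOY_URL = 'https://example.com/toy'

@click.command(name='about')
@click.pass_obj
@click.pass_context
def about_stub(ctx, cli_obj):
    """Print information about the toy tool."""
    click.echo_via_pager(f'toy tool, version {TOY_VERSION}\nSource code available at: {TOY_URL}')

runner = CliRunner()
result = runner.invoke(about_stub, obj=None)   # obj=None is what @click.pass_obj receives
print(result.exit_code)   # 0
print(result.output)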