function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def _set_scsa_rx(self, v, load=False):
"""
Setter method for scsa_rx, mapped from YANG variable /macsec/interfaces/interface/scsa_rx (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_scsa_rx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_scsa_rx() directly.
YANG Description: Enclosing container for received packets for Secure Channel and | google/gnxi | [
229,
110,
229,
19,
1506413981
] |
def _unset_scsa_rx(self):
    """Reset scsa_rx to a fresh default container instance."""
    self.__scsa_rx = YANGDynClass(
        base=yc_scsa_rx_openconfig_macsec__macsec_interfaces_interface_scsa_rx,
        is_container='container',
        yang_name="scsa-rx",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        extensions=None,
        namespace='http://openconfig.net/yang/macsec',
        defining_module='openconfig-macsec',
        yang_type='container',
        is_config=True,
    )
229,
110,
229,
19,
1506413981
] |
def _set_mka(self, v, load=False):
    """
    Setter for mka, mapped from YANG variable
    /macsec/interfaces/interface/mka (container).

    Coerces ``v`` into the generated container class and stores it; raises
    ``ValueError`` if ``v`` is not compatible. Were this node config false,
    the setter would be considered private and only called by backends.
    """
    # Unwrap pyangbind union-typed values before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        coerced = YANGDynClass(v,base=yc_mka_openconfig_macsec__macsec_interfaces_interface_mka, is_container='container', yang_name="mka", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mka must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_mka_openconfig_macsec__macsec_interfaces_interface_mka, is_container='container', yang_name="mka", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)""",
        })
    self.__mka = coerced
    if hasattr(self, '_set'):
        self._set()
229,
110,
229,
19,
1506413981
] |
def __init__(self, *args, **kwargs):
    """Build the interfaces container, optionally copying from one source object."""
    self._path_helper = False
    self._extmethods = False
    self.__interface = YANGDynClass(base=YANGListType("name",yc_interface_openconfig_macsec__macsec_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='list', is_config=True)
    load = kwargs.pop("load", None)
    if not args:
        return
    if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
    source = args[0]
    # The source object must expose every pyangbind element of this class.
    if not all(hasattr(source, e) for e in self._pyangbind_elements):
        raise ValueError("Supplied object did not have the correct attributes")
    for e in self._pyangbind_elements:
        nobj = getattr(source, e)
        # Skip elements the source never changed from their defaults.
        if nobj._changed() is False:
            continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
            setmethod(getattr(source, e))
        else:
            setmethod(getattr(source, e), load=load)
229,
110,
229,
19,
1506413981
] |
def _get_interface(self):
    """
    Getter for interface, mapped from YANG variable
    /macsec/interfaces/interface (list).

    YANG Description: List of interfaces on which MACsec is enabled / available.
    """
    return self.__interface
229,
110,
229,
19,
1506413981
] |
def _set_interface(self, v, load=False):
    """
    Setter for interface, mapped from YANG variable
    /macsec/interfaces/interface (list).

    Coerces ``v`` into the generated keyed list type and stores it; raises
    ``ValueError`` if ``v`` is not compatible. Were this node config false,
    the setter would be considered private and only called by backends.
    """
    # Unwrap pyangbind union-typed values before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        coerced = YANGDynClass(v,base=YANGListType("name",yc_interface_openconfig_macsec__macsec_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",yc_interface_openconfig_macsec__macsec_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='list', is_config=True)""",
        })
    self.__interface = coerced
    if hasattr(self, '_set'):
        self._set()
229,
110,
229,
19,
1506413981
] |
def __init__(self, *args, **kwargs):
    """Build the macsec container, optionally copying from one source object."""
    self._path_helper = False
    self._extmethods = False
    self.__mka = YANGDynClass(base=yc_mka_openconfig_macsec__macsec_mka, is_container='container', yang_name="mka", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_macsec__macsec_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    load = kwargs.pop("load", None)
    if not args:
        return
    if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
    source = args[0]
    # The source object must expose every pyangbind element of this class.
    if not all(hasattr(source, e) for e in self._pyangbind_elements):
        raise ValueError("Supplied object did not have the correct attributes")
    for e in self._pyangbind_elements:
        nobj = getattr(source, e)
        # Skip elements the source never changed from their defaults.
        if nobj._changed() is False:
            continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
            setmethod(getattr(source, e))
        else:
            setmethod(getattr(source, e), load=load)
229,
110,
229,
19,
1506413981
] |
def _get_mka(self):
    """
    Getter for mka, mapped from YANG variable /macsec/mka (container).

    YANG Description: The MKA.
    """
    return self.__mka
229,
110,
229,
19,
1506413981
] |
def _set_mka(self, v, load=False):
    """
    Setter for mka, mapped from YANG variable /macsec/mka (container).

    Coerces ``v`` into the generated container class and stores it; raises
    ``ValueError`` if ``v`` is not compatible. Were this node config false,
    the setter would be considered private and only called by backends.
    """
    # Unwrap pyangbind union-typed values before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        coerced = YANGDynClass(v,base=yc_mka_openconfig_macsec__macsec_mka, is_container='container', yang_name="mka", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mka must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_mka_openconfig_macsec__macsec_mka, is_container='container', yang_name="mka", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)""",
        })
    self.__mka = coerced
    if hasattr(self, '_set'):
        self._set()
229,
110,
229,
19,
1506413981
] |
def _get_interfaces(self):
    """
    Getter for interfaces, mapped from YANG variable /macsec/interfaces (container).

    YANG Description: Enclosing container for the MACsec interfaces list.
    """
    return self.__interfaces
229,
110,
229,
19,
1506413981
] |
def _set_interfaces(self, v, load=False):
    """
    Setter for interfaces, mapped from YANG variable /macsec/interfaces (container).

    Coerces ``v`` into the generated container class and stores it; raises
    ``ValueError`` if ``v`` is not compatible. Were this node config false,
    the setter would be considered private and only called by backends.
    """
    # Unwrap pyangbind union-typed values before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        coerced = YANGDynClass(v,base=yc_interfaces_openconfig_macsec__macsec_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interfaces must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_interfaces_openconfig_macsec__macsec_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)""",
        })
    self.__interfaces = coerced
    if hasattr(self, '_set'):
        self._set()
229,
110,
229,
19,
1506413981
] |
def __init__(self, *args, **kwargs):
    """Build the root container, optionally copying from one source object."""
    self._path_helper = False
    self._extmethods = False
    self.__macsec = YANGDynClass(base=yc_macsec_openconfig_macsec__macsec, is_container='container', yang_name="macsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    load = kwargs.pop("load", None)
    if not args:
        return
    if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
    source = args[0]
    # The source object must expose every pyangbind element of this class.
    if not all(hasattr(source, e) for e in self._pyangbind_elements):
        raise ValueError("Supplied object did not have the correct attributes")
    for e in self._pyangbind_elements:
        nobj = getattr(source, e)
        # Skip elements the source never changed from their defaults.
        if nobj._changed() is False:
            continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
            setmethod(getattr(source, e))
        else:
            setmethod(getattr(source, e), load=load)
229,
110,
229,
19,
1506413981
] |
def _get_macsec(self):
    """
    Getter for macsec, mapped from YANG variable /macsec (container).

    YANG Description: The MACsec.
    """
    return self.__macsec
229,
110,
229,
19,
1506413981
] |
def _set_macsec(self, v, load=False):
    """
    Setter for macsec, mapped from YANG variable /macsec (container).

    Coerces ``v`` into the generated container class and stores it; raises
    ``ValueError`` if ``v`` is not compatible. Were this node config false,
    the setter would be considered private and only called by backends.
    """
    # Unwrap pyangbind union-typed values before coercion.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        coerced = YANGDynClass(v,base=yc_macsec_openconfig_macsec__macsec, is_container='container', yang_name="macsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """macsec must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_macsec_openconfig_macsec__macsec, is_container='container', yang_name="macsec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/macsec', defining_module='openconfig-macsec', yang_type='container', is_config=True)""",
        })
    self.__macsec = coerced
    if hasattr(self, '_set'):
        self._set()
229,
110,
229,
19,
1506413981
] |
def train_sequence_model(data,
                         learning_rate=1e-3,
                         epochs=1000,
                         batch_size=128,
                         blocks=2,
                         filters=64,
                         dropout_rate=0.2,
                         embedding_dim=200,
                         kernel_size=3,
                         pool_size=3):
    """Trains sequence model on the given dataset.

    # Arguments
        data: tuples of training and test texts and labels.
        learning_rate: float, learning rate for training model.
        epochs: int, number of epochs.
        batch_size: int, number of samples per batch.
        blocks: int, number of pairs of sepCNN and pooling blocks in the model.
        filters: int, output dimension of sepCNN layers in the model.
        dropout_rate: float: percentage of input to drop at Dropout layers.
        embedding_dim: int, dimension of the embedding vectors.
        kernel_size: int, length of the convolution window.
        pool_size: int, factor by which to downscale input at MaxPooling layer.

    # Returns
        Tuple of (validation accuracy, validation loss) from the final epoch.

    # Raises
        ValueError: If validation data has label values which were not seen
            in the training data.
    """
    # Get the data.
    (train_texts, train_labels), (val_texts, val_labels) = data

    # Verify that validation labels are in the same range as training labels.
    num_classes = explore_data.get_num_classes(train_labels)
    unexpected_labels = [v for v in val_labels if v not in range(num_classes)]
    if unexpected_labels:
        raise ValueError('Unexpected label values found in the validation set:'
                         ' {unexpected_labels}. Please make sure that the '
                         'labels in the validation set are in the same range '
                         'as training labels.'.format(
                             unexpected_labels=unexpected_labels))

    # Vectorize texts.
    x_train, x_val, word_index = vectorize_data.sequence_vectorize(
        train_texts, val_texts)

    # Number of features will be the embedding input dimension. Add 1 for the
    # reserved index 0.
    num_features = min(len(word_index) + 1, TOP_K)

    # Create model instance.
    model = build_model.sepcnn_model(blocks=blocks,
                                     filters=filters,
                                     kernel_size=kernel_size,
                                     embedding_dim=embedding_dim,
                                     dropout_rate=dropout_rate,
                                     pool_size=pool_size,
                                     input_shape=x_train.shape[1:],
                                     num_classes=num_classes,
                                     num_features=num_features)

    # Compile model with learning parameters. Binary problems use a sigmoid
    # output; multi-class uses sparse labels, hence the loss choice.
    if num_classes == 2:
        loss = 'binary_crossentropy'
    else:
        loss = 'sparse_categorical_crossentropy'
    # FIX: use the supported `learning_rate` keyword — `lr` is a deprecated
    # alias removed in newer Keras releases.
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])

    # Create callback for early stopping on validation loss. If the loss does
    # not decrease in two consecutive tries, stop training.
    callbacks = [tf.keras.callbacks.EarlyStopping(
        monitor='val_loss', patience=2)]

    # Train and validate model.
    history = model.fit(
        x_train,
        train_labels,
        epochs=epochs,
        callbacks=callbacks,
        validation_data=(x_val, val_labels),
        verbose=2,  # Logs once per epoch.
        batch_size=batch_size)

    # Print results.
    history = history.history
    print('Validation accuracy: {acc}, loss: {loss}'.format(
        acc=history['val_acc'][-1], loss=history['val_loss'][-1]))

    # Save model.
    model.save('rotten_tomatoes_sepcnn_model.h5')
    return history['val_acc'][-1], history['val_loss'][-1]
659,
382,
659,
6,
1486495587
] |
def __init__(self, name, formatter=None):
    """Initialize the file-based codec; delegates to the base codec class."""
    super(FileCodec, self).__init__(name, formatter=formatter)
    # Sentinel value: subclasses are expected to overwrite this with their
    # real file extension, so a missing override is visible in filenames.
    self.extension = 'must-have-extension'
43,
22,
43,
30,
1418056941
] |
def _DecodeFile(self, videofile, encodedfile, workdir):
    """Decode encodedfile into a temporary YUV file and measure quality.

    Returns a tuple (psnr_output, decode_cpu_seconds, yuv_md5) where
    psnr_output is the raw stdout of the psnr tool and yuv_md5 is the MD5
    digest of the decoded YUV data.  Raises Exception if the decoder
    exits with a non-zero return code.
    """
    tempyuvfile = os.path.join(workdir,
                               videofile.basename + 'tempyuvfile.yuv')
    # A stale temp file from a previous run would corrupt the measurement.
    if os.path.isfile(tempyuvfile):
        print "Removing tempfile before decode:", tempyuvfile
        os.unlink(tempyuvfile)
    commandline = self.DecodeCommandLine(videofile, encodedfile, tempyuvfile)
    print commandline
    with open(os.path.devnull, 'r') as nullinput:
        # os.times()[2] is the cumulative CPU time of child processes, so the
        # delta below measures only the decoder subprocess.
        subprocess_cpu_start = os.times()[2]
        returncode = subprocess.call(commandline, shell=True,
                                     stdin=nullinput)
        if returncode:
            raise Exception('Decode failed with returncode %d' % returncode)
        subprocess_cpu = os.times()[2] - subprocess_cpu_start
        print "Decode took %f seconds" % subprocess_cpu
        # Compare the decoded YUV against the original with the psnr tool.
        commandline = encoder.Tool("psnr") + " %s %s %d %d 9999" % (
            videofile.filename, tempyuvfile, videofile.width,
            videofile.height)
        print commandline
        psnr = subprocess.check_output(commandline, shell=True, stdin=nullinput)
    # Checksum the decoded output so identical decodes can be detected.
    commandline = ['md5sum', tempyuvfile]
    md5 = subprocess.check_output(commandline, shell=False)
    yuv_md5 = md5.split(' ')[0]
    os.unlink(tempyuvfile)
    return psnr, subprocess_cpu, yuv_md5
43,
22,
43,
30,
1418056941
] |
def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
    """Return the shell command line that turns an YUV file into an
    encoded file.

    Abstract: concrete codec subclasses must override this method.
    """
    # pylint: disable=W0613,R0201
    raise encoder.Error('EncodeCommandLine not defined')
43,
22,
43,
30,
1418056941
] |
def ResultData(self, encodedfile):
    """Return extra result fields the codec knows how to generate.

    The base implementation has nothing to add and returns an empty dict.
    """
    # pylint: disable=W0613,R0201
    return {}
43,
22,
43,
30,
1418056941
] |
def EncoderVersion(self):
    """Abstract: concrete file codecs must report their own encoder version."""
    raise encoder.Error('File codecs must define their own version')
43,
22,
43,
30,
1418056941
] |
def MatroskaFrameInfo(encodedfile):
    """Return a list of per-frame dicts ({'size': bits}) for a Matroska file.

    Parses `mkvinfo -v` output for frame-size lines.
    """
    # Run the mkvinfo tool across the file to get frame size info.
    # NOTE(review): encodedfile is interpolated into a shell command —
    # injection risk if the filename can contain untrusted characters.
    commandline = 'mkvinfo -v %s' % encodedfile
    print commandline
    mkvinfo = subprocess.check_output(commandline, shell=True)
    frameinfo = []
    for line in mkvinfo.splitlines():
        match = re.search(r'Frame with size (\d+)', line)
        if match:
            # The mkvinfo tool gives frame size in bytes. We want bits.
            frameinfo.append({'size': int(match.group(1))*8})
    return frameinfo
43,
22,
43,
30,
1418056941
] |
def run_in_shell(cmd):
    """Execute `cmd` through the shell; raises CalledProcessError on failure.

    NOTE(review): shell=True — `cmd` must never contain untrusted input.
    """
    subprocess.check_call(cmd, shell=True)
29,
19,
29,
7,
1549367563
] |
def rsaset(tb, tff, nb, base, ml):
    """Generate and compile the BIG/FF/RSA sources for one RSA build variant.

    tb and base name the BIG arithmetic variant, tff the FF/RSA variant,
    nb the big-number byte length and ml the FF multiplier; placeholder
    tokens (XXX, WWW, @NB@, @BASE@, @ML@) in the template sources are
    rewritten accordingly before the .c files are compiled.
    """
    bd = tb + "_" + base

    def _copy(template, target):
        # copytext is the platform copy command prefix defined at module level.
        run_in_shell(copytext + " " + template + " " + target)

    def _compile(source):
        run_in_shell("gcc -O3 -std=c99 -c " + source)

    # BIG configuration header.
    fnameh = "config_big_" + bd + ".h"
    _copy("config_big.h", fnameh)
    replace(fnameh, "XXX", bd)
    replace(fnameh, "@NB@", nb)
    replace(fnameh, "@BASE@", base)

    # FF configuration header.
    fnameh = "config_ff_" + tff + ".h"
    _copy("config_ff.h", fnameh)
    replace(fnameh, "XXX", bd)
    replace(fnameh, "WWW", tff)
    replace(fnameh, "@ML@", ml)

    # BIG implementation and header, then compile.
    fnamec = "big_" + bd + ".c"
    fnameh = "big_" + bd + ".h"
    _copy("big.c", fnamec)
    _copy("big.h", fnameh)
    replace(fnamec, "XXX", bd)
    replace(fnameh, "XXX", bd)
    _compile(fnamec)

    # FF implementation and header, then compile.
    fnamec = "ff_" + tff + ".c"
    fnameh = "ff_" + tff + ".h"
    _copy("ff.c", fnamec)
    _copy("ff.h", fnameh)
    replace(fnamec, "WWW", tff)
    replace(fnamec, "XXX", bd)
    replace(fnameh, "WWW", tff)
    replace(fnameh, "XXX", bd)
    _compile(fnamec)

    # RSA implementation and header, then compile.
    fnamec = "rsa_" + tff + ".c"
    fnameh = "rsa_" + tff + ".h"
    _copy("rsa.c", fnamec)
    _copy("rsa.h", fnameh)
    replace(fnamec, "WWW", tff)
    replace(fnamec, "XXX", bd)
    replace(fnameh, "WWW", tff)
    replace(fnameh, "XXX", bd)
    _compile(fnamec)
29,
19,
29,
7,
1549367563
] |
def generate_custom_fields():
    """Ensure the CustomFields used by the Google Cloud Function blueprint exist."""
    # (name, defaults) pairs, created in the original declaration order.
    field_specs = [
        ('function_name',
         {'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Name given to the Google Cloud function'}),
        ('available_memory_mb',
         {'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
          'description': 'Memory allocated to the Google Cloud function'}),
        ('entry_point',
         {'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Name of a function exported by the module specified in '
                         'directory with source code'}),
        ('runtime',
         {'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}),
        ('service_account_email',
         {'label': 'serviceAccountEmail', 'type': 'STR', 'show_as_attribute': False,
          'description': 'Service account that the function will assume as its identity.'}),
        ('https_trigger',
         {'label': 'HttpsTrigger', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Url to trigger the google function'}),
        ('source_archive_url',
         {'label': 'sourceArchiveUrl', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Url to where the source code of the function is located.'}),
        ('google_rh_id',
         {'label': 'Resource Handler', 'type': 'STR', 'show_as_attribute': False}),
    ]
    for field_name, defaults in field_specs:
        CustomField.objects.get_or_create(name=field_name, defaults=defaults)
34,
32,
34,
16,
1437063482
] |
def generate_options_for_env_id(server=None, **kwargs):
    """Return (id, name) choices for every GCP environment.

    Raises RuntimeError when no Google Cloud Platform resource handler exists.
    """
    gcp_envs = Environment.objects.filter(
        resource_handler__resource_technology__name="Google Cloud Platform")
    options = [(env.id, env.name) for env in gcp_envs]
    if not options:
        raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
    return options
34,
32,
34,
16,
1437063482
] |
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
    """Return Cloud Storage bucket names for the environment picked upstream.

    Args:
        control_value: Environment id selected in the dependent form field;
            when falsy no environment is selected yet and no options exist.

    Returns:
        List of bucket name strings (possibly empty).
    """
    if not control_value:
        return []
    environment = Environment.objects.get(id=control_value)
    project_id = environment.gcp_project
    rh = environment.resource_handler.cast()
    project = rh.gcp_projects.get(id=project_id).name
    storage_client = create_build_client(rh, project_id, 'storage')
    list_bucket = storage_client.buckets().list(project=project).execute()
    # BUG FIX: the API response omits 'items' entirely when the project has
    # no buckets; defaulting to [] avoids iterating over None.
    return [bucket.get('name') for bucket in list_bucket.get('items', [])]
34,
32,
34,
16,
1437063482
] |
def generate_options_for_available_memory_mb(**kwargs):
    """Return the (value_in_mb, display_label) memory choices for Cloud Functions."""
    sizes = (
        (128, '128 MB'),
        (256, '256 MB'),
        (512, '512 MB'),
        (1024, '1 GB'),
        (2048, '2 GB'),
    )
    return list(sizes)
34,
32,
34,
16,
1437063482
] |
def create_build_client(rh, project_id, servicename):
    """Create a Google API discovery client for `servicename` (v1).

    Credentials come from the service-account JSON stored on the resource
    handler's GCP project record.
    """
    project = rh.gcp_projects.get(id=project_id)
    account_info = json.loads(project.service_account_info)
    credentials = service_account.Credentials.from_service_account_info(account_info)
    return build(servicename, "v1", credentials=credentials, cache_discovery=False)
34,
32,
34,
16,
1437063482
] |
def create_file_with_sourcecode(sourcecode):
    """Zip the file referenced by `sourcecode` and wrap it for upload.

    `sourcecode` is a filesystem path or a MEDIA_URL-relative URL; returns
    a MediaIoBaseUpload wrapping an in-memory zip archive that contains the
    single source file.
    """
    path = sourcecode
    filename = Path(sourcecode).name
    if path.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
    # join() leaves an already-absolute path untouched.
    path = os.path.join(settings.MEDIA_ROOT, path)
    archive = io.BytesIO()
    with zipfile.ZipFile(archive, 'w') as zip_archive:
        with open(path, 'r') as source_file:
            entry = zipfile.ZipInfo(filename)
            zip_archive.writestr(entry, source_file.read())
    # Rewind so the uploader reads the archive from the start.
    archive.seek(0)
    return MediaIoBaseUpload(archive, mimetype='application/zip')
34,
32,
34,
16,
1437063482
] |
def run(resource, logger=None, **kwargs):
    """CloudBolt plug-in entry point: bind the user's action inputs.

    The '{{ ... }}' tokens are CloudBolt template tags that are substituted
    with the chosen action-input values before this code is executed.
    """
    environment = Environment.objects.get(id='{{ env_id }}')
    function_name = '{{ function_name }}'
    source_code = """{{ source_code }}"""
    entry_point = '{{ entry_point }}'
    available_memory_mb = '{{ available_memory_mb }}'
    runtime = '{{ runtime }}'
    bucket = '{{ bucket_to_store_sourcecode }}'
    cloud_storage_location = '{{ cloud_storage_location }}'
    enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
    region = "{{gcp_region}}"
    rh = environment.resource_handler.cast()
    # NOTE(review): this excerpt ends here — the remainder of the action
    # (presumably the function deployment using the values above) is not
    # visible; confirm against the full source.
34,
32,
34,
16,
1437063482
] |
def __init__(self, accounts=None, debug=False):
    """Delegate watcher initialisation to the base class unchanged."""
    super(ElasticSearchService, self).__init__(accounts=accounts, debug=debug)
1,
2,
1,
2,
1512311697
] |
def slurp_items(**kwargs):
    """Collect one item per non-ignored ES domain in the requested region.

    Returns (item_list, exception_map).

    NOTE(review): `self` is a free variable below — this function appears to
    be a closure defined inside a watcher method in the original source;
    confirm the enclosing scope supplies it.
    """
    item_list = []
    exception_map = {}
    kwargs['exception_map'] = exception_map
    # Resolve the account name to its AWS account number for ARN building.
    account_db = Account.query.filter(Account.name == kwargs['account_name']).first()
    account_num = account_db.identifier
    es_info = self.get_all_es_domains_in_region(**kwargs)
    if es_info is None:
        return item_list, exception_map
    (client, domains) = es_info
    app.logger.debug("Found {} {}".format(len(domains), ElasticSearchService.i_am_plural))
    for domain in domains:
        if self.check_ignore_list(domain["DomainName"]):
            continue
        # Fetch the policy:
        item = self.build_item(domain["DomainName"], client, account_num, **kwargs)
        if item:
            item_list.append(item)
    return item_list, exception_map
1,
2,
1,
2,
1512311697
] |
def get_all_es_domains_in_region(self, **kwargs):
    """Return (boto3 ES client, domain dicts) for the account/region in kwargs."""
    from security_monkey.common.sts_connect import connect
    client = connect(kwargs['account_name'], "boto3.es.client", region=kwargs['region'])
    app.logger.debug("Checking {}/{}/{}".format(
        ElasticSearchService.index, kwargs['account_name'], kwargs['region']))
    # No need to paginate according to: client.can_paginate("list_domain_names")
    domains = self.wrap_aws_rate_limited_call(client.list_domain_names)["DomainNames"]
    return client, domains
1,
2,
1,
2,
1512311697
] |
def build_item(self, domain, client, account_num, **kwargs):
    """Build an ElasticSearchServiceItem (arn, access policy, name) for one domain."""
    arn = ARN_PREFIX + ':es:{region}:{account_number}:domain/{domain_name}'.format(
        region=kwargs['region'],
        account_number=account_num,
        domain_name=domain)
    domain_config = self.wrap_aws_rate_limited_call(
        client.describe_elasticsearch_domain_config, DomainName=domain)
    # An empty Options string means no access policy is attached.
    policy_options = domain_config["DomainConfig"]["AccessPolicies"]["Options"]
    policy = {} if policy_options == "" else json.loads(policy_options)
    config = {
        'arn': arn,
        'policy': policy,
        'name': domain,
    }
    return ElasticSearchServiceItem(region=kwargs['region'], account=kwargs['account_name'],
                                    name=domain, arn=arn, config=config)
1,
2,
1,
2,
1512311697
] |
def setup_db():
    """(Re)create the gitdox sqlite database with empty docs/metadata/validate tables."""
    dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep + "gitdox.db"
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    # Start from a clean slate.
    for table in ("docs", "users", "metadata", "validate"):
        cur.execute("DROP TABLE IF EXISTS " + table)
    conn.commit()
    # The users table is currently unused and is deliberately not recreated.
    cur.execute('''CREATE TABLE IF NOT EXISTS docs
        (id INTEGER PRIMARY KEY AUTOINCREMENT, name text, corpus text, status text,assignee_username text ,filename text, content text, mode text, schema text, validation text, timestamp text, cache text)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS metadata
        (docid INTEGER, metaid INTEGER PRIMARY KEY AUTOINCREMENT, key text, value text, corpus_meta text, UNIQUE (docid, metaid) ON CONFLICT REPLACE, UNIQUE (docid, key) ON CONFLICT REPLACE)''')
    cur.execute('''CREATE TABLE IF NOT EXISTS validate
        (doc text, corpus text, domain text, name text, operator text, argument text, id INTEGER PRIMARY KEY AUTOINCREMENT)''')
    conn.commit()
    conn.close()
13,
4,
13,
28,
1480629062
] |
def generic_query(sql, params, return_new_id=False):
    """Run one SQL statement against gitdox.db.

    Example: generic_query("DELETE FROM rst_nodes WHERE doc=? and project=?",
    (doc, project))

    Args:
        sql: statement text with ? placeholders.
        params: parameter tuple, or None for statements without placeholders.
        return_new_id: if True, return cur.lastrowid (useful for INSERTs)
            instead of the fetched rows.

    Returns:
        The fetched rows (possibly empty list), or the new row id when
        return_new_id is True.
    """
    dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep + "gitdox.db"
    conn = sqlite3.connect(dbpath)
    try:
        # `with conn` manages only the transaction (commit/rollback); it does
        # NOT close the connection.
        with conn:
            cur = conn.cursor()
            if params is not None:
                cur.execute(sql, params)
            else:
                cur.execute(sql)
            if return_new_id:
                return cur.lastrowid
            return cur.fetchall()
    finally:
        # BUG FIX: the connection was previously leaked on every call.
        conn.close()
13,
4,
13,
28,
1480629062
] |
def invalidate_ether_docs(doc, corpus):
    """Clear stored validation results for matching spreadsheet-mode documents."""
    generic_query(
        "UPDATE docs SET validation=NULL WHERE name like ? and corpus like ? and mode = 'ether'",
        (doc, corpus))
13,
4,
13,
28,
1480629062
] |
def doc_exists(doc, corpus):
    """Return True if a document named `doc` exists in `corpus`."""
    matches = generic_query("SELECT name from docs where name=? and corpus=?", (doc, corpus))
    return len(matches) > 0
13,
4,
13,
28,
1480629062
] |
def update_assignee(doc_id, user_name):
    """Assign the document with id `doc_id` to `user_name`."""
    generic_query("UPDATE docs SET assignee_username=? WHERE id=?", (user_name, doc_id))
13,
4,
13,
28,
1480629062
] |
def update_docname(id, docname):
    """Rename document `id` and invalidate its cached validation."""
    generic_query("UPDATE docs SET name=? WHERE id=?", (docname, id))
    invalidate_doc_by_id(id)
13,
4,
13,
28,
1480629062
] |
def update_corpus(id, corpusname):
    """Move document `id` to `corpusname` and invalidate its cached validation."""
    generic_query("UPDATE docs SET corpus=? WHERE id=?", (corpusname, id))
    invalidate_doc_by_id(id)
13,
4,
13,
28,
1480629062
] |
def update_schema(id, schema):
    """Set the validation schema for document `id`."""
    generic_query("UPDATE docs SET schema=? WHERE id=?", (schema, id))
13,
4,
13,
28,
1480629062
] |
def cell(text):
    """Wrap `text` (str or int) in an HTML table cell, prefixed with a newline."""
    content = str(text) if isinstance(text, int) else text
    return "\n <td>" + content + "</td>"
13,
4,
13,
28,
1480629062
] |
def save_meta(doc_id, key, value, corpus=False):
    """Insert or update one metadata key/value pair; return the new row id.

    With corpus=True the pair is stored against the document's corpus
    (docid NULL); otherwise against the document itself. Cached validation
    for the document is invalidated either way.
    """
    if corpus:
        _, corpus_name, _, _, _, _, _ = get_doc_info(doc_id)
        new_id = generic_query(
            "REPLACE INTO metadata(docid,key,value,corpus_meta) VALUES(?,?,?,?)",
            (None, key, value, corpus_name), return_new_id=True)
    else:
        new_id = generic_query(
            "INSERT OR REPLACE INTO metadata(docid,key,value,corpus_meta) VALUES(?,?,?,?)",
            (doc_id, key, value, None), return_new_id=True)
    invalidate_doc_by_id(doc_id)
    return new_id
13,
4,
13,
28,
1480629062
] |
def get_doc_info(doc_id):
    """Return (name, corpus, filename, status, assignee, mode, schema) for a doc.

    Returns an empty result list when no document with that id exists.
    """
    rows = generic_query(
        "SELECT name,corpus,filename,status,assignee_username,mode,schema FROM docs WHERE id=?",
        (int(doc_id),))
    return rows[0] if len(rows) > 0 else rows
13,
4,
13,
28,
1480629062
] |
def get_all_doc_ids_for_corpus(corpus):
    """Return the ids of every document belonging to `corpus`."""
    rows = generic_query("SELECT id FROM docs WHERE corpus=?", (corpus,))
    # map() keeps the original (Python 2) return type for callers.
    return map(lambda row: row[0], rows)
13,
4,
13,
28,
1480629062
] |
def get_doc_meta(doc_id, corpus=False):
    """Return metadata rows for a document, or for its corpus when corpus=True."""
    if not corpus:
        return generic_query(
            "SELECT * FROM metadata WHERE docid=? ORDER BY key COLLATE NOCASE",
            (int(doc_id),))
    fields = get_doc_info(doc_id)
    if len(fields) == 0:
        # Unknown document: no corpus to look up.
        return []
    _, corpus_name, _, _, _, _, _ = fields
    return generic_query(
        "SELECT * FROM metadata WHERE corpus_meta=? ORDER BY key COLLATE NOCASE",
        (corpus_name,))
13,
4,
13,
28,
1480629062
] |
def get_validate_rules(sort=None, domain=None):
    """Fetch validation rules, optionally filtered by domain and ordered.

    Args:
        sort: optional ORDER BY expression.
            NOTE(review): concatenated directly into the SQL — callers must
            only pass trusted column expressions.
        domain: optional rule domain ('xml', 'ether', 'meta', ...).

    Returns rows of (corpus, doc, domain, name, operator, argument, id).
    """
    query = "SELECT corpus, doc, domain, name, operator, argument, id FROM validate"
    args = []
    if domain:
        query += " WHERE domain=? "
        args.append(domain)
    if sort:
        query += " ORDER BY " + sort
    return generic_query(query, args)
13,
4,
13,
28,
1480629062
] |
def get_xml_rules():
    """Return all validation rules in the 'xml' domain."""
    return get_validate_rules(domain='xml')
13,
4,
13,
28,
1480629062
] |
def get_ether_rules():
    """Return all validation rules in the 'ether' domain."""
    return get_validate_rules(domain='ether')
13,
4,
13,
28,
1480629062
] |
def create_validate_rule(doc, corpus, domain, name, operator, argument):
    """Insert a new validation rule and invalidate affected cached validations.

    Returns the new rule's row id.
    """
    new_id = generic_query(
        "INSERT INTO validate(doc,corpus,domain,name,operator,argument) VALUES(?,?,?,?,?,?)",
        (doc, corpus, domain, name, operator, argument), return_new_id=True)
    # Meta rules touch every document; other rules only affect spreadsheet docs.
    if domain == "meta":
        invalidate_doc_by_name("%", "%")
    else:
        invalidate_ether_docs("%", "%")
    return new_id
13,
4,
13,
28,
1480629062
] |
def update_validate_rule(doc, corpus, domain, name, operator, argument, id):
    """Overwrite the validation rule with the given id, then invalidate caches."""
    generic_query(
        "UPDATE validate SET doc = ?, corpus = ?, domain = ?, name = ?, operator = ?, argument = ? WHERE id = ?",
        (doc, corpus, domain, name, operator, argument, id))
    # Same invalidation policy as create_validate_rule.
    if domain == "meta":
        invalidate_doc_by_name("%", "%")
    else:
        invalidate_ether_docs("%", "%")
13,
4,
13,
28,
1480629062
] |
def run_prepare_receptor_spark(receptor):
    """Build the prepare_receptor4 command line converting *receptor* to PDBQT.

    The output path is derived from the broadcast pdbqt_receptor_path and the
    receptor's model name; the command invokes MGLTools' pythonsh with the
    prepare_receptor4 script in verbose mode.

    NOTE(review): `command` is constructed but never executed or returned in
    this visible body -- presumably the remainder of the function (running the
    command) was truncated. Confirm against the full source.
    """
    receptor_pdbqt = os.path.join(pdbqt_receptor_path.value,
                                  get_name_model_pdb(receptor))
    command = ''.join([pythonsh.value,
                       ' ',
                       script_receptor4.value,
                       ' -r ',
                       receptor,
                       ' -o ',
                       receptor_pdbqt,
                       '.pdbqt',
                       ' -v '])
3,
6,
3,
6,
1417717511
] |
def get_test_environment():
    """Return the process-wide TestEnvironment, creating it lazily from
    DEFAULT_ENV on first use."""
    global _test_env
    if not _test_env:
        _test_env = TestEnvironment(**DEFAULT_ENV)
    return _test_env
24488,
4264,
24488,
2914,
1477424310
] |
def get_test_env_var(key: str, default: Optional[str] = None):
    """Look up *key* in the shared test environment, returning *default* if absent."""
    return get_test_environment().get(key, default)
24488,
4264,
24488,
2914,
1477424310
] |
def load_schema_file(path: Optional[str] = None) -> Dict:
    """Parse and return the JSON schema at *path*.

    Falls back to RELEASE_TEST_SCHEMA_FILE when no path is given.
    """
    schema_path = path if path else RELEASE_TEST_SCHEMA_FILE
    with open(schema_path, "rt") as schema_file:
        return json.load(schema_file)
24488,
4264,
24488,
2914,
1477424310
] |
def validate_test(test: Test, schema: Optional[Dict] = None) -> Optional[str]:
    """Validate *test* against the release-test JSON schema.

    Returns None when the test validates, otherwise the error message text.
    """
    schema = schema or load_schema_file()
    try:
        jsonschema.validate(test, schema=schema)
    except (jsonschema.ValidationError, jsonschema.SchemaError) as err:
        return str(err.message)
    except Exception as err:  # defensive: surface unexpected failures as text
        return str(err)
24488,
4264,
24488,
2914,
1477424310
] |
def as_smoke_test(test: Test) -> Test:
    """Fold a test's smoke_test configuration into the test itself.

    NOTE: mutates *test* in place (pops the "smoke_test" key). If no smoke
    test configuration exists a warning is logged and the test is returned
    unchanged.
    """
    if "smoke_test" not in test:
        logger.warning(
            f"Requested smoke test, but test with name {test['name']} does "
            f"not have any smoke test configuration."
        )
        return test
    smoke_config = test.pop("smoke_test")
    return deep_update(test, smoke_config)
24488,
4264,
24488,
2914,
1477424310
] |
def load_and_render_yaml_template(
template_path: str, env: Optional[Dict] = None | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def load_test_cluster_env(test: Test, ray_wheels_url: str) -> Optional[Dict]:
    """Render the test's cluster env YAML template with Ray wheel info filled in."""
    cluster_env_file = test["cluster"]["cluster_env"]
    working_dir = test.get("working_dir", "")
    cluster_env_path = os.path.join(RELEASE_PACKAGE_DIR, working_dir,
                                    cluster_env_file)
    env = get_test_environment()
    commit = env.get("RAY_COMMIT", None)
    env["RAY_WHEELS_SANITY_CHECK"] = get_wheels_sanity_check(commit)
    env["RAY_WHEELS"] = ray_wheels_url
    return load_and_render_yaml_template(cluster_env_path, env=env)
24488,
4264,
24488,
2914,
1477424310
] |
def add_partition_statements(partitions, bucket, table_name):
    """Generate ALTER TABLE commands from existing partitions.

    Yields Athena statement strings, each kept under the Athena query length
    limit of 262144 bytes (MAX_QUERY_LENGTH).
    https://docs.aws.amazon.com/athena/latest/ug/service-limits.html

    Args:
        partitions (set): The unique set of partitions gathered from Athena
        bucket (str): The bucket name
        table_name (str): The name of the Athena table

    Yields:
        string: The ALTER TABLE statements to add the new partitions
    """
    # Every statement starts with the same ALTER TABLE prefix.
    prefix = 'ALTER TABLE {} ADD IF NOT EXISTS'.format(table_name)
    prefix_len = len(prefix)

    # Accumulate pieces in a list and join with spaces at yield time.
    pieces = [prefix]
    total_len = prefix_len

    fmt_values = {
        'bucket': bucket,
        'table_name': table_name
    }
    for partition in sorted(partitions):
        match = PARTITION_PARTS.match(partition)
        if not match:
            continue
        fmt_values.update(match.groupdict())
        part_sql = PARTITION_STMT.format(**fmt_values)

        # One separating space is added per existing piece when joining;
        # track the whole statement's length so it never exceeds the limit.
        if total_len + len(part_sql) + len(pieces) >= MAX_QUERY_LENGTH:
            # About to exceed the limit: flush and start a fresh statement.
            yield ' '.join(pieces)
            pieces = [prefix]
            total_len = prefix_len

        total_len += len(part_sql)
        pieces.append(part_sql)

    yield ' '.join(pieces)
2767,
342,
2767,
92,
1485047456
] |
def unique_values_from_query(query_result):
    """Simplify Athena query results into a set of values.

    Useful for listing tables, partitions, databases, enable_metrics.

    Args:
        query_result (dict): The result of run_athena_query

    Returns:
        set: Unique values from the query result
    """
    values = set()
    for row in query_result['ResultSet']['Rows']:
        for cell in row['Data']:
            values.update(cell.values())
    return values
2767,
342,
2767,
92,
1485047456
] |
def generate_alerts_table_schema():
    """Generate the schema for the alerts table in terraform using a fake alert.

    Returns:
        dict: Equivalent Athena schema used for the create table statement
    """
    fake_alert = Alert('temp_rule_name', {}, {})
    record_schema = record_to_schema(fake_alert.output_dict())
    athena_schema = logs_schema_to_athena_schema(record_schema, False)
    return format_schema_tf(athena_schema)
2767,
342,
2767,
92,
1485047456
] |
def WriteKeyPath(self, key_path):
    """Writes a key path to the output, terminated by a newline.

    Args:
      key_path (str): key path.
    """
    self.WriteText('{0:s}\n'.format(key_path))
122,
17,
122,
5,
1411881319
] |
def Main():
  """The main program function.

  Parses the command line, opens the given Windows Registry file (trying the
  newer REGF format first, then the legacy CREG format), and writes a catalog
  of its keys and values to stdout.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(description=(
      'Extracts a catalog of Windows Registry keys and values.'))

  argument_parser.add_argument(
      'source', nargs='?', action='store', metavar='PATH', default=None,
      help='path of a Windows Registry file.')

  options = argument_parser.parse_args()

  if not options.source:
    print('Source value is missing.')
    print('')
    argument_parser.print_help()
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')

  with open(options.source, 'rb') as file_object:
    # Try the REGF (NT-family) format first.
    try:
      registry_file = dfwinreg_regf.REGFWinRegistryFile()
      registry_file.Open(file_object)
    except IOError:
      registry_file = None

    # Fall back to the CREG (Windows 9x) format.
    if not registry_file:
      try:
        registry_file = dfwinreg_creg.CREGWinRegistryFile()
        registry_file.Open(file_object)
      except IOError:
        registry_file = None

    if not registry_file:
      print('Unable to open Windows Registry file.')
      return False

    # Using dfWinReg to determine Windows native key paths if available.
    registry = dfwinreg_registry.WinRegistry()
    key_path_prefix = registry.GetRegistryFileMapping(registry_file)
    registry_file.SetKeyPathPrefix(key_path_prefix)
    root_key = registry_file.GetRootKey()

    output_writer_object = StdoutWriter()
    if not output_writer_object.Open():
      print('Unable to open output writer.')
      print('')
      return False

    # Ensure the output writer is closed even if collection raises.
    try:
      has_results = False
      if root_key:
        collector_object = catalog.CatalogCollector()
        has_results = collector_object.Collect(root_key, output_writer_object)
    finally:
      output_writer_object.Close()

    if not has_results:
      print('No catalog keys and values found.')

  return True
122,
17,
122,
5,
1411881319
] |
def convertMAC(mac):
    """Insert colons into a 12-character MAC string.

    e.g. 'aabbccddeeff' -> 'aa:bb:cc:dd:ee:ff'. Like the original, raises
    IndexError when fewer than 12 characters are supplied.
    """
    pairs = [mac[i] + mac[i + 1] for i in range(0, 12, 2)]
    return ':'.join(pairs)
754,
196,
754,
40,
1441859311
] |
def main():
    # Demo driver for the fiblary Home Center 2 client (Python 2 syntax):
    # queries several API endpoints and prints the raw results.
    hc2 = Client(
        'v3',
        'http://192.168.1.230/api/',  # hard-coded demo controller address
        'admin',
        'admin'
    )

    info = hc2.info.get()
    print info

    weather = hc2.weather.get()
    print weather

    login = hc2.login.get()
    print login

    # Fetch a single device by id.
    devices = hc2.devices.get(1)
    print devices

    # List devices filtered by name, then by id, printing each device name.
    devices = hc2.devices.list(name='Ceiling Lamp')
    print devices
    print type(devices)
    for device in devices:
        print device.name

    devices = hc2.devices.list(id=1)
    for device in devices:
        print device.name
9,
16,
9,
3,
1390870365
] |
def generate_jobstep_data():
    """Build a minimal *valid* jobstep payload that should drive a full run."""
    snapshot_id = 'a1028849e8cf4ff0a7d7fdfe3c4fe925'
    return {
        'status': {'id': 'queued'},
        'data': {},
        'expectedSnapshot': None,
        'snapshot': {'id': snapshot_id},
    }
] |
def teardown_function(function):
    """Pytest teardown hook: assert the test left no stray threads running.

    Uses threading.active_count(); the camelCase activeCount() alias is
    deprecated (removed in Python 3.12).
    """
    assert threading.active_count() == 1
8,
7,
8,
3,
1405481019
] |
def test_local_run(mock_run):
    """A bare '-- cmd' invocation runs locally with all default options."""
    wrapper = WrapperCommand([
        '--', 'echo 1',
    ])
    wrapper.run()

    expected = dict(
        release='precise',
        user='ubuntu',
        cmd=['echo 1'],
        script=None,
        snapshot=None,
        save_snapshot=False,
        s3_bucket=None,
        pre_launch=None,
        post_launch=None,
        validate=True,
        flush_cache=False,
        clean=False,
        keep=False,
    )
    mock_run.assert_called_once_with(**expected)
8,
7,
8,
3,
1405481019
] |
def test_remote_run(mock_run, mock_api_cls):
    """A jobstep fetched from the API should drive a changes-client run."""
    jobstep_id = uuid4()
    mock_api = mock_api_cls.return_value
    mock_api.get_jobstep.return_value = generate_jobstep_data()

    wrapper = WrapperCommand([
        '--jobstep-id', jobstep_id.hex,
        '--api-url', 'http://changes.example.com',
    ])
    wrapper.run()

    expected = dict(
        release='precise',
        user='ubuntu',
        cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex],
        snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925',
        save_snapshot=False,
        s3_bucket=None,
        pre_launch=None,
        post_launch=None,
        validate=True,
        flush_cache=False,
        clean=False,
        keep=False,
    )
    mock_run.assert_called_once_with(**expected)
8,
7,
8,
3,
1405481019
] |
def test_already_finished_job(mock_run, mock_api_cls):
    """A jobstep already marked finished must not trigger a run."""
    jobstep_id = uuid4()
    jobstep_data = generate_jobstep_data()
    jobstep_data['status']['id'] = 'finished'
    mock_api_cls.return_value.get_jobstep.return_value = jobstep_data

    wrapper = WrapperCommand([
        '--jobstep-id', jobstep_id.hex,
        '--api-url', 'http://changes.example.com',
    ])
    wrapper.run()

    assert not mock_run.called
8,
7,
8,
3,
1405481019
] |
def test_image_export_reference(exporters, state, bpy_image_default, gltf_image_default):
    """REFERENCE storage should emit a relative URI pointing at the file."""
    state['settings']['images_data_storage'] = 'REFERENCE'
    gltf_image_default['uri'] = '../filepath.png'

    exported = exporters.ImageExporter.export(state, bpy_image_default)
    assert exported == gltf_image_default
314,
50,
314,
14,
1455166435
] |
def test_image_export_embed_glb(exporters, state, bpy_image_default, gltf_image_default):
    """EMBED + binary export should reference a buffer view with a PNG mime type."""
    state['settings']['images_data_storage'] = 'EMBED'
    state['settings']['gltf_export_binary'] = True
    gltf_image_default['mimeType'] = 'image/png'
    gltf_image_default['bufferView'] = 'bufferView_buffer_Image_0'

    exported = exporters.ImageExporter.export(state, bpy_image_default)

    # Resolve reference placeholders before comparing against the expectation.
    for ref in state['references']:
        ref.source[ref.prop] = ref.blender_name

    assert exported == gltf_image_default
314,
50,
314,
14,
1455166435
] |
def test_image_check(exporters, state, bpy_image_default):
    """A default image must pass the exporter's check."""
    result = exporters.ImageExporter.check(state, bpy_image_default)
    assert result
314,
50,
314,
14,
1455166435
] |
def test_image_check_0_x(exporters, state, bpy_image_default):
    """An image with a zero-sized dimension must fail the check."""
    bpy_image_default.size = [0, 1]
    result = exporters.ImageExporter.check(state, bpy_image_default)
    assert result is not True
314,
50,
314,
14,
1455166435
] |
def get_active_topics(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/active_topics
    Retrieves available metadata for active topics (i.e. -- groups) with
    registered schemas.  A dict of <topic name>:<topic metadata> is returned.

    Raises TASRError when the request fails or returns a non-200 status.
    '''
    url = 'http://%s:%s/tasr/active_topics' % (host, port)
    resp = requests.get(url, timeout=timeout)
    # Idiom fix: compare against None with `is`, not `==`.
    if resp is None:
        raise TASRError('Timeout for request to %s' % url)
    if resp.status_code != 200:
        raise TASRError('Failed request to %s (status code: %s)' %
                        (url, resp.status_code))
    topic_metas = SubjectHeaderBot.extract_metadata(resp)
    return topic_metas
5,
1,
5,
1,
1435712483
] |
def register_schema(topic_name, schema_str, host=TASR_HOST,
                    port=TASR_PORT, timeout=TIMEOUT):
    ''' PUT /tasr/topic/<topic name>
    Register a schema string for a topic.  Returns a SchemaMetadata object
    with the topic-version, topic-timestamp and ID metadata.
    '''
    url = 'http://%s:%s/tasr/topic/%s' % (host, port, topic_name)
    headers = {'content-type': 'application/json; charset=utf8', }
    return reg_schema_from_url(url, method='PUT', data=schema_str,
                               headers=headers, timeout=timeout)
5,
1,
5,
1,
1435712483
] |
def get_schema_version(topic_name, version, host=TASR_HOST,
                       port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/topic/<topic name>/version/<version>
    Retrieve the schema registered for the given topic name at *version*
    (a positive integer).  Returns a RegisteredSchema object.
    '''
    url = 'http://%s:%s/tasr/topic/%s/version/%s' % (host, port,
                                                     topic_name, version)
    return reg_schema_from_url(url, timeout=timeout,
                               err_404='No such version.')
5,
1,
5,
1,
1435712483
] |
def schema_for_schema_str(schema_str, object_on_miss=False,
                          host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' POST /tasr/schema
    In essence this is very similar to the schema_for_id_str, but with the
    calculation of the ID string being moved to the server.  That is, the
    client POSTs the schema JSON itself, the server canonicalizes it, then
    calculates the SHA256-based ID string for what was sent, then looks for
    a matching schema based on that ID string.  This allows clients that do
    not know how to canonicalize or hash the schemas to find the metadata
    (is it registered, what version does it have for a topic) with what
    they have.

    A RegisteredSchema object is returned if the schema string POSTed has
    been registered for one or more topics.

    If the schema string POSTed has yet to be registered for a topic and the
    object_on_miss flag is True, a RegisteredSchema calculated for the POSTed
    schema string is returned (it will have no topic-versions as there are
    none).  This provides an easy way for a client to get the ID strings to
    use for subsequent requests.

    If the object_on_miss flag is False (the default), then a request for a
    previously unregistered schema will raise a TASRError.
    '''
    url = 'http://%s:%s/tasr/schema' % (host, port)
    headers = {'content-type': 'application/json; charset=utf8', }
    resp = requests.post(url, data=schema_str, headers=headers,
                         timeout=timeout)
    # Idiom fix: compare against None with `is`, not `==`.
    if resp is None:
        raise TASRError('Timeout for request to %s' % url)
    if resp.status_code == 200:
        # success -- return a normal reg schema
        ras = RegisteredAvroSchema()
        # Bug fix: requests.Response has no `.context` attribute (it would
        # raise AttributeError); the response body is in `.content`.
        ras.schema_str = resp.content
        schema_meta = SchemaHeaderBot.extract_metadata(resp)
        ras.update_from_schema_metadata(schema_meta)
        return ras
    elif resp.status_code == 404 and object_on_miss:
        # Not registered, but the caller asked for an object anyway.
        ras = RegisteredAvroSchema()
        ras.schema_str = schema_str
        schema_meta = SchemaHeaderBot.extract_metadata(resp)
        ras.update_from_schema_metadata(schema_meta)
        return ras
    raise TASRError('Schema not registered to any topics.')
5,
1,
5,
1,
1435712483
] |
def __init__(self, host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    '''Store the TASR connection settings used by the client methods.'''
    self.host = host
    self.port = port
    self.timeout = timeout
5,
1,
5,
1,
1435712483
] |
def get_active_topics(self):
    '''Return a dict of <topic name>:<metadata> for active topics.'''
    return get_active_topics(host=self.host, port=self.port,
                             timeout=self.timeout)
5,
1,
5,
1,
1435712483
] |
def register_schema(self, topic_name, schema_str):
    '''Register a schema for a topic.

    Bug fix: previously this ignored the client's host, port and timeout and
    always hit the module defaults; now the instance settings are passed
    through, consistent with the other client methods.
    '''
    return register_schema(topic_name, schema_str,
                           self.host, self.port, self.timeout)
5,
1,
5,
1,
1435712483
] |
def get_schema_version(self, topic_name, version=None):
    '''Get a schema for the topic at the given version.'''
    return get_schema_version(topic_name, version,
                              host=self.host, port=self.port,
                              timeout=self.timeout)
5,
1,
5,
1,
1435712483
] |
def test_constructor_and_read_properties(self):
    """ShapeDtype exposes the shape and dtype it was constructed with."""
    shape_dtype = ShapeDtype((2, 3), np.int32)
    self.assertEqual(shape_dtype.shape, (2, 3))
    self.assertEqual(shape_dtype.dtype, np.int32)
7391,
769,
7391,
106,
1570288154
] |
def test_signature_on_ndarray(self):
    """shapes.signature reads shape and dtype off a concrete ndarray."""
    values = np.array([[2, 3, 5, 7],
                       [11, 13, 17, 19]],
                      dtype=np.int16)
    signature = shapes.signature(values)
    self.assertEqual(signature.shape, (2, 4))
    self.assertEqual(signature.dtype, np.int16)
7391,
769,
7391,
106,
1570288154
] |
def test_splice_signatures(self):
    """splice_signatures flattens ShapeDtypes, tuples and empty tuples in order."""
    sds = [ShapeDtype((n,)) for n in range(1, 6)]
    # Signatures can be ShapeDtype instances, tuples of 2+ ShapeDtype
    # instances, or empty tuples.
    spliced = shapes.splice_signatures(
        sds[0], (sds[1], sds[2], sds[3]), (), sds[4])
    self.assertEqual(spliced, tuple(sds))
7391,
769,
7391,
106,
1570288154
] |
def run_test(cmd, *args):
    """Run tests/<cmd> with *args* and return its captured stdout.

    Uses Popen.communicate() instead of reading stdout directly: it both
    drains the pipe and waits on the child, so no zombie process is left
    behind (the original read stdout but never reaped the child).
    """
    cmd = PJ('tests', cmd)
    p = subprocess.Popen([cmd] + list(args), stdout=subprocess.PIPE)
    out, _ = p.communicate()
    return out
207,
16,
207,
36,
1305764021
] |
def test_t_dump_image():
    """Dump an image, reload it, and compare output against the .exp file."""
    # generate the output
    run_test('t_dump_image')
    # load the image and run it
    exp0 = run_test('t_dump_image', '-l')
    # Resource fix: close the expectation file promptly instead of leaking
    # the handle returned by a bare open().read().
    with open('tests/t_dump_image.exp') as exp_file:
        exp1 = exp_file.read()
    assert exp0 == exp1
207,
16,
207,
36,
1305764021
] |
def test_t22():
    """t22 prints five closures, then two 0..5 runs, then '#u'."""
    lines = run_test('t22').split('\n')
    assert lines[0].count('<closure pc=') == 5
    digits = [str(n) for n in range(6)]
    assert lines[1:] == digits + digits + ['#u', '']
207,
16,
207,
36,
1305764021
] |
def test_t_vm():
    """Running bytecode t11.byc should end with '7 #u'."""
    output = run_test('t_vm', 'vm/tests/t11.byc')
    assert output.split()[-2:] == ['7', '#u']
207,
16,
207,
36,
1305764021
] |
def load_baseband_sweep_tones(ri, tone_banks, num_tone_samples):
    """Program all sweep tone banks into the ROACH at once.

    Stacks *tone_banks* into a single 2-D frequency array and loads it with
    *num_tone_samples* playback samples; returns whatever set_tone_freqs
    returns.
    """
    return ri.set_tone_freqs(freqs=np.vstack(tone_banks), nsamp=num_tone_samples)
3,
7,
3,
3,
1370440123
] |
def run_sweep(ri, tone_banks, num_tone_samples, length_seconds=0, state=None, description='', verbose=False,
              wait_for_sync=0.1, **kwargs):
    """
    Return a SweepArray acquired using the given tone banks.

    Note: this module is Python 2 (uses a `print n,` statement below).

    Parameters
    ----------
    ri : RoachInterface
        An instance of a subclass.
    tone_banks : iterable of ndarray (float)
        An iterable of arrays (or a 2-D array) of frequencies to use for the sweep.
    num_tone_samples : int
        The number of samples in the playback buffer; must be a power of two.
    length_seconds : float
        The duration of each data stream; the default of 0 means the minimum unit of data that can be read out in the
        current configuration.
    state : dict
        The non-roach state to pass to the SweepArray.
    description : str
        A human-readable description of the measurement.
    verbose : bool
        If true, print progress messages.
    wait_for_sync : float
        Sleep for this time in seconds to let the ROACH sync finish.
    kwargs
        Keyword arguments passed to ri.get_measurement().

    Returns
    -------
    SweepArray
    """
    stream_arrays = core.MeasurementList()
    if verbose:
        print("Measuring bank")
    for n, tone_bank in enumerate(tone_banks):
        if verbose:
            # Python 2 print statement: trailing comma keeps progress on one line.
            print n,
            sys.stdout.flush()
        ri.set_tone_freqs(tone_bank, nsamp=num_tone_samples)
        ri.select_fft_bins(np.arange(tone_bank.size))
        # we wait a bit here to let the roach2 sync catch up. figuring this out still.
        time.sleep(wait_for_sync)
        stream_arrays.append(ri.get_measurement(num_seconds=length_seconds, **kwargs))
    return basic.SweepArray(stream_arrays, state=state, description=description)
3,
7,
3,
3,
1370440123
] |
def run_multipart_sweep(ri, length_seconds=0, state=None, description='', num_tones_read_at_once=32, verbose=False,
                        **kwargs):
    """Run a loaded sweep in several interleaved parts and merge the results
    into a single SweepArray, reading at most *num_tones_read_at_once* tones
    per step."""
    num_tones = ri.tone_bins.shape[1]
    # At least one step, even when there are fewer tones than the batch size.
    num_steps = max(1, num_tones // num_tones_read_at_once)
    indices_to_read = range(num_tones)
    parts = []
    for step in range(num_steps):
        if verbose:
            print("running sweep step {} of {}.".format(step,num_steps))
        # Interleave bin indices so each part samples the full frequency span.
        part = run_loaded_sweep(ri, length_seconds=length_seconds, state=state, description=description,
                                bin_indices=indices_to_read[step::num_steps], **kwargs)
        parts.append(part)
    stream_arrays = core.MeasurementList()
    for part in parts:
        stream_arrays.extend(list(part.stream_arrays))
    return basic.SweepArray(stream_arrays, state=state, description=description)
3,
7,
3,
3,
1370440123
] |
def script_code():
    """
    Return the source code of the module running as '__main__'.

    Acquisition scripts can use this to save their own code.  If loading the
    source raises any exception, its string representation is returned
    instead.

    Returns
    -------
    str
        The code, with lines separated by newline characters.
    """
    try:
        main_module = sys.modules['__main__']
        return inspect.getsource(main_module)
    except Exception as err:
        return str(err)
3,
7,
3,
3,
1370440123
] |
def git_status():
    """Return `git status --porcelain` output (bytes) for the kid_readout
    package directory, or the stringified exception if the command fails."""
    import kid_readout
    kid_readout_directory = os.path.dirname(os.path.abspath(kid_readout.__file__))
    try:
        # Run git directly in the package directory via cwd= instead of
        # building a `cd ...; git ...` shell string; this avoids shell=True
        # and any quoting problems with unusual install paths.
        return subprocess.check_output(['git', 'status', '--porcelain'],
                                       cwd=kid_readout_directory)
    except Exception as e:
        return str(e)
3,
7,
3,
3,
1370440123
] |
def new_nc_file(suffix='', directory=settings.BASE_DATA_DIR, metadata=None):
    """Create a new timestamped NCFile in *directory*.

    A non-empty suffix gets a leading underscore; metadata defaults to
    all_metadata().
    """
    if suffix and not suffix.startswith('_'):
        suffix = '_' + suffix
    if metadata is None:
        metadata = all_metadata()
    filename = time.strftime('%Y-%m-%d_%H%M%S') + suffix + nc.NCFile.EXTENSION
    root_path = os.path.join(directory, filename)
    logger.debug("Creating new NCFile with path %s" % root_path)
    return nc.NCFile(root_path, metadata=metadata)
3,
7,
3,
3,
1370440123
] |
def show_settings():
    """Print cryostat and cooldown settings, then pause for confirmation.

    Uses the Python 2 raw_input builtin; press ctrl-C to abort the run.
    """
    print("cryostat: {}".format(settings.CRYOSTAT))
    for k, v in settings.COOLDOWN.items():
        print("{}: {}".format(k, v))
    raw_input("Press enter to continue or ctrl-C to quit.")
3,
7,
3,
3,
1370440123
] |
def generate_leaf_updates(leaf_block, dist_thresholds, inds, indptr, data, dist):
    """For each RP-tree leaf in *leaf_block*, compute candidate neighbor updates
    over all point pairs in the leaf (sparse-matrix input in CSR form).

    Returns one list per leaf of (p, q, distance) triples for pairs whose
    distance beats either point's current heap threshold; each list is seeded
    with a (-1, -1, inf) sentinel so it is never empty.

    NOTE(review): the outer loop uses numba.prange, so this is presumed to be
    compiled with numba parallel=True -- keep the per-leaf update lists
    independent to stay thread-safe.
    """
    updates = [[(-1, -1, np.inf)] for i in range(leaf_block.shape[0])]

    for n in numba.prange(leaf_block.shape[0]):
        for i in range(leaf_block.shape[1]):
            p = leaf_block[n, i]
            if p < 0:
                # Leaf rows are padded with negative values past the last point.
                break

            for j in range(i + 1, leaf_block.shape[1]):
                q = leaf_block[n, j]
                if q < 0:
                    break

                # CSR slices for points p and q.
                from_inds = inds[indptr[p] : indptr[p + 1]]
                from_data = data[indptr[p] : indptr[p + 1]]

                to_inds = inds[indptr[q] : indptr[q + 1]]
                to_data = data[indptr[q] : indptr[q + 1]]
                d = dist(from_inds, from_data, to_inds, to_data)

                # Record only pairs that could improve either point's heap.
                if d < dist_thresholds[p] or d < dist_thresholds[q]:
                    updates[n].append((p, q, d))

    return updates
731,
93,
731,
58,
1518045834
] |
def init_rp_tree(inds, indptr, data, dist, current_graph, leaf_array):
    """Seed the nearest-neighbor heap *current_graph* with candidate pairs
    taken from RP-tree leaves (sparse CSR input).

    Leaves are processed in blocks of 65536 to bound the size of the update
    lists produced by generate_leaf_updates; each accepted (p, q, d) pair is
    pushed symmetrically onto both points' heaps with the "new" flag set.
    """
    n_leaves = leaf_array.shape[0]
    # Block size bounds peak memory for the per-leaf update lists.
    block_size = 65536
    n_blocks = n_leaves // block_size

    for i in range(n_blocks + 1):
        block_start = i * block_size
        block_end = min(n_leaves, (i + 1) * block_size)

        leaf_block = leaf_array[block_start:block_end]
        # current_graph[1][:, 0] holds each point's current worst distance.
        dist_thresholds = current_graph[1][:, 0]

        updates = generate_leaf_updates(
            leaf_block, dist_thresholds, inds, indptr, data, dist
        )

        for j in range(len(updates)):
            for k in range(len(updates[j])):
                p, q, d = updates[j][k]

                if p == -1 or q == -1:
                    # Skip the sentinel entry seeded into every update list.
                    continue

                # Push the pair onto both heaps, flagged as a new candidate.
                checked_flagged_heap_push(
                    current_graph[1][p],
                    current_graph[0][p],
                    current_graph[2][p],
                    d,
                    q,
                    np.uint8(1),
                )
                checked_flagged_heap_push(
                    current_graph[1][q],
                    current_graph[0][q],
                    current_graph[2][q],
                    d,
                    p,
                    np.uint8(1),
                )
731,
93,
731,
58,
1518045834
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.