code stringlengths 281 23.7M |
|---|
class AnritsuMS464xB(Instrument):
    """Represents an Anritsu MS464xB series Vector Network Analyzer (VNA).

    Supports up to 16 channels (each with up to 16 traces) and up to 4 test
    ports. Channel objects are created and removed dynamically via
    :meth:`update_channels`.

    Fixes relative to the previous revision:
    - the file-handling methods interpolated a literal placeholder string
      instead of the ``filename`` argument;
    - :meth:`read_datafile` truncated each data line when converting bytes
      to text.
    """
    CHANNELS_MAX = 16
    TRACES_MAX = 16
    PORTS = 4
    TRIGGER_TYPES = ['POIN', 'SWE', 'CHAN', 'ALL']
    FREQUENCY_RANGE = [.0, .0]
    SPARAM_LIST = ['S11', 'S12', 'S21', 'S22', 'S13', 'S23', 'S33', 'S31', 'S32', 'S14', 'S24', 'S34', 'S41', 'S42', 'S43', 'S44']
    DISPLAY_LAYOUTS = ['R1C1', 'R1C2', 'R2C1', 'R1C3', 'R3C1', 'R2C2C1', 'R2C1C2', 'C2R2R1', 'C2R1R2', 'R1C4', 'R4C1', 'R2C2', 'R2C3', 'R3C2', 'R2C4', 'R4C2', 'R3C3', 'R5C2', 'R2C5', 'R4C3', 'R3C4', 'R4C4']

    def __init__(self, adapter, name='Anritsu MS464xB Vector Network Analyzer', active_channels=16, installed_ports=4, traces_per_channel=None, **kwargs):
        """Initialize the instrument.

        :param adapter: communication adapter.
        :param name: instrument name.
        :param active_channels: number of channels to activate, or ``'auto'``
            to keep the channel count currently reported by the instrument.
        :param installed_ports: number of installed ports, or ``'auto'`` to
            query the instrument.
        :param traces_per_channel: traces per channel forwarded to the
            channel objects, or ``None`` to leave unchanged.
        """
        super().__init__(adapter, name, timeout=10000, **kwargs)
        self.PORTS = (self.number_of_ports if (installed_ports == 'auto') else installed_ports)
        number_of_channels = (None if (active_channels == 'auto') else active_channels)
        self.update_channels(number_of_channels=number_of_channels, traces=traces_per_channel)

    def update_channels(self, number_of_channels=None, **kwargs):
        """Create or remove channel objects to match ``number_of_channels``.

        When ``number_of_channels`` is ``None`` the current count is queried
        from the instrument. Extra keyword arguments are forwarded to newly
        created :class:`MeasurementChannel` instances.
        """
        if (number_of_channels is None):
            number_of_channels = self.number_of_channels
        if (not hasattr(self, 'channels')):
            self.channels = {}
        if (len(self.channels) == number_of_channels):
            return
        # Remove highest-numbered channels first, then add until the count matches.
        while (len(self.channels) > number_of_channels):
            self.remove_child(self.channels[len(self.channels)])
        while (len(self.channels) < number_of_channels):
            self.add_child(MeasurementChannel, (len(self.channels) + 1), frequency_range=self.FREQUENCY_RANGE, **kwargs)

    def check_errors(self):
        """Drain the instrument error queue, logging each entry.

        :return: list of error entries read before 'No Error' was seen.
        """
        errors = []
        while True:
            err = self.values('SYST:ERR?')
            if (err[0] != 'No Error'):
                log.error(f'{self.name}: {err[0]}')
                errors.append(err)
            else:
                break
        return errors

    datablock_header_format = Instrument.control('FDHX?', 'FDH%d', 'Control the way the arbitrary block header for output data is formed.\n\n Valid values are:\n\n ===== \n value description\n ===== \n 0 A block header with arbitrary length will be sent.\n 1 The block header will have a fixed length of 11 characters.\n 2 No block header will be sent. Not IEEE 488.2 compliant.\n ===== \n ', values=[0, 1, 2], validator=strict_discrete_set, cast=int)
    datafile_frequency_unit = Instrument.control(':FORM:SNP:FREQ?', ':FORM:SNP:FREQ %s', 'Control the frequency unit displayed in a SNP data file.\n\n Valid values are HZ, KHZ, MHZ, GHZ.\n ', values=['HZ', 'KHZ', 'MHZ', 'GHZ'], validator=strict_discrete_set)
    datablock_numeric_format = Instrument.control(':FORM:DATA?', ':FORM:DATA %s', 'Control format for numeric I/O data representation.\n\n Valid values are:\n\n ===== \n value description\n ===== \n ASCII An ASCII number of 20 or 21 characters long with floating point notation.\n 8byte 8 bytes of binary floating point number representation limited to 64 bits.\n 4byte 4 bytes of floating point number representation.\n ===== \n ', values={'ASCII': 'ASC', '8byte': 'REAL', '4byte': 'REAL32'}, map_values=True)
    datafile_include_heading = Instrument.control(':FORM:DATA:HEAD?', ':FORM:DATA:HEAD %d', 'Control whether a heading is included in the data files. ', values={True: 1, False: 0}, map_values=True)
    datafile_parameter_format = Instrument.control(':FORM:SNP:PAR?', ':FORM:SNP:PAR %s', 'Control the parameter format displayed in an SNP data file.\n\n Valid values are:\n\n ===== \n value description\n ===== \n LINPH Linear and Phase.\n LOGPH Log and Phase.\n REIM Real and Imaginary Numbers.\n ===== \n ', values=['LINPH', 'LOGPH', 'REIM'], validator=strict_discrete_set)
    data_drawing_enabled = Instrument.control('DD1?', 'DD%d', 'Control whether data drawing is enabled (True) or not (False). ', values={True: 1, False: 0}, map_values=True)
    event_status_enable_bits = Instrument.control('*ESE?', '*ESE %d', 'Control the Standard Event Status Enable Register bits.\n\n The register can be queried using the :meth:`~.query_event_status_register` method. Valid\n values are between 0 and 255. Refer to the instrument manual for an explanation of the bits.\n ', values=[0, 255], validator=strict_range, cast=int)

    def query_event_status_register(self):
        """Query and return the Standard Event Status Register value as an int."""
        return self.values('*ESR?', cast=int)[0]

    service_request_enable_bits = Instrument.control('*SRE?', '*SRE %d', 'Control the Service Request Enable Register bits.\n\n Valid values are between 0 and 255; setting 0 performs a register reset. Refer to the\n instrument manual for an explanation of the bits.\n ', values=[0, 255], validator=strict_range, cast=int)

    def return_to_local(self):
        """Return the instrument to local (front-panel) operation."""
        self.write('RTL')

    binary_data_byte_order = Instrument.control(':FORM:BORD?', ':FORM:BORD %s', 'Control the binary numeric I/O data byte order.\n\n valid values are:\n\n ===== \n value description\n ===== \n NORM The most significant byte (MSB) is first\n SWAP The least significant byte (LSB) is first\n ===== \n ', values=['NORM', 'SWAP'], validator=strict_discrete_set)
    max_number_of_points = Instrument.control(':SYST:POIN:MAX?', ':SYST:POIN:MAX %d', 'Control the maximum number of points the instrument can measure in a sweep.\n\n Note that when this value is changed, the instrument will be rebooted.\n Valid values are 25000 and 100000. When 25000 points is selected, the instrument supports 16\n channels with 16 traces each; when 100000 is selected, the instrument supports 1 channel\n with 16 traces.\n ', values=[25000, 100000], validator=strict_discrete_set, cast=int)
    number_of_ports = Instrument.measurement(':SYST:PORT:COUN?', 'Get the number of instrument test ports. ', cast=int)
    number_of_channels = Instrument.control(':DISP:COUN?', ':DISP:COUN %d', 'Control the number of displayed (and therefore accessible) channels.\n\n When the system is in 25000 points mode, the number of channels can be 1, 2, 3, 4, 6, 8, 9,\n 10, 12, or 16; when the system is in 100000 points mode, the system only supports 1 channel.\n If a value is provided that is not valid in the present mode, the instrument is set to the\n next higher channel number.\n ', values=[1, CHANNELS_MAX], validator=strict_range, cast=int)
    display_layout = Instrument.control(':DISP:SPL?', ':DISP:SPL %s', "Control the channel display layout in a Row-by-Column format.\n\n Valid values are: {}. The number following the R indicates the number of rows, following the\n C the number of columns; e.g. R2C2 results in a 2-by-2 layout. The options that contain two\n C's or R's result in asymmetric layouts; e.g. R2C1C2 results in a layout with 1 channel on\n top and two channels side-by-side on the bottom row.\n ".format(', '.join(DISPLAY_LAYOUTS)), values=DISPLAY_LAYOUTS, validator=strict_discrete_set, cast=str)
    active_channel = Instrument.control(':DISP:WIND:ACT?', ':DISP:WIND%d:ACT', 'Control the active channel. ', values=[1, CHANNELS_MAX], validator=strict_range, cast=int)
    bandwidth_enhancer_enabled = Instrument.control(':SENS:BAND:ENH?', ':SENS:BAND:ENH %d', 'Control the state of the IF bandwidth enhancer. ', values={True: 1, False: 0}, map_values=True)
    trigger_source = Instrument.control(':TRIG:SOUR?', ':TRIG:SOUR %s', 'Control the source of the sweep/measurement triggering.\n\n Valid values are:\n\n ===== \n value description\n ===== \n AUTO Automatic triggering\n MAN Manual triggering\n EXTT Triggering from rear panel BNC via the GPIB parser\n EXT External triggering port\n REM Remote triggering\n ===== \n ', values=['AUTO', 'MAN', 'EXTT', 'EXT', 'REM'], validator=strict_discrete_set)
    external_trigger_type = Instrument.control(':TRIG:EXT:TYP?', ':TRIG:EXT:TYP %s', 'Control the type of trigger that will be associated with the external trigger.\n\n Valid values are POIN (for point), SWE (for sweep), CHAN (for channel), and ALL.\n ', values=TRIGGER_TYPES, validator=strict_discrete_set)
    external_trigger_delay = Instrument.control(':TRIG:EXT:DEL?', ':TRIG:EXT:DEL %g', 'Control the delay time of the external trigger in seconds.\n\n Valid values are between 0 [s] and 10 [s] in steps of 1e-9 [s] (i.e. 1 ns).\n ', values=[0, 10], validator=strict_range)
    external_trigger_edge = Instrument.control(':TRIG:EXT:EDG?', ':TRIG:EXT:EDG %s', 'Control the edge type of the external trigger.\n\n Valid values are POS (for positive or leading edge) or NEG (for negative or trailing edge).\n ', values=['POS', 'NEG'], validator=strict_discrete_set)
    external_trigger_handshake = Instrument.control(':TRIG:EXT:HAND?', ':TRIG:EXT:HAND %s', 'Control status of the external trigger handshake. ', values={True: 1, False: 0}, map_values=True)
    remote_trigger_type = Instrument.control(':TRIG:REM:TYP?', ':TRIG:REM:TYP %s', 'Control the type of trigger that will be associated with the remote trigger.\n\n Valid values are POIN (for point), SWE (for sweep), CHAN (for channel), and ALL.\n ', values=TRIGGER_TYPES, validator=strict_discrete_set)
    manual_trigger_type = Instrument.control(':TRIG:MAN:TYP?', ':TRIG:MAN:TYP %s', 'Control the type of trigger that will be associated with the manual trigger.\n\n Valid values are POIN (for point), SWE (for sweep), CHAN (for channel), and ALL.\n ', values=TRIGGER_TYPES, validator=strict_discrete_set)

    def trigger(self):
        """Trigger a measurement via the IEEE-488 *TRG command."""
        self.write('*TRG')

    def trigger_single(self):
        """Trigger a single sweep."""
        self.write(':TRIG:SING')

    def trigger_continuous(self):
        """Trigger a continuous sweep."""
        self.write(':TRIG')

    hold_function_all_channels = Instrument.control(':SENS:HOLD:FUNC?', ':SENS:HOLD:FUNC %s', 'Control the hold function of all channels.\n\n Valid values are:\n\n ===== \n value description\n ===== \n CONT Perform continuous sweeps on all channels\n HOLD Hold the sweep on all channels\n SING Perform a single sweep and then hold all channels\n ===== \n ', values=['CONT', 'HOLD', 'SING'], validator=strict_discrete_set)

    def load_data_file(self, filename):
        """Load a data file from the instrument's storage.

        Bug fix: the command previously sent a literal placeholder string
        instead of interpolating ``filename``.
        """
        self.write(f":MMEM:LOAD '{filename}'")

    def delete_data_file(self, filename):
        """Delete a data file on the instrument's storage."""
        self.write(f":MMEM:DEL '{filename}'")

    def copy_data_file(self, from_filename, to_filename):
        """Copy a data file on the instrument's storage."""
        self.write(f":MMEM:COPY '{from_filename}', '{to_filename}'")

    def load_data_file_to_memory(self, filename):
        """Load a data file into the instrument's memory."""
        self.write(f":MMEM:LOAD:MDATA '{filename}'")

    def create_directory(self, dir_name):
        """Create a directory on the instrument's storage."""
        self.write(f":MMEM:MDIR '{dir_name}'")

    def delete_directory(self, dir_name):
        """Delete a directory on the instrument's storage."""
        self.write(f":MMEM:RDIR '{dir_name}'")

    def store_image(self, filename):
        """Capture a screenshot and store it under ``filename`` on the instrument."""
        self.write(f":MMEM:STOR:IMAG '{filename}'")

    def read_datafile(self, channel, sweep_points, datafile_freq, datafile_par, filename):
        """Trigger a sweep, read the resulting S2P data file and save it locally.

        :param channel: index of the channel to configure.
        :param sweep_points: number of sweep points for the channel.
        :param datafile_freq: frequency unit for the data file (HZ/KHZ/MHZ/GHZ).
        :param datafile_par: parameter format (LINPH/LOGPH/REIM).
        :param filename: local file the data is written to.
        """
        cur_ch = self.channels[channel]
        cur_ch.sweep_points = sweep_points
        self.datafile_frequency_unit = datafile_freq
        self.datafile_parameter_format = datafile_par
        # Trigger sweep, wait for sweep completion, then output the S2P file.
        self.write('TRS;WFS;OS2P')
        # The fixed 11-byte block header is '#9' followed by a 9-digit length.
        bytes_to_transfer = int(self.read_bytes(11)[2:11])
        data = self.read_bytes(bytes_to_transfer)
        with open(filename, 'w') as textfile:
            for line in data.split(b'\r\n'):
                # Bug fix: decode the bytes directly; the previous
                # str(s)[2:len(s)] slicing dropped trailing characters.
                textfile.write(line.decode('latin-1') + '\n')
def metrics(labels, logits, batchsize, reverse_ce=False):
    """Build cross-entropy loss and accuracy ops for a classifier.

    When ``reverse_ce`` is True the loss is the categorical cross-entropy
    of the labels against ``1 - softmax(logits)`` instead of the standard
    softmax cross-entropy.

    Returns a ``(xent, acc)`` tuple of scalar tensors.
    """
    with tf.variable_scope('metrics'):
        reshaped_labels = _reshape_labels_like_logits(labels, logits, batchsize)
        if reverse_ce:
            # "Reverse" cross-entropy: penalize mass placed on the true class.
            probs = tf.nn.softmax(logits)
            xent = tf.reduce_mean(categorical_crossentropy(reshaped_labels, 1 - probs))
        else:
            per_example = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=reshaped_labels, logits=logits)
            xent = tf.reduce_mean(per_example, name='xent')
        predictions = tf.cast(tf.argmax(logits, axis=1), dtype=labels.dtype)
        correct = tf.equal(labels, predictions)
        acc = tf.reduce_mean(tf.to_float(correct), name='acc')
    return xent, acc
class ValueGradFunction():
    """Callable computing a weighted sum of scalar costs and its gradients.

    Compiles a pytensor function of ``grad_vars`` (with additional named
    "extra" variables substituted by shared variables) that returns the
    summed cost and, optionally, one gradient array per grad variable.
    """
    def __init__(self, costs, grad_vars, extra_vars_and_values=None, *, dtype=None, casting='no', compute_grads=True, **kwargs):
        """Build and compile the pytensor function.

        costs: list of scalar pytensor expressions; costs[1:] are scaled by
            adjustable weights (see :meth:`set_weights`).
        grad_vars: variables to differentiate with respect to; must be
            uniquely named and floating point.
        extra_vars_and_values: mapping of extra named variables to their
            initial values; replaced by shared variables via `givens`.
        dtype: dtype for the weights and dtype-cast validation; defaults to
            pytensor's floatX.
        casting: numpy casting rule used to validate grad_vars dtypes.
        compute_grads: when False, the compiled function returns only the cost.
        """
        # Avoid the mutable-default pitfall: fresh dict per call.
        if (extra_vars_and_values is None):
            extra_vars_and_values = {}
        # Every grad var and extra var must carry a unique, non-None name.
        names = [arg.name for arg in (grad_vars + list(extra_vars_and_values.keys()))]
        if any(((name is None) for name in names)):
            raise ValueError('Arguments must be named.')
        if (len(set(names)) != len(names)):
            raise ValueError('Names of the arguments are not unique.')
        self._grad_vars = grad_vars
        self._extra_vars = list(extra_vars_and_values.keys())
        self._extra_var_names = {var.name for var in extra_vars_and_values.keys()}
        if (dtype is None):
            dtype = pytensor.config.floatX
        self.dtype = dtype
        self._n_costs = len(costs)
        if (self._n_costs == 0):
            raise ValueError('At least one cost is required.')
        # One weight per cost beyond the first; costs[0] always has weight 1.
        weights = np.ones((self._n_costs - 1), dtype=self.dtype)
        self._weights = pytensor.shared(weights, '__weights')
        cost = costs[0]
        for (i, val) in enumerate(costs[1:]):
            if ((cost.ndim > 0) or (val.ndim > 0)):
                raise ValueError('All costs must be scalar.')
            cost = (cost + (self._weights[i] * val))
        self._extra_are_set = False
        for var in self._grad_vars:
            if (not np.can_cast(var.dtype, self.dtype, casting)):
                raise TypeError(f'Invalid dtype for variable {var.name}. Can not cast to {self.dtype} with casting rule {casting}.')
            if (not np.issubdtype(var.dtype, np.floating)):
                raise TypeError(f'Invalid dtype for variable {var.name}. Must be floating point but is {var.dtype}.')
        givens = []
        self._extra_vars_shared = {}
        for (var, value) in extra_vars_and_values.items():
            # Size-1 dims are pinned so broadcastability information survives.
            shared = pytensor.shared(value, (var.name + '_shared__'), shape=[(1 if (s == 1) else None) for s in value.shape])
            self._extra_vars_shared[var.name] = shared
            givens.append((var, shared))
        cost = rewrite_pregrad(cost)
        if compute_grads:
            grads = pytensor.grad(cost, grad_vars, disconnected_inputs='ignore')
            for (grad_wrt, var) in zip(grads, grad_vars):
                grad_wrt.name = f'{var.name}_grad'
            outputs = ([cost] + grads)
        else:
            outputs = [cost]
        inputs = grad_vars
        self._pytensor_function = compile_pymc(inputs, outputs, givens=givens, **kwargs)
    def set_weights(self, values):
        """Replace the per-cost weights; `values` must have shape (n_costs - 1,)."""
        if (values.shape != ((self._n_costs - 1),)):
            raise ValueError('Invalid shape. Must be (n_costs - 1,).')
        self._weights.set_value(values)
    def set_extra_values(self, extra_vars):
        """Assign new values to the extra shared variables from a name->value mapping."""
        # NOTE(review): the flag is set before the assignments; a failure
        # mid-loop leaves the object marked as set — confirm intended.
        self._extra_are_set = True
        for var in self._extra_vars:
            self._extra_vars_shared[var.name].set_value(extra_vars[var.name])
    def get_extra_values(self):
        """Return the current values of the extra shared variables, keyed by name."""
        if (not self._extra_are_set):
            raise ValueError('Extra values are not set.')
        return {var.name: self._extra_vars_shared[var.name].get_value() for var in self._extra_vars}
    def __call__(self, grad_vars, grad_out=None, extra_vars=None):
        """Evaluate the cost (and gradients) at `grad_vars`.

        grad_vars: positional input values, or a RaveledVars to be unraveled.
        grad_out: optional preallocated array receiving the raveled gradient;
            when provided, only the cost is returned.
        extra_vars: optional mapping to refresh the extra shared values first.
        """
        if (extra_vars is not None):
            self.set_extra_values(extra_vars)
        if (not self._extra_are_set):
            raise ValueError('Extra values are not set.')
        if isinstance(grad_vars, RaveledVars):
            grad_vars = list(DictToArrayBijection.rmap(grad_vars).values())
        (cost, *grads) = self._pytensor_function(*grad_vars)
        if grads:
            # Ravel the per-variable gradients into one flat array.
            grads_raveled = DictToArrayBijection.map({v.name: gv for (v, gv) in zip(self._grad_vars, grads)})
            if (grad_out is None):
                return (cost, grads_raveled.data)
            else:
                np.copyto(grad_out, grads_raveled.data)
                return cost
        else:
            return cost
    def profile(self):
        """Expose the profiling stats object of the compiled pytensor function."""
        return self._pytensor_function.profile
class TestHeaderTuple():
    """Tests for the tuple semantics of HeaderTuple / NeverIndexedHeaderTuple."""

    def test_is_tuple(self):
        """HeaderTuple is a genuine tuple subclass."""
        h = HeaderTuple('name', 'value')
        assert isinstance(h, tuple)

    def test_unpacks_properly(self):
        """HeaderTuple unpacks into (name, value)."""
        h = HeaderTuple('name', 'value')
        (k, v) = h
        assert (k == 'name')
        assert (v == 'value')

    def test_header_tuples_are_indexable(self):
        h = HeaderTuple('name', 'value')
        assert h.indexable

    def test_never_indexed_tuples_are_not_indexable(self):
        h = NeverIndexedHeaderTuple('name', 'value')
        assert (not h.indexable)

    # Bug fix: the decorators below had been truncated to bare `.parametrize`
    # statements (a syntax error); restored `@pytest.mark.parametrize`.
    @pytest.mark.parametrize('cls', (HeaderTuple, NeverIndexedHeaderTuple))
    def test_equal_to_tuples(self, cls):
        """Both classes compare equal to (but not identical with) plain tuples."""
        t1 = ('name', 'value')
        t2 = cls('name', 'value')
        assert (t1 == t2)
        assert (t1 is not t2)

    @pytest.mark.parametrize('cls', (HeaderTuple, NeverIndexedHeaderTuple))
    def test_equal_to_self(self, cls):
        """Two instances with the same contents compare equal but are distinct."""
        t1 = cls('name', 'value')
        t2 = cls('name', 'value')
        assert (t1 == t2)
        assert (t1 is not t2)

    def test_equal_for_different_indexes(self):
        """Indexability does not affect equality across the two classes."""
        t1 = HeaderTuple('name', 'value')
        t2 = NeverIndexedHeaderTuple('name', 'value')
        assert (t1 == t2)
        assert (t1 is not t2)
def create_datasets(params_list):
    """Build one dataset from a parameter spec or a list of specs.

    A single spec yields a single WatercolorsDataset; a list of specs yields
    the combination of the individual datasets.
    """
    if not isinstance(params_list, list):
        params_list = [params_list]
    built = [WatercolorsDataset(params=spec) for spec in params_list]
    if len(built) == 1:
        return built[0]
    return dataset_utils.combine_dataset_names(built)
# Bug fix: the decorator had been truncated to a bare `.parametrize` call
# (a syntax error); restored `@pytest.mark.parametrize`.
@pytest.mark.parametrize('ramp_symmetry', [0.01, 99.98])
def test_ramp_symmetry(ramp_symmetry):
    """Ramp symmetry is readable and writable on both channels and the default source."""
    with expected_protocol(Agilent33500, [
        ('SOUR1:FUNC:RAMP:SYMM?', ramp_symmetry),
        ('SOUR2:FUNC:RAMP:SYMM?', ramp_symmetry),
        ('FUNC:RAMP:SYMM?', ramp_symmetry),
        (f'SOUR1:FUNC:RAMP:SYMM {ramp_symmetry:.6f}', None),
        (f'SOUR2:FUNC:RAMP:SYMM {ramp_symmetry:.6f}', None),
        (f'FUNC:RAMP:SYMM {ramp_symmetry:.6f}', None),
    ]) as inst:
        assert (ramp_symmetry == inst.ch_1.ramp_symmetry)
        assert (ramp_symmetry == inst.ch_2.ramp_symmetry)
        assert (ramp_symmetry == inst.ramp_symmetry)
        inst.ch_1.ramp_symmetry = ramp_symmetry
        inst.ch_2.ramp_symmetry = ramp_symmetry
        inst.ramp_symmetry = ramp_symmetry
_images
def test_addr(host):
    """Integration test for the testinfra `addr` module (requires live network)."""
    # Unresolvable hostname: neither resolvable nor reachable, on any port.
    non_resolvable = host.addr('some_non_resolvable_host')
    assert (not non_resolvable.is_resolvable)
    assert (not non_resolvable.is_reachable)
    assert (not non_resolvable.port(80).is_reachable)
    # A literal IP resolves to itself but — presumably an unused address — is
    # expected to be unreachable.
    non_reachable_ip = host.addr('10.42.13.73')
    assert non_reachable_ip.is_resolvable
    assert (non_reachable_ip.ipv4_addresses == ['10.42.13.73'])
    assert (not non_reachable_ip.is_reachable)
    assert (not non_reachable_ip.port(80).is_reachable)
    # Google public DNS: reachable on 53 (DNS), not on an arbitrary port.
    google_dns = host.addr('8.8.8.8')
    assert google_dns.is_resolvable
    assert (google_dns.ipv4_addresses == ['8.8.8.8'])
    assert google_dns.port(53).is_reachable
    assert (not google_dns.port(666).is_reachable)
    # Regular hostname: HTTPS reachable, arbitrary port not; every reported
    # address must parse as a valid IPv4/IPv6 address.
    google_addr = host.addr('google.com')
    assert google_addr.is_resolvable
    assert google_addr.port(443).is_reachable
    assert (not google_addr.port(666).is_reachable)
    for ip in google_addr.ipv4_addresses:
        assert isinstance(ip_address(ip), IPv4Address)
    for ip in google_addr.ipv6_addresses:
        assert isinstance(ip_address(ip), IPv6Address)
    for ip in google_addr.ip_addresses:
        assert isinstance(ip_address(ip), (IPv4Address, IPv6Address))
class FGM():
    """Fast Gradient Method adversarial perturbation for embedding weights.

    ``attack`` perturbs every trainable parameter whose name contains
    ``emb_name`` along its gradient direction (L2-normalized, scaled by
    ``epsilon``), backing up the original values; ``restore`` puts the
    originals back and clears the backup.
    """

    def __init__(self, model, emb_name, epsilon=1.0):
        self.model = model
        self.epsilon = epsilon
        self.emb_name = emb_name
        self.backup = {}

    def attack(self):
        """Back up and perturb matching parameters in-place."""
        for name, param in self.model.named_parameters():
            if not (param.requires_grad and self.emb_name in name):
                continue
            self.backup[name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            # Skip zero or NaN gradients: the normalized step is undefined.
            if grad_norm != 0 and not torch.isnan(grad_norm):
                step = self.epsilon * param.grad / grad_norm
                param.data.add_(step)

    def restore(self):
        """Undo the perturbation applied by :meth:`attack` and clear the backup."""
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name:
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}
def build_tries(entities: List[Entity]) -> Dict[str, Trie]:
    """Build one Trie per entity label from the entity names.

    Entity names are grouped by ``entity.label.name``; each group's list of
    (single-word) name lists is then compiled into a Trie.

    :param entities: entities carrying ``.name`` and ``.label.name``.
    :return: mapping from label name to the Trie of that label's names.
    """
    # Fix: dropped the redundant `labels` set — the dict keys already track
    # the distinct labels. Also normalized the return annotation.
    tries = defaultdict(list)
    for ent in entities:
        tries[ent.label.name].append([ent.name])
    for label in list(tries):
        tries[label] = Trie(tries[label])
    return tries
class Plane(Primitive):
    """Finite rectangular plane primitive spanned by u/v axes around a center."""

    def __init__(self, center, material, width, height, u_axis, v_axis, max_ray_depth=5, shadow=True):
        super().__init__(center, material, max_ray_depth, shadow=shadow)
        half_width = width / 2
        half_height = height / 2
        collider = Plane_Collider(
            assigned_primitive=self,
            center=center,
            u_axis=u_axis,
            v_axis=v_axis,
            w=half_width,
            h=half_height,
        )
        self.collider_list.append(collider)
        self.width = width
        self.height = height
        # Radius of the sphere that bounds the rectangle (half its diagonal).
        self.bounded_sphere_radius = np.sqrt(half_width ** 2 + half_height ** 2)

    def get_uv(self, hit):
        """Delegate UV computation to the collider that produced the hit."""
        return hit.collider.get_uv(hit)
class CountRelations(LogicalValue):
    """Logical value that is True when at least ``min_value`` relations hold.

    Counts edges of ``relation`` from each enumerated source node to nodes
    accepted by ``to_nodes``; evaluation short-circuits as soon as the
    threshold is reached.
    """

    def __init__(self, from_nodes: NodeEnumerator, relation: Relation, to_nodes: NodeFilter, min_value: int):
        self.from_nodes = from_nodes
        self.relation = relation
        self.to_nodes = to_nodes
        self.min_value = min_value

    def evaluate(self, state: EnvironmentState, **kwargs):
        """Return True once ``min_value`` matching relations are found, else False."""
        matched = 0
        for source in self.from_nodes.enumerate(state, **kwargs):
            for target in state.get_nodes_from(source, self.relation):
                if not self.to_nodes.filter(target):
                    continue
                matched += 1
                if matched >= self.min_value:
                    return True
        return False
class NodePattern(BasePattern):
    """Pattern matching a single node by type and an ordered child-pattern list."""

    # Set to True during construction if any child pattern is a wildcard.
    wildcards = False

    def __init__(self, type=None, content=None, name=None):
        """Validate and store the pattern's type, child patterns and capture name."""
        if type is not None:
            # Node types live above the token range (< 256 are leaf tokens).
            assert type >= 256, type
        if content is not None:
            assert not isinstance(content, str), repr(content)
            content = list(content)
            for position, child in enumerate(content):
                assert isinstance(child, BasePattern), (position, child)
                if isinstance(child, WildcardPattern):
                    self.wildcards = True
        self.type = type
        self.content = content
        self.name = name

    def _submatch(self, node, results=None):
        """Match this pattern's children against ``node.children``.

        With wildcards present, delegate to ``generate_matches`` and accept
        only matches consuming all children; otherwise require a 1:1 match.
        """
        if self.wildcards:
            for consumed, captured in generate_matches(self.content, node.children):
                if consumed == len(node.children):
                    if results is not None:
                        results.update(captured)
                    return True
            return False
        if len(self.content) != len(node.children):
            return False
        # Short-circuits on the first non-matching child, like the explicit loop.
        return all(
            subpattern.match(child, results)
            for subpattern, child in zip(self.content, node.children)
        )
def evaluate(model, dataset, args, sess):
    """Evaluate a sequential recommender with NDCG@10 and HR@10.

    For each (sampled) user, ranks the held-out test item against 100
    randomly drawn unseen items and accumulates NDCG@10 and hit-rate@10.
    Returns (mean NDCG@10, mean HR@10) over the evaluated users.
    """
    [train, valid, test, usernum, itemnum] = copy.deepcopy(dataset)
    NDCG = 0.0
    HT = 0.0
    valid_user = 0.0
    # Cap the evaluation cost by sampling at most 10000 users.
    if (usernum > 10000):
        users = random.sample(range(1, (usernum + 1)), 10000)
    else:
        users = range(1, (usernum + 1))
    for u in users:
        # Skip users with no training history or no test item.
        if ((len(train[u]) < 1) or (len(test[u]) < 1)):
            continue
        # Build the model input right-aligned: validation item last, then the
        # training history in reverse order, zero-padded on the left.
        seq = np.zeros([args.maxlen], dtype=np.int32)
        idx = (args.maxlen - 1)
        seq[idx] = valid[u][0]
        idx -= 1
        for i in reversed(train[u]):
            seq[idx] = i
            idx -= 1
            if (idx == (- 1)):
                break
        rated = set(train[u])
        rated.add(0)
        # Candidate list: the true test item plus 100 unseen negative items.
        item_idx = [test[u][0]]
        for _ in range(100):
            t = np.random.randint(1, (itemnum + 1))
            while (t in rated):
                t = np.random.randint(1, (itemnum + 1))
            item_idx.append(t)
        # Negate scores so argsort ranks the highest-scored item first; the
        # double argsort yields the rank of the true item (index 0).
        predictions = (- model.predict(sess, [u], [seq], item_idx))
        predictions = predictions[0]
        rank = predictions.argsort().argsort()[0]
        valid_user += 1
        if (rank < 10):
            NDCG += (1 / np.log2((rank + 2)))
            HT += 1
        if ((valid_user % 100) == 0):
            sys.stdout.flush()
    return ((NDCG / valid_user), (HT / valid_user))
class ListConfig(ProductionCommand):
    """Command that lists the configuration settings of an application."""

    keyword = 'listconfig'

    def assemble(self):
        """Register the command-line flags selecting which columns to print."""
        super().assemble()
        self.parser.add_argument('-v', '--values', action='store_true', dest='print_values', help='prints the currently configured value')
        self.parser.add_argument('-f', '--files', action='store_true', dest='print_files', help='prints the filename where the setting should be defined')
        self.parser.add_argument('-d', '--defaults', action='store_true', dest='print_defaults', help='prints the default value')
        self.parser.add_argument('-m', '--missing', action='store_true', dest='print_missing_only', help='prints the missing values only')
        self.parser.add_argument('-i', '--info', action='store_true', dest='print_description', help='prints a description')

    def create_context(self, config_directory):
        # config_directory is accepted for interface compatibility but unused here.
        self.context = ExecutionContext(name=self.__class__.__name__)

    def execute(self, args):
        """Print one line per setting, with columns chosen by the parsed flags."""
        super().execute(args)
        with self.context:
            print('Listing config for %s' % self.directory)
            config = StoredConfiguration(self.directory)
            config.configure(validate=False)
            for config_file, key, value, setting in config.list_all():
                line = '%-35s' % key
                if args.print_files:
                    line += '\t%s' % config_file
                if args.print_values:
                    line += '\t%s' % value
                if args.print_defaults:
                    if setting.defaulted:
                        message = str(setting.default)
                        if setting.dangerous:
                            message += ' (DANGEROUS DEFAULT)'
                    elif setting.automatic:
                        message = 'AUTOMATIC'
                    else:
                        message = 'NO DEFAULT'
                    line += '\t%s' % message
                if args.print_description:
                    line += '\t%s' % setting.description
                # With --missing, only settings whose value is missing are shown.
                if not args.print_missing_only or isinstance(value, MissingValue):
                    print(line)
def prefix_match(s1: str, s2: str) -> bool:
    """Compare the first non-separator characters of two strings.

    Returns True when both strings are empty, False when exactly one is
    empty, otherwise whether the first non-separator character of each
    string is identical.
    """

    def scan_index(s: str) -> int:
        # Mirrors the original scan exactly: stops at the first
        # non-separator, but never advances past the last character
        # (an all-separator string yields its final index, not len(s)).
        idx = 0
        for idx in range(len(s)):
            if not is_span_separator(s[idx]):
                break
        return idx

    i = scan_index(s1)
    j = scan_index(s2)
    if i < len(s1) and j < len(s2):
        return s1[i] == s2[j]
    # i/j can only reach len() when the corresponding string is empty.
    return i >= len(s1) and j >= len(s2)
def normalize_data_storage(data_storage, offset=0.1, mul_factor=100, save_file='../data/mean_std.pkl'):
    """Standardize, min-max rescale and shift every modality in place.

    For each modality array, mean and std are computed over non-zero voxels
    only (per subject, then averaged); each subject's non-zero voxels are then
    standardized, min-max normalized, shifted by `offset` and scaled by
    `mul_factor`. The per-modality mean/std are pickled to `save_file`.

    NOTE(review): assumes each modality_storage exposes .shape, .name and
    supports per-subject integer indexing (e.g. a pytables/HDF5 array) —
    confirm against callers. Also assumes n_subs > 1 (the progress-bar math
    divides by n_subs - 1).
    """
    print('normalize_data_storage...')
    mean_std_values = {}
    for modality_storage in data_storage:
        means = []
        pbar = ProgressBar().start()
        print('calculate mean value...')
        n_subs = modality_storage.shape[0]
        # Pass 1: per-subject mean over non-zero voxels only.
        for i in range(n_subs):
            means.append(np.mean(np.ravel(modality_storage[i])[np.flatnonzero(modality_storage[i])]))
            pbar.update(int(((i * 100) / (n_subs - 1))))
        pbar.finish()
        mean = np.mean(means)
        mean_std_values[(modality_storage.name + '_mean')] = mean
        print('mean=', mean)
        std_means = []
        pbar = ProgressBar().start()
        print('calculate std value...')
        # Pass 2: per-subject mean squared deviation from the global mean.
        for i in range(n_subs):
            std_means.append(np.mean(np.power((np.ravel(modality_storage[i])[np.flatnonzero(modality_storage[i])] - mean), 2)))
            pbar.update(int(((i * 100) / (n_subs - 1))))
        pbar.finish()
        std = np.sqrt(np.mean(std_means))
        mean_std_values[(modality_storage.name + '_std')] = std
        print('std=', std)
        # Pass 3: rewrite each subject; only non-zero (brain) voxels change.
        for i in range(n_subs):
            brain_index = np.nonzero(modality_storage[i])
            temp_img = np.copy(modality_storage[i])
            temp_img[brain_index] = ((minmax_normalize(((modality_storage[i][brain_index] - mean) / std)) + offset) * mul_factor)
            modality_storage[i] = temp_img
    print('normalization FINISHED')
    with open(save_file, 'wb') as f:
        pickle.dump(mean_std_values, f)
    return
def test_color():
    """mmcv.color_val accepts Color enum members, names, tuples, ints and ndarrays."""
    assert (mmcv.color_val(mmcv.Color.blue) == (255, 0, 0))
    assert (mmcv.color_val('green') == (0, 255, 0))
    assert (mmcv.color_val((1, 2, 3)) == (1, 2, 3))
    assert (mmcv.color_val(100) == (100, 100, 100))
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    assert (mmcv.color_val(np.zeros(3, dtype=int)) == (0, 0, 0))
    with pytest.raises(TypeError):
        mmcv.color_val([255, 255, 255])
    with pytest.raises(TypeError):
        mmcv.color_val(1.0)
    with pytest.raises(AssertionError):
        mmcv.color_val((0, 0, 500))
def test_fgraph_rewrite(non_centered_rewrite):
    """Check that the non-centered rewrite reparameterizes a hierarchical model.

    Builds a centered model, applies the rewrite to its FunctionGraph and
    verifies variable names/dims/RV roles, then checks draw- and logp-
    equivalence against a hand-written non-centered reference model.
    """
    with pm.Model(coords={'subject': range(10)}) as m_old:
        group_mean = pm.Normal('group_mean')
        group_std = pm.HalfNormal('group_std')
        subject_mean = pm.Normal('subject_mean', group_mean, group_std, dims=('subject',))
        obs = pm.Normal('obs', subject_mean, 1, observed=np.zeros(10), dims=('subject',))
    (fg, _) = fgraph_from_model(m_old)
    non_centered_rewrite.apply(fg)
    m_new = model_from_fgraph(fg)
    # The raw standard-normal variable must carry the subject dim...
    assert (m_new.named_vars_to_dims == {'subject_mean': ['subject'], 'subject_mean_raw_': ['subject'], 'obs': ['subject']})
    assert (set(m_new.named_vars) == {'group_mean', 'group_std', 'subject_mean_raw_', 'subject_mean', 'obs'})
    assert ({rv.name for rv in m_new.free_RVs} == {'group_mean', 'group_std', 'subject_mean_raw_'})
    assert ({rv.name for rv in m_new.observed_RVs} == {'obs'})
    # ...and subject_mean becomes a deterministic transform of it.
    assert ({rv.name for rv in m_new.deterministics} == {'subject_mean'})
    # Reference: manually written non-centered parameterization.
    with pm.Model() as m_ref:
        group_mean = pm.Normal('group_mean')
        group_std = pm.HalfNormal('group_std')
        subject_mean_raw = pm.Normal('subject_mean_raw_', 0, 1, shape=(10,))
        subject_mean = pm.Deterministic('subject_mean', (group_mean + (subject_mean_raw * group_std)))
        obs = pm.Normal('obs', subject_mean, 1, observed=np.zeros(10))
    # Same seed must yield identical draws; logp must agree at the initial point.
    np.testing.assert_array_equal(pm.draw(m_new['subject_mean_raw_'], draws=7, random_seed=1), pm.draw(m_ref['subject_mean_raw_'], draws=7, random_seed=1))
    ip = m_new.initial_point()
    np.testing.assert_equal(m_new.compile_logp()(ip), m_ref.compile_logp()(ip))
class RCompleter(Completer):
    """prompt_toolkit completer for an R console.

    Combines LaTeX symbol completions, completions from R's built-in
    completion engine, and installed-package name completions.
    """
    def __init__(self, timeout=0.02):
        # timeout: max seconds to let R's completion engine run per request.
        self.timeout = timeout
        super(RCompleter, self).__init__()
    def get_completions(self, document, complete_event):
        """Yield completions for the current cursor position."""
        word = document.get_word_before_cursor()
        prefix_length = settings.completion_prefix_length
        # Don't auto-complete short prefixes unless explicitly requested.
        if ((len(word) < prefix_length) and (not complete_event.completion_requested)):
            return
        # LaTeX completions (e.g. \alpha) take priority and short-circuit.
        latex_comps = list(get_latex_completions(document, complete_event))
        if (len(latex_comps) > 0):
            for x in latex_comps:
                (yield x)
            return
        for x in self.get_r_builtin_completions(document, complete_event):
            (yield x)
        for x in self.get_package_completions(document, complete_event):
            (yield x)
    def get_r_builtin_completions(self, document, complete_event):
        """Yield completions produced by R's own completion engine."""
        text_before = document.current_line_before_cursor
        completion_requested = complete_event.completion_requested
        library_prefix = LIBRARY_PATTERN.match(text_before)
        # Inside library(...)/require(...) the package completer handles it.
        if library_prefix:
            return
        # NOTE(review): when the cursor sits in an unclosed print( call, the
        # line buffer is assigned and text_before replaced by the returned
        # token; assign_line_buffer is then called again below — confirm the
        # double call is intentional.
        if ((not completion_requested) and ('print(' in text_before) and re.match('.*print\\([^\\)]*$', remove_nested_paren(text_before))):
            token = rcompletion.assign_line_buffer(text_before)
            text_before = token
        with suppress_stderr():
            try:
                token = rcompletion.assign_line_buffer(text_before)
                # Namespace access (::) and explicit requests complete eagerly.
                if (('::' in token) or completion_requested):
                    timeout = 0
                else:
                    timeout = self.timeout
                rcompletion.complete_token(timeout)
                completions = rcompletion.retrieve_completions()
            except Exception:
                # Best-effort: a failing R engine yields no completions.
                completions = []
        for c in completions:
            if (c.startswith(token) and (c != token)):
                # Optionally pad '=' with spaces; skip bare namespace stubs.
                if (c.endswith('=') and settings.completion_adding_spaces_around_equals):
                    c = (c[:(- 1)] + ' = ')
                if c.endswith('::'):
                    continue
                (yield Completion(c, (- len(token))))
    def get_package_completions(self, document, complete_event):
        """Yield installed package names matching the current token."""
        text_before = document.current_line_before_cursor
        token_match = TOKEN_PATTERN.match(text_before)
        if (not token_match):
            return
        token = token_match.group(1)
        library_prefix = LIBRARY_PATTERN.match(text_before)
        instring = cursor_in_string(document)
        for p in installed_packages():
            if p.startswith(token):
                # Append '::' unless completing inside a string or library().
                comp = (p if (instring or library_prefix) else (p + '::'))
                (yield Completion(comp, (- len(token))))
def _handle_conv2d_transpose(callback):
    """Decorator that transposes the last two tensor axes around ``callback``
    when the layer being handled is a ``Conv2DTranspose``.

    For 4-D tensors of a Conv2DTranspose layer, the tensor is permuted with
    ``[0, 1, 3, 2]`` before the callback and the callback's result (or the
    first element of a result tuple) is permuted back. All other cases pass
    straight through to ``callback``.
    """
    import functools

    # Bug fix: the wrapping decorator had been reduced to a bare `(callback)`
    # expression statement (a no-op); restored functools.wraps so the wrapper
    # preserves the callback's name and docstring.
    @functools.wraps(callback)
    def _handle(cls, tensor):
        if isinstance(cls.original_layer, tf.keras.layers.Conv2DTranspose):
            if len(tensor.shape) == 4:
                # Swap the last two axes before the callback, swap back after.
                permute = [0, 1, 3, 2]
                tensor = K.permute_dimensions(tensor, permute)
                return_val = callback(cls, tensor)
                if isinstance(return_val, tuple):
                    return (K.permute_dimensions(return_val[0], permute), return_val[1])
                return K.permute_dimensions(return_val, permute)
        return callback(cls, tensor)
    return _handle
def import_dsprite_location_module():
    """Dynamically import the sibling ``create_dsprites_location_data_files`` module.

    The module file is expected to live next to this script; it is loaded
    from its file path and returned without being registered in sys.modules.
    """
    module_name = 'create_dsprites_location_data_files'
    script_dir = os.path.split(__file__)[0]
    module_path = os.path.join(script_dir, module_name + '.py')
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def test_swift_session_by_user_key():
    """An Env built from a SwiftSession exposes the session's GDAL credentials."""
    def fake_init(self, session=None, swift_storage_url=None, swift_auth_token=None, swift_auth_v1_url=None, swift_user=None, swift_key=None):
        # Stand-in constructor: skip real Swift auth, inject fixed credentials.
        self._creds = {'SWIFT_STORAGE_URL': 'foo', 'SWIFT_AUTH_TOKEN': 'bar'}
    with mock.patch('rasterio.session.SwiftSession.__init__', new=fake_init):
        session = SwiftSession(swift_auth_v1_url='foo', swift_user='bar', swift_key='key')
        with rasterio.env.Env(session=session) as env:
            env.credentialize()
            credentials = getenv()
            assert (credentials['SWIFT_STORAGE_URL'] == 'foo')
            assert (credentials['SWIFT_AUTH_TOKEN'] == 'bar')
def disambiguate(items, nr, **kwds):
    """Pick one item from ``items``, raising when the choice is ambiguous.

    :param items: candidate sequence.
    :param nr: index to select; ``None`` means "expect exactly one item".
    :param kwds: search criteria, included in error messages only.
    :return: the selected item.
    :raises ItemNotFoundError: when ``items`` is empty or ``nr`` is out of range.
    :raises AmbiguityError: when ``nr`` is None and there are several items.
    """
    # Modernized: replaced the Python-2 `iteritems` shim with dict.items().
    msg = ' '.join(f'{key}={value!r}' for key, value in kwds.items())
    if not items:
        raise ItemNotFoundError(msg)
    if nr is None:
        if len(items) > 1:
            raise AmbiguityError(msg)
        nr = 0
    if len(items) <= nr:
        raise ItemNotFoundError(msg)
    return items[nr]
def calculate_monthly_payment(total_amount, down_payment, interest_rate, amortization_period):
    """Compute the fixed monthly payment for an amortized loan.

    :param total_amount: purchase price; the down payment is subtracted from it.
    :param down_payment: amount paid upfront.
    :param interest_rate: yearly interest rate as a fraction (e.g. 0.05 for 5%).
    :param amortization_period: number of monthly payments.
    :return: the monthly payment, or 0 when rate or period is not positive.
    """
    # Cleanup: removed leftover debug prints, including a malformed
    # `(print(...),)` tuple expression.
    principal = float(total_amount) - float(down_payment)
    interest_rate = float(interest_rate)
    amortization_period = int(amortization_period)
    if interest_rate <= 0 or amortization_period <= 0:
        # The annuity formula below is undefined without a positive rate/period.
        return 0
    monthly_rate = interest_rate / 12
    growth = (1 + monthly_rate) ** amortization_period
    # Standard annuity formula: P = A * r(1+r)^n / ((1+r)^n - 1)
    return principal * monthly_rate * growth / (growth - 1)
def train_model_swag(model, arch, opt, train_data, test_data, args, lamb_lr, verbose=False):
    """Train a stochastic model under a mutual-information (MI) constraint
    while collecting SWAG (SWA-Gaussian) weight snapshots.

    The loss is ``l_sup + lamb * (args.MI_const - MI)``; ``lamb`` is a
    Lagrange multiplier updated by dual ascent with step size *lamb_lr*.
    Returns ``(model, swag_model, diagnostics)`` where *diagnostics* bundles
    the loss/accuracy curves and final MI estimates.
    """
    model.train()
    MI_data = train_data
    (train_accs, train_losses) = ([], [])
    (test_accs, test_losses) = ([], [])
    l_MIs = []  # per-step MI estimates
    maxes = []  # per-step max predicted probability
    lambs = []  # trajectory of the Lagrange multiplier
    t = 0  # global optimizer-step counter
    lamb = args.lamb_init
    analyse(model, grads=True)
    # Build a SWAG wrapper matching the concrete model class.
    if isinstance(model, StochasticMLP):
        swag_model = SWAG(StochasticMLP, no_cov_mat=False, max_num_models=10, arch=arch)
    elif isinstance(model, StochasticConvMLP):
        swag_model = SWAG(StochasticConvMLP, no_cov_mat=False, max_num_models=10, arch=arch, in_dim=args.in_dim)
    swag_model.to(device)
    for ep in range(args.epochs):
        # Start collecting weight snapshots once past the SWA start epoch.
        if (ep >= args.swa_start):
            swag_model.collect_model(model)
        for (xs, ys) in train_data:
            # Periodic evaluation every 10 optimizer steps.
            if ((t % 10) == 0):
                (train_acc, train_loss) = evaluate(model, train_data, args, 'train', plot=False)
                (test_acc, test_loss) = evaluate(model, test_data, args, 'test', plot=False)
                train_accs.append(train_acc)
                train_losses.append(train_loss)
                test_accs.append(test_acc)
                test_losses.append(test_loss)
                if verbose:
                    print((ep, t, train_acc, test_acc))
                    sys.stdout.flush()
            xs = xs.to(device)
            ys = ys.to(device)
            opt.zero_grad()
            (preds, max_prob) = model(xs)
            l_sup = nn.functional.cross_entropy(preds, ys, reduction='mean')
            maxes.append(max_prob)
            # Differentiable MI estimate on a subsample of the training set.
            MI = est_MI(model, MI_data.dataset, jensen=False, sz=min(MI_SZ, len(MI_data.dataset)))
            constraint = (args.MI_const - MI)
            l_MIs.append(MI.item())
            # Lagrangian: supervised loss plus weighted constraint violation.
            loss = (l_sup + (lamb * constraint))
            loss.backward()
            opt.step()
            if (ep == 0):
                analyse(model, grads=True, t=t)
            # Dual ascent on the multiplier.
            lamb += (lamb_lr * constraint.item())
            lambs.append(lamb)
            t += 1
    # Final evaluation and MI estimates (Monte-Carlo and Jensen variants).
    (train_acc, train_loss) = evaluate(model, train_data, args, 'train', plot=False)
    (test_acc, test_loss) = evaluate(model, test_data, args, 'test', plot=False)
    train_accs.append(train_acc)
    train_losses.append(train_loss)
    test_accs.append(test_acc)
    test_losses.append(test_loss)
    MI_mc = est_MI(model, MI_data.dataset, sz=min(MI_SZ, len(MI_data.dataset)), jensen=False, requires_grad=False).item()
    MI_cond_mc = est_MI_cond(model, args.C, MI_data, sz=(- 1), jensen=False).item()
    MI_jensen = est_MI(model, MI_data.dataset, sz=min(MI_SZ, len(MI_data.dataset)), jensen=True, requires_grad=False).item()
    MI_cond_jensen = est_MI_cond(model, args.C, MI_data, sz=(- 1), jensen=True).item()
    # Evaluate the SWA mean solution (sample with zero scale).
    swag_model.sample(0.0)
    (test_acc_swag, test_loss_swag) = evaluate(swag_model, test_data, args, 'test', plot=False)
    diagnostics = {'train_losses': train_losses, 'train_accs': train_accs, 'test_losses': test_losses, 'test_accs': test_accs, 'test_loss_swag': test_loss_swag, 'test_acc_swag': test_acc_swag, 'MI_mc': MI_mc, 'MI_cond_mc': MI_cond_mc, 'MI_jensen': MI_jensen, 'MI_cond_jensen': MI_cond_jensen, 'l_MIs': l_MIs, 'maxes': maxes, 'lambs': lambs}
    return (model, swag_model, diagnostics)
def make_dataset(dir, class_to_idx, extensions, num_instance_per_class):
    """Walk *dir* and collect ``(path, class_index)`` samples.

    Classes are visited in sorted name order; within each class directory
    at most *num_instance_per_class* files are taken per walked directory
    (0 means take all). Files whose extension is not in *extensions* are
    skipped.
    """
    samples = []
    dir = os.path.expanduser(dir)
    for target in sorted(class_to_idx.keys()):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            continue
        for (root, _, fnames) in sorted(os.walk(class_dir)):
            # 0 is a sentinel meaning "no per-class cap".
            take = len(fnames) if num_instance_per_class == 0 else min(num_instance_per_class, len(fnames))
            for fname in sorted(fnames)[:take]:
                if has_file_allowed_extension(fname, extensions):
                    samples.append((os.path.join(root, fname), class_to_idx[target]))
    return samples
def train(model, env, args):
    """Online training loop for a binary value network over replay data.

    Streams (states, rewards) batches from *env*, fits the model with BCE
    loss, tracks an exponentially smoothed prediction accuracy over ten
    game-progress buckets, plots loss/accuracy to visdom, and checkpoints
    periodically. Runs until the environment is exhausted.
    """
    STEPS = 10  # number of game-progress buckets for accuracy reporting
    LAMBDA = 0.99  # EMA factor for the running accuracy
    vis = visdom.Visdom(env=(args.name + '[{}]'.format(args.phrase)))
    # Per-replay buffers of binary predictions and ground-truth outcomes.
    pre_per_replay = [[] for _ in range(args.n_replays)]
    gt_per_replay = [[] for _ in range(args.n_replays)]
    acc = None  # EMA accuracy per bucket, created lazily
    win = vis.line(X=np.zeros(1), Y=np.zeros(1))
    loss_win = vis.line(X=np.zeros(1), Y=np.zeros(1))
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    gpu_id = args.gpu_id
    with torch.cuda.device(gpu_id):
        model = (model.cuda() if (gpu_id >= 0) else model)
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    epoch = 0
    save = args.save_intervel  # step count at which to write the next checkpoint
    env_return = env.step()
    if (env_return is not None):
        ((states, rewards), require_init) = env_return
        with torch.cuda.device(gpu_id):
            states = torch.from_numpy(states).float()
            rewards = torch.from_numpy(rewards).float()
            if (gpu_id >= 0):
                states = states.cuda()
                rewards = rewards.cuda()
    while True:
        values = model(Variable(states), require_init)
        # Sum per-timestep BCE between predicted value and binary reward.
        value_loss = 0
        for (value, reward) in zip(values, rewards):
            value_loss = (value_loss + F.binary_cross_entropy(value, Variable(reward)))
        model.zero_grad()
        value_loss.backward()
        optimizer.step()
        model.detach()  # truncate backprop between environment steps
        if (env.epoch > epoch):
            # New pass over the data: decay the learning rate by 10x.
            epoch = env.epoch
            for p in optimizer.param_groups:
                p['lr'] *= 0.1
        vis.updateTrace(X=np.asarray([env.step_count()]), Y=np.asarray(value_loss.data.cpu().numpy()), win=loss_win, name='value')
        # Reshape to (replay, time) for per-replay bookkeeping.
        values_np = np.swapaxes(np.asarray([value.data.cpu().numpy() for value in values]), 0, 1)
        rewards_np = np.swapaxes(rewards.cpu().numpy(), 0, 1)
        for (idx, (value, reward, init)) in enumerate(zip(values_np, rewards_np, require_init)):
            if (init and (len(pre_per_replay[idx]) > 0)):
                # A replay just restarted: score its accumulated predictions.
                pre_per_replay[idx] = np.asarray(pre_per_replay[idx], dtype=np.uint8)
                gt_per_replay[idx] = np.asarray(gt_per_replay[idx], dtype=np.uint8)
                step = (len(pre_per_replay[idx]) // STEPS)
                if (step > 0):
                    # Accuracy per tenth of the replay's duration.
                    acc_tmp = []
                    for s in range(STEPS):
                        value_pre = pre_per_replay[idx][(s * step):((s + 1) * step)]
                        value_gt = gt_per_replay[idx][(s * step):((s + 1) * step)]
                        acc_tmp.append(np.mean((value_pre == value_gt)))
                    acc_tmp = np.asarray(acc_tmp)
                    if (acc is None):
                        acc = acc_tmp
                    else:
                        acc = ((LAMBDA * acc) + ((1 - LAMBDA) * acc_tmp))
                # Nothing to plot until at least one full replay was scored.
                if (acc is None):
                    continue
                for s in range(STEPS):
                    vis.updateTrace(X=np.asarray([env.step_count()]), Y=np.asarray([acc[s]]), win=win, name='{}[{}%~{}%]'.format('value', (s * 10), ((s + 1) * 10)))
                vis.updateTrace(X=np.asarray([env.step_count()]), Y=np.asarray([np.mean(acc)]), win=win, name='value[TOTAL]')
                pre_per_replay[idx] = []
                gt_per_replay[idx] = []
            # Threshold the newest prediction and record the newest outcome.
            pre_per_replay[idx].append(int((value[(- 1)] >= 0.5)))
            gt_per_replay[idx].append(int(reward[(- 1)]))
        env_return = env.step()
        if (env_return is not None):
            ((raw_states, raw_rewards), require_init) = env_return
            # Reuse the existing device tensors via in-place copy.
            states = states.copy_(torch.from_numpy(raw_states).float())
            rewards = rewards.copy_(torch.from_numpy(raw_rewards).float())
        if ((env.step_count() > save) or (env_return is None)):
            save = (env.step_count() + args.save_intervel)
            torch.save(model.state_dict(), os.path.join(args.model_path, 'model_iter_{}.pth'.format(env.step_count())))
            torch.save(model.state_dict(), os.path.join(args.model_path, 'model_latest.pth'))
        if (env_return is None):
            env.close()
            break
def edit_task(name=None, location='\\', user_name=None, password=None, description=None, enabled=None, hidden=None, run_if_idle=None, idle_duration=None, idle_wait_timeout=None, idle_stop_on_end=None, idle_restart=None, ac_only=None, stop_if_on_batteries=None, wake_to_run=None, run_if_network=None, network_id=None, network_name=None, allow_demand_start=None, start_when_available=None, restart_every=None, restart_count=3, execution_time_limit=None, force_stop=None, delete_after=None, multiple_instances=None, restorePrivs=False, privsToRestore=None, **kwargs):
    """Edit the settings of an existing Windows scheduled task.

    Every keyword left at ``None`` is not modified. Unless a pre-fetched
    ``task_definition`` is passed through *kwargs*, the definition is
    loaded from the Task Scheduler at *location*/*name* and saved back at
    the end; with a caller-supplied definition, the caller saves it.

    Returns an error string on invalid input or a missing task, otherwise
    the result of saving the definition (or ``None`` when the caller owns
    the definition).

    Fixes vs. the previous revision:
      * ``delete_after=False`` no longer falls through into the duration
        check (which returned 'Invalid value for "delete_after"').
      * ``restart_count`` accepts the documented upper bound 999.
      * A missing ``privsToRestore`` is reported without then crashing by
        iterating over ``None``.
    """
    logging.debug('Editing parameters of the task...')
    save_definition = False
    if kwargs.get('task_definition', False):
        task_definition = kwargs.get('task_definition')
    else:
        save_definition = True
        if (not name):
            return 'Required parameter "name" not passed'
        if (name in list_tasks(location)):
            pythoncom.CoInitialize()
            task_service = win32com.client.Dispatch('Schedule.Service')
            task_service.Connect()
            task_folder = task_service.GetFolder(location)
            task_definition = task_folder.GetTask(name).Definition
        else:
            return '{0} not found'.format(name)
    if save_definition:
        # Clear authorship metadata on the edited definition.
        task_definition.RegistrationInfo.Author = ''
        task_definition.RegistrationInfo.Source = ''
    if (description is not None):
        task_definition.RegistrationInfo.Description = description
    # --- Principal (run-as account) ---
    if user_name:
        if (user_name.lower() == 'system'):
            logon_type = TASK_LOGON_SERVICE_ACCOUNT
            user_name = 'SYSTEM'
            password = None
        else:
            task_definition.Principal.Id = user_name
            if password:
                logon_type = TASK_LOGON_PASSWORD
            else:
                logon_type = TASK_LOGON_INTERACTIVE_TOKEN
        task_definition.Principal.UserID = user_name
        task_definition.Principal.DisplayName = user_name
        task_definition.Principal.LogonType = logon_type
        task_definition.Principal.RunLevel = TASK_RUNLEVEL_HIGHEST
    else:
        user_name = None
        password = None
    if restorePrivs:
        # The IPrincipal2 interface is required to re-add privileges.
        IID_IPrincipal2 = pythoncom.MakeIID('{248919AE-E345-4A6D-8AEB-E0D3165C904E}')
        iprincipal2 = task_definition.Principal._oleobj_.QueryInterface(IID_IPrincipal2, pythoncom.IID_IDispatch)
        principal2 = win32com.client.Dispatch(iprincipal2)
        if (privsToRestore is None):
            # Bug fix: previously logged (with a wrong parameter name) and
            # then crashed iterating None below.
            logging.error("Internal error: Impossible to restore privs because 'privsToRestore' is not given while 'restorePrivs' is enabled")
        else:
            logging.debug('Restoring these privileges: {0}'.format(privsToRestore))
            for aPrivName in privsToRestore:
                principal2.AddRequiredPrivilege(aPrivName)
    # --- Settings ---
    if (enabled is not None):
        task_definition.Settings.Enabled = enabled
    if (hidden is not None):
        task_definition.Settings.Hidden = hidden
    if (run_if_idle is not None):
        task_definition.Settings.RunOnlyIfIdle = run_if_idle
    if task_definition.Settings.RunOnlyIfIdle:
        if (idle_stop_on_end is not None):
            task_definition.Settings.IdleSettings.StopOnIdleEnd = idle_stop_on_end
        if (idle_restart is not None):
            task_definition.Settings.IdleSettings.RestartOnIdle = idle_restart
        if (idle_duration is not None):
            if (idle_duration in duration):
                task_definition.Settings.IdleSettings.IdleDuration = _lookup_first(duration, idle_duration)
            else:
                return 'Invalid value for "idle_duration"'
        if (idle_wait_timeout is not None):
            if (idle_wait_timeout in duration):
                task_definition.Settings.IdleSettings.WaitTimeout = _lookup_first(duration, idle_wait_timeout)
            else:
                return 'Invalid value for "idle_wait_timeout"'
    if (ac_only is not None):
        task_definition.Settings.DisallowStartIfOnBatteries = ac_only
    if (stop_if_on_batteries is not None):
        task_definition.Settings.StopIfGoingOnBatteries = stop_if_on_batteries
    if (wake_to_run is not None):
        task_definition.Settings.WakeToRun = wake_to_run
    if (run_if_network is not None):
        task_definition.Settings.RunOnlyIfNetworkAvailable = run_if_network
    if task_definition.Settings.RunOnlyIfNetworkAvailable:
        if network_id:
            task_definition.Settings.NetworkSettings.Id = network_id
        if network_name:
            task_definition.Settings.NetworkSettings.Name = network_name
    if (allow_demand_start is not None):
        task_definition.Settings.AllowDemandStart = allow_demand_start
    if (start_when_available is not None):
        task_definition.Settings.StartWhenAvailable = start_when_available
    if (restart_every is not None):
        # False clears the restart interval; otherwise it must be a known
        # duration key.
        if (restart_every is False):
            task_definition.Settings.RestartInterval = ''
        elif (restart_every in duration):
            task_definition.Settings.RestartInterval = _lookup_first(duration, restart_every)
        else:
            return 'Invalid value for "restart_every"'
    if task_definition.Settings.RestartInterval:
        if (restart_count is not None):
            # Bug fix: range(1, 1000) so the documented maximum of 999 is
            # actually accepted.
            if (restart_count in range(1, 1000)):
                task_definition.Settings.RestartCount = restart_count
            else:
                return '"restart_count" must be a value between 1 and 999'
    if (execution_time_limit is not None):
        if (execution_time_limit is False):
            # 'PT0S' disables the execution time limit.
            task_definition.Settings.ExecutionTimeLimit = 'PT0S'
        elif (execution_time_limit in duration):
            task_definition.Settings.ExecutionTimeLimit = _lookup_first(duration, execution_time_limit)
        else:
            return 'Invalid value for "execution_time_limit"'
    if (force_stop is not None):
        task_definition.Settings.AllowHardTerminate = force_stop
    if (delete_after is not None):
        # Bug fix: the original used two independent ifs, so delete_after=
        # False cleared the setting and then still hit the "invalid" branch.
        if (delete_after is False):
            task_definition.Settings.DeleteExpiredTaskAfter = ''
        elif (delete_after in duration):
            task_definition.Settings.DeleteExpiredTaskAfter = _lookup_first(duration, delete_after)
        else:
            return 'Invalid value for "delete_after"'
    if (multiple_instances is not None):
        task_definition.Settings.MultipleInstances = instances[multiple_instances]
    if save_definition:
        return _save_task_definition(name=name, task_folder=task_folder, task_definition=task_definition, user_name=user_name, password=password, logon_type=task_definition.Principal.LogonType)
class NominationCreateForm(NominationForm):
    """Nomination form with an opt-in checkbox for nominating oneself."""
    def __init__(self, *args, **kwargs):
        # The request is injected by the view so validation can inspect the
        # logged-in user's profile.
        self.request = kwargs.pop('request', None)
        super().__init__(*args, **kwargs)
    # Checkbox: when set, the nomination is associated with the requester's
    # python.org user account.
    self_nomination = forms.BooleanField(required=False, help_text='If you are nominating yourself, we will automatically associate the nomination with your python.org user.')
    def clean_self_nomination(self):
        """Self-nomination requires a complete (first + last) profile name."""
        data = self.cleaned_data['self_nomination']
        if data:
            if ((not self.request.user.first_name) or (not self.request.user.last_name)):
                raise forms.ValidationError(mark_safe('You must set your First and Last name in your <a href="/users/edit/">User Profile</a> to self nominate.'))
        return data
class LxTaskByPidFunc(gdb.Function):
    """GDB convenience function ``$lx_task_by_pid(pid)``: return the kernel
    task_struct for the given PID, or raise a GdbError if none exists."""
    def __init__(self):
        super(LxTaskByPidFunc, self).__init__('lx_task_by_pid')
    def invoke(self, pid):
        task = get_task_by_pid(pid)
        # Guard clause: no matching task means the PID is unknown.
        if not task:
            raise gdb.GdbError('No task of PID ' + str(pid))
        return task.dereference()
class TestAlignMixinInitializationMethods(unittest.TestCase):
    """Unit tests for AlignMixin._set_align validation."""
    def setUp(self):
        self.mixin_mock = mock.Mock()
    def test_set_align_with_valid_config(self):
        # Each supported alignment must be stored verbatim on the instance.
        for align in ('right', 'center', 'left'):
            with self.subTest():
                AlignMixin._set_align(self.mixin_mock, align)
                self.assertEqual(self.mixin_mock.align, align)
    def test_align_with_invalid_config(self):
        # Anything outside the supported set must raise.
        with self.assertRaises(InvalidAlignException):
            AlignMixin._set_align(self.mixin_mock, align='bar')
class L2TPAttr(TypeEnum):
    """L2TP AVP (attribute-value pair) attribute types.

    Numeric values follow the L2TP AVP type numbering (cf. RFC 2661);
    members are grouped by purpose rather than listed strictly in numeric
    order.
    """
    MsgType = 0
    RandomVector = 36
    Result = 1
    Version = 2
    FramingCap = 3
    BearerCap = 4
    TieBreaker = 5
    Firmware = 6
    HostName = 7
    VendorName = 8
    TunnelID = 9
    WindowSize = 10
    Challenge = 11
    Response = 13
    CauseCode = 12
    SessionID = 14
    CallSerial = 15
    MinimumBPS = 16
    MaximumBPS = 17
    BearerType = 18
    FramingType = 19
    CalledNumber = 21
    CallingNumber = 22
    SubAddress = 23
    ConnectSpeed = 24
    RxConnectSpeed = 38
    PhysicalChannel = 25
    PrivateGroupID = 37
    SequencingRequired = 39
    InitialLCP = 26
    LastSentLCP = 27
    LastReceivedLCP = 28
    ProxyAuthenType = 29
    ProxyAuthenName = 30
    ProxyAuthenChallenge = 31
    ProxyAuthenID = 32
    ProxyAuthenResponse = 33
    CallErrors = 34
    ACCM = 35
class Cmvn(object):
    """Accumulator/applier for cepstral mean and variance normalization.

    Statistics live in a 2 x (dim + 1) Kaldi double matrix (``self.stats``):
    per-dimension sums and squared sums, plus the frame count/weight.
    """
    def __init__(self, dim=None):
        # dim=None leaves stats uninitialized (e.g. to be read from file).
        self.init(dim)
    def accumulate(self, feats, weights=None):
        """Accumulate stats from a feature matrix or a single frame vector."""
        if (not self.stats):
            raise ValueError('CMVN stats matrix is not initialized. Initialize it either by reading it from file or by calling the init method to accumulate new statistics or by directly setting the stats attribute.')
        if isinstance(feats, _kaldi_matrix.MatrixBase):
            _cmvn.acc_cmvn_stats(feats, weights, self.stats)
        elif isinstance(feats, _kaldi_vector.VectorBase):
            # Single frame: default to unit weight.
            if (weights is None):
                weights = 1.0
            _cmvn.acc_cmvn_stats_single_frame(feats, weights, self.stats)
        else:
            raise TypeError('Input feature should be a matrix or vector.')
    def apply(self, feats, norm_vars=False, reverse=False):
        """Apply (or, with reverse=True, undo) CMVN on *feats*."""
        if (not self.stats):
            raise ValueError('CMVN stats matrix is not initialized. Initialize it either by reading it from file or by calling the init method and accumulating new statistics or by directly setting the stats attribute.')
        if reverse:
            _cmvn.apply_cmvn_reverse(self.stats, norm_vars, feats)
        else:
            _cmvn.apply_cmvn(self.stats, norm_vars, feats)
    def init(self, dim):
        """Allocate a zeroed 2 x (dim + 1) stats matrix, or None if dim is None."""
        if (dim is None):
            self.stats = None
        else:
            assert (dim > 0)
            self.stats = matrix.DoubleMatrix(2, (dim + 1))
    def read_stats(self, rxfilename, binary=True):
        """Load the stats matrix from a Kaldi extended filename."""
        with io.Input(rxfilename, binary=binary) as ki:
            self.stats = matrix.DoubleMatrix().read_(ki.stream(), ki.binary)
    def skip_dims(self, dims):
        """Replace stats for the given dims with fake values so those
        dimensions are left unnormalized."""
        _cmvn.fake_stats_for_some_dims(dims, self.stats)
    def write_stats(self, wxfilename, binary=True):
        """Write the stats matrix to a Kaldi extended filename."""
        with io.Output(wxfilename, binary=binary) as ko:
            self.stats.write(ko.stream(), binary)
class RenderThread(Thread):
    """Worker thread that renders one source file into Terraform JSON blocks
    and feeds variable definitions/values into the shared variable store."""
    def __init__(self, source_path: Path, target_path: Path, variables: TerraformVariableStore):
        super().__init__()
        self.source_path = source_path
        self.target_path = target_path
        self.target_name = target_path.name
        self.variables = variables
        self.blocks: List[dict] = []
        self.error: Optional[Exception] = None
        # *.tfvars.json output is a single merged object, not a block list.
        self.is_tfvars = self.target_name.endswith('.tfvars.json')
    def contents(self) -> Union[(dict, List[dict])]:
        """Return the rendered output: a merged dict for tfvars targets,
        otherwise the list of blocks as-is."""
        if self.is_tfvars:
            merged = {}
            for block in self.blocks:
                for (name, value) in block.items():
                    merged[name] = value  # later blocks win on key clashes
            return merged
        else:
            return self.blocks
    def process_tf_block(self, block: dict) -> None:
        """Register any variable definitions found in a .tf block."""
        for var in get_variable_definitions_from_block(block, source=self.source_path):
            self.variables.add(var)
    def process_tfvars_dict(self, values: dict) -> None:
        """Register variable values, but only when the store is actually
        waiting on this tfvars target."""
        if self.variables.tfvars_waiting_for(self.target_path):
            for (name, value) in values.items():
                var = VariableValue(name=name, value=value, source=self.source_path)
                self.variables.add(var)
    def render(self) -> Generator[(dict, None, None)]:
        """Yield rendered blocks; implemented by subclasses."""
        raise NotImplementedError('subclass should implement this')
    def run(self) -> None:
        # Collect all rendered blocks; on failure stash the exception for the
        # main thread, and always signal the variable store that this file is
        # done so waiters do not deadlock.
        try:
            self.blocks = list(self.render())
        except Exception as error:
            log.bad(f'create: {self.target_name} could not be processed')
            self.error = error
        finally:
            self.variables.file_done(self.target_path)
class Curric_Dataset(Dataset):
    """Image dataset read from a ``path label`` text file, carrying a
    per-instance weight used for curriculum learning."""
    def __init__(self, root, txt, transform=None):
        self.img_path = []
        self.labels = []
        self.transform = transform
        # Each line of *txt* is "<relative path> <integer label>".
        with open(txt) as f:
            for line in f:
                self.img_path.append(os.path.join(root, line.split()[0]))
                self.labels.append(int(line.split()[1]))
        self.init_instance_weight()
    def init_instance_weight(self):
        # All-zero weights until real weights are assigned.
        self.instance_weights = np.zeros(len(self.labels))
    def set_instance_weight(self, instance_weights):
        self.instance_weights = instance_weights
    def set_scale_classwise_instance_weight_with_shot(self, shot_list, thresh=1.0):
        """Min-max rescale weights within each class into [thresh_tmp, 1],
        where thresh_tmp shrinks for classes with more training samples."""
        scaled_instance_weight = np.zeros(np.shape(self.instance_weights))
        labels = np.array(self.labels)
        num_classes = len(np.unique(labels))
        for i in range(num_classes):
            class_idxs = np.where((labels == i))[0]
            scaled = self.instance_weights[class_idxs]
            # Hard-coded mean sample counts for the shot groups.
            low_mean_shot = 12
            medium_mean_shot = 53
            many_mean_shot = 230
            if (i in shot_list['low_shot']):
                thresh_tmp = thresh
            elif (i in shot_list['median_shot']):
                thresh_tmp = ((thresh * low_mean_shot) / medium_mean_shot)
            elif (i in shot_list['many_shot']):
                thresh_tmp = ((thresh * low_mean_shot) / many_mean_shot)
            # NOTE(review): thresh_tmp is unbound if class i appears in none
            # of the shot lists — assumes shot_list covers every class; confirm.
            scaled = ((((scaled - scaled.min()) / (scaled.max() - scaled.min())) * (1 - thresh_tmp)) + thresh_tmp)
            scaled_instance_weight[class_idxs] = scaled
        self.instance_weights = scaled_instance_weight
    def set_scale_classwise_instance_weight(self, thresh=0.1):
        """Rescale weights within each class into a band controlled by *thresh*."""
        scaled_instance_weight = np.zeros(np.shape(self.instance_weights))
        labels = np.array(self.labels)
        num_classes = len(np.unique(labels))
        for i in range(num_classes):
            class_idxs = np.where((labels == i))[0]
            # NOTE(review): mean/std are computed but never used.
            mean = np.mean(self.instance_weights[class_idxs])
            std = np.std(self.instance_weights[class_idxs])
            scaled = self.instance_weights[class_idxs]
            scaled = (((scaled / (scaled.max() - scaled.min())) * (1 - (2 * thresh))) + thresh)
            scaled_instance_weight[class_idxs] = scaled
        self.instance_weights = scaled_instance_weight
    def sort_classwise_hardness(self):
        """Build per-class index arrays sorted by ascending instance weight."""
        labels = np.array(self.labels)
        num_classes = len(np.unique(labels))
        self.classwise_hardness = {}
        for i in range(num_classes):
            class_idxs = np.where((labels == i))[0]
            arg_sort = np.argsort(self.instance_weights[class_idxs])
            class_idxs_sort = class_idxs[arg_sort]
            self.classwise_hardness[i] = {'class_idxs': class_idxs_sort, 'confidence': self.instance_weights[class_idxs_sort]}
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        """Return (transformed image, label, path, instance weight)."""
        path = self.img_path[index]
        label = self.labels[index]
        with open(path, 'rb') as f:
            sample = Image.open(f).convert('RGB')
        if (self.transform is not None):
            sample = self.transform(sample)
        weight = self.instance_weights[index]
        return (sample, label, path, weight)
class PercentileFilter(SingleInputMixin, Filter):
    """Filter passing assets whose factor value lies within the
    [min_percentile, max_percentile] band of each day's cross-section."""
    window_length = 0
    def __new__(cls, factor, min_percentile, max_percentile, mask):
        return super(PercentileFilter, cls).__new__(cls, inputs=(factor,), mask=mask, min_percentile=min_percentile, max_percentile=max_percentile)
    def _init(self, min_percentile, max_percentile, *args, **kwargs):
        self._min_percentile = min_percentile
        self._max_percentile = max_percentile
        return super(PercentileFilter, self)._init(*args, **kwargs)
    def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs):
        # NOTE(review): takes ``cls`` — likely a @classmethod whose decorator
        # was lost in extraction; confirm against the original source.
        return (super(PercentileFilter, cls)._static_identity(*args, **kwargs), min_percentile, max_percentile)
    def _validate(self):
        """Ensure 0 <= min_percentile < max_percentile <= 100."""
        if (not (0.0 <= self._min_percentile < self._max_percentile <= 100.0)):
            raise BadPercentileBounds(min_percentile=self._min_percentile, max_percentile=self._max_percentile, upper_bound=100.0)
        return super(PercentileFilter, self)._validate()
    def _compute(self, arrays, dates, assets, mask):
        """Row-wise percentile banding: masked cells become NaN so they are
        ignored by nanpercentile and excluded from the result."""
        data = arrays[0].copy().astype(float64)
        data[(~ mask)] = nan
        lower_bounds = nanpercentile(data, self._min_percentile, axis=1, keepdims=True)
        upper_bounds = nanpercentile(data, self._max_percentile, axis=1, keepdims=True)
        return ((lower_bounds <= data) & (data <= upper_bounds))
    def graph_repr(self):
        """Short label used in pipeline graph rendering."""
        return '{}:\\l min: {}, max: {}\\l'.format(type(self).__name__, self._min_percentile, self._max_percentile)
def get_agents_action(o_n, sess, noise_rate=0):
    """Query each of the three DDPG agents for an action on its own
    observation, adding Gaussian exploration noise scaled by *noise_rate*.

    Returns a 3-tuple of noisy actions in agent order.
    """
    agents = (agent1_ddpg, agent2_ddpg, agent3_ddpg)
    return tuple((agents[i].action(state=[o_n[i]], sess=sess) + (np.random.randn(2) * noise_rate)) for i in range(3))
def main(args=None):
    """CLI entry point: report how often each token type is referenced
    across all lexer source files."""
    parser = argparse.ArgumentParser(description='Count how often each token is used by the lexers')
    parser.add_argument('-v', '--verbose', dest='verbose', help='Give more output.', default=False, action='store_true')
    parser.add_argument('--minfiles', dest='minfiles', metavar='COUNT', type=int, help='Report all tokens referenced by at least COUNT lexer source files (default %(default)s)', default=1)
    parser.add_argument('--maxfiles', dest='maxfiles', metavar='COUNT', type=int, help='Report all tokens referenced by at most COUNT lexer source files (default %(default)s)', default=1)
    parser.add_argument('--minlines', dest='minlines', metavar='COUNT', type=int, help='Report all tokens referenced by at least COUNT lexer source lines (default %(default)s)', default=1)
    parser.add_argument('--maxlines', dest='maxlines', metavar='COUNT', type=int, help='Report all tokens referenced by at most COUNT lexer source lines (default %(default)s)', default=10)
    parser.add_argument('-s', '--subtoken', dest='subtoken', help='Include count of references to subtokens in the count for each token (default %(default)s)', default=False, action='store_true')
    args = parser.parse_args(args)
    verbose = args.verbose
    # Pipeline: discover lexers -> fetch their sources -> scan for token
    # references -> print the filtered report. Progress goes to stdout only
    # in verbose mode.
    if verbose:
        print('Looking up all lexers ... ', end='', flush=True)
    count = lookup_all_lexers()
    if verbose:
        print(f'found {count:,} lexers')
        print('Fetching lexer source code ... ', end='', flush=True)
    lexer_sources = fetch_lexer_sources()
    if verbose:
        print(f'found {len(lexer_sources):,} lexer source files')
        print('Finding token references ... ', end='', flush=True)
    token_references = find_token_references(lexer_sources, args)
    if verbose:
        print(f'found references to {len(token_references):,} tokens')
        print()
    print('Result:')
    print_result(token_references, args)
class ParseException(ParseBaseException):
    """Exception thrown when a parse expression fails to match.

    ``explain`` renders a human-readable diagnostic from the exception and
    its traceback.
    """
    def explain(exc, depth=16):
        """Return a multi-line explanation of *exc*.

        *exc* plays the role of ``self``; *depth* limits how many traceback
        frames are reported (None means the full recursion limit).
        """
        import inspect
        if (depth is None):
            depth = sys.getrecursionlimit()
        ret = []
        if isinstance(exc, ParseBaseException):
            # Show the offending input line with a caret under the column.
            ret.append(exc.line)
            ret.append(((' ' * (exc.col - 1)) + '^'))
        ret.append('{}: {}'.format(type(exc).__name__, exc))
        if (depth > 0):
            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
            seen = set()
            for (i, ff) in enumerate(callers[(- depth):]):
                frm = ff[0]
                f_self = frm.f_locals.get('self', None)
                if isinstance(f_self, ParserElement):
                    # Only report the interesting parser entry points, and
                    # each parser element at most once.
                    if (frm.f_code.co_name not in ('parseImpl', '_parseNoCache')):
                        continue
                    if (f_self in seen):
                        continue
                    seen.add(f_self)
                    self_type = type(f_self)
                    ret.append('{}.{} - {}'.format(self_type.__module__, self_type.__name__, f_self))
                elif (f_self is not None):
                    self_type = type(f_self)
                    ret.append('{}.{}'.format(self_type.__module__, self_type.__name__))
                else:
                    # Plain function frame: skip wrapper/module noise.
                    code = frm.f_code
                    if (code.co_name in ('wrapper', '<module>')):
                        continue
                    ret.append('{}'.format(code.co_name))
                depth -= 1
                if (not depth):
                    break
        return '\n'.join(ret)
def NetGap(pattern):
    """Count pattern occurrences across the sequence database ``sdb`` using
    a Nettree, consuming one occurrence per iteration.

    Relies on module-level globals ``SeqNum``/``sdb`` and the Nettree helper
    functions; the ``print`` calls appear to be debug/trace output.
    """
    print(pattern)
    count = 0
    for i in range(SeqNum):
        # One Nettree level per pattern position.
        Nettree = [[] for k in range(len(pattern))]
        CreatNettree(Nettree, pattern, sdb[i])
        UpdateNettree(Nettree)
        print(Nettree)
        # Repeatedly extract an occurrence rooted at the first level and
        # prune it, until the tree's first level is exhausted.
        while (Nettree[0] != []):
            ShowNettree(Nettree)
            count += 1
            cuttree(Nettree[0][0], Nettree, 0)
            UpdateNettree(Nettree)
    return count
class SettingsEntry(BaseModel, FieldRequiring):
    """Pydantic settings entry that can inherit unset fields from a
    *defaults* instance and remembers which fields were overridden."""
    name: ClassVar[str] = FieldRequiring.MUST_SET_UNIQUE
    description: ClassVar[(str | dict[(str, str)])] = FieldRequiring.MUST_SET
    _overrides: set[str] = PrivateAttr(default_factory=set)
    def __init__(self, defaults: (SettingsEntry | None)=None, /, **data):
        # Fill any None-valued field from *defaults*; every explicitly set
        # field is recorded as an override.
        overrides = set()
        if defaults:
            defaults_dict = defaults.model_dump()
            for (field_name, field_value) in list(data.items()):
                if (field_value is None):
                    data[field_name] = defaults_dict[field_name]
                else:
                    overrides.add(field_name)
        super().__init__(**data)
        self._overrides |= overrides
    def overrides(self) -> dict[(str, Any)]:
        # NOTE(review): likely a @property in the original source (decorator
        # appears stripped by extraction) — confirm before calling style.
        """Return a mapping of overridden field names to their values."""
        return {name: getattr(self, name) for name in self._overrides}
    def create(cls, entry_data: (dict[(str, Any)] | None), *, defaults: (SettingsEntry | None)=None, keep_empty: bool=False) -> (SettingsEntry | None):
        # NOTE(review): takes ``cls`` — likely a @classmethod whose decorator
        # was lost in extraction; confirm against the original source.
        """Build an entry from raw config data; return None for missing or
        (unless keep_empty) all-None data."""
        if (entry_data is None):
            return None
        if ((not keep_empty) and hasattr(entry_data, 'values') and all(((value is None) for value in entry_data.values()))):
            return None
        if (not isinstance(entry_data, dict)):
            # Scalar shorthand: treat the value as this entry's single field.
            entry_data = {cls.name: entry_data}
        return cls(defaults, **entry_data)
def evalTime(f, v, script=False, loops=1000):
    """Benchmark ``f(v)`` and return the fastest single-call time in seconds.

    Fix: the minimum was initialized to 0.0 and updated with ``dt < min``,
    so it could never change and the function always returned 0.0 (it also
    shadowed the builtin ``min``). The tracker now starts at +inf.

    Parameters
    ----------
    f : callable
        Function under test; called as ``f(v)``.
    v : object
        Argument forwarded to *f* on every call.
    script : bool
        When False, print a summary line with the measured time.
    loops : int
        Number of timed calls; the minimum over all loops is returned.
    """
    best = float('inf')
    for _ in range(loops):
        t0 = time.perf_counter()
        f(v)
        dt = time.perf_counter() - t0
        # Keep the fastest observed call — least affected by OS jitter.
        if dt < best:
            best = dt
    if not script:
        print(f' run time in {int(loops)} loops was {best:2.9f} sec')
    return best
def test_object_feature_values():
    """The raw C feature buffer must equal the flattened properties."""
    (obj, _) = create_test_object()
    properties = create_test_properties()
    obj.properties = properties
    ordered_keys = list(properties.keys())
    obj.set_features(ordered_keys)
    # View the ctypes buffer without copying, then compare it against the
    # concatenation of each property array flattened in key order.
    buffer_view = np.ctypeslib.as_array(obj.features, shape=(obj.n_features,))
    expected = np.concatenate([np.asarray(obj.properties[key]).ravel() for key in ordered_keys], axis=0)
    np.testing.assert_almost_equal(expected, buffer_view)
class ZipMemoryFile(MemoryFile):
    """In-memory zip archive whose members can be opened as datasets via
    GDAL's /vsizip virtual filesystem."""
    def __init__(self, file_or_bytes=None):
        super().__init__(file_or_bytes, ext='zip')
    # NOTE(review): the bare expression below looks like the remnant of a
    # stripped decorator (e.g. @ensure_env) lost in extraction — confirm
    # against the original source.
    _env
    def open(self, path, driver=None, sharing=False, **kwargs):
        """Open a dataset at *path* inside the in-memory zip archive."""
        zippath = _UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)
class TaskGroupHandler(TaskNewHandler):
    """Web handler for viewing and changing the group a task belongs to."""
    # NOTE(review): the bare ``.authenticated`` lines below look like
    # remnants of stripped ``@tornado.web.authenticated`` decorators lost
    # in extraction — confirm against the original source.
    .authenticated
    async def get(self, taskid):
        # Render the group-selection page with all distinct group names
        # across the current user's tasks.
        user = self.current_user
        groupNow = (await self.db.task.get(taskid, fields=('_groups',)))['_groups']
        _groups = []
        for task in (await self.db.task.list(user['id'], fields=('_groups',), limit=None)):
            # Normalize non-string group values before deduplicating.
            if (not isinstance(task['_groups'], str)):
                task['_groups'] = str(task['_groups'])
            temp = task['_groups']
            if (temp not in _groups):
                _groups.append(temp)
        (await self.render('task_setgroup.html', taskid=taskid, _groups=_groups, groupNow=groupNow))
    .authenticated
    async def post(self, taskid):
        # Set the task's group: a newly typed name wins over a checked one.
        envs = {}
        for key in self.request.body_arguments:
            envs[key] = self.get_body_arguments(key)
        New_group = envs['New_group'][0].strip()
        if (New_group != ''):
            target_group = New_group
        else:
            # Fall back to the first checkbox that is on; the form encodes
            # group names hex-escaped inside the field name.
            for value in envs:
                if (envs[value][0] == 'on'):
                    target_group = escape_decode(value.strip()[2:(- 1)], 'hex-escape')[0].decode('utf-8')
                    break
            else:
                target_group = 'None'
        (await self.db.task.mod(taskid, _groups=target_group))
        self.redirect('/my/')
# NOTE(review): the two bare names below look like remnants of stripped
# decorators (e.g. @login_required / @require_POST) lost in extraction —
# confirm against the original source.
_required
_POST
def user_block(request, username):
    """Deactivate (block) a non-staff user and redirect to their detail page."""
    user = get_object_or_404(User, username=username, is_staff=False)
    user.is_active = False
    user.save()
    msg = _(('The user %s is now blocked.' % user))
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(reverse('user_details', args=[user.username]))
def test_session(server_app):
    """session() must delegate to the socket.io server using the flask sid."""
    server_app.sio = MagicMock()
    with server_app.app.test_request_context():
        # Simulate an active socket.io request id on the flask request.
        flask.request.sid = 1234
        outcome = server_app.session()
        assert outcome == server_app.sio.server.session.return_value
        server_app.sio.server.session.assert_called_once_with(1234, namespace=None)
class Test_prev_next_history(unittest.TestCase):
    """Walk the history cursor backwards and forwards through a fixed set
    of previously entered lines."""
    t = u'test text'
    def setUp(self):
        self.q = LineHistory()
        for text in [u'aaaa', u'aaba', u'aaca', u'akca', u'bbb', u'ako']:
            self.q.add_history(RL(text))
    def test_previous_history(self):
        hist = self.q
        assert (hist.history_cursor == 6)
        line = RL(u'')
        # Stepping back walks newest-to-oldest and then sticks at the oldest.
        for expected in [u'ako', u'bbb', u'akca', u'aaca', u'aaba', u'aaaa', u'aaaa']:
            hist.previous_history(line)
            assert (line.get_line_text() == expected)
    def test_next_history(self):
        hist = self.q
        hist.beginning_of_history()
        assert (hist.history_cursor == 0)
        line = RL(u'')
        # Stepping forward walks to the newest entry and then sticks there.
        for expected in [u'aaba', u'aaca', u'akca', u'bbb', u'ako', u'ako']:
            hist.next_history(line)
            assert (line.get_line_text() == expected)
def get_dataloaders(args):
    """Build (train, val, test) DataLoaders for CIFAR-10/100 or ImageFolder data.

    With ``args.use_valid`` a validation split is carved out of the training
    set via a random index permutation persisted under ``args.save`` so the
    split is stable across runs; otherwise the official test set serves as
    both val and test. Loaders not requested in ``args.splits`` stay None.

    Fix: the non-use_valid branch tested ``('val' or ('test' in
    args.splits))``, which is always truthy because the non-empty string
    'val' is truthy — a val/test loader was built even when neither split
    was requested. It now checks membership for both split names.
    """
    (train_loader, val_loader, test_loader) = (None, None, None)
    if (args.data == 'cifar10'):
        normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467], std=[0.2471, 0.2435, 0.2616])
        train_set = datasets.CIFAR10(args.data_root, train=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_set = datasets.CIFAR10(args.data_root, train=False, transform=transforms.Compose([transforms.ToTensor(), normalize]))
    elif (args.data == 'cifar100'):
        normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])
        train_set = datasets.CIFAR100(args.data_root, train=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_set = datasets.CIFAR100(args.data_root, train=False, transform=transforms.Compose([transforms.ToTensor(), normalize]))
    else:
        # Generic ImageFolder layout with ImageNet-style normalization.
        traindir = os.path.join(args.data_root, 'train')
        valdir = os.path.join(args.data_root, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        train_set = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_set = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    if args.use_valid:
        train_set_index = torch.randperm(len(train_set))
        # Reuse a persisted permutation so the train/val split is stable.
        if os.path.exists(os.path.join(args.save, 'index.pth')):
            print('!!!!!! Load train_set_index !!!!!!')
            train_set_index = torch.load(os.path.join(args.save, 'index.pth'))
        else:
            print('!!!!!! Save train_set_index !!!!!!')
            torch.save(train_set_index, os.path.join(args.save, 'index.pth'))
        num_sample_valid = 5000 if args.data.startswith('cifar') else 50000
        if ('train' in args.splits):
            train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_set_index[:(- num_sample_valid)]), num_workers=args.workers, pin_memory=False)
        if ('val' in args.splits):
            val_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_set_index[(- num_sample_valid):]), num_workers=args.workers, pin_memory=False)
        if ('test' in args.splits):
            test_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)
    else:
        if ('train' in args.splits):
            train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=False)
        # Bug fix: was `('val' or ('test' in args.splits))`, which is always
        # truthy since 'val' is a non-empty string.
        if (('val' in args.splits) or ('test' in args.splits)):
            val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)
            test_loader = val_loader
    return (train_loader, val_loader, test_loader)
def compute_predictions_logits(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer):
    """Convert raw start/end logits into final SQuAD-style text predictions.

    For each example, candidate (start, end) spans are gathered from all of
    its features, filtered, ranked, and mapped back to the original document
    text.  Three JSON files are written: the best prediction per question,
    the n-best list per question, and (when ``version_2_with_negative``) the
    null-vs-span score differences.

    Args:
        all_examples: example objects exposing ``.doc_tokens`` and ``.qas_id``.
        all_features: feature objects exposing ``.example_index``,
            ``.unique_id``, ``.tokens``, ``.token_to_orig_map`` and
            ``.token_is_max_context``.
        all_results: model outputs exposing ``.unique_id``, ``.start_logits``
            and ``.end_logits``.
        n_best_size: number of top start/end indexes and n-best entries kept.
        max_answer_length: maximum answer span length, in tokens.
        do_lower_case: forwarded to ``get_final_text``.
        output_prediction_file: path of the best-prediction JSON.
        output_nbest_file: path of the n-best JSON.
        output_null_log_odds_file: path of the null-odds JSON (v2 only).
        verbose_logging: forwarded to ``get_final_text``.
        version_2_with_negative: enable SQuAD v2 "no answer" handling.
        null_score_diff_threshold: predict "" when the null score beats the
            best span score by more than this margin.
        tokenizer: used to detokenize candidate token spans.

    Returns:
        OrderedDict mapping ``qas_id`` to the predicted answer text.
    """
    logger.info(('Writing predictions to: %s' % output_prediction_file))
    logger.info(('Writing nbest to: %s' % output_nbest_file))
    # Group features by the example they were derived from (long documents
    # are split into several overlapping features).
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    # Index model outputs by feature id for O(1) lookup below.
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # Track the best "no answer" score across this example's features.
        score_null = 1000000  # large sentinel; any real null score is smaller
        min_null_feature_index = 0
        null_start_logit = 0
        null_end_logit = 0
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            if version_2_with_negative:
                # Null prediction is scored at the [CLS] position (index 0).
                feature_null_score = (result.start_logits[0] + result.end_logits[0])
                if (feature_null_score < score_null):
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Discard invalid candidates: out of range, pointing at
                    # question/special tokens (not in token_to_orig_map),
                    # split across contexts, reversed, or too long.
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
        if version_2_with_negative:
            # Add the best null candidate so it competes with real spans.
            prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit))
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            if (pred.start_index > 0):
                # Real span: detokenize and align back to the original text.
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
                # Normalize whitespace before aligning against original text.
                tok_text = tok_text.strip()
                tok_text = ' '.join(tok_text.split())
                orig_text = ' '.join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if (final_text in seen_predictions):
                    continue  # de-duplicate identical answer strings
                seen_predictions[final_text] = True
            else:
                # start_index == 0 is the null prediction (empty answer).
                final_text = ''
                seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        if version_2_with_negative:
            if ('' not in seen_predictions):
                nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit))
            # Rare edge case: only the null prediction survived; add a nonce
            # entry so there is always a non-null option.
            if (len(nbest) == 1):
                nbest.insert(0, _NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        # Rare edge case: no valid predictions at all.
        if (not nbest):
            nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        assert (len(nbest) >= 1)
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_logit + entry.end_logit))
            if (not best_non_null_entry):
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_logit'] = entry.start_logit
            output['end_logit'] = entry.end_logit
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        if (not version_2_with_negative):
            all_predictions[example.qas_id] = nbest_json[0]['text']
        else:
            # NOTE(review): if every nbest entry has empty text,
            # best_non_null_entry stays None and this line raises — the
            # nonce-entry insertions above appear to prevent that, but
            # confirm for the version_2_with_negative path.
            score_diff = ((score_null - best_non_null_entry.start_logit) - best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if (score_diff > null_score_diff_threshold):
                all_predictions[example.qas_id] = ''
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4) + '\n'))
    with open(output_nbest_file, 'w') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
    if version_2_with_negative:
        with open(output_null_log_odds_file, 'w') as writer:
            writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
    return all_predictions
class CIBHash(Base_Model):
    """Contrastive Information Bottleneck hashing model.

    A frozen VGG-16 backbone feeds a small trainable encoder that produces
    `encode_length` sigmoid probabilities; `hash_layer` binarizes them into
    hash codes.  Training combines a contrastive (NT-Xent) loss between two
    augmented views with a symmetrized KL term between their probabilities.
    """

    def __init__(self, hparams):
        super().__init__(hparams=hparams)

    def define_parameters(self):
        """Build the frozen VGG-16 feature extractor and the trainable encoder."""
        self.vgg = torchvision.models.vgg16(pretrained=True)
        # Drop VGG's final classification layer; keep the 4096-d fc7 output.
        self.vgg.classifier = nn.Sequential(*list(self.vgg.classifier.children())[:6])
        for param in self.vgg.parameters():
            param.requires_grad = False  # backbone stays frozen
        self.encoder = nn.Sequential(nn.Linear(4096, 1024), nn.ReLU(), nn.Linear(1024, self.hparams.encode_length))
        self.criterion = NtXentLoss(self.hparams.batch_size, self.hparams.temperature)

    def _hash_probs(self, img):
        """Return per-bit probabilities in (0, 1) for a batch of images."""
        feats = self.vgg.features(img)
        feats = feats.view(feats.size(0), -1)
        feats = self.vgg.classifier(feats)
        return torch.sigmoid(self.encoder(feats))

    def forward(self, imgi, imgj, device):
        """Compute the combined loss for two augmented views of a batch.

        Returns a dict with 'loss' (total), 'contra_loss' and 'kl_loss'.
        """
        prob_i = self._hash_probs(imgi)
        z_i = hash_layer(prob_i - 0.5)
        prob_j = self._hash_probs(imgj)
        z_j = hash_layer(prob_j - 0.5)
        # Symmetrized KL between the two views' bit distributions.
        kl_loss = (self.compute_kl(prob_i, prob_j) + self.compute_kl(prob_j, prob_i)) / 2
        contra_loss = self.criterion(z_i, z_j, device)
        loss = contra_loss + (self.hparams.weight * kl_loss)
        return {'loss': loss, 'contra_loss': contra_loss, 'kl_loss': kl_loss}

    def encode_discrete(self, x):
        """Return binary hash codes for a batch of images."""
        prob = self._hash_probs(x)
        return hash_layer(prob - 0.5)

    def compute_kl(self, prob, prob_v):
        """Bernoulli KL(prob || prob_v), summed over bits, averaged over batch.

        `prob_v` is detached so gradients flow only through `prob`.
        """
        prob_v = prob_v.detach()
        kl = ((prob * (torch.log(prob + 1e-08) - torch.log(prob_v + 1e-08)))
              + ((1 - prob) * (torch.log((1 - prob) + 1e-08) - torch.log((1 - prob_v) + 1e-08))))
        kl = torch.mean(torch.sum(kl, axis=1))
        return kl

    def configure_optimizers(self):
        # Only the encoder is trained; the VGG backbone is frozen.
        return torch.optim.Adam([{'params': self.encoder.parameters()}], lr=self.hparams.lr)

    def get_hparams_grid(self):
        grid = Base_Model.get_general_hparams_grid()
        grid.update({'temperature': [0.2, 0.3, 0.4], 'weight': [0.001, 0.005, 0.0005, 0.0001, 5e-05, 1e-05]})
        return grid

    @staticmethod
    def get_model_specific_argparser():
        """Return the general parser extended with CIBHash-specific options."""
        parser = Base_Model.get_general_argparser()
        # fix: '%(default)d' truncated the 0.3 float default to "0" in help text
        parser.add_argument('-t', '--temperature', default=0.3, type=float, help='Temperature [%(default)f]')
        parser.add_argument('-w', '--weight', default=0.001, type=float, help='weight of I(x,z) [%(default)f]')
        return parser
def se_resnet152(num_classes, loss, pretrained='imagenet', **kwargs):
    """Build an SE-ResNet-152, optionally initialized from ImageNet weights.

    Args:
        num_classes: size of the classification head.
        loss: loss specification forwarded to SENet.
        pretrained: 'imagenet' loads pretrained weights; anything else skips.
        **kwargs: extra arguments forwarded to SENet.
    """
    model = SENet(
        num_classes=num_classes,
        loss=loss,
        block=SEResNetBottleneck,
        layers=[3, 8, 36, 3],
        groups=1,
        reduction=16,
        dropout_p=None,
        inplanes=64,
        input_3x3=False,
        downsample_kernel_size=1,
        downsample_padding=0,
        last_stride=2,
        fc_dims=None,
        **kwargs
    )
    if pretrained == 'imagenet':
        url = pretrained_settings['se_resnet152']['imagenet']['url']
        init_pretrained_weights(model, url)
    return model
# fix: the fixture decorator was truncated to a bare '(params=...)' expression,
# which is a syntax error; restore the pytest fixture declaration.
@pytest.fixture(params=[(10.0, 10.0, 10.0), (5.0, 5.0, 1.0)])
def simple_piecewise_model(request):
    """Build a two-step piecewise-link model plus its expected node results.

    Parametrized over (in_flow, out_flow, benefit).  The first sublink carries
    up to the 5.0 minimum-flow requirement at negative cost; the remainder
    goes through the uncapped second sublink.
    """
    in_flow, out_flow, benefit = request.param
    min_flow_req = 5.0
    model = pywr.core.Model()
    inpt = pywr.core.Input(model, name='Input', max_flow=in_flow)
    lnk = pywr.core.PiecewiseLink(model, name='Link', nsteps=2, costs=[-1.0, 0.0], max_flows=[min_flow_req, None])
    inpt.connect(lnk)
    otpt = pywr.core.Output(model, name='Output', min_flow=out_flow, cost=-benefit)
    lnk.connect(otpt)
    # When the benefit outweighs the sublink cost the full inflow is sent;
    # otherwise only the required outflow moves.
    expected_sent = in_flow if benefit > 1.0 else out_flow
    expected_node_results = {
        'Input': expected_sent,
        'Link': expected_sent,
        'Link Sublink 0': min(min_flow_req, expected_sent),
        'Link Sublink 1': expected_sent - min(min_flow_req, expected_sent),
        'Output': expected_sent,
    }
    return model, expected_node_results
class PyDemoPlugin(QPyDesignerCustomWidgetPlugin):
    """Qt Designer plugin that exposes the PyDemo widget in the widget box."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self._initialized = False

    def initialize(self, formEditor):
        """Mark the plugin initialized; repeated calls are no-ops."""
        if not self._initialized:
            self._initialized = True

    def isInitialized(self):
        return self._initialized

    def createWidget(self, parent):
        """Instantiate the custom widget for Designer."""
        return PyDemo(parent)

    def name(self):
        return 'PyDemo'

    def group(self):
        return 'PyQt Examples'

    def icon(self):
        return QIcon(_logo_pixmap)

    def toolTip(self):
        return 'PyQt demonstration widget'

    def whatsThis(self):
        return 'PyDemo is a demonstration custom widget written in Python using PyQt.'

    def isContainer(self):
        # PyDemo cannot host child widgets in Designer.
        return False

    def domXml(self):
        """Return the default XML Designer uses when the widget is dropped."""
        return '<widget class="PyDemo" name="pyDemo">\n <property name="toolTip" >\n <string>PyQt demonstration widget</string>\n </property>\n <property name="whatsThis" >\n <string>PyDemo is a demonstration custom widget written in Python using PyQt.</string>\n </property>\n</widget>\n'

    def includeFile(self):
        return 'pydemo'
class SerializeMemoizer(Serialize):
    """Tracks objects of selected types so they are serialized once and
    shared by reference afterwards."""

    __serialize_fields__ = ('memoized',)

    def __init__(self, types_to_memoize: List) -> None:
        self.types_to_memoize = tuple(types_to_memoize)
        self.memoized = Enumerator()  # assigns stable integer ids to objects

    def in_types(self, value: Serialize) -> bool:
        """Return True if `value` should be memoized."""
        return isinstance(value, self.types_to_memoize)

    def serialize(self) -> Dict[int, Any]:
        """Serialize the id -> object table."""
        return _serialize(self.memoized.reversed(), None)

    # fix: the `cls` first parameter shows this was written as a classmethod,
    # but the decorator was missing — an instance call would have bound the
    # instance to `data`.
    @classmethod
    def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]:
        """Rebuild the memoized table from serialized `data`."""
        return _deserialize(data, namespace, memo)
def test_write_pinned_buffer(tmpdir):
    """Round-trip a SigMF recording through write_sigmf using a pinned buffer."""
    data_fname = tmpdir.join('test_read.sigmf-data')
    meta_fname = tmpdir.join('test_read.sigmf-meta')
    reference = cp.random.rand(100).astype(cp.complex64)
    # Seed the data file and its minimal metadata sidecar.
    cusignal.write_bin(str(data_fname), reference)
    meta_fname.write(json.dumps({'core:datatype': 'cf32'}))
    # Allocate a pinned staging buffer sized from the on-disk payload.
    raw = cusignal.read_bin(str(data_fname))
    pinned = cusignal.get_pinned_mem(raw.shape, cp.ubyte)
    cusignal.write_sigmf(str(data_fname), reference, buffer=pinned, append=False)
    restored = cusignal.read_sigmf(str(data_fname), str(meta_fname))
    cp.testing.assert_array_equal(reference, restored)
# fix: the decorator line was truncated to a bare '.parametrize(...)';
# restore the pytest.mark.parametrize declaration.
@pytest.mark.parametrize('mark', [None, '', 'skip', 'xfail'])
def test_parameterset_for_parametrize_marks(pytester: Pytester, mark: Optional[str]) -> None:
    """Empty parameter sets get the configured mark (default: skip)."""
    if mark is not None:
        pytester.makeini('\n [pytest]\n {}={}\n '.format(EMPTY_PARAMETERSET_OPTION, mark))
    config = pytester.parseconfig()
    from _pytest.mark import pytest_configure, get_empty_parameterset_mark
    pytest_configure(config)
    result_mark = get_empty_parameterset_mark(config, ['a'], all)
    # None (unset) and '' (empty ini value) both fall back to 'skip'.
    if mark in (None, ''):
        mark = 'skip'
    assert result_mark.name == mark
    assert result_mark.kwargs['reason'].startswith('got empty parameter set ')
    if mark == 'xfail':
        assert result_mark.kwargs.get('run') is False
def write_shapefile(df, filename, geomtype='line', prj=None):
    """Write a DataFrame with a ``coords`` column to an ESRI shapefile.

    Args:
        df: DataFrame whose rows become records; must have a ``coords``
            attribute per row.  NOTE: a 'Name' column is added in place
            (pre-existing behavior; callers' frames are mutated).
        filename: output shapefile path.
        geomtype: 'point', 'line' or 'polygon'.
        prj: projection file to copy alongside; defaults to the bundled one.

    Raises:
        ValueError: if ``geomtype`` is not one of the supported values.
    """
    import shapefile
    df['Name'] = df.index
    # fix: an unknown geomtype previously fell through and crashed with a
    # NameError on `w`; validate it explicitly instead.
    shape_types = {'point': shapefile.POINT, 'line': shapefile.POLYLINE, 'polygon': shapefile.POLYGON}
    if geomtype not in shape_types:
        raise ValueError("geomtype must be 'point', 'line' or 'polygon', got %r" % (geomtype,))
    w = shapefile.Writer(filename, shape_types[geomtype], autoBalance=True)
    for fieldname in df.columns:
        w.field(fieldname, 'C')
    for k, row in df.iterrows():
        w.record(*row.tolist())
        if geomtype == 'line':
            w.line([row.coords])
        elif geomtype == 'point':
            w.point(*row.coords[0])
        elif geomtype == 'polygon':
            w.poly([row.coords])
    w.close()
    # Always ship a .prj next to the shapefile so GIS tools know the CRS.
    if prj is None:
        prj = os.path.join(ROOT_DIR, 'swmmio/defs/default.prj')
    prj_filepath = os.path.splitext(filename)[0] + '.prj'
    shutil.copy(prj, prj_filepath)
def list_killable_nodes(label_selector=None):
    """Return the names of Ready nodes that chaos may target, excluding the
    node kraken itself runs on.  Optionally filtered by a label selector."""
    try:
        if label_selector:
            node_list = cli.list_node(pretty=True, label_selector=label_selector)
        else:
            node_list = cli.list_node(pretty=True)
    except ApiException as e:
        logging.error('Exception when calling CoreV1Api->list_node: %s\n' % e)
        raise e
    # A node qualifies when its Ready condition reports status "True".
    return [
        node.metadata.name
        for node in node_list.items
        if kraken_node_name != node.metadata.name
        for cond in node.status.conditions
        if str(cond.type) == 'Ready' and str(cond.status) == 'True'
    ]
def main():
    """Entry point: load pursuit options and data, then run multi-pursuit.

    Reads './pursuit-op.json' for options, resolves the subtomogram/mask
    paths in the referenced data file to absolute paths, and hands
    everything to aitom's pursuit routine.
    """
    # Escalate warnings to errors so numerical issues surface immediately.
    warnings.filterwarnings('error')
    with open('./pursuit-op.json') as f:
        op = json.load(f)
    # 'self' is a plain attribute bag passed into pursuit(), not a class
    # instance — presumably pursuit() expects .pool/.cache/.runner on it.
    self = CO.Object()
    self.pool = None
    self.cache = Cache(tmp_dir=op['options']['tmp_dir'])
    self.runner = QueueMaster(op['options']['network']['qhost'], op['options']['network']['qport'])
    print('loading ', op['data_file'])
    with open(op['data_file']) as f:
        data_json = json.load(f)
    # Make subtomogram/mask paths absolute, relative to the data file's dir.
    for d in data_json:
        if (not os.path.isabs(d['subtomogram'])):
            d['subtomogram'] = os.path.abspath(os.path.join(os.path.dirname(op['data_file']), d['subtomogram']))
        if (not os.path.isabs(d['mask'])):
            d['mask'] = os.path.abspath(os.path.join(os.path.dirname(op['data_file']), d['mask']))
    # data_file is consumed above; remove it so op carries only options.
    del op['data_file']
    from aitom.tomominer.pursuit.multi.main import pursuit
    pursuit(self=self, op=op, data_json=data_json)
class ListControl(Control):
    """Base class for list-style HTML form controls (SELECT / RADIO / CHECKBOX).

    Holds a list of Item objects; 'value' is the list of names of the
    selected, enabled items.  Read-only attributes 'name', 'type' and
    'multiple' are enforced through __setattr__ / direct __dict__ writes.
    """

    _label = None

    def __init__(self, type, name, attrs={}, select_default=False,
                 called_as_base_class=False, index=None):
        # `attrs` default dict is shared but only read here, never mutated.
        if not called_as_base_class:
            raise NotImplementedError()
        # Write through __dict__ to bypass the read-only guard in __setattr__.
        self.__dict__['type'] = type.lower()
        self.__dict__['name'] = name
        self._value = attrs.get('value')
        self.disabled = False
        self.readonly = False
        self.id = attrs.get('id')
        self._closed = False
        self.items = []
        self._form = None
        self._select_default = select_default
        self._clicked = False

    def clear(self):
        """Deselect every (non-disabled) item."""
        self.value = []

    def is_of_kind(self, kind):
        """Classify the control: 'list', plus 'multilist' or 'singlelist'."""
        if kind == 'list':
            return True
        elif kind == 'multilist':
            return bool(self.multiple)
        elif kind == 'singlelist':
            return not self.multiple
        else:
            return False

    def get_items(self, name=None, label=None, id=None, exclude_disabled=False):
        """Return all items matching the given filters (all optional)."""
        if (name is not None) and (not isstringlike(name)):
            raise TypeError('item name must be string-like')
        if (label is not None) and (not isstringlike(label)):
            raise TypeError('item label must be string-like')
        if (id is not None) and (not isstringlike(id)):
            raise TypeError('item id must be string-like')
        items = []
        for o in self.items:
            if exclude_disabled and o.disabled:
                continue
            if (name is not None) and (o.name != name):
                continue
            if label is not None:
                # Substring match against any of the item's labels.
                for l in o.get_labels():
                    if label in l.text:
                        break
                else:
                    continue
            if (id is not None) and (o.id != id):
                continue
            items.append(o)
        return items

    def get(self, name=None, label=None, id=None, nr=None, exclude_disabled=False):
        """Return the single matching item (see disambiguate for `nr`)."""
        items = self.get_items(name, label, id, exclude_disabled)
        return disambiguate(items, nr, name=name, label=label, id=id)

    def _get(self, name, by_label=False, nr=None, exclude_disabled=False):
        # Compatibility shim for the deprecated by_label-style lookups.
        if by_label:
            name, label = None, name
        else:
            name, label = name, None
        return self.get(name, label, nr, exclude_disabled)

    def toggle(self, name, by_label=False, nr=None):
        deprecation('item = control.get(...); item.selected = not item.selected')
        o = self._get(name, by_label, nr)
        self._set_selected_state(o, not o.selected)

    def set(self, selected, name, by_label=False, nr=None):
        deprecation('control.get(...).selected = <boolean>')
        self._set_selected_state(self._get(name, by_label, nr), selected)

    def _set_selected_state(self, item, action):
        """Select/deselect `item`; for single-select controls, selecting one
        item deselects all the others."""
        if self.disabled:
            raise AttributeError("control '%s' is disabled" % self.name)
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        # fix: was `(action == bool(action))`, a no-op comparison; the intent
        # (as in upstream mechanize) is to normalize `action` to a bool.
        action = bool(action)
        if item.disabled:
            raise AttributeError('item is disabled')
        if self.multiple:
            item.__dict__['_selected'] = action
        elif not action:
            item.__dict__['_selected'] = False
        else:
            for o in self.items:
                o.__dict__['_selected'] = False
            item.__dict__['_selected'] = True

    def toggle_single(self, by_label=None):
        deprecation('control.items[0].selected = not control.items[0].selected')
        if len(self.items) != 1:
            raise ItemCountError("'%s' is not a single-item control" % self.name)
        item = self.items[0]
        self._set_selected_state(item, not item.selected)

    def set_single(self, selected, by_label=None):
        deprecation('control.items[0].selected = <boolean>')
        if len(self.items) != 1:
            raise ItemCountError("'%s' is not a single-item control" % self.name)
        self._set_selected_state(self.items[0], selected)

    def get_item_disabled(self, name, by_label=False, nr=None):
        deprecation('control.get(...).disabled')
        return self._get(name, by_label, nr).disabled

    def set_item_disabled(self, disabled, name, by_label=False, nr=None):
        deprecation('control.get(...).disabled = <boolean>')
        self._get(name, by_label, nr).disabled = disabled

    def set_all_items_disabled(self, disabled):
        """Set the disabled state of every item."""
        for o in self.items:
            o.disabled = disabled

    def get_item_attrs(self, name, by_label=False, nr=None):
        deprecation('control.get(...).attrs')
        return self._get(name, by_label, nr).attrs

    def close_control(self):
        # After closing, same-named controls parse as new controls rather
        # than merging into this one (see add_to_form).
        self._closed = True

    def add_to_form(self, form):
        """Attach to `form`, merging with an open same-name/type control."""
        assert (self._form is None) or (form == self._form), "can't add control to more than one form"
        self._form = form
        if self.name is None:
            # Never merge nameless controls.
            Control.add_to_form(self, form)
        else:
            # Search backwards for the most recent matching control.
            for ii in range(len(form.controls) - 1, -1, -1):
                control = form.controls[ii]
                if (control.name == self.name) and (control.type == self.type):
                    if control._closed:
                        Control.add_to_form(self, form)
                    else:
                        control.merge_control(self)
                    break
            else:
                Control.add_to_form(self, form)

    def merge_control(self, control):
        """Absorb another control's items (e.g. a later same-name SELECT)."""
        assert bool(control.multiple) == bool(self.multiple)
        self.items.extend(control.items)

    def fixup(self):
        # Re-point every item at this control (needed after merging).
        for o in self.items:
            o.__dict__['_control'] = self

    def __getattr__(self, name):
        # 'value' is computed from the selected, enabled items.
        if name == 'value':
            if self.name is None:
                return []
            return [o.name for o in self.items if (o.selected and (not o.disabled))]
        else:
            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

    def __setattr__(self, name, value):
        if name == 'value':
            if self.disabled:
                raise AttributeError("control '%s' is disabled" % self.name)
            if self.readonly:
                raise AttributeError("control '%s' is readonly" % self.name)
            self._set_value(value)
        elif name in ('name', 'type', 'multiple'):
            raise AttributeError('%s attribute is readonly' % name)
        else:
            self.__dict__[name] = value

    def _set_value(self, value):
        """Set selection from a sequence of item names."""
        if (value is None) or isstringlike(value):
            raise TypeError('ListControl, must set a sequence')
        if not value:
            # Empty sequence: deselect everything that isn't disabled.
            for o in self.items:
                if not o.disabled:
                    o.selected = False
        elif self.multiple:
            self._multiple_set_value(value)
        elif len(value) > 1:
            raise ItemCountError('single selection list, must set sequence of length 0 or 1')
        else:
            self._single_set_value(value)

    def _get_items(self, name, target=1):
        """Return (selected, unselected) non-disabled items named `name`,
        ensuring at least `target` of them exist."""
        all_items = self.get_items(name)
        items = [o for o in all_items if not o.disabled]
        if len(items) < target:
            if len(all_items) < target:
                raise ItemNotFoundError('insufficient items with name %r' % name)
            else:
                raise AttributeError('insufficient non-disabled items with name %s' % name)
        on = []
        off = []
        for o in items:
            if o.selected:
                on.append(o)
            else:
                off.append(o)
        return on, off

    def _single_set_value(self, value):
        assert len(value) == 1
        on, off = self._get_items(value[0])
        assert len(on) <= 1
        if not on:
            off[0].selected = True

    def _multiple_set_value(self, value):
        """Select exactly the items named in `value` (names may repeat)."""
        turn_on = []
        # Start by assuming every currently-selected item gets turned off.
        turn_off = [item for item in self.items if (item.selected and (not item.disabled))]
        names = {}
        for nn in value:
            names[nn] = names.setdefault(nn, 0) + 1
        for name, count in iteritems(names):
            on, off = self._get_items(name, count)
            for i in range(count):
                if on:
                    # Already selected: keep it (remove from the off-list).
                    item = on[0]
                    del on[0]
                    del turn_off[turn_off.index(item)]
                else:
                    item = off[0]
                    del off[0]
                    turn_on.append(item)
        for item in turn_off:
            item.selected = False
        for item in turn_on:
            item.selected = True

    def set_value_by_label(self, value):
        """Set selection from a sequence of item labels."""
        if isstringlike(value):
            raise TypeError(value)
        if (not self.multiple) and (len(value) > 1):
            raise ItemCountError('single selection list, must set sequence of length 0 or 1')
        items = []
        for nn in value:
            found = self.get_items(label=nn)
            if len(found) > 1:
                # Multiple items share this label; only ambiguous if their
                # submitted names differ.
                opt_name = found[0].name
                if [o for o in found[1:] if o.name != opt_name]:
                    raise AmbiguityError(nn)
            for o in found:
                if o not in items:
                    items.append(o)
                    break
            else:
                raise ItemNotFoundError(nn)
        self.value = []
        for o in items:
            o.selected = True

    def get_value_by_label(self):
        """Return the labels of selected items (None for unlabeled items)."""
        res = []
        for o in self.items:
            if (not o.disabled) and o.selected:
                for l in o.get_labels():
                    if l.text:
                        res.append(l.text)
                        break
                else:
                    res.append(None)
        return res

    def possible_items(self, by_label=False):
        deprecation('[item.name for item in self.items]')
        if by_label:
            res = []
            for o in self.items:
                for l in o.get_labels():
                    if l.text:
                        res.append(l.text)
                        break
                else:
                    res.append(None)
            return res
        return [o.name for o in self.items]

    def _totally_ordered_pairs(self):
        """Return (index, control-name, item-name) tuples for submission."""
        if self.disabled or (self.name is None):
            return []
        else:
            return [(o._index, self.name, o.name) for o in self.items if (o.selected and (not o.disabled))]

    def __str__(self):
        name = self.name
        if name is None:
            name = '<None>'
        display = [str(o) for o in self.items]
        infos = []
        if self.disabled:
            infos.append('disabled')
        if self.readonly:
            infos.append('readonly')
        info = ', '.join(infos)
        if info:
            info = ' (%s)' % info
        return '<%s(%s=[%s])%s>' % (self.__class__.__name__, name, ', '.join(display), info)
# fix: the decorator line was truncated to a bare '.parametrize(...)';
# restore the pytest.mark.parametrize declaration.
@pytest.mark.parametrize('holder', make_holder())
def test_hookrecorder_basic(holder) -> None:
    """HookRecorder captures hook calls and popcall retrieves them by name."""
    pm = PytestPluginManager()
    pm.add_hookspecs(holder)
    rec = HookRecorder(pm, _ispytest=True)
    pm.hook.pytest_xyz(arg=123)
    call = rec.popcall('pytest_xyz')
    assert call.arg == 123
    assert call._name == 'pytest_xyz'
    # Popping a hook that was never called fails the test.
    pytest.raises(pytest.fail.Exception, rec.popcall, 'abc')
    pm.hook.pytest_xyz_noarg()
    call = rec.popcall('pytest_xyz_noarg')
    assert call._name == 'pytest_xyz_noarg'
def imitation_learning_loss(player):
    """Sum cross-entropy losses between the agent's action logits and the
    expert actions recorded for this episode.

    Args:
        player: agent state with `.gpu_id` (int; negative means CPU —
            matching the a3c convention elsewhere in this codebase),
            `.il_update_actions` (step -> expert action index) and
            `.probs` (step -> action logits of shape [1, n_actions]).

    Returns:
        Scalar tensor with the accumulated loss (0 if there are no steps).
    """
    episode_loss = torch.tensor(0)
    # fix: previously .cuda() was called unconditionally, crashing on
    # CPU-only hosts; only move tensors when a GPU is assigned.
    if player.gpu_id >= 0:
        with torch.cuda.device(player.gpu_id):
            episode_loss = episode_loss.cuda()
    for i in player.il_update_actions:
        step_optimal_action = torch.tensor(player.il_update_actions[i]).reshape([1]).long()
        if player.gpu_id >= 0:
            with torch.cuda.device(player.gpu_id):
                step_optimal_action = step_optimal_action.cuda()
        step_loss = F.cross_entropy(player.probs[i], step_optimal_action)
        episode_loss = episode_loss + step_loss
    return episode_loss
class CodeBlockPreprocessor(Preprocessor):
    """Markdown preprocessor that replaces [sourcecode:lang]...[/sourcecode]
    blocks with Pygments-highlighted HTML."""

    pattern = re.compile('\\[sourcecode:(.+?)\\](.+?)\\[/sourcecode\\]', re.S)
    formatter = HtmlFormatter(noclasses=INLINESTYLES)

    def run(self, lines):
        def render(match):
            # Unknown language names fall back to plain-text lexing.
            try:
                lexer = get_lexer_by_name(match.group(1))
            except ValueError:
                lexer = TextLexer()
            code = highlight(match.group(2), lexer, self.formatter)
            # Preserve blank lines, then convert newlines to <br /> so the
            # block survives Markdown's paragraph handling.
            code = code.replace('\n\n', '\n&nbsp;\n'.replace('&nbsp;', ' ')).replace('\n', '<br />')
            return '\n\n<div class="code">%s</div>\n\n' % code

        text = '\n'.join(lines)
        text = self.pattern.sub(render, text)
        return text.split('\n')
class HotpotGoldParagraph(HotpotParagraph):
    """A gold paragraph for a HotpotQA question, annotated with which of its
    sentences are supporting facts."""

    def __init__(self, title: str, sentences: List[List[str]], question_id: str,
                 supporting_sentence_ids: List[int]):
        super().__init__(title, sentences)
        self.question_id = question_id
        self.supporting_sentence_ids = supporting_sentence_ids

    def repr_supporting_facts(self) -> str:
        """Render the supporting sentences as '<title>: <joined tokens>'."""
        supporting = [self.sentences[i] for i in self.supporting_sentence_ids]
        joined = ' '.join(flatten_iterable(supporting))
        return f"{self.title}: {joined}"
class QueryExtension(rq.ReplyRequest):
    """X11 core protocol QueryExtension request (opcode 98).

    Asks the server whether the named extension is present; the reply
    carries the extension's major opcode and its first event/error codes.
    """
    # Request: opcode 98, name length (2 bytes), 2 pad bytes, then the name.
    _request = rq.Struct(rq.Opcode(98), rq.Pad(1), rq.RequestLength(), rq.LengthOf('name', 2), rq.Pad(2), rq.String8('name'))
    # Reply: presence flag plus the extension's opcode/event/error bases.
    _reply = rq.Struct(rq.ReplyCode(), rq.Pad(1), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Card8('present'), rq.Card8('major_opcode'), rq.Card8('first_event'), rq.Card8('first_error'), rq.Pad(20))
def save_to_files(path, eight_bit_pan, eight_bit_rgb, eight_bit_rgbn, eight_bit_ps):
    """Write the 8-bit pan, RGBN and pansharpened rasters as TIFFs and the
    RGB preview as a PNG, all prefixed with `path`."""
    tiff_outputs = (
        (f'{path}_pan_8bit.tiff', eight_bit_pan),
        (f'{path}_rgbn_8bit.tiff', eight_bit_rgbn),
        (f'{path}_ps_8bit.tiff', eight_bit_ps),
    )
    for out_name, image in tiff_outputs:
        tiff.imwrite(out_name, image)
    tiff.imwrite(f'{path}_rgb_8bit.png', eight_bit_rgb)
def fn_amp_aware_filtering(t_input, amp, name='Amplitude_aware_filtering'):
    """Amplitude-weighted blur: blur(input * amp) / blur(amp).

    Pixels with larger amplitude contribute more to the filtered result,
    and the division renormalizes so the weights sum to one per pixel.
    TF_EPS guards against division by zero where the amplitude is ~0.

    Args:
        t_input: input tensor; presumably NHWC with 3 channels (the blur
            kernel is tiled to depth 3) — TODO confirm.
        amp: per-pixel amplitude/weight tensor, same shape as t_input.
        name: variable scope name.

    Returns:
        The amplitude-normalized filtered tensor.
    """
    with tf.variable_scope(name):
        # Fixed 5x5 separable-looking blur kernel; values sum to ~1.
        blurkernel = tf.constant(np.array([[0.002969, 0.013306, 0.021938, 0.013306, 0.002969], [0.013306, 0.059634, 0.09832, 0.059634, 0.013306], [0.021938, 0.09832, 0.162103, 0.09832, 0.021938], [0.013306, 0.059634, 0.09832, 0.059634, 0.013306], [0.002969, 0.013306, 0.021938, 0.013306, 0.002969]], np.float32), tf.float32)
        # Tile to 3 channels and add the depthwise-conv channel-multiplier dim.
        blurkernel3D = tf.tile(tf.expand_dims(blurkernel, (- 1)), (1, 1, 3))
        blurkernel4D = tf.expand_dims(blurkernel3D, (- 1))
        t_denorm = thoo_depthwise_conv2d(amp, blurkernel4D)
        t_numer = thoo_depthwise_conv2d((t_input * amp), blurkernel4D)
        return (t_numer / (t_denorm + TF_EPS))
def format_received_item(item_name: str, player_name: str) -> str:
    """Build the on-screen message for an item received from another player.

    Locked launcher expansions get a special message explaining that the
    matching launcher is still required; every other item uses the generic
    template.
    """
    if item_name == 'Locked Missile Expansion':
        template = 'Received Missile Expansion from {provider_name}, but the Missile Launcher is required to use it.'
    elif item_name == 'Locked Ship Missile Expansion':
        template = 'Received Ship Missile Expansion from {provider_name}, but the main launcher is required to use it.'
    else:
        template = 'Received {item_name} from {provider_name}.'
    return template.format(item_name=item_name, provider_name=player_name)
class W_Path(W_Object):
    """Wrapped filesystem-path value; equality is by path string."""

    errorname = 'path'
    _attrs_ = _immutable_fields_ = ['path']

    def __init__(self, p):
        self.path = p

    def equal(self, other):
        # Equal only to other W_Path instances with the same path string.
        return isinstance(other, W_Path) and other.path == self.path

    def write(self, port, env):
        port.write('(p+ %s)' % self.path)

    def tostring(self):
        return '#<path:%s>' % self.path
class TestCustomCircuitOracle(QiskitAquaTestCase):
    """Tests for CustomCircuitOracle with Deutsch-Jozsa and Grover."""

    def test_using_dj_with_constant_func(self):
        """A circuit that always flips the output qubit is 'constant'."""
        q_v = QuantumRegister(2, name='v')
        q_o = QuantumRegister(1, name='o')
        circuit = QuantumCircuit(q_v, q_o)
        circuit.x(q_o[0])  # output is 1 regardless of the input register
        oracle = CustomCircuitOracle(variable_register=q_v, output_register=q_o, circuit=circuit)
        algorithm = DeutschJozsa(oracle)
        result = algorithm.run(quantum_instance=QuantumInstance(BasicAer.get_backend('qasm_simulator')))
        self.assertEqual(result['result'], 'constant')

    def test_using_dj_with_balanced_func(self):
        """An output depending on one input bit via CNOT is 'balanced'."""
        q_v = QuantumRegister(2, name='v')
        q_o = QuantumRegister(1, name='o')
        circuit = QuantumCircuit(q_v, q_o)
        circuit.cx(q_v[0], q_o[0])  # output mirrors the first input qubit
        oracle = CustomCircuitOracle(variable_register=q_v, output_register=q_o, circuit=circuit)
        algorithm = DeutschJozsa(oracle)
        result = algorithm.run(quantum_instance=QuantumInstance(BasicAer.get_backend('qasm_simulator')))
        self.assertEqual(result['result'], 'balanced')

    def test_using_grover_for_error(self):
        """Grover requires a classical evaluation callback; omitting it raises."""
        q_v = QuantumRegister(2, name='v')
        q_o = QuantumRegister(1, name='o')
        circuit = QuantumCircuit(q_v, q_o)
        oracle = CustomCircuitOracle(variable_register=q_v, output_register=q_o, circuit=circuit)
        with self.assertRaises(AquaError):
            _ = Grover(oracle)

    def test_using_grover_for_ccx(self):
        """Grover on a Toffoli oracle returns the callback's assignment."""
        q_v = QuantumRegister(2, name='v')
        q_o = QuantumRegister(1, name='o')
        circuit = QuantumCircuit(q_v, q_o)
        circuit.ccx(q_v[0], q_v[1], q_o[0])  # output = AND of the two inputs
        oracle = CustomCircuitOracle(variable_register=q_v, output_register=q_o, circuit=circuit, evaluate_classically_callback=(lambda m: ((m == '11'), [1, 2])))
        algorithm = Grover(oracle)
        result = algorithm.run(quantum_instance=QuantumInstance(BasicAer.get_backend('qasm_simulator')))
        self.assertEqual(result.assignment, [1, 2])
class Solution(object):
    """Perfect-square check via binary search (no sqrt allowed)."""

    def isPerfectSquare(self, num):
        """Return True iff `num` is a perfect square.

        Binary-searches mid in [1, num] for mid*mid == num; O(log num)
        with exact integer arithmetic.

        Args:
            num: integer to test (values < 1 return False).
        """
        low, high = 1, num
        while low <= high:
            # fix: was `(low + high) / 2` — true division yields floats in
            # Python 3, losing precision (and correctness) for large num.
            mid = (low + high) // 2
            mid_square = mid * mid
            if mid_square == num:
                return True
            elif mid_square < num:
                low = mid + 1
            else:
                high = mid - 1
        return False
class KotlinLexer(RegexLexer):
    """Pygments lexer for Kotlin source code (.kt / .kts)."""

    name = 'Kotlin'
    # fix: the url string literal was truncated to an unterminated `url = '`
    # (a syntax error); restore the value used by upstream Pygments.
    url = 'https://kotlinlang.org/'
    aliases = ['kotlin']
    filenames = ['*.kt', '*.kts']
    mimetypes = ['text/x-kotlin']
    version_added = '1.5'
    flags = (re.MULTILINE | re.DOTALL)
    # Identifier character classes built from Unicode categories.
    # NOTE(review): upstream Pygments prefixes these with '@' ('@?[_...') to
    # allow annotation-prefixed names; the leading '@' appears stripped here
    # — confirm against the original file.
    kt_name = ((((('?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl')) + ']') + '[') + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc')) + ']*')
    kt_space_name = ((((('?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl')) + ']') + '[') + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc', 'Zs')) + "\\'~!%^&*()+=|\\[\\]:;,.<>/\\?-]*")
    # Plain identifier or backtick-quoted identifier.
    kt_id = (((('(' + kt_name) + '|`') + kt_space_name) + '`)')
    modifiers = 'actual|abstract|annotation|companion|const|crossinline|data|enum|expect|external|final|infix|inline|inner|internal|lateinit|noinline|open|operator|override|private|protected|public|sealed|suspend|tailrec|value'
    tokens = {
        'root': [
            ('[^\\S\\n]+', Whitespace),
            ('\\s+', Whitespace),
            ('\\\\$', String.Escape),
            ('\\n', Whitespace),
            ('(//.*?)(\\n)', bygroups(Comment.Single, Whitespace)),
            ('^(#!/.+?)(\\n)', bygroups(Comment.Single, Whitespace)),
            ('/[*].*?[*]/', Comment.Multiline),
            ('as\\?', Keyword),
            ('(as|break|by|catch|constructor|continue|do|dynamic|else|finally|get|for|if|init|[!]*in|[!]*is|out|reified|return|set|super|this|throw|try|typealias|typeof|vararg|when|where|while)\\b', Keyword),
            ('it\\b', Name.Builtin),
            (words(('Boolean?', 'Byte?', 'Char?', 'Double?', 'Float?', 'Int?', 'Long?', 'Short?', 'String?', 'Any?', 'Unit?')), Keyword.Type),
            (words(('Boolean', 'Byte', 'Char', 'Double', 'Float', 'Int', 'Long', 'Short', 'String', 'Any', 'Unit'), suffix='\\b'), Keyword.Type),
            ('(true|false|null)\\b', Keyword.Constant),
            ('(package|import)(\\s+)(\\S+)', bygroups(Keyword, Whitespace, Name.Namespace)),
            ('(\\?\\.)((?:[^\\W\\d]|\\$)[\\w$]*)', bygroups(Operator, Name.Attribute)),
            ('(\\.)((?:[^\\W\\d]|\\$)[\\w$]*)', bygroups(Punctuation, Name.Attribute)),
            # NOTE(review): upstream these two decorator rules start with '@'
            # ('@[^\\W\\d]...'); the '@' appears stripped — confirm.
            ('[^\\W\\d][\\w.]*', Name.Decorator),
            ('[^\\W\\d][\\w.]+', Name.Decorator),
            ('(object)(\\s+)(:)(\\s+)', bygroups(Keyword, Whitespace, Punctuation, Whitespace), 'class'),
            ((('((?:(?:' + modifiers) + '|fun)\\s+)*)(class|interface|object)(\\s+)'), bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'class'),
            ('(var|val)(\\s+)(\\()', bygroups(Keyword.Declaration, Whitespace, Punctuation), 'destructuring_assignment'),
            ((('((?:(?:' + modifiers) + ')\\s+)*)(var|val)(\\s+)'), bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'variable'),
            ((('((?:(?:' + modifiers) + ')\\s+)*)(fun)(\\s+)'), bygroups(using(this, state='modifiers'), Keyword.Declaration, Whitespace), 'function'),
            ('::|!!|\\?[:.]', Operator),
            ('[~^*!%&\\[\\]<>|+=/?-]', Operator),
            ('[{}();:.,]', Punctuation),
            ('"""', String, 'multiline_string'),
            ('"', String, 'string'),
            ("'\\\\.'|'[^\\\\]'", String.Char),
            ('[0-9](\\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|0[xX][0-9a-fA-F]+[Ll]?', Number),
            ((('' + kt_id) + '((\\?[^.])?)'), Name)],
        'class': [(kt_id, Name.Class, '#pop')],
        'variable': [(kt_id, Name.Variable, '#pop')],
        'destructuring_assignment': [
            (',', Punctuation),
            ('\\s+', Whitespace),
            (kt_id, Name.Variable),
            ((('(:)(\\s+)(' + kt_id) + ')'), bygroups(Punctuation, Whitespace, Name)),
            ('<', Operator, 'generic'),
            ('\\)', Punctuation, '#pop')],
        'function': [
            ('<', Operator, 'generic'),
            (((('' + kt_id) + '(\\.)') + kt_id), bygroups(Name, Punctuation, Name.Function), '#pop'),
            (kt_id, Name.Function, '#pop')],
        'generic': [
            ('(>)(\\s*)', bygroups(Operator, Whitespace), '#pop'),
            (':', Punctuation),
            ('(reified|out|in)\\b', Keyword),
            (',', Punctuation),
            ('\\s+', Whitespace),
            (kt_id, Name)],
        'modifiers': [('\\w+', Keyword.Declaration), ('\\s+', Whitespace), default('#pop')],
        'string': [('"', String, '#pop'), include('string_common')],
        'multiline_string': [('"""', String, '#pop'), ('"', String), include('string_common')],
        # NOTE(review): the first entry matches the empty string, which looks
        # like a scrubbed escape-sequence pattern — confirm against upstream.
        'string_common': [
            ('', String),
            ('\\\\"', String),
            ('\\\\', String),
            ('\\$\\{', String.Interpol, 'interpolation'),
            ('(\\$)(\\w+)', bygroups(String.Interpol, Name)),
            ('[^\\\\"$]+', String)],
        'interpolation': [
            ('"', String),
            ('\\$\\{', String.Interpol, 'interpolation'),
            ('\\{', Punctuation, 'scope'),
            ('\\}', String.Interpol, '#pop'),
            include('root')],
        'scope': [
            ('\\{', Punctuation, 'scope'),
            ('\\}', Punctuation, '#pop'),
            include('root')]}
def test_invalid_parent(qtmodeltester):
    """The model checker must reject a model whose parent() is inconsistent."""

    class BrokenParentModel(qt_api.QtGui.QStandardItemModel):
        # Deliberately report a bogus parent for the grandchild index.
        def parent(self, index):
            grandchild_index = self.index(0, 0, parent=self.index(0, 0))
            if index == grandchild_index:
                return self.index(0, 0)
            return qt_api.QtCore.QModelIndex()

    model = BrokenParentModel()
    root = qt_api.QtGui.QStandardItem('foo')
    child = qt_api.QtGui.QStandardItem('bar')
    grandchild = qt_api.QtGui.QStandardItem('bar')
    model.setItem(0, 0, root)
    root.setChild(0, child)
    child.setChild(0, grandchild)
    # The checker's consistency pass must trip on the broken parent().
    with pytest.raises(AssertionError):
        qtmodeltester.check(model, force_py=True)
def _extract_patches(x, kernel_size, stride, padding):
if ((padding[0] + padding[1]) > 0):
x = F.pad(x, (padding[1], padding[1], padding[0], padding[0])).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(x.size(0), x.size(1), x.size(2), ((x.size(3) * x.size(4)) * x.size(5)))
return x |
class SawyerHandlePullV2Policy(Policy):
    """Scripted expert policy for the Sawyer 'handle pull' task: line up over
    the handle, descend onto it, then pull straight up."""
    # NOTE(review): the bare name below is a no-op expression; it looks like a
    # decorator (e.g. @assert_fully_parsed on _parse_obs) that was mangled
    # during extraction -- confirm against the original source.
    _fully_parsed
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def _parse_obs(obs):
        # NOTE(review): 'handle_pos' (obs[4:7]) and 'unused_info' (obs[6:])
        # overlap at index 6 -- verify the slicing against the environment's
        # observation layout.
        return {'hand_pos': obs[:3], 'handle_pos': obs[4:7], 'unused_info': obs[6:]}
    def get_action(self, obs):
        """Compute a 4-D action (dx, dy, dz, grab effort) toward the target."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # p is the proportional gain of the position controller.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        # Target slightly offset from the reported handle position.
        pos_handle = (o_d['handle_pos'] + np.array([0, (- 0.04), 0]))
        # Far away in the XY plane: align horizontally first.
        if (np.linalg.norm((pos_curr[:2] - pos_handle[:2])) > 0.02):
            return pos_handle
        # NOTE(review): this branch returns a scalar (z component) while every
        # other branch returns a 3-vector -- looks suspicious; confirm against
        # the upstream policy before relying on it.
        if (abs((pos_curr[2] - pos_handle[2])) > 0.02):
            return pos_handle[2]
        # Gripper is on the handle: pull straight up.
        return (pos_handle + np.array([0.0, 0.0, 0.1]))
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def _grab_effort(o_d):
        # Always grip at full strength.
        return 1.0
class Item():
    """One option of a list-type form control (e.g. an <option> of a SELECT,
    or a single radio button / checkbox in a group)."""
    def __init__(self, control, attrs, index=None):
        """Create the item and register it with its owning `control`.

        :param control: the list control this item belongs to
        :param attrs: dict of HTML attributes; must contain 'value'
        :param index: position of the item within the control, if known
        """
        label = _get_label(attrs)
        # Write through __dict__ directly: __setattr__ below only accepts
        # 'selected' and 'disabled', so normal assignment would raise.
        self.__dict__.update({'name': attrs['value'], '_labels': ((label and [label]) or []), 'attrs': attrs, '_control': control, 'disabled': ('disabled' in attrs), '_selected': False, 'id': attrs.get('id'), '_index': index})
        control.items.append(self)
    def get_labels(self):
        """Return all labels: the item's own plus any <label for=id> matches."""
        res = []
        res.extend(self._labels)
        if self.id:
            res.extend(self._control._form._id_to_labels.get(self.id, ()))
        return res
    def __getattr__(self, name):
        # 'selected' is the only virtual attribute; anything else reaching
        # here is genuinely missing.
        if (name == 'selected'):
            return self._selected
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # Selection is delegated to the control so mutually exclusive groups
        # (radio buttons) stay consistent.
        if (name == 'selected'):
            self._control._set_selected_state(self, value)
        elif (name == 'disabled'):
            self.__dict__['disabled'] = bool(value)
        else:
            # All other attributes are read-only after __init__.
            raise AttributeError(name)
    def __str__(self):
        # '*value' marks a selected item, '(value)' a disabled one.
        res = self.name
        if self.selected:
            res = ('*' + res)
        if self.disabled:
            res = ('(%s)' % res)
        return res
    def __repr__(self):
        # Debug form: name/id first, then the raw HTML attributes.
        attrs = ([('name', self.name), ('id', self.id)] + list(iteritems(self.attrs)))
        return ('<%s %s>' % (self.__class__.__name__, ' '.join([('%s=%r' % (k, v)) for (k, v) in attrs])))
def detect_python_on_windows():
    """Locate a Python 3 interpreter on Windows.

    Returns the command prefix to invoke it (``['python']`` or
    ``['py', '-3']``), or ``None`` when no Python 3 is found.
    """
    # Prefer a plain `python` on PATH, but only if it reports major version 3.
    try:
        # Pass the command as an argument list: a single string without
        # shell=True is only interpreted as a command line on Windows, and
        # list form avoids shell-quoting issues entirely.
        p = subprocess.run(['python', '-c', 'import sys;print(sys.version_info.major)'],
                           capture_output=True)
        # Compare the stripped text instead of int(...): an interpreter that
        # errors out leaves stdout empty and int('') would raise ValueError.
        if p.stdout.decode('utf-8').strip() == '3':
            return ['python']
    except FileNotFoundError:
        pass
    # Fall back to the `py` launcher shipped with python.org installers.
    try:
        # capture_output keeps the probe from printing to the console.
        p = subprocess.run(['py', '-3', '--version'], capture_output=True)
        if p.returncode == 0:
            return ['py', '-3']
    except FileNotFoundError:
        pass
    return None
class IndexedRawTextDataset(FairseqDataset):
    """Loads a raw-text dataset fully into memory, tokenizing every line with
    the given dictionary at load time."""
    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        self.tokens_list = []  # per-line token tensors
        self.lines = []        # original text lines (trailing newline stripped)
        self.sizes = []        # per-line token counts; converted to np.array below
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        # NOTE(review): this instance attribute shadows the `size(index)`
        # method defined below, making the method unreachable on instances.
        self.size = len(self.tokens_list)
    def read_data(self, path, dictionary):
        """Read `path` line by line, storing raw text and encoded tensors."""
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                tokens = dictionary.encode_line(line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)
    def check_index(self, i):
        # Fail early with a clear error instead of relying on list indexing.
        if ((i < 0) or (i >= self.size)):
            raise IndexError('index out of range')
    # NOTE(review): the bare call below looks like a mangled decorator
    # (presumably @lru_cache(maxsize=8)); as written it is a no-op statement.
    _cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]
    def get_original_text(self, i):
        """Return the untokenized text of line `i`."""
        self.check_index(i)
        return self.lines[i]
    def __del__(self):
        # Nothing to release: all data lives in ordinary Python objects.
        pass
    def __len__(self):
        return self.size
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        # NOTE(review): shadowed by the `self.size` int set in __init__.
        return self.sizes[index]
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def exists(path):
        return os.path.exists(path)
class SimulScorer(object):
    """Base class for simultaneous-translation scorers: accumulates hypothesis
    tokens with the step (delay) at which each was emitted, then computes
    quality (BLEU/TER/METEOR) and latency (AL/AP/DAL) metrics."""
    def __init__(self, args):
        self.tokenizer = args.tokenizer
        self.output_dir = args.output
        if (args.output is not None):
            # Result files are only written when an output directory is given.
            self.output_files = {'text': os.path.join(args.output, 'text'), 'delay': os.path.join(args.output, 'delay'), 'scores': os.path.join(args.output, 'scores')}
        else:
            self.output_files = None
        self.eos = DEFAULT_EOS
        self.data = {'tgt': []}  # reference translations; filled by subclasses
        self.reset()
    def get_info(self):
        """Return metadata about the evaluation corpus."""
        return {'num_sentences': len(self)}
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def add_args(parser):
        """Register the scorer's command line arguments."""
        parser.add_argument('--src-file', type=str, required=True, help='Source input file')
        parser.add_argument('--tgt-file', type=str, required=True, help='Target reference file')
        parser.add_argument('--tokenizer', default='13a', choices=['none', '13a'], help='Tokenizer used for sacrebleu')
        parser.add_argument('--output', type=str, default=None, help='Path for output directory')
    def send_src(self, sent_id, *args):
        # Subclasses stream source segments to the agent.
        raise NotImplementedError
    def recv_hyp(self, sent_id, list_of_tokens):
        """Record hypothesis tokens for `sent_id`, stamping each with the
        current step counter (delay) for that sentence."""
        for token in list_of_tokens:
            self.translations[sent_id].append((token, self.steps[sent_id]))
    def reset(self):
        # Per-sentence read counters and (token, delay) hypothesis lists.
        self.steps = defaultdict(int)
        self.translations = defaultdict(list)
    def src_lengths(self):
        # Subclasses report per-sentence source lengths for latency scoring.
        raise NotImplementedError
    def score(self):
        """Compute corpus-level quality and latency scores; optionally write
        text/delay/score files to the output directory (best effort)."""
        translations = []
        delays = []
        for i in range((1 + max(self.translations.keys()))):
            # The final token is dropped from the text -- presumably EOS;
            # confirm against the upstream implementation.
            translations += [' '.join((t[0] for t in self.translations[i][:(- 1)]))]
            delays += [[t[1] for t in self.translations[i]]]
        bleu_score = BLEUScorer(sent_level=False, corpus_level=True, extra_args={'bleu_tokenizer': self.tokenizer}).score(translations, [self.data['tgt']])
        ter_score = TERScorer(sent_level=False, corpus_level=True).score(translations, [self.data['tgt']])
        meteor_score = METEORScorer(sent_level=False, corpus_level=True).score(translations, [self.data['tgt']])
        latency_score = LatencyScorer().score([{'src_len': src_len, 'delays': delay} for (src_len, delay) in zip(self.src_lengths(), delays)], start_from_zero=False)
        scores = {'BLEU': bleu_score[0], 'TER': ter_score[0], 'METEOR': meteor_score[0], 'DAL': latency_score['differentiable_average_lagging'], 'AL': latency_score['average_lagging'], 'AP': latency_score['average_proportion']}
        if (self.output_files is not None):
            try:
                os.makedirs(self.output_dir, exist_ok=True)
                self.write_results_to_file(translations, delays, scores)
            except BaseException as be:
                # Best effort: a failed write must not lose the scores.
                print(f'Failed to write results to {self.output_dir}.')
                print(be)
                print('Skip writing predictions')
        return scores
    def write_results_to_file(self, translations, delays, scores):
        """Write hypothesis text, per-sentence delays (JSON lines) and the
        final score table to their respective output files."""
        if (self.output_files['text'] is not None):
            with open(self.output_files['text'], 'w') as f:
                for line in translations:
                    f.write((line + '\n'))
        if (self.output_files['delay'] is not None):
            with open(self.output_files['delay'], 'w') as f:
                for (i, delay) in enumerate(delays):
                    f.write((json.dumps({'src_len': self.src_lengths()[i], 'delays': delay}) + '\n'))
        with open(self.output_files['scores'], 'w') as f:
            for (key, value) in scores.items():
                f.write(f'''{key}, {value}
''')
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def _load_text_file(cls, file, split=False):
        """Read a text file; optionally whitespace-split each line."""
        with open(file) as f:
            if split:
                return [r.strip().split() for r in f]
            else:
                return [r.strip() for r in f]
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def _load_text_from_json(cls, file):
        """Collect output text fields from an ESPnet-style json file."""
        list_to_return = []
        with open(file) as f:
            content = json.load(f)
            for item in content['utts'].values():
                list_to_return.append(item['output']['text'].strip())
        return list_to_return
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def _load_wav_info_from_json(cls, file):
        """Collect wav path/length entries from an ESPnet-style json file."""
        list_to_return = []
        with open(file) as f:
            content = json.load(f)
            for item in content['utts'].values():
                list_to_return.append({'path': item['input']['path'].strip(), 'length': item['input']['length_ms']})
        return list_to_return
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def _load_wav_info_from_list(cls, file):
        """Read one wav path per line from a plain list file."""
        list_to_return = []
        with open(file) as f:
            for line in f:
                list_to_return.append({'path': line.strip()})
        return list_to_return
    def __len__(self):
        return len(self.data['tgt'])
def shortest_layer_path(start, end, layers):
    """Find the shortest chain of layers leading from any blob in `start`
    to the blob named `end`, via breadth-first search.

    :param start: iterable of starting blob names
    :param end: name of the target blob
    :param layers: layer objects exposing `bottom` (consumed blob names)
        and `top` (produced blob names)
    :return: list of layers forming the shortest path, or None if `end`
        is unreachable
    """
    from collections import deque
    # Map each blob name to the layers that consume it as input.
    links_from = {}
    for layer in layers:
        for bottom_blob in layer.bottom:
            links_from.setdefault(bottom_blob, []).append(layer)
    # deque gives O(1) pops from the left for the BFS frontier.
    queue = deque((blob, []) for blob in start)
    visited = set(start)
    while queue:
        (blob, path) = queue.popleft()
        # Blobs consumed by no layer are dead ends; .get avoids the KeyError
        # the original code raised on such terminal blobs.
        for layer in links_from.get(blob, []):
            for top_blob in layer.top:
                if (top_blob == end):
                    return (path + [layer])
                if (top_blob not in visited):
                    queue.append((top_blob, (path + [layer])))
                    visited.add(top_blob)
    return None
class VerificationRequest(Requirement):
    """A Requirement verified by emailing the user a secret key composed of
    the request id and a random salt ('<id>k<salt>')."""
    __tablename__ = 'verificationrequest'
    __mapper_args__ = {'polymorphic_identity': 'verificationrequest'}
    id = Column(Integer, ForeignKey(Requirement.id, ondelete='CASCADE'), primary_key=True)
    salt = Column(String(10), nullable=False)  # random component of the secret key
    mailer = None
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def by_secret_key(cls, secret_key):
        """Look up the request matching a '<id>k<salt>' secret key; raise
        KeyException when the key is malformed or does not match exactly one
        request with the right salt."""
        try:
            (request_id, salt) = re.match('([0-9]*)k(.*)', secret_key).groups()
        # NOTE(review): bare except also hides unrelated errors; the intended
        # case is AttributeError from a failed match -- consider narrowing.
        except:
            raise KeyException()
        requests = Session.query(cls).filter_by(id=request_id)
        if ((requests.count() == 1) and (requests.one().salt == salt)):
            return requests.one()
        raise KeyException()
    def __init__(self, **kwargs):
        # The salt must exist before the ORM persists the row (nullable=False).
        self.generate_salt()
        super().__init__(**kwargs)
    def generate_salt(self):
        """Assign a new 10-character random salt.

        NOTE(review): uses `random`, not `secrets`; predictable salts weaken
        the secret key -- consider secrets.choice for security-sensitive use.
        """
        alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZqwertyuiopasdfghjklzxcvbnm'
        self.salt = ''.join([random.choice(alphabet) for x in list(range(10))])
    def send_mail(self, destination, subject_config_key, mail_config_key):
        """Send the verification email, substituting request data into the
        configured subject/body templates; wraps SMTP connection failures in
        a DomainException carrying a user-facing message."""
        data = self.get_data_for_substitution()
        config = ExecutionContext.get_context().config
        admin_email = config.accounts.admin_email
        subject = Template(config.get_from_string(subject_config_key)).safe_substitute(data)
        message_text = Template(config.get_from_string(mail_config_key)).safe_substitute(data)
        message = MailMessage(admin_email, [destination], subject, message_text)
        mailer = config.accounts.mailer_class.from_context()
        try:
            mailer.send_message(message)
        except ConnectionError as e:
            # NOTE(review): self.email is not defined in this class --
            # presumably supplied by Requirement or a subclass; confirm.
            message = (_('Could not send registration verification email message to %s: %s.') % (self.email, str(e)))
            user_message = (message + _(' Please contact the site administrator and try again later.'))
            logging.getLogger(__name__).error(message)
            raise DomainException(message=user_message)
    def send_notification(self):
        # Hook for subclasses; the base request sends no notification.
        pass
    def get_data_for_substitution(self):
        """Values made available to the mail templates."""
        return {'email': self.email, 'secret_key': self.as_secret_key()}
    def as_secret_key(self):
        """Serialize as '<id>k<salt>'; flush first so `id` is assigned."""
        Session.flush()
        return ('%sk%s' % (self.id, self.salt))
def get_token_network_registry_by_token_network_address(chain_state: ChainState, token_network_address: TokenNetworkAddress) -> Optional[TokenNetworkRegistryState]:
    """Return the registry state that tracks the given token network address,
    or None when no known registry contains it."""
    registries = chain_state.identifiers_to_tokennetworkregistries.values()
    return next(
        (
            registry
            for registry in registries
            if token_network_address in registry.tokennetworkaddresses_to_tokennetworks
        ),
        None,
    )
def mk_VTranslator(_RTLIRTranslator, _STranslator, _BTranslator):
    """Compose a concrete Verilog translator class from the RTLIR, structural
    and behavioral translator mixins and return it."""
    class _VTranslator(_RTLIRTranslator, _STranslator, _BTranslator):
        def get_pretty(s, namespace, attr, newline=True):
            # Fetch a generated-code fragment, optionally guaranteeing a
            # trailing newline.
            ret = getattr(namespace, attr, '')
            if (newline and (ret and (ret[(- 1)] != '\n'))):
                ret += '\n'
            return ret
        def is_verilog_reserved(s, name):
            return (name in verilog_reserved)
        def set_header(s):
            # header_keywords detects files this pass already stamped.
            s.header_keywords = 'generated by PyMTL SystemVerilog translation pass'
            s.header = '//\n// {name}.v\n//\n// This file is generated by PyMTL SystemVerilog translation pass.\n\n'
        def rtlir_tr_initialize(s):
            s._rtlir_tr_unpacked_q = deque()
            s._placeholder_pass = VerilogPlaceholderPass
            s._mangled_placeholder_top_module_name = ''
            s._included_pickled_files = set()
        def rtlir_tr_src_layout(s, hierarchy):
            """Emit the top-of-file layout: header, BitStruct definitions,
            then the translated component sources."""
            all_struct_names = {x.cls.__name__ for x in hierarchy.decl_type_struct}
            # A field sharing its name with a BitStruct type would make the
            # generated Verilog ambiguous -- reject it up front.
            for struct in hierarchy.decl_type_struct.keys():
                for field_name in struct.get_all_properties().keys():
                    if (field_name in all_struct_names):
                        raise VerilogStructuralTranslationError(struct, f'field {field_name} has the same name as BitStruct type {field_name}!')
            s.set_header()
            name = s._top_module_full_name
            # Stamp the header only once per output file.
            if (s.header_keywords not in hierarchy.component_src):
                ret = s.header.format(**locals())
            else:
                ret = ''
            for (struct_dtype, tplt) in hierarchy.decl_type_struct.items():
                template = '// PyMTL BitStruct {dtype_name} Definition\n{struct_def}'
                dtype_name = struct_dtype.get_name()
                struct_def = (tplt['def'] + '\n')
                # format(**locals()) fills the template from loop locals.
                ret += template.format(**locals())
            ret += hierarchy.component_src
            return ret
        def rtlir_tr_components(s, components):
            return '\n\n'.join(components.values())
        def rtlir_tr_component(s, behavioral, structural):
            """Render one component: pass through its placeholder source when
            present, otherwise assemble a full Verilog module definition."""
            component_name = structural.component_name
            file_info = structural.component_file_info
            full_name = structural.component_full_name
            # Module-name precedence: explicit name > mangled placeholder
            # top name > unique generated name.
            if structural.component_explicit_module_name:
                module_name = structural.component_explicit_module_name
            elif (structural.component_is_top and s._mangled_placeholder_top_module_name):
                module_name = s._mangled_placeholder_top_module_name
            else:
                module_name = structural.component_unique_name
            # Optionally wrap the module in a SYNTHESIS guard.
            if structural.component_no_synthesis:
                no_synth_begin = '`ifndef SYNTHESIS\n'
                no_synth_end = '`endif'
            else:
                no_synth_begin = ''
                no_synth_end = ''
            s._top_module_name = structural.component_name
            s._top_module_full_name = module_name
            if (full_name != module_name):
                optional_full_name = f'''Full name: {full_name}
// '''
            else:
                optional_full_name = ''
            if structural.placeholder_src:
                placeholder_src = structural.placeholder_src
                # Only add the banner when the placeholder lacks one.
                if ('// PyMTL VerilogPlaceholder' not in structural.placeholder_src):
                    template = '// PyMTL VerilogPlaceholder {component_name} Definition\n// {optional_full_name}At {file_info}\n\n{no_synth_begin}{placeholder_src}\n{no_synth_end}'
                else:
                    template = '{placeholder_src}'
                return template.format(**locals())
            else:
                template = '// PyMTL Component {component_name} Definition\n// {optional_full_name}At {file_info}\n\n{no_synth_begin}module {module_name}\n(\n{ports});\n{body}\nendmodule\n{no_synth_end}'
                ports_template = '{port_decls}{ifc_decls}'
                port_decls = s.get_pretty(structural, 'decl_ports', False)
                ifc_decls = s.get_pretty(structural, 'decl_ifcs', False)
                # Join port and interface declarations with proper separators.
                if (port_decls or ifc_decls):
                    if (port_decls and ifc_decls):
                        port_decls += ',\n'
                    ifc_decls += '\n'
                ports = ports_template.format(**locals())
                # Assemble the module body in declaration order.
                const_decls = s.get_pretty(structural, 'decl_consts')
                fvar_decls = s.get_pretty(behavioral, 'decl_freevars')
                wire_decls = s.get_pretty(structural, 'decl_wires')
                tmpvar_decls = s.get_pretty(behavioral, 'decl_tmpvars')
                subcomp_decls = s.get_pretty(structural, 'decl_subcomps')
                upblk_decls = s.get_pretty(behavioral, 'upblk_decls')
                body = (((((const_decls + fvar_decls) + wire_decls) + subcomp_decls) + tmpvar_decls) + upblk_decls)
                connections = s.get_pretty(structural, 'connections')
                # Blank line between declarations and connections.
                if ((body and connections) or ((not body) and connections)):
                    connections = ('\n' + connections)
                body += connections
                return template.format(**locals())
    return _VTranslator
class Comment(Object):
    """A comment entry with optional effective-time window and author list;
    field declarations drive the XML (de)serialization."""
    id = Counter.T(optional=True, xmlstyle='attribute')  # serialized as an XML attribute
    value = Unicode.T(xmltagname='Value')  # the comment text itself
    begin_effective_time = DummyAwareOptionalTimestamp.T(optional=True, xmltagname='BeginEffectiveTime')
    end_effective_time = DummyAwareOptionalTimestamp.T(optional=True, xmltagname='EndEffectiveTime')
    author_list = List.T(Person.T(xmltagname='Author'))
def downgrade(op, tables, tester):
    """Reverse the disable-reason migration: strip the trigger columns, drop
    the disablereason table, and purge the related log-entry kind."""
    trigger_table = 'repositorybuildtrigger'
    # The foreign key and index must be removed before the column they cover.
    op.drop_constraint(op.f('fk_repositorybuildtrigger_disabled_reason_id_disablereason'), trigger_table, type_='foreignkey')
    op.drop_index('repositorybuildtrigger_disabled_reason_id', table_name=trigger_table)
    for column_name in ('enabled', 'disabled_reason_id'):
        op.drop_column(trigger_table, column_name)
    op.drop_table('disablereason')
    # Remove the log-entry kind introduced by the corresponding upgrade.
    kind_name_col = tables.logentrykind.c.name
    op.execute(tables.logentrykind.delete().where(kind_name_col == op.inline_literal('toggle_repo_trigger')))
# NOTE(review): the bare call below looks like a mangled registration
# decorator (presumably @register_task('legacy_masked_lm')) -- confirm.
_task('legacy_masked_lm')
class LegacyMaskedLMTask(FairseqTask):
    """Fairseq task for legacy BERT-style masked language model training."""
    # NOTE(review): missing `self` -- presumably a mangled @staticmethod.
    def add_args(parser):
        """Add task-specific command line arguments."""
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
        parser.add_argument('--break-mode', default='doc', type=str, help='mode for breaking sentence')
        parser.add_argument('--shuffle-dataset', action='store_true', default=False)
    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def load_dictionary(cls, filename):
        return BertDictionary.load(filename)
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8):
        """Build a BertDictionary from raw text files."""
        d = BertDictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d
    # NOTE(review): accessor style suggests a mangled @property decorator.
    def target_dictionary(self):
        return self.dictionary
    # NOTE(review): takes `cls` -- presumably a mangled @classmethod.
    def setup_task(cls, args, **kwargs):
        """Instantiate the task, loading the dictionary from the first data dir."""
        paths = args.data.split(':')
        assert (len(paths) > 0)
        dictionary = BertDictionary.load(os.path.join(paths[0], 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)
    def load_dataset(self, split, epoch=0, combine=False):
        """Load a dataset split; with combine=True, concatenate sharded
        files (split, split1, split2, ...) into a single dataset."""
        loaded_datasets = []
        paths = self.args.data.split(':')
        assert (len(paths) > 0)
        # Round-robin over data directories across epochs.
        data_path = paths[(epoch % len(paths))]
        print('| data_path', data_path)
        for k in itertools.count():
            split_k = (split + (str(k) if (k > 0) else ''))
            path = os.path.join(data_path, split_k)
            ds = indexed_dataset.make_dataset(path, impl=self.args.dataset_impl, fix_lua_indexing=True, dictionary=self.dictionary)
            if (ds is None):
                if (k > 0):
                    # No more shards to load.
                    break
                else:
                    raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
            # Seed depends on the shard index so pairing is reproducible.
            with data_utils.numpy_seed((self.seed + k)):
                loaded_datasets.append(BlockPairDataset(ds, self.dictionary, ds.sizes, self.args.tokens_per_sample, break_mode=self.args.break_mode, doc_break_size=1))
            print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)])))
            if (not combine):
                break
        if (len(loaded_datasets) == 1):
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
        self.datasets[split] = MaskedLMDataset(dataset=dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.cls(), sep_token_idx=self.dictionary.sep(), shuffle=self.args.shuffle_dataset, seed=self.seed)
# NOTE(review): the two `.parametrize(...)` expressions below appear to be
# mangled `@pytest.mark.parametrize` decorators that lost their prefix during
# extraction; as written they are not valid module-level syntax.
.parametrize('map_variables', [True, False])
.parametrize('endpoint,function,params,json_response', [('live/radiation_and_weather', pvlib.iotools.get_solcast_live, dict(api_key='1234', latitude=(- 33.856784), longitude=151.215297, output_parameters='dni,ghi'), {'estimated_actuals': [{'dni': 836, 'ghi': 561, 'period_end': '2023-09-18T05:00:00.0000000Z', 'period': 'PT30M'}, {'dni': 866, 'ghi': 643, 'period_end': '2023-09-18T04:30:00.0000000Z', 'period': 'PT30M'}, {'dni': 890, 'ghi': 713, 'period_end': '2023-09-18T04:00:00.0000000Z', 'period': 'PT30M'}, {'dni': 909, 'ghi': 768, 'period_end': '2023-09-18T03:30:00.0000000Z', 'period': 'PT30M'}]})])
def test_get_solcast_live(requests_mock, endpoint, function, params, json_response, map_variables):
    # NOTE(review): the f-string below was truncated during extraction -- the
    # mocked Solcast URL literal is missing and the line is unterminated;
    # restore it from the original test source.
    mock_url = f"
    requests_mock.get(mock_url, json=json_response)
    if map_variables:
        # Mapped output must equal _solcast2pvlib applied to the raw frame.
        pd.testing.assert_frame_equal(function(**params, map_variables=map_variables)[0], pvlib.iotools.solcast._solcast2pvlib(pd.DataFrame.from_dict(json_response[list(json_response.keys())[0]])))
    else:
        # Unmapped output must equal the raw JSON payload as a DataFrame.
        pd.testing.assert_frame_equal(function(**params, map_variables=map_variables)[0], pd.DataFrame.from_dict(json_response[list(json_response.keys())[0]]))
class ConfigureRequest(rq.Event):
    """X11 ConfigureRequest event: a client asked to change a window's
    geometry or stacking; delivered to the substructure-redirect listener
    (typically the window manager)."""
    _code = X.ConfigureRequest
    # Wire layout of the event, in protocol field order.
    _fields = rq.Struct(rq.Card8('type'), rq.Card8('stack_mode'), rq.Card16('sequence_number'), rq.Window('parent'), rq.Window('window'), rq.Window('sibling', (X.NONE,)), rq.Int16('x'), rq.Int16('y'), rq.Card16('width'), rq.Card16('height'), rq.Card16('border_width'), rq.Card16('value_mask'), rq.Pad(4))
class JobManager():
    """Runs coroutines as named, cancellable jobs on a shared event loop,
    bounding concurrency with a semaphore."""
    def __init__(self, num_threads: int=2):
        # Maps job_group -> {job_id -> cancellation Event}.
        self._jobs: defaultdict[(str, Dict[(str, Event)])] = defaultdict(dict)
        self._loop = asyncio.get_event_loop()
        # NOTE(review): this assignment is dead -- both branches of the
        # version check below reassign self._sem.
        self._sem = Semaphore(num_threads)
        if (sys.version_info < (3, 8)):
            # Python < 3.8 needs a custom child watcher for subprocesses
            # spawned off the main thread.
            from .watcher import ThreadedChildWatcher
            logger.info('Using local threaded child watcher')
            asyncio.set_child_watcher(ThreadedChildWatcher())
            # NOTE(review): Semaphore's `loop` argument was removed in 3.10;
            # this branch only runs on < 3.8, where it is still accepted.
            self._sem = Semaphore(num_threads, loop=self._loop)
        else:
            self._sem = Semaphore(num_threads)
    # NOTE(review): accessor style suggests a mangled @property decorator.
    def semaphore(self) -> Semaphore:
        return self._sem
    def run(self, cor: Coroutine, job_group: str):
        """Schedule `cor` as a new job in `job_group` and return immediately."""
        job_id = str(uuid4())
        cancel_event = Event()
        wrapped_cor = self._handle_coroutine(cor, job_group=job_group, job_id=job_id, cancel_event=cancel_event)
        # Jobs may be submitted from any thread; hand off to the loop safely.
        asyncio.run_coroutine_threadsafe(wrapped_cor, loop=self._loop)
        self._jobs[job_group][job_id] = cancel_event
    def stop_jobs(self, group: str) -> bool:
        """Signal cancellation to every job in `group`; True if any existed.

        NOTE(review): `logger.finfo` looks like a project-specific helper that
        interpolates '{group}' lazily from caller context -- these are not
        f-strings; confirm against the logger implementation.
        """
        logger.finfo('Stopping jobs in group {group}')
        cancel_events = self._jobs[group]
        if (not cancel_events):
            logger.finfo('No jobs found for group {group}')
            return False
        for cancel_event in cancel_events.values():
            # Events must be set from the loop's own thread.
            self._loop.call_soon_threadsafe(cancel_event.set)
        return True
    async def _handle_coroutine(self, cor: Coroutine, job_group: str, job_id: str, cancel_event: Event):
        """Run `cor` until it finishes or `cancel_event` fires, then always
        deregister the job from its group."""
        try:
            logger.fdebug('Starting job with group {job_group}')
            run_task = asyncio.create_task(cor)
            cancel_task = asyncio.create_task(cancel_event.wait())
            try:
                # Race the job against its cancellation event.
                (done, _) = (await asyncio.wait([run_task, cancel_task], return_when=asyncio.FIRST_COMPLETED))
            except CancelledError:
                logger.exception(f'Task was cancelled prematurely {run_task}')
            else:
                if (run_task in done):
                    # Job finished on its own; surface any exception it raised.
                    e = run_task.exception()
                    if e:
                        logger.warn(f'Exception throw in job: {e}')
                        logger.warn('\n'.join(traceback.format_exception(type(e), e, e.__traceback__)))
                    logger.fdebug('Finished job with group {job_group}')
                else:
                    # Cancellation won the race; tear the job down.
                    run_task.cancel()
                    logger.fdebug('Cancelled running job with group {job_group}')
        except CancelledError:
            logger.exception('Job runner cancelled')
            raise
        except Exception:
            logger.exception('Error running job')
        finally:
            # Always deregister, whatever the outcome.
            self._jobs[job_group].pop(job_id)
# NOTE(review): the `.skipif(...)` expression below appears to be a mangled
# `@pytest.mark.skipif` decorator that lost its prefix during extraction; as
# written it is not valid module-level syntax.
.skipif((sys.version_info < (3, 3)), reason='Mock class not available')
def test_v3_custom_session():
    """A CASClient must use the caller-supplied session for ticket checks."""
    from unittest.mock import Mock
    response = Mock()
    response.content = SUCCESS_RESPONSE
    session = Mock()
    session.get = Mock(return_value=response)
    # NOTE(review): the server_url/service_url string literals were truncated
    # during extraction (the URLs are missing), leaving this call
    # syntactically broken -- restore them from the original source.
    client = cas.CASClient(version='3', server_url=' service_url=' session=session)
    (user, attributes, pgtiou) = client.verify_ticket('ABC123')
    assert (user == '')
    assert (not attributes)
    assert (not pgtiou)
class CoverGridContainer(ScrolledWindow):
    """Scrolled viewport around a cover flow box, with keyboard-focus routing
    and scroll-into-view behavior."""
    def __init__(self, fb):
        super().__init__(hscrollbar_policy=Gtk.PolicyType.NEVER, vscrollbar_policy=Gtk.PolicyType.AUTOMATIC, shadow_type=Gtk.ShadowType.IN)
        self._fb = fb
        # Share our adjustments so the flow box scrolls with the window.
        fb.set_hadjustment(self.props.hadjustment)
        fb.set_vadjustment(self.props.vadjustment)
        self.add(fb)
    def scroll_up(self):
        """Jump back to the very top of the grid."""
        va = self.props.vadjustment
        va.props.value = va.props.lower
    def scroll_to_child(self, child):
        """Scroll just enough to make `child` fully visible; deferred to an
        idle callback so allocations are final when we measure."""
        def scroll():
            va = self.props.vadjustment
            if (va is None):
                return
            v = va.props.value
            coords = child.translate_coordinates(self, 0, v)
            if (coords is None):
                # Child not realized/mapped yet; nothing to do.
                return
            (x, y) = coords
            h = child.get_allocation().height
            p = va.props.page_size
            if (y < v):
                # Child is above the viewport: align its top edge.
                va.props.value = y
            elif ((y + h) > (v + p)):
                # Child is below the viewport: align its bottom edge.
                va.props.value = ((y - p) + h)
        GLib.idle_add(scroll, priority=GLib.PRIORITY_LOW)
    def do_focus(self, direction):
        """Route arrow keys into the flow box; Tab focuses the selection or
        leaves the container."""
        is_tab = ((direction == Gtk.DirectionType.TAB_FORWARD) or (direction == Gtk.DirectionType.TAB_BACKWARD))
        if (not is_tab):
            self._fb.child_focus(direction)
            return True
        if self.get_focus_child():
            # Tab moves focus out of the container entirely.
            return False
        children = self._fb.get_selected_children()
        if children:
            children[0].grab_focus()
        else:
            self._fb.child_focus(direction)
        return True
class AdditionalSkipNamesTest(fake_filesystem_unittest.TestCase):
    """Modules listed in `additional_skip_names` must keep using the real
    filesystem while everything else sees the fake one."""

    def setUp(self):
        self.setUpPyfakefs(additional_skip_names=['pyfakefs.tests.import_as_example'])

    def _assert_fake_file_hidden(self, checker):
        # Create 'foo' only in the fake filesystem; the skipped module must
        # not be able to see it through `checker`.
        self.fs.create_file('foo')
        self.assertFalse(checker('foo'))

    def test_path_exists(self):
        self.assertTrue(pyfakefs.tests.import_as_example.exists_this_file())

    def test_fake_path_does_not_exist1(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists1)

    def test_fake_path_does_not_exist2(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists2)

    def test_fake_path_does_not_exist3(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists3)

    def test_fake_path_does_not_exist4(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists4)

    def test_fake_path_does_not_exist5(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists5)

    def test_fake_path_does_not_exist6(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists6)

    def test_fake_path_does_not_exist7(self):
        self._assert_fake_file_hidden(pyfakefs.tests.import_as_example.check_if_exists7)

    def test_open_succeeds(self):
        pyfakefs.tests.import_as_example.open_this_file()

    def test_path_succeeds(self):
        pyfakefs.tests.import_as_example.return_this_file_path()
class _AnnotationExtractor():
    """Pull type annotations out of a callable's signature, tolerating
    callables whose signature cannot be introspected."""
    __slots__ = ['sig']

    def __init__(self, callable):
        try:
            self.sig = inspect.signature(callable)
        except (ValueError, TypeError):
            # Builtins and some C callables expose no signature.
            self.sig = None

    def get_first_param_type(self):
        """Annotation of the first parameter, or None when unavailable."""
        if not self.sig:
            return None
        params = list(self.sig.parameters.values())
        if not params:
            return None
        first = params[0]
        if first.annotation is inspect.Parameter.empty:
            return None
        return first.annotation

    def get_return_type(self):
        """Return annotation, or None when unavailable."""
        if not self.sig:
            return None
        ret = self.sig.return_annotation
        if ret is inspect.Signature.empty:
            return None
        return ret
# NOTE(review): the `.parametrize(...)` expression below appears to be a
# mangled `@pytest.mark.parametrize` decorator whose prefix was lost during
# extraction; as written it is not valid module-level syntax.
.parametrize('obj, raising, exc_reason, exc_str', [(QtObject(valid=True, null=True), False, None, None), (QtObject(valid=True, null=False), False, None, None), (QtObject(valid=False, null=True), True, None, '<QtObject> is not valid'), (QtObject(valid=False, null=False), True, None, '<QtObject> is not valid'), (QtObject(valid=False, null=True, error='Test'), True, 'Test', '<QtObject> is not valid: Test')])
def test_ensure_valid(obj, raising, exc_reason, exc_str):
    # Invalid objects must raise QtValueError carrying the reason/message;
    # valid objects must pass silently.
    if raising:
        with pytest.raises(qtutils.QtValueError) as excinfo:
            qtutils.ensure_valid(obj)
        assert (excinfo.value.reason == exc_reason)
        assert (str(excinfo.value) == exc_str)
    else:
        qtutils.ensure_valid(obj)
class CEGCN(nn.Module):
    """CNN-Enhanced GCN: fuses a pixel-level CNN branch with a
    superpixel-level GCN branch for per-pixel classification."""
    def __init__(self, height: int, width: int, changel: int, class_count: int, Q: torch.Tensor, A: torch.Tensor, model='normal'):
        # Q: (H*W, num_superpixels) pixel-to-superpixel assignment matrix.
        # A: superpixel adjacency matrix consumed by the GCN layers.
        # model: 'normal' uses plain GCN propagation; anything else selects
        # the 'smoothed' variant in forward().
        super(CEGCN, self).__init__()
        self.class_count = class_count
        self.channel = changel
        self.height = height
        self.width = width
        self.Q = Q
        self.A = A
        self.model = model
        # Column-normalize Q so each superpixel averages its member pixels.
        self.norm_col_Q = (Q / torch.sum(Q, 0, keepdim=True))
        layers_count = 2
        # 1x1-conv "denoising" stack mapping raw channels to 128 features.
        self.CNN_denoise = nn.Sequential()
        for i in range(layers_count):
            if (i == 0):
                self.CNN_denoise.add_module(('CNN_denoise_BN' + str(i)), nn.BatchNorm2d(self.channel))
                self.CNN_denoise.add_module(('CNN_denoise_Conv' + str(i)), nn.Conv2d(self.channel, 128, kernel_size=(1, 1)))
                self.CNN_denoise.add_module(('CNN_denoise_Act' + str(i)), nn.LeakyReLU())
            else:
                self.CNN_denoise.add_module(('CNN_denoise_BN' + str(i)), nn.BatchNorm2d(128))
                self.CNN_denoise.add_module(('CNN_denoise_Conv' + str(i)), nn.Conv2d(128, 128, kernel_size=(1, 1)))
                self.CNN_denoise.add_module(('CNN_denoise_Act' + str(i)), nn.LeakyReLU())
        # Pixel-level branch: spectral-spatial convolutions, 128 -> 64.
        self.CNN_Branch = nn.Sequential()
        for i in range(layers_count):
            if (i < (layers_count - 1)):
                self.CNN_Branch.add_module(('CNN_Branch' + str(i)), SSConv(128, 128, kernel_size=5))
            else:
                self.CNN_Branch.add_module(('CNN_Branch' + str(i)), SSConv(128, 64, kernel_size=5))
        # Superpixel-level branch: graph convolutions, 128 -> 64.
        self.GCN_Branch = nn.Sequential()
        for i in range(layers_count):
            if (i < (layers_count - 1)):
                self.GCN_Branch.add_module(('GCN_Branch' + str(i)), GCNLayer(128, 128, self.A))
            else:
                self.GCN_Branch.add_module(('GCN_Branch' + str(i)), GCNLayer(128, 64, self.A))
        # Classifier over the concatenated 64 + 64 branch features.
        self.Softmax_linear = nn.Sequential(nn.Linear(128, self.class_count))
    def forward(self, x: torch.Tensor):
        """Classify every pixel of an (H, W, C) image; returns (H*W, classes)
        softmax probabilities."""
        (h, w, c) = x.shape
        # The denoiser expects NCHW, so permute and add a batch dimension.
        noise = self.CNN_denoise(torch.unsqueeze(x.permute([2, 0, 1]), 0))
        noise = torch.squeeze(noise, 0).permute([1, 2, 0])
        clean_x = noise
        clean_x_flatten = clean_x.reshape([(h * w), (- 1)])
        # Aggregate pixel features into superpixel features via normalized Q.
        superpixels_flatten = torch.mm(self.norm_col_Q.t(), clean_x_flatten)
        hx = clean_x
        CNN_result = self.CNN_Branch(torch.unsqueeze(hx.permute([2, 0, 1]), 0))
        CNN_result = torch.squeeze(CNN_result, 0).permute([1, 2, 0]).reshape([(h * w), (- 1)])
        H = superpixels_flatten
        if (self.model == 'normal'):
            for i in range(len(self.GCN_Branch)):
                (H, _) = self.GCN_Branch[i](H)
        else:
            for i in range(len(self.GCN_Branch)):
                (H, _) = self.GCN_Branch[i](H, model='smoothed')
        # Scatter superpixel features back to their member pixels.
        GCN_result = torch.matmul(self.Q, H)
        Y = torch.cat([GCN_result, CNN_result], dim=(- 1))
        Y = self.Softmax_linear(Y)
        Y = F.softmax(Y, (- 1))
        return Y
class Command(BaseCommand):
    """Django management command that boots the beatserver scheduler on a
    channel layer."""
    leave_locale_alone = True
    server_class = BeatServer
    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument('--layer', action='store', dest='layer', default=DEFAULT_CHANNEL_LAYER, help='Channel layer alias to use, if not the default.')
    def handle(self, *args, **options):
        """Resolve the channel layer, load the project's beat config, and run
        the beat server until interrupted."""
        # '--layer' has a default, so this branch is normally taken.
        if ('layer' in options):
            self.channel_layer = get_channel_layer(options['layer'])
        else:
            self.channel_layer = get_channel_layer()
        if (self.channel_layer is None):
            raise CommandError('You do not have any CHANNEL_LAYERS configured.')
        # Imported lazily so Django settings are configured first.
        from beatserver.parser import Parser
        # The beat config lives in <project>.beatconfig next to settings.
        project_path = settings.SETTINGS_MODULE.replace('.settings', '')
        beat_config = Parser((project_path + '.beatconfig')).get_beat_config()
        logger.debug('[CONFIG] %s', beat_config)
        logger.info('Starting beatserver...')
        server = self.server_class(application=get_default_application(), channel_layer=self.channel_layer, beat_config=beat_config)
        server.run()
def get_loss_landscape(model, n_ff, dataset, bases=None, cutoffs=(0.0, 0.9), bins=np.linspace(0.0, 1.0, 11), verbose=False, period=10, gpu=True, x_min=(- 1.0), x_max=1.0, n_x=11, y_min=(- 1.0), y_max=1.0, n_y=11):
    """Evaluate `model` over an (n_x, n_y) grid of weight-space offsets along
    two basis directions, returning a dict mapping (x_ratio, y_ratio) to the
    metrics produced by tests.test.

    NOTE(review): the `bins` default is evaluated once at import time; it is
    only read here, so the shared array is harmless.
    """
    model = (model.cuda() if gpu else model.cpu())
    # Deep-copy so grid evaluation never mutates the caller's model.
    model = copy.deepcopy(model)
    ws0 = copy.deepcopy(model.state_dict())
    bases = (create_bases(model, gpu) if (bases is None) else bases)
    xs = np.linspace(x_min, x_max, n_x)
    ys = np.linspace(y_min, y_max, n_y)
    # (n_x, n_y, 2) array of (x, y) displacement ratios.
    ratio_grid = np.stack(np.meshgrid(xs, ys), axis=0).transpose((1, 2, 0))
    metrics_grid = {}
    for ratio in ratio_grid.reshape([(- 1), 2]):
        ws = copy.deepcopy(ws0)
        # Scale each basis direction by its ratio, then add to base weights.
        gs = [{k: (r * bs[k]) for k in bs} for (r, bs) in zip(ratio, bases)]
        gs = {k: (torch.sum(torch.stack([g[k] for g in gs]), dim=0) + ws[k]) for k in gs[0]}
        model.load_state_dict(gs)
        print('Grid: ', ratio, end=', ')
        (*metrics, cal_diag) = tests.test(model, n_ff, dataset, cutoffs=cutoffs, bins=bins, verbose=verbose, period=period, gpu=gpu)
        metrics_grid[tuple(ratio)] = metrics
    return metrics_grid
def get_arguments():
    """Build the argument parser for domain-adaptation (DA) training and
    parse the command line.

    :return: argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser(description='Code for domain adaptation (DA) training')
    # Flag specs kept in declaration order so --help output is unchanged.
    flag_specs = [
        ('--cfg', dict(type=str, default=None, help='optional config file')),
        ('--random-train', dict(action='store_true', help='not fixing random seed.')),
        ('--tensorboard', dict(action='store_true', help='visualize training loss with tensorboardX.')),
        ('--viz-every-iter', dict(type=int, default=None, help='visualize results.')),
        ('--exp-suffix', dict(type=str, default=None, help='optional experiment suffix')),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
class HighResolutionNet(nn.Module):
    """HRNet-style backbone that keeps several feature resolutions in
    parallel across four stages and fuses them.

    ``forward`` returns a dict with one entry per output branch
    (``layer1`` .. ``layerN``) plus a flattened ``concat`` vector built by
    subsampling the branches to a common resolution, concatenating them,
    and passing the result through extra Bottleneck conv layers.
    ``cfg['use_old_impl']`` selects a legacy aggregation path that skips
    stage 4 in ``forward``.
    """

    def __init__(self, cfg, **kwargs):
        """Build the network.

        Args:
            cfg: mapping holding sub-configs 'stage1'..'stage4' (each with
                num_modules / num_branches / num_blocks / num_channels /
                block / fuse_method), a 'use_old_impl' flag, and a
                'pretrained_layers' list consumed by ``load_weights``.
        """
        self.inplanes = 64
        super(HighResolutionNet, self).__init__()
        use_old_impl = cfg.get('use_old_impl')
        self.use_old_impl = use_old_impl
        # Stem: two stride-2 3x3 convs, so stage 1 sees 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: a single-branch residual layer.
        self.stage1_cfg = cfg.get('stage1', {})
        num_channels = self.stage1_cfg['num_channels'][0]
        block = blocks_dict[self.stage1_cfg['block']]
        num_blocks = self.stage1_cfg['num_blocks'][0]
        self.layer1 = self._make_layer(block, num_channels, num_blocks)
        stage1_out_channel = (block.expansion * num_channels)
        # Stage 2: first multi-branch stage; transition1 adapts channel
        # counts and creates the new lower-resolution branch.
        self.stage2_cfg = cfg.get('stage2', {})
        num_channels = self.stage2_cfg.get('num_channels', (32, 64))
        block = blocks_dict[self.stage2_cfg.get('block')]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        stage2_num_channels = num_channels
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        # Stage 3.
        self.stage3_cfg = cfg.get('stage3')
        num_channels = self.stage3_cfg['num_channels']
        block = blocks_dict[self.stage3_cfg['block']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        stage3_num_channels = num_channels
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        # Stage 4: with the old implementation only the last module's
        # highest-resolution output is kept (multi_scale_output=False).
        self.stage4_cfg = cfg.get('stage4')
        num_channels = self.stage4_cfg['num_channels']
        block = blocks_dict[self.stage4_cfg['block']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        stage_4_out_channels = num_channels
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=(not self.use_old_impl))
        stage4_num_channels = num_channels
        self.output_channels_dim = pre_stage_channels
        self.pretrained_layers = cfg['pretrained_layers']
        self.init_weights()
        self.avg_pooling = nn.AdaptiveAvgPool2d(1)
        if use_old_impl:
            # Channel count of the concatenation used by the legacy path
            # in forward (branches 1..3 of the stage-4 inputs).
            in_dims = ((((2 ** 2) * stage2_num_channels[(- 1)]) + ((2 ** 1) * stage3_num_channels[(- 1)])) + stage_4_out_channels[(- 1)])
        else:
            # NOTE(review): hard-coded for the default channel config;
            # presumably 4 branches of 384 channels after subsampling —
            # confirm against the stage configs actually used.
            in_dims = (4 * 384)
        # Strided convs that bring each branch down to the lowest
        # resolution before concatenation in forward.
        self.subsample_4 = self._make_subsample_layer(in_channels=stage4_num_channels[0], num_layers=3)
        self.subsample_3 = self._make_subsample_layer(in_channels=stage2_num_channels[(- 1)], num_layers=2)
        self.subsample_2 = self._make_subsample_layer(in_channels=stage3_num_channels[(- 1)], num_layers=1)
        self.conv_layers = self._make_conv_layer(in_channels=in_dims, num_layers=5)

    def get_output_dim(self):
        """Return a dict of output names to channel dimensions.

        Includes one entry per branch ('layerN'), a matching
        'layerN_avg_pooling' alias with the same dim, and the fixed
        2048-dim 'concat' feature.
        """
        base_output = {f'layer{(idx + 1)}': val for (idx, val) in enumerate(self.output_channels_dim)}
        output = base_output.copy()
        for key in base_output:
            output[f'{key}_avg_pooling'] = output[key]
        output['concat'] = 2048
        return output

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when their channel count
        changes (else ``None`` = identity); each extra new branch is built
        from the previous stage's last branch via stride-2 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i]), nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                # One stride-2 conv per resolution step down; only the
                # final conv switches to the target channel count.
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), nn.BatchNorm2d(outchannels), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; downsample the identity path
        when stride or channel count changes. Updates ``self.inplanes``."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_layer(self, in_channels=2048, num_layers=3, num_filters=2048, stride=1):
        """Stack Bottleneck blocks at constant resolution; each gets a 1x1
        conv downsample so its residual path matches ``num_filters``."""
        layers = []
        for i in range(num_layers):
            downsample = nn.Conv2d(in_channels, num_filters, stride=1, kernel_size=1, bias=False)
            layers.append(Bottleneck(in_channels, (num_filters // 4), downsample=downsample))
            in_channels = num_filters
        return nn.Sequential(*layers)

    def _make_subsample_layer(self, in_channels=96, num_layers=3, stride=2):
        """Stack stride-2 3x3 convs (each doubling channels) with BN+ReLU;
        halves spatial resolution ``num_layers`` times."""
        layers = []
        for i in range(num_layers):
            layers.append(nn.Conv2d(in_channels=in_channels, out_channels=(2 * in_channels), kernel_size=3, stride=stride, padding=1))
            in_channels = (2 * in_channels)
            layers.append(nn.BatchNorm2d(in_channels, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True, log=False):
        """Build one stage as a chain of HighResolutionModules.

        Returns (stage module, output channel counts per branch). When
        ``multi_scale_output`` is False only the final module collapses to
        a single output.
        """
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = blocks_dict[layer_config['block']]
        fuse_method = layer_config['fuse_method']
        modules = []
        for i in range(num_modules):
            # Only the last module may drop multi-scale output.
            if ((not multi_scale_output) and (i == (num_modules - 1))):
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            modules[(- 1)].log = log
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def forward(self, x):
        """Run the backbone.

        Returns a dict: 'layerN' entries are the branch outputs of the
        last executed stage, and 'concat' is the pooled/flattened feature
        from the concatenated, subsampled branches.
        """
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Stage 2: fan the single stream out through transition1.
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        # Stage 3.
        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if (self.transition2[i] is not None):
                # New branches are derived from the previous stage's last
                # (lowest-resolution) output.
                if (i < self.stage2_cfg['num_branches']):
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        # Stage 4 inputs.
        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if (self.transition3[i] is not None):
                if (i < self.stage3_cfg['num_branches']):
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        # Legacy path skips stage 4 entirely: y_list (and thus the
        # 'layerN' outputs below) stay at the stage-3 results.
        if (not self.use_old_impl):
            y_list = self.stage4(x_list)
        output = {}
        for (idx, x) in enumerate(y_list):
            output[f'layer{(idx + 1)}'] = x
        feat_list = []
        if self.use_old_impl:
            # Aggregate stage-4 *inputs* (branches 1..3) at matched
            # resolution.
            x3 = self.subsample_3(x_list[1])
            x2 = self.subsample_2(x_list[2])
            x1 = x_list[3]
            feat_list = [x3, x2, x1]
        else:
            # Aggregate all four stage-4 outputs.
            x4 = self.subsample_4(y_list[0])
            x3 = self.subsample_3(y_list[1])
            x2 = self.subsample_2(y_list[2])
            x1 = y_list[3]
            feat_list = [x4, x3, x2, x1]
        xf = self.conv_layers(torch.cat(feat_list, dim=1))
        # Global average pool over spatial dims, then flatten.
        xf = xf.mean(dim=(2, 3))
        xf = xf.view(xf.size(0), (- 1))
        output['concat'] = xf
        return output

    def init_weights(self):
        """Initialize convs with N(0, 0.001) weights and zero bias, and
        BatchNorm with unit weight and zero bias."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
                # Convs are created with bias=False in most places; only
                # zero the bias when the parameter actually exists.
                for (name, _) in m.named_parameters():
                    if (name in ['bias']):
                        nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                for (name, _) in m.named_parameters():
                    if (name in ['bias']):
                        nn.init.constant_(m.bias, 0)

    def load_weights(self, pretrained=''):
        """Load pretrained weights from the path *pretrained*.

        Environment variables in the path are expanded. Only entries whose
        top-level module name appears in ``self.pretrained_layers`` are
        loaded (all of them when that list starts with '*'). Loading is
        non-strict. Raises ValueError when a non-empty path does not
        exist; an empty path is a silent no-op.
        """
        pretrained = osp.expandvars(pretrained)
        if osp.isfile(pretrained):
            pretrained_state_dict = torch.load(pretrained, map_location=torch.device('cpu'))
            need_init_state_dict = {}
            for (name, m) in pretrained_state_dict.items():
                if ((name.split('.')[0] in self.pretrained_layers) or (self.pretrained_layers[0] == '*')):
                    need_init_state_dict[name] = m
            (missing, unexpected) = self.load_state_dict(need_init_state_dict, strict=False)
        elif pretrained:
            raise ValueError('{} is not exist!'.format(pretrained))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.