function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def test_region(self):
    """Filtering by region PK and by region slug each match both regions."""
    region_a, region_b = Region.objects.all()[:2]
    for field, values in (
        ('region_id', [region_a.pk, region_b.pk]),
        ('region', [region_a.slug, region_b.slug]),
    ):
        self.assertEqual(self.filterset({field: values}, self.queryset).qs.count(), 2)
12158,
2099,
12158,
303,
1456755346
] |
def test_power_panel_id(self):
    """Filtering on the PKs of two power panels yields exactly those two."""
    first, second = PowerPanel.objects.all()[:2]
    query = {'power_panel_id': [first.pk, second.pk]}
    self.assertEqual(self.filterset(query, self.queryset).qs.count(), 2)
12158,
2099,
12158,
303,
1456755346
] |
def test_cabled(self):
    """'cabled=true' matches two objects; 'cabled=false' matches one."""
    for flag, expected in (('true', 2), ('false', 1)):
        qs = self.filterset({'cabled': flag}, self.queryset).qs
        self.assertEqual(qs.count(), expected)
12158,
2099,
12158,
303,
1456755346
] |
def parse_requirements(self, network_configuration=None):
    # type: (Optional[NetworkConfiguration]) -> Iterable[ParsedRequirement]
    """Parse requirements from inline strings and from requirement files.

    Constraint entries found inside requirement files are dropped; only
    real requirements are returned.
    """
    results = []  # type: List[ParsedRequirement]
    if self.requirements:
        results.extend(parse_requirement_strings(self.requirements))
    if self.requirement_files:
        fetcher = URLFetcher(network_configuration=network_configuration)
        for req_file in self.requirement_files:
            for item in parse_requirement_file(
                req_file, is_constraints=False, fetcher=fetcher
            ):
                # Requirement files may also carry constraints; skip those.
                if not isinstance(item, Constraint):
                    results.append(item)
    return results
2269,
235,
2269,
149,
1405962372
] |
def _one_hot(index):
    """Return a float32 one-hot vector over all board points plus pass."""
    size = go.N * go.N + 1
    vec = np.zeros([size], dtype=np.float32)
    vec[index] = 1
    return vec
11,
25,
11,
1,
1606268455
] |
def write_tf_examples(filename, tf_examples, serialize=True):
    """Write tf.Examples to a TFRecord file.

    Args:
        filename: Where to write tf.records
        tf_examples: An iterable of tf.Example
        serialize: whether to serialize the examples.
    """
    with tf.python_io.TFRecordWriter(
            filename, options=TF_RECORD_CONFIG) as writer:
        for example in tf_examples:
            payload = example.SerializeToString() if serialize else example
            writer.write(payload)
11,
25,
11,
1,
1606268455
] |
def read_tf_records(batch_size, tf_records, num_repeats=1,
                    shuffle_records=True, shuffle_examples=True,
                    shuffle_buffer_size=None, interleave=True,
                    filter_amount=1.0):
    """
    Args:
        batch_size: batch size to return
        tf_records: a list of tf_record filenames
        num_repeats: how many times the data should be read (default: One)
        shuffle_records: whether to shuffle the order of files read
        shuffle_examples: whether to shuffle the tf.Examples
        shuffle_buffer_size: how big of a buffer to fill before shuffling.
        interleave: whether to interleave examples from multiple tf_records
        filter_amount: what fraction of records to keep
    Returns:
        a tf dataset of batched tensors
    """
    # Shuffling without a buffer size is silently a no-op, so fail loudly.
    if shuffle_examples and not shuffle_buffer_size:
        raise ValueError('Must set shuffle buffer size if shuffling examples')
    tf_records = list(tf_records)
    if shuffle_records:
        random.shuffle(tf_records)
    record_list = tf.data.Dataset.from_tensor_slices(tf_records)
    # compression_type here must agree with write_tf_examples
    map_func = functools.partial(
        tf.data.TFRecordDataset,
        buffer_size=8 * 1024 * 1024,
        compression_type='ZLIB')
    if interleave:
        # cycle_length = how many tfrecord files are read in parallel
        # The idea is to shuffle both the order of the files being read,
        # and the examples being read from the files.
        dataset = record_list.apply(
            tf.data.experimental.parallel_interleave(
                map_func, cycle_length=64, sloppy=True))
    else:
        dataset = record_list.flat_map(map_func)
    # Probabilistically drop records to subsample the stream.
    if filter_amount < 1.0:
        dataset = dataset.filter(lambda _: tf.random_uniform([]) < filter_amount)
    dataset = dataset.repeat(num_repeats)
    if shuffle_examples:
        dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    dataset = dataset.batch(batch_size)
    return dataset
11,
25,
11,
1,
1606268455
] |
def get_input_tensors(batch_size,
                      feature_layout,
                      tf_records,
                      num_repeats=1,
                      shuffle_records=True,
                      shuffle_examples=True,
                      shuffle_buffer_size=None,
                      filter_amount=0.05,
                      random_rotation=True):
    """Read tf.Records and prepare them for ingestion by dual_net.

    See `read_tf_records` for parameter documentation.
    Returns a dict of tensors (see return value of batch_parse_tf_example)
    """
    print('Reading tf_records from {} inputs'.format(len(tf_records)))
    dataset = read_tf_records(
        batch_size,
        tf_records,
        num_repeats=num_repeats,
        shuffle_records=shuffle_records,
        shuffle_examples=shuffle_examples,
        shuffle_buffer_size=shuffle_buffer_size,
        filter_amount=filter_amount,
        interleave=False)
    # Drop the trailing partial batch so every batch has a static size.
    dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
    dataset = dataset.map(
        functools.partial(batch_parse_tf_example, batch_size, feature_layout))
    if random_rotation:
        # Unbatch the dataset so we can rotate it
        dataset = dataset.apply(tf.data.experimental.unbatch())
        dataset = dataset.apply(
            tf.data.experimental.map_and_batch(
                functools.partial(_random_rotation, feature_layout), batch_size))
    # One-shot iterator: no explicit (re-)initialization required.
    return dataset.make_one_shot_iterator().get_next()
11,
25,
11,
1,
1606268455
] |
def get_tpu_bt_input_tensors(games,
                             games_nr,
                             batch_size,
                             feature_layout,
                             num_repeats=1,
                             number_of_games=500e3,
                             fresh_fraction=0.05,
                             random_rotation=True):
    """Build a dataset of parsed (optionally rotated) moves from Bigtable.

    Pulls unparsed moves from the last `number_of_games` games, batches and
    parses them, and optionally applies random rotations before prefetching.

    NOTE(review): `fresh_fraction` is accepted but never used in this body;
    confirm whether it should influence game selection.
    """
    dataset = bigtable_input.get_unparsed_moves_from_last_n_games(
        games, games_nr, number_of_games)
    dataset = dataset.repeat(num_repeats)
    dataset = dataset.batch(batch_size)
    # Drop the trailing partial batch so every batch has a static size.
    dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
    dataset = dataset.map(
        functools.partial(batch_parse_tf_example, batch_size, feature_layout))
    if random_rotation:
        # Unbatch the dataset so we can rotate it
        dataset = dataset.apply(tf.data.experimental.unbatch())
        dataset = dataset.apply(
            tf.data.experimental.map_and_batch(
                functools.partial(_random_rotation, feature_layout),
                batch_size,
                drop_remainder=True))
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
11,
25,
11,
1,
1606268455
] |
def make_dataset_from_sgf(sgf_filename, tf_record):
    """Replay an SGF game file and dump its positions into a TFRecord file."""
    position_w_contexts = sgf_wrapper.replay_sgf_file(sgf_filename)
    examples = map(_make_tf_example_from_pwc, position_w_contexts)
    write_tf_examples(tf_record, examples)
11,
25,
11,
1,
1606268455
] |
def __init__(self):
    # get the model, will download if it's not available locally
    self.__model_filename = get_corpus_path(_MODEL_NAME)
    loader = torch.load(self.__model_filename, map_location=device)
    # Hyper-parameters were persisted alongside the weights.
    INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
    OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
    self._maxlength = 100  # hard cap on decoded output length
    # Vocabulary lookup tables for source and target alphabets.
    self._char_to_ix = loader["char_to_ix"]
    self._ix_to_char = loader["ix_to_char"]
    self._target_char_to_ix = loader["target_char_to_ix"]
    self._ix_to_target_char = loader["ix_to_target_char"]
    # encoder/ decoder
    # Restore the model and construct the encoder and decoder.
    self._encoder = Encoder(INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
    self._decoder = AttentionDecoder(
        OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
    )
    self._network = Seq2Seq(
        self._encoder,
        self._decoder,
        self._target_char_to_ix["<start>"],
        self._target_char_to_ix["<end>"],
        self._maxlength,
    ).to(device)
    self._network.load_state_dict(loader["model_state_dict"])
    self._network.eval()  # inference only: disables dropout etc.
786,
237,
786,
35,
1466693846
] |
def g2p(self, text: str) -> str:
    """
    :param str text: Thai text to be romanized
    :return: English (more or less) text that spells out how the Thai text
        should be pronounced.
    """
    # Encode the text and wrap it as a single-item batch.
    input_tensor = self._prepare_sequence_in(text).view(1, -1)
    # +1 presumably accounts for an appended end-of-sequence marker — confirm
    # against _prepare_sequence_in.
    input_length = [len(text) + 1]
    target_tensor_logits = self._network(
        input_tensor, input_length, None, 0
    )
    # Seq2seq model returns <END> as the first token,
    # As a result, target_tensor_logits.size() is torch.Size([0])
    if target_tensor_logits.size(0) == 0:
        target = ["<PAD>"]
    else:
        # Greedy decode: take the argmax character at every timestep.
        target_tensor = (
            torch.argmax(target_tensor_logits.squeeze(1), 1)
            .cpu()
            .detach()
            .numpy()
        )
        target = [self._ix_to_target_char[t] for t in target_tensor]
    return "".join(target)
786,
237,
786,
35,
1466693846
] |
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5 | PyThaiNLP/pythainlp | [
786,
237,
786,
35,
1466693846
] |
def forward(self, sequences, sequences_lengths):
    # sequences: (batch_size, sequence_length=MAX_LENGTH)
    # sequences_lengths: (batch_size)
    batch_size = sequences.size(0)
    self.hidden = self.init_hidden(batch_size)
    # Sort lengths descending, as required by pack_padded_sequence.
    sequences_lengths = np.sort(sequences_lengths)[::-1]
    # NOTE(review): argsort is taken over the *already sorted* lengths, which
    # yields the identity permutation — the batch rows are therefore not
    # actually re-ordered to match the sorted lengths. Confirm callers
    # pre-sort their batches, or that this is intended.
    index_sorted = np.argsort(
        -sequences_lengths
    )  # use negation in sort in descending order
    index_unsort = np.argsort(index_sorted)  # to unsorted sequence
    index_sorted = torch.from_numpy(index_sorted)
    sequences = sequences.index_select(0, index_sorted.to(device))
    sequences = self.character_embedding(sequences)
    sequences = self.dropout(sequences)
    # .copy() presumably drops the negative stride from the [::-1] view,
    # which downstream consumers reject — confirm.
    sequences_packed = nn.utils.rnn.pack_padded_sequence(
        sequences, sequences_lengths.copy(), batch_first=True
    )
    sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden)
    sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
        sequences_output, batch_first=True
    )
    # Restore the original batch order for the caller.
    index_unsort = torch.from_numpy(index_unsort).to(device)
    sequences_output = sequences_output.index_select(
        0, index_unsort.clone().detach()
    )
    return sequences_output, self.hidden
786,
237,
786,
35,
1466693846
] |
def __init__(self, method, hidden_size):
    """Attention scorer.

    :param method: scoring variant — "general" and "concat" build their own
        linear layers; any other value builds no extra parameters here.
    :param hidden_size: dimensionality of the hidden states being scored.
    """
    super(Attn, self).__init__()
    self.method = method
    self.hidden_size = hidden_size
    if self.method == "general":
        self.attn = nn.Linear(self.hidden_size, hidden_size)
    elif self.method == "concat":
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # Extra learned vector used by the "concat" scoring scheme.
        self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
786,
237,
786,
35,
1466693846
] |
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5 | PyThaiNLP/pythainlp | [
786,
237,
786,
35,
1466693846
] |
def forward(self, input_character, last_hidden, encoder_outputs, mask):
    """Defines the forward computation of the decoder."""
    # input_character: (batch_size, 1)
    # last_hidden: (batch_size, hidden_dim)
    # encoder_outputs: (batch_size, sequence_len, hidden_dim)
    # mask: (batch_size, sequence_len)
    hidden = last_hidden.permute(1, 0, 2)
    attn_weights = self.attn(hidden, encoder_outputs, mask)
    # Weighted sum of encoder states -> one context vector per batch row.
    context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
    context_vector = torch.sum(context_vector, dim=1)
    context_vector = context_vector.unsqueeze(1)
    embedded = self.character_embedding(input_character)
    embedded = self.dropout(embedded)
    # Condition the RNN step on both the context and the current character.
    rnn_input = torch.cat((context_vector, embedded), -1)
    output, hidden = self.rnn(rnn_input)
    output = output.view(-1, output.size(2))
    x = self.linear(output)
    return x, hidden[0], attn_weights
786,
237,
786,
35,
1466693846
] |
def __init__(
self,
encoder,
decoder,
target_start_token,
target_end_token,
max_length, | PyThaiNLP/pythainlp | [
786,
237,
786,
35,
1466693846
] |
def create_mask(self, source_seq):
    """Return a boolean mask that is True wherever *source_seq* is not padding."""
    return source_seq != self.pad_idx
786,
237,
786,
35,
1466693846
] |
def mock_device(device_id, name, is_online=True, device_type_name=None):
    """Mock Canary Device class."""
    device = MagicMock()
    # Each MagicMock instance gets its own subclass, so setting PropertyMocks
    # on type(device) only affects this one mock.
    mock_cls = type(device)
    mock_cls.device_id = PropertyMock(return_value=device_id)
    mock_cls.name = PropertyMock(return_value=name)
    mock_cls.is_online = PropertyMock(return_value=is_online)
    mock_cls.device_type = PropertyMock(
        return_value={"id": 1, "name": device_type_name}
    )
    return device
7,
1,
7,
6,
1467778429
] |
def mock_mode(mode_id, name):
    """Mock Canary Mode class."""
    mode = MagicMock()
    mock_cls = type(mode)
    mock_cls.mode_id = PropertyMock(return_value=mode_id)
    mock_cls.name = PropertyMock(return_value=name)
    mock_cls.resource_url = PropertyMock(return_value=f"/v1/modes/{mode_id}")
    return mode
7,
1,
7,
6,
1467778429
] |
def ansible_ping_func(modules):
    """Locate Ansible's ping function across salt/Ansible versions."""
    # we need to go by getattr() because salt's loader will try to find "system" in the dictionary and fail
    # The ansible hack injects, in this case, "system.ping" as an attribute to the loaded module
    if "ansible.system.ping" in modules:
        return getattr(modules.ansible, "system.ping")
    # Ansible >= 2.10
    if "ansible.ping" in modules:
        return modules.ansible.ping
    pytest.fail("Where is the ping function these days in Ansible?!")
13089,
5388,
13089,
3074,
1298233016
] |
def pack(self, packer: Packer) -> None:
packer.pack_int(self.value) | StellarCN/py-stellar-base | [
328,
158,
328,
6,
1443187561
] |
def unpack(cls, unpacker: Unpacker) -> "LedgerUpgradeType":
value = unpacker.unpack_int()
return cls(value) | StellarCN/py-stellar-base | [
328,
158,
328,
6,
1443187561
] |
def from_xdr_bytes(cls, xdr: bytes) -> "LedgerUpgradeType":
    """Decode this type from raw XDR bytes."""
    return cls.unpack(Unpacker(xdr))
328,
158,
328,
6,
1443187561
] |
def from_xdr(cls, xdr: str) -> "LedgerUpgradeType":
    """Decode this type from a base64-encoded XDR string."""
    raw = base64.b64decode(xdr.encode())
    return cls.from_xdr_bytes(raw)
328,
158,
328,
6,
1443187561
] |
def __init__(self, value):
    # Store the wrapped value verbatim.
    self.value = value
93,
34,
93,
17,
1484089137
] |
def __init__(self, api):
    # Keep a reference to the API object whose byte I/O helpers we use.
    self.api = api
93,
34,
93,
17,
1484089137
] |
def length_to_bytes(self, length):
    """Encode *length* using the RouterOS API variable-length scheme.

    Lengths are encoded in 1-4 bytes with high-bit prefixes, or as a
    0xF0 marker byte followed by a full 4-byte length.
    """
    if length < 0x80:
        return self.to_bytes(length)
    if length < 0x4000:
        return self.to_bytes(length | 0x8000, 2)
    if length < 0x200000:
        return self.to_bytes(length | 0xC00000, 3)
    if length < 0x10000000:
        return self.to_bytes(length | 0xE0000000, 4)
    return self.to_bytes(0xF0) + self.to_bytes(length, 4)
93,
34,
93,
17,
1484089137
] |
def _unpack(self, times, i):
    """Read *times* bytes and combine them with marker byte *i* into an int.

    NOTE(review): `temp1` comes from to_bytes() while `temp3` is a decoded
    str, so `temp1 + temp3` mixes bytes and str on Python 3, and from_bytes()
    is then fed the concatenation — this looks Python-2 specific (or broken);
    verify before porting.
    """
    temp1 = self.to_bytes(i)
    temp2 = self.api.read_bytes(times)
    try:
        temp3 = temp2.decode('utf-8')
    except:
        # Fall back to a legacy single-byte encoding before giving up.
        try:
            temp3 = temp2.decode('windows-1252')
        except Exception:
            print("Cannot decode response properly:", temp2)
            print(Exception)
            exit(1)
    res = temp1 + temp3
    return self.from_bytes(res)
93,
34,
93,
17,
1484089137
] |
def from_bytes(self, data):
    """Interpret *data* (a string of 8-bit characters) as a big-endian int."""
    result = 0
    for ch in data:
        result = (result << 8) + ord(ch)
    return result
93,
34,
93,
17,
1484089137
] |
def from_bytes(self, data):
    """Decode big-endian bytes into an int (Python 3 native implementation)."""
    return int.from_bytes(data, byteorder='big')
93,
34,
93,
17,
1484089137
] |
def __init__(self, socket):
    # Raw connected socket used for all reads/writes.
    self.socket = socket
    # Helper that encodes/decodes the RouterOS word-length prefixes.
    self.length_utils = RosApiLengthUtils(self)
93,
34,
93,
17,
1484089137
] |
def talk(self, words):
    """Send one API sentence and collect reply sentences until ``!done``.

    Each reply sentence is returned as a ``(reply_word, attrs)`` tuple where
    attribute words of the form ``=key=value`` are split into *attrs*;
    valueless attribute words map to an empty byte string.

    Raises:
        RosAPIError: if the first reply was a ``!trap`` error.
        RosAPIFatalError: if the first reply was ``!fatal`` (the socket is
            closed before raising).
    """
    if self.write_sentence(words) == 0:
        return
    output = []
    while True:
        input_sentence = self.read_sentence()
        if not len(input_sentence):
            continue
        attrs = {}
        reply = input_sentence.pop(0)
        for line in input_sentence:
            try:
                second_eq_pos = line.index(b'=', 1)
            except ValueError:
                # Bug fix: bytes.index raises ValueError (not IndexError)
                # when no second '=' exists, i.e. the attribute word has no
                # value. The old `except IndexError` could never fire and
                # such words crashed the loop.
                attrs[line[1:]] = b''
            else:
                attrs[line[1:second_eq_pos]] = line[second_eq_pos + 1:]
        output.append((reply, attrs))
        if reply == b'!done':
            if output[0][0] == b'!trap':
                raise RosAPIError(output[0][1])
            if output[0][0] == b'!fatal':
                self.socket.close()
                raise RosAPIFatalError(output[0][1])
            return output
93,
34,
93,
17,
1484089137
] |
def read_sentence(self):
    """Read words until the empty end-of-sentence word; return them as a list."""
    sentence = []
    word = self.read_word()
    while len(word):
        sentence.append(word)
        word = self.read_word()
    return sentence
93,
34,
93,
17,
1484089137
] |
def read_word(self):
    """Read one length-prefixed word from the wire and return its bytes."""
    length = self.length_utils.read_length()
    word = self.read_bytes(length)
    logger.debug('<<< %s' % word)
    return word
93,
34,
93,
17,
1484089137
] |
def read_bytes(self, length):
    """Receive exactly *length* bytes from the socket.

    Loops over ``recv`` until the full amount has arrived.

    Raises:
        RosAPIConnectionError: on socket errors, or if the peer closes the
            connection before *length* bytes were received.
    """
    buffer = b''
    while len(buffer) < length:
        try:
            chunk = self.socket.recv(length - len(buffer))
        except socket.error as e:
            raise RosAPIConnectionError(str(e))
        if not chunk:
            raise RosAPIConnectionError('Connection closed by remote end.')
        buffer += chunk
    return buffer
93,
34,
93,
17,
1484089137
] |
def __init__(self, api, namespace):
    # API client used to issue commands.
    self.api = api
    # Command-path namespace (e.g. '/interface') this resource is bound to.
    self.namespace = namespace
93,
34,
93,
17,
1484089137
] |
def _prepare_arguments(is_query, **kwargs):
    """Render keyword arguments into RouterOS attribute ('=') or query ('?') words."""
    prefix = '?' if is_query else '='
    words = []
    for key, value in kwargs.items():
        # 'id' and 'proplist' are special API keys spelled '.id' / '.proplist'.
        if key in ['id', 'proplist']:
            key = '.%s' % key
        key = key.replace('_', '-')
        words.append(('%s%s=' % (prefix, key)).encode('ascii') + value)
    return words
93,
34,
93,
17,
1484089137
] |
def _remove_first_char_from_keys(dictionary):
    """Decode byte keys to str, stripping the leading dot from '.id'/'.proplist'."""
    result = {}
    for raw_key, value in dictionary.items():
        name = raw_key.decode('ascii')
        if name in ['.id', '.proplist']:
            name = name[1:]
        result[name] = value
    return result
93,
34,
93,
17,
1484089137
] |
def detailed_get(self, **kwargs):
    """Run 'print' with detail output enabled, filtered by *kwargs*."""
    return self.call('print', {'detail': b''}, kwargs)
93,
34,
93,
17,
1484089137
] |
def add(self, **kwargs):
    """Issue an 'add' command with *kwargs* as the new object's attributes."""
    return self.call('add', kwargs)
93,
34,
93,
17,
1484089137
] |
def detailed_get(self, **kwargs):
    """Run 'print' with detail output enabled (str-valued variant)."""
    return self.call('print', {'detail': ''}, kwargs)
93,
34,
93,
17,
1484089137
] |
def _encode_kwargs(self, kwargs):
    """ASCII-encode every value of *kwargs*, returning a new dict."""
    return {key: value.encode('ascii') for key, value in kwargs.items()}
93,
34,
93,
17,
1484089137
] |
def __init__(self, host, username='api', password='', port=8728, ssl=False):
    """Store connection parameters and connect immediately via reconnect()."""
    self.host = host
    self.username = username
    self.password = password
    self.socket = None  # populated once the connection is established
    self.port = port
    self.ssl = ssl
    self.reconnect()
93,
34,
93,
17,
1484089137
] |
def __exit__(self, _, __, ___):
    # Context-manager exit: always close the connection; exception info unused.
    self.close_connection()
93,
34,
93,
17,
1484089137
] |
def connect(self):
    """Open a TCP (optionally TLS-wrapped) connection and build the API client."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(15.0)
    sock.connect((self.host, self.port))
    # Enable keepalive probes after 10s idle so dead peers are detected.
    set_keepalive(sock, after_idle_sec=10)
    if self.ssl:
        try:
            # NOTE(review): ssl.wrap_socket was deprecated in 3.7 and removed
            # in Python 3.12; migrating to ssl.SSLContext.wrap_socket is
            # required for modern interpreters.
            self.socket = ssl.wrap_socket(sock)
        except ssl.SSLError as e:
            raise RosAPIConnectionError(str(e))
    else:
        self.socket = sock
    self.api_client = RosAPI(self.socket)
93,
34,
93,
17,
1484089137
] |
def get_resource(self, namespace):
    """Return a RouterboardResource bound to this API for *namespace*."""
    return RouterboardResource(self, namespace)
93,
34,
93,
17,
1484089137
] |
def close_connection(self):
    """Close the underlying socket."""
    self.socket.close()
93,
34,
93,
17,
1484089137
] |
def __init__(self, hostname, username, password):
    # Credentials are stored and used lazily when login() is called.
    self.hostname = hostname
    self.username = username
    self.password = password
93,
34,
93,
17,
1484089137
] |
def talk(self, talk_command):
    """Log in and forward *talk_command* to the freshly logged-in API client."""
    client = self.login()
    return client.talk(talk_command)
93,
34,
93,
17,
1484089137
] |
def api_add(self, base_path, params):
    """Issue an API 'add' under *base_path* with the given key/value params.

    NOTE(review): `params.iteritems()` is Python-2 only, and `b'=' + key`
    mixes bytes with str on Python 3 — as written this method only runs on
    Python 2; confirm before porting.
    """
    command = [base_path + '/add']
    for key, value in params.iteritems():
        item = b'=' + key + '=' + str(value)
        command.append(item)
    return self.talk(command)
93,
34,
93,
17,
1484089137
] |
def api_remove(self, base_path, remove_id):
    """Remove the object identified by *remove_id* under *base_path*."""
    return self.talk([base_path + '/remove', b'=.id=' + remove_id])
93,
34,
93,
17,
1484089137
] |
def __init__(self, env, scope=None, mpi_context=None, **kwargs):
    """Base updater state: environment, naming scope and progress counters."""
    self.scope = scope
    self.env = env
    self.mpi_context = mpi_context
    self._n_experiences = 0  # total experiences consumed so far
    self.step = 0            # update steps performed
    self._saver = None       # built lazily when first needed
1,
2,
1,
3,
1491848389
] |
def n_experiences(self):
    """Number of experiences this updater has processed so far."""
    return self._n_experiences
1,
2,
1,
3,
1491848389
] |
def _build_graph(self):
    """Abstract hook: subclasses must build their computation graph here."""
    raise Exception("NotImplemented")
1,
2,
1,
3,
1491848389
] |
def _update(self, batch_size):
    """Abstract hook: subclasses must implement one training update."""
    raise Exception("NotImplemented")
1,
2,
1,
3,
1491848389
] |
def _evaluate(self, batch_size, mode):
    """Abstract hook: subclasses must implement evaluation for a split."""
    raise Exception("NotImplemented")
1,
2,
1,
3,
1491848389
] |
def save(self, filename):
    """Checkpoint the default session via this updater's saver; return the path."""
    session = tf.get_default_session()
    return self.saver.save(session, filename)
1,
2,
1,
3,
1491848389
] |
def trainable_variables(self, for_opt):
    """This updater owns no trainable variables; always an empty list."""
    return []
1,
2,
1,
3,
1491848389
] |
def _update(self, batch_size):
    """No-op update; nothing to record."""
    return {}
1,
2,
1,
3,
1491848389
] |
def save(self, session, filename):
    """Nothing to persist; return an empty path."""
    return ''
1,
2,
1,
3,
1491848389
] |
def __init__(self, env, f, **kwargs):
    """Updater that optimizes a differentiable function `f` against `env`."""
    assert hasattr(env, 'build'), (
        "Environments used with DifferentiableUpdater must possess "
        "a method called `build` which builds returns a dictionary of scalar tensors."
    )
    self.f = f
    super(DifferentiableUpdater, self).__init__(env, **kwargs)
1,
2,
1,
3,
1491848389
] |
def _build_graph(self):
    # Build the environment's graph around `f`; it returns the tensors to record.
    self.recorded_tensors = self.env.build(self.f)
    self.loss = self.recorded_tensors['loss']
    tvars = self.trainable_variables(for_opt=True)
    # Optional L2 regularization, applied to weight variables only (not biases).
    if self.l2_weight is not None:
        self.loss += self.l2_weight * sum(tf.nn.l2_loss(v) for v in tvars if 'weights' in v.name)
    self.train_op, self.train_recorded_tensors = build_gradient_train_op(
        self.loss, tvars, self.optimizer_spec, self.lr_schedule,
        self.max_grad_norm, self.noise_schedule)
    # Also record the current values of all scheduled hyper-parameters.
    self.recorded_tensors.update(get_scheduled_values())
1,
2,
1,
3,
1491848389
] |
def _evaluate(self, batch_size, mode):
    """Run the recorded tensors against the requested split's feed dict."""
    if mode == "val":
        feed_dict = self.env.data_manager.do_val()
    elif mode == "test":
        feed_dict = self.env.data_manager.do_test()
    else:
        raise Exception("Unknown evaluation mode: {}".format(mode))
    return tf.get_default_session().run(self.recorded_tensors, feed_dict=feed_dict)
1,
2,
1,
3,
1491848389
] |
def __init__(self, env, scope=None, **kwargs):
    """Updater for video-shaped observations; builds its network from cfg."""
    self.obs_shape = env.obs_shape
    # The trailing three dims are height, width, depth; anything before
    # them is the (optional) frame dimension.
    *other, self.image_height, self.image_width, self.image_depth = self.obs_shape
    self.n_frames = other[0] if other else 0
    self.network = cfg.build_network(env, self, scope="network")
    super(VideoUpdater, self).__init__(env, scope=scope, **kwargs)
1,
2,
1,
3,
1491848389
] |
def _update(self, batch_size):
    # In no-gradient mode there is nothing to train; report nothing.
    if cfg.get('no_gradient', False):
        return dict()
    feed_dict = self.data_manager.do_train()
    sess = tf.get_default_session()
    # One optimization step; fetch both record dicts in the same run.
    _, record, train_record = sess.run(
        [self.train_op, self.recorded_tensors, self.train_records], feed_dict=feed_dict)
    record.update(train_record)
    return record
1,
2,
1,
3,
1491848389
] |
def _build_graph(self):
    """Wire datasets -> network -> loss/train-op and collect recorded tensors."""
    self.data_manager = DataManager(datasets=self.env.datasets)
    self.data_manager.build_graph()
    data = self.data_manager.iterator.get_next()
    self.inp = data["image"]
    network_outputs = self.network(data, self.data_manager.is_training)
    network_tensors = network_outputs["tensors"]
    network_recorded_tensors = network_outputs["recorded_tensors"]
    network_losses = network_outputs["losses"]
    self.tensors = network_tensors
    self.recorded_tensors = recorded_tensors = dict(global_step=tf.train.get_or_create_global_step())
    # --- loss ---
    # Total loss is the sum of all named network losses; each is also recorded.
    self.loss = tf.constant(0., tf.float32)
    for name, tensor in network_losses.items():
        self.loss += tensor
        recorded_tensors['loss_' + name] = tensor
    recorded_tensors['loss'] = self.loss
    # --- train op ---
    if cfg.do_train and not cfg.get('no_gradient', False):
        tvars = self.trainable_variables(for_opt=True)
        self.train_op, self.train_records = build_gradient_train_op(
            self.loss, tvars, self.optimizer_spec, self.lr_schedule,
            self.max_grad_norm, self.noise_schedule, grad_n_record_groups=self.grad_n_record_groups)
    sess = tf.get_default_session()
    # NOTE(review): getattr(..., None) will raise AttributeError on .items()
    # if the session lacks `scheduled_values`; confirm sessions always carry it.
    for k, v in getattr(sess, 'scheduled_values', None).items():
        # Avoid clobbering an already-recorded tensor of the same name.
        if k in recorded_tensors:
            recorded_tensors['scheduled_' + k] = v
        else:
            recorded_tensors[k] = v
    # --- recorded values ---
    intersection = recorded_tensors.keys() & network_recorded_tensors.keys()
    assert not intersection, "Key sets have non-zero intersection: {}".format(intersection)
    recorded_tensors.update(network_recorded_tensors)
    intersection = recorded_tensors.keys() & self.network.eval_funcs.keys()
    assert not intersection, "Key sets have non-zero intersection: {}".format(intersection)
    if self.network.eval_funcs:
        eval_funcs = self.network.eval_funcs
    else:
        eval_funcs = {}
    # For running functions, during evaluation, that are not implemented in tensorflow
    self.evaluator = Evaluator(eval_funcs, network_tensors, self)
1,
2,
1,
3,
1491848389
] |
def record_tensors(self, **kwargs):
    """Record the scalar mean of each named tensor for logging."""
    self.recorded_tensors.update(
        {name: tf.reduce_mean(tf.to_float(tensor)) for name, tensor in kwargs.items()})
1,
2,
1,
3,
1491848389
] |
def recorded_tensors(self):
    """Lazily-created dict of tensors recorded for logging."""
    existing = self._recorded_tensors
    if existing is None:
        existing = self._recorded_tensors = {}
    return existing
1,
2,
1,
3,
1491848389
] |
def __init__(self, train=None, val=None, test=None, datasets=None, **kwargs):
    """Collect named datasets.

    Args:
        train, val, test: optional datasets stored under those names.
        datasets: optional mapping of additional named datasets.

    Raises:
        AssertionError: if none of train/val/test was provided.
    """
    self.datasets = {}
    self.datasets.update(train=train, val=val, test=test)
    # Bug fix: `datasets` defaults to None and dict.update(None) raises
    # TypeError; only merge when a mapping was actually supplied.
    if datasets:
        self.datasets.update(datasets)
    assert (
        self.datasets['train'] is not None
        or self.datasets['val'] is not None
        or self.datasets['test'] is not None), (
        'Must provide at least one dataset with name "train", "val", or "test".')
    self.iterators_and_handles = {}
1,
2,
1,
3,
1491848389
] |
def build_iterator(self, name, base_dataset_name, batch_size, repeat, shuffle_buffer_size):
    """Build an initializable iterator over one of the managed datasets.

    Registers the (iterator, string-handle) pair under *name* and returns
    the (dataset, iterator, handle) triple.
    """
    base_dataset = self.datasets[base_dataset_name]
    if batch_size is None:
        batch_size = self.batch_size
    # Accept either a ready tf.data.Dataset or our own Dataset wrapper
    # backed by a TFRecord file.
    if isinstance(base_dataset, tf.data.Dataset):
        dset = base_dataset
    elif isinstance(base_dataset, Dataset):
        dset = tf.data.TFRecordDataset(base_dataset.filename)
    else:
        raise Exception("Unknown dataset type: {}.".format(base_dataset))
    # --- possibly repeat and/or shuffle --
    if repeat and shuffle_buffer_size > 0:
        # shuffle_and_repeat moved between TF releases; try both locations.
        try:
            shuffle_and_repeat_func = tf.data.experimental.shuffle_and_repeat
        except AttributeError:
            shuffle_and_repeat_func = tf.contrib.data.shuffle_and_repeat
        shuffle_and_repeat = shuffle_and_repeat_func(self.shuffle_buffer_size)
        dset = dset.apply(shuffle_and_repeat)
    elif shuffle_buffer_size > 0:
        dset = dset.shuffle(self.shuffle_buffer_size)
    # --- batch and parse ---
    dset = dset.batch(batch_size)
    if hasattr(base_dataset, 'parse_example_batch'):
        dset = dset.map(base_dataset.parse_example_batch)
    # --- possibly prefetch to improve performance ---
    if self.prefetch_buffer_size_in_batches > 0:
        if cfg.use_gpu and self.prefetch_to_device:
            # Suggested here: https://github.com/tensorflow/tensorflow/issues/18947#issuecomment-407778515
            dset = (dset.apply(tf.data.experimental.copy_to_device('/gpu:0'))
                    .prefetch(self.prefetch_buffer_size_in_batches))
        else:
            dset = dset.prefetch(self.prefetch_buffer_size_in_batches)
    # --- finalize ---
    iterator = dset.make_initializable_iterator()
    sess = tf.get_default_session()
    handle = sess.run(iterator.string_handle(name="{}_string_handle".format(name)))
    self.iterators_and_handles[name] = (iterator, handle)
    return dset, iterator, handle
1,
2,
1,
3,
1491848389
] |
def do_val(self, is_training=False):
    """Feed-dict for the validation iterator; see `do`."""
    return self.do('val', is_training)
1,
2,
1,
3,
1491848389
] |
def do(self, name, is_training=False):
    """ Initialize iterator (unless it's the `train` iterator, which is handled slightly differently)
    and return a feed_dict populated with the appropriate handle for the requested iterator. """
    iterator, handle = self.iterators_and_handles[name]
    sess = tf.get_default_session()
    if name == 'train':
        # The train iterator repeats, so initialize it at most once.
        if not self.train_initialized:
            sess.run(iterator.initializer)
            self.train_initialized = True
    else:
        # Eval iterators restart from the beginning on every call.
        sess.run(iterator.initializer)
    return {self.handle: handle, self.is_training: is_training}
1,
2,
1,
3,
1491848389
] |
def __call__(self, fetched, updater):
    # Placeholder eval func: computes nothing, records nothing.
    return {}
1,
2,
1,
3,
1491848389
] |
def __init__(self, functions, tensors, updater):
    """Group eval functions by feed_dict and precompute their fetch trees."""
    self._functions = functions
    self._tensors = tensors
    # Force evaluation to happen at with the default feed_dict
    functions["dummy"] = DummyFunc()
    self.updater = updater
    self.functions = defaultdict(list)
    self.feed_dicts = {}
    fetch_keys = defaultdict(set)
    for name, func in functions.items():
        # A function may request a custom feed_dict; default to empty.
        if hasattr(func, 'get_feed_dict'):
            feed_dict = func.get_feed_dict(updater)
        else:
            feed_dict = {}
        # Serialize the feed_dict so it can serve as a stable grouping key.
        fd_key = {str(k): str(v) for k, v in feed_dict.items()}
        fd_key = json.dumps(fd_key, default=str, indent=4, sort_keys=True)
        self.functions[fd_key].append((name, func))
        self.feed_dicts[fd_key] = feed_dict
        # Collect the tensor keys this function needs fetched.
        keys_accessed = func.keys_accessed
        if isinstance(keys_accessed, str):
            keys_accessed = keys_accessed.split()
        for key in keys_accessed:
            fetch_keys[fd_key].add(key)
    self.fetches = {}
    for fd_key, _fetch_keys in fetch_keys.items():
        fetches = self.fetches[fd_key] = {}
        for key in _fetch_keys:
            # Keys use ':' to address nested dicts inside `tensors`; walk
            # that path and mirror the structure into the fetch tree.
            dst = fetches
            src = tensors
            subkeys = key.split(":")
            for i, _key in enumerate(subkeys):
                if i == len(subkeys)-1:
                    dst[_key] = src[_key]
                else:
                    if _key not in dst:
                        dst[_key] = dict()
                    dst = dst[_key]
                    src = src[_key]
1,
2,
1,
3,
1491848389
] |
def main(_):
    """Entry point: dump the parsed config flag's value."""
    print(_CONFIG.value)
676,
26,
676,
14,
1597899148
] |
def __init__(self, account, requests_options=()):
    # Delegate all state handling to the base API class.
    super(Export, self).__init__(account=account, requests_options=requests_options)
21,
11,
21,
8,
1435900514
] |
def get_user_organization_titles(self, code):
    """Fetch the organizations (with titles) that the user *code* belongs to."""
    url = "https://{0}.cybozu.com/v1/user/organizations.json".format(self.account.domain)
    resp = self._request("GET", url, params_or_data={"code": code})
    return ur.UserOrganizationTitlesResult(resp)
21,
11,
21,
8,
1435900514
] |
def setUp(self):
    """Create a per-test output directory under the declared outputs dir."""
    super().setUp()
    base_dir = os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir())
    self._test_dir = os.path.join(base_dir, self._testMethodName)
1905,
649,
1905,
157,
1549300476
] |
def test_anonymous_watch_with_email(self):
    """An anonymous watcher must supply an email, which is kept as-is."""
    data = {"email": "wo@ot.com", "event_type": "reply"}
    form = WatchQuestionForm(AnonymousUser(), data=data)
    assert form.is_valid()
    eq_("wo@ot.com", form.cleaned_data["email"])
1110,
779,
1110,
26,
1264532037
] |
def test_registered_watch_with_email(self):
    """For a registered user the submitted email is ignored (cleared)."""
    data = {"email": "wo@ot.com", "event_type": "reply"}
    form = WatchQuestionForm(UserFactory(), data=data)
    assert form.is_valid()
    assert not form.cleaned_data["email"]
1110,
779,
1110,
26,
1264532037
] |
def setUp(self):
    # Defer to the base TestCase set-up; no extra fixtures needed here.
    super(TestNewQuestionForm, self).setUp()
1110,
779,
1110,
26,
1264532037
] |
def create(kernel):
    """Build the spear-rack loot-schematic tangible template.

    NOTE(review): as captured here the function sets up `result` but never
    returns it; the original likely ends with `return result` — confirm
    against the full source.
    """
    result = Tangible()
    result.template = "object/tangible/loot/loot_schematic/shared_spear_rack_schematic.iff"
    result.attribute_template_id = -1
    result.stfName("craft_item_ingredients_n","spear_rack")
62,
37,
62,
37,
1297996365
] |
def create(kernel):
    """Build the CEC-officer creature template (human base male).

    NOTE(review): as captured here the function sets up `result` but never
    returns it; the original likely ends with `return result` — confirm
    against the full source.
    """
    result = Creature()
    result.template = "object/mobile/shared_dressed_corellia_cec_officer.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","human_base_male")
62,
37,
62,
37,
1297996365
] |
def ParseJSONFile(filename):
    """Parse *filename* as JSON.

    Returns:
        The decoded document, or None (after printing a message) when the
        file is not valid JSON.
    """
    with open(filename) as json_file:
        try:
            return json.load(json_file)
        except ValueError:
            # print() with a single argument behaves identically on Python 2
            # and 3; the original `print "..."` statement is a SyntaxError on
            # Python 3.
            print("%s is not a valid JSON document" % filename)
            return None
2,
1,
2,
1,
1459966120
] |
def filter_queryset(self, request, queryset, view):
    """Staff and support users see everything; others only active objects."""
    user = request.user
    if user.is_staff or user.is_support:
        return queryset
    return queryset.filter(is_active=True)
39,
35,
39,
3,
1484854426
] |
def _AccessIdException(command_name, subcommand, synopsis):
    """Build the CommandException raised when an Access ID argument is missing."""
    message = (
        '%s %s requires an Access ID to be specified as the last argument.\n%s'
        % (command_name, subcommand, synopsis))
    return CommandException(message)
1835,
570,
1835,
1039,
1429033745
] |
def FormatInfo(name, value, new_line=True):
    """Format the metadata name-value pair into two aligned columns."""
    width = 22
    suffix = '\n' if new_line else ''
    return '\t%-*s %s%s' % (width, name + ':', value, suffix)
1835,
570,
1835,
1039,
1429033745
] |
def _CreateHmacKey(self, thread_state=None):
    """Creates HMAC key for a service account.

    Requires the service account email as the last positional argument;
    prints the new key's Access ID and secret.
    """
    # Guard clause: the service account must be supplied on the command line.
    if not self.args:
        err_msg = ('%s %s requires a service account to be specified as the '
                   'last argument.\n%s')
        raise CommandException(
            err_msg %
            (self.command_name, self.action_subcommand, _CREATE_SYNOPSIS))
    self.service_account_email = self.args[0]
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.CreateHmacKey(self.project_id,
                                        self.service_account_email,
                                        provider='gs')
    print('%-12s %s' % ('Access ID:', response.metadata.accessId))
    print('%-12s %s' % ('Secret:', response.secret))
1835,
570,
1835,
1039,
1429033745
] |
def _GetHmacKey(self, thread_state=None):
    """Gets HMAC key from its Access Id and prints its metadata."""
    # Guard clause: the Access ID must be supplied on the command line.
    if not self.args:
        raise _AccessIdException(self.command_name, self.action_subcommand,
                                 _GET_SYNOPSIS)
    access_id = self.args[0]
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    key = gsutil_api.GetHmacKey(self.project_id, access_id, provider='gs')
    print(_KeyMetadataOutput(key))
1835,
570,
1835,
1039,
1429033745
] |
def _UpdateHmacKey(self, thread_state=None):
    """Update an HMAC key's state.

    Validates the state flag and Access ID argument, then issues the
    update and prints the resulting key metadata.
    """
    # Validate the state flag before touching the API.
    if not self.state:
        raise CommandException(
            'A state flag must be supplied for %s %s\n%s' %
            (self.command_name, self.action_subcommand, _UPDATE_SYNOPSIS))
    if self.state not in _VALID_UPDATE_STATES:
        raise CommandException('The state flag value must be one of %s' %
                               ', '.join(_VALID_UPDATE_STATES))
    # The Access ID is the final positional argument.
    if not self.args:
        raise _AccessIdException(self.command_name, self.action_subcommand,
                                 _UPDATE_SYNOPSIS)
    access_id = self.args[0]
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.UpdateHmacKey(self.project_id, access_id,
                                        self.state, self.etag, provider='gs')
    print(_KeyMetadataOutput(response))
1835,
570,
1835,
1039,
1429033745
] |
def __init__(self, app):
    """Creates a new script for Java applications.

    Arguments:
    - app: the application to create a script for.
    """
    default.Script.__init__(self, app)

    # Certain objects emit descendant-changed events but lack
    # STATE_MANAGES_DESCENDANTS, so onSelectionChanged fails to ignore
    # them. Orca then double-speaks some items and/or moves the
    # locusOfFocus to a parent it shouldn't. Track the last such source
    # as a workaround. See bgo#616582. [[[TODO - JD: remove this hack
    # if and when we get a fix for that bug]]]
    self.lastDescendantChangedSource = None
2,
2,
2,
1,
1434186057
] |
def getFormatting(self):
    """Provide the script-specific formatting strings."""
    formatting = Formatting(self)
    return formatting
2,
2,
2,
1,
1434186057
] |
def checkKeyboardEventData(self, keyboardEvent):
    """Checks the data on the keyboard event.

    Some toolkits don't fill all the key event fields, and/or fills
    them out with unexpected data. This method tries to fill in the
    missing fields and validate/standardize the data we've been given.
    While any script can override this method, it is expected that
    this will only be done at the toolkit script level.

    Arguments:
    - keyboardEvent: an instance of input_event.KeyboardEvent
    """
    # Let the default script do its normalization first.
    default.Script.checkKeyboardEventData(self, keyboardEvent)
    if not keyboardEvent.keyval_name:
        return
    from gi.repository import Gdk
    keymap = Gdk.Keymap.get_default()
    keyval = Gdk.keyval_from_name(keyboardEvent.keyval_name)
    success, entries = keymap.get_entries_for_keyval(keyval)
    # Take the hardware keycode from the first group-0 entry
    # (presumably the primary keyboard layout — TODO confirm).
    for entry in entries:
        if entry.group == 0:
            keyboardEvent.hw_code = entry.keycode
            break
    # Put the event_string back to what it was prior to the Java
    # Atk Wrapper hack which gives us the keyname and not the
    # expected and needed printable character for punctuation
    # marks.
    #
    if keyboardEvent.event_string == keyboardEvent.keyval_name \
       and len(keyboardEvent.event_string) > 1:
        keyval = Gdk.keyval_from_name(keyboardEvent.keyval_name)
        # Only keyvals below 256 are treated as directly printable here.
        if 0 < keyval < 256:
            keyboardEvent.event_string = chr(keyval)
2,
2,
2,
1,
1434186057
] |
def onSelectionChanged(self, event):
    """Called when an object's selection changes.

    Arguments:
    - event: the Event
    """
    # Avoid doing this with objects that manage their descendants
    # because they'll issue a descendant changed event. (Note: This
    # equality check is intentional; utilities.isSameObject() is
    # especially thorough with trees and tables, which is not
    # performant.
    #
    if event.source == self.lastDescendantChangedSource:
        return

    # We treat selected children as the locus of focus. When the
    # selection changes in a list we want to update the locus of
    # focus. If there is no selection, we default the locus of
    # focus to the containing object.
    #
    if (event.source.getRole() in [pyatspi.ROLE_LIST,
                                   pyatspi.ROLE_PAGE_TAB_LIST,
                                   pyatspi.ROLE_TREE]) \
       and event.source.getState().contains(pyatspi.STATE_FOCUSED):
        newFocus = event.source
        # Prefer the first selected child, when one exists.
        if event.source.childCount:
            selection = event.source.querySelection()
            if selection.nSelectedChildren > 0:
                newFocus = selection.getSelectedChild(0)
        orca.setLocusOfFocus(event, newFocus)
    else:
        default.Script.onSelectionChanged(self, event)
2,
2,
2,
1,
1434186057
] |
def onValueChanged(self, event):
    """Called whenever an object's value changes.

    Arguments:
    - event: the Event
    """
    # We'll ignore value changed events for Java's toggle buttons since
    # they also send a redundant object:state-changed:checked event.
    #
    ignoreRoles = [pyatspi.ROLE_TOGGLE_BUTTON,
                   pyatspi.ROLE_RADIO_BUTTON,
                   pyatspi.ROLE_CHECK_BOX]
    if event.source.getRole() in ignoreRoles:
        return

    # Java's SpinButtons are the most caret movement happy thing
    # I've seen to date. If you Up or Down on the keyboard to
    # change the value, they typically emit three caret movement
    # events, first to the beginning, then to the end, and then
    # back to the beginning. It's a very excitable little widget.
    # Luckily, it only issues one value changed event. So, we'll
    # ignore caret movement events caused by value changes and
    # just process the single value changed event.
    #
    if event.source.getRole() == pyatspi.ROLE_SPIN_BUTTON:
        try:
            thisBox = orca_state.locusOfFocus.parent.parent == event.source
        except AttributeError:
            # locusOfFocus may be None or lack two parent levels. The
            # original bare "except:" also swallowed KeyboardInterrupt
            # and SystemExit; catch only the expected failure.
            thisBox = False
        if thisBox:
            self._presentTextAtNewCaretPosition(event,
                                                orca_state.locusOfFocus)
        return

    default.Script.onValueChanged(self, event)
2,
2,
2,
1,
1434186057
] |
def setUp(self):
    """Create the test user, the OAuth2 client, and the provider config."""
    super(ThirdPartyOAuthTestMixin, self).setUp()
    if self.CREATE_USER:
        self.user = UserFactory()
        UserSocialAuth.objects.create(user=self.user, provider=self.BACKEND, uid=self.social_uid)
    self.oauth_client = self._create_client()
    backend = self.BACKEND
    if backend == 'facebook':
        self.configure_facebook_provider(enabled=True, visible=True)
    elif backend == 'google-oauth2':
        self.configure_google_provider(enabled=True, visible=True)
12,
19,
12,
9,
1391522577
] |
def _create_client(self):
    """
    Create an OAuth2 client application
    """
    client = Application.objects.create(
        client_type=Application.CLIENT_PUBLIC,
        client_id=self.client_id,
    )
    return client
12,
19,
12,
9,
1391522577
] |
def _setup_provider_response_with_body(self, status, body):
    """
    Register a mock response for the third party user information endpoint with given status and body.
    """
    # Register a canned JSON response on the provider's user-info URL.
    httpretty.register_uri(
        httpretty.GET,
        self.USER_URL,
        status=status,
        body=body,
        content_type="application/json",
    )
12,
19,
12,
9,
1391522577
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.