content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from flask import Flask, make_response, request, render_template, jsonify
import io
import csv
from flask_pymongo import PyMongo
import pandas as pd
import json
from bson import ObjectId
app = Flask(__name__,template_folder='templates')
#Database name
app.config['MONGO_DBNAME'] = 'tickets'
# use mlab.com to take temperory dbs
#mongodb://<dbuser>:<dbpassword>@ds241012.mlab.com:41012/DatabaseName
app.config['MONGO_URI'] = 'mongodb://datta:datta1@ds241012.mlab.com:41012/tickets'
mongo = PyMongo(app)
@app.route('/')
@app.route('/insert', methods=["POST"])
@app.route('/display')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True) | [
201,
198,
6738,
42903,
1330,
46947,
11,
787,
62,
26209,
11,
2581,
11,
8543,
62,
28243,
11,
33918,
1958,
201,
198,
11748,
33245,
201,
198,
11748,
269,
21370,
201,
198,
6738,
42903,
62,
79,
4948,
25162,
1330,
9485,
44,
25162,
201,
198,
... | 2.387755 | 294 |
import pytest
from skedulord.cron import parse_job_from_settings, Cron
checks = [
{
"name": "foo",
"command": "python foobar.py",
"arguments": {"hello": "world"},
"expected": "python foobar.py --hello world",
},
{
"name": "foo",
"command": "python foobar.py",
"arguments": {"hello": "world", "one": 1},
"expected": "python foobar.py --hello world --one 1",
},
{
"name": "download",
"command": "python -m gitwit download apache/airflow",
"expected": "python -m gitwit download apache/airflow",
}
]
@pytest.mark.parametrize("check", checks)
def test_job_parsing(check):
    """Each settings entry parses into its expected command line."""
    # Look the job up by its own name: the parametrized checks include an
    # entry named "download" as well as "foo", so a hard-coded name="foo"
    # would never find that job in its settings list.
    res = parse_job_from_settings(settings=[check], name=check["name"])
    assert res == check["expected"]
def test_cron_obj_parsing():
    """The Cron object parses every job in the schedule appropriately."""
    cron = Cron("tests/schedule.yml")
    for setting in cron.settings:
        cmd = cron.parse_cmd(setting)
        # No trailing whitespace, and the retry/wait flags are always injected.
        assert cmd == cmd.rstrip()
        assert "--retry" in cmd
        assert "--wait" in cmd
    # TODO add this feature
    # if 'arguments' in s.keys():
    #     for k, v in s['arguments'].items():
    #         print(parsed_command)
    #         assert f"--{k} {v}" in parsed_command
11748,
12972,
9288,
198,
6738,
1341,
276,
377,
585,
13,
66,
1313,
1330,
21136,
62,
21858,
62,
6738,
62,
33692,
11,
31683,
198,
198,
42116,
796,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
3672,
1298,
... | 2.358459 | 597 |
"""
The BCN example for the $\lambda$ switch genetic network.
Solved using the algebraic method developed by Yuqian Guo etc.
Guo, Yuqian, Pan Wang, Weihua Gui, and Chunhua Yang. "Set stability and set stabilization of Boolean control networks
based on invariant subsets." Automatica 61 (2015): 106-112.
Please refer to "example_lambda_switch.py" for the results obtained using our proposed method.
"""
from algorithm.utils import read_network
from algorithm.related_work import *
if __name__ == '__main__':
    # Load the Boolean control network: dimensions n, m and the algebraic
    # transition matrix L.
    # NOTE(review): exact semantics of n vs m come from read_network -- confirm there.
    n, m, L = read_network('./networks/lambda_switch.txt')
    # Target state set M for the set-stabilization problem (Guo et al. 2015).
    M_set = [11, 2, 30, 32, 31, 5, 20, 7, 24, 13]
    print('M_set = ', M_set)
    solver = GYQSolver(m, n, L, M_set)
    # Largest control-invariant subset of M_set.
    LCIS = solver.compute_largest_control_invariant_subset()
    print('LCIS = ', LCIS)
    print('Is globally set stabilizable? ', solver.is_set_stabilizable())
    print('Shortest transient period (T_M): ', solver.compute_shortest_transient_period())
    # time optimal state feedback (any logical sub-matrix of bF is a solution)
    # We can check that the one generated by the graphical method is indeed a sub-matrix of bF
    print('The bold F in Proposition 6 is:\n', solver.compute_time_optimal_stabilizer().astype(np.int8))
    print('(It can be validated that time-optimal F produced by our graphical method is a logical sub-matrix of the bold F here)')
| [
37811,
198,
464,
11843,
45,
1672,
329,
262,
39280,
50033,
3,
5078,
8513,
3127,
13,
198,
50,
5634,
1262,
262,
37139,
291,
2446,
4166,
416,
10605,
80,
666,
1962,
78,
3503,
13,
198,
198,
8205,
78,
11,
10605,
80,
666,
11,
5961,
15233,
... | 3.022321 | 448 |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
class drc_lut():
    """
    Lookup table of design rules keyed by tuples.

    Each table key is a tuple of thresholds and maps to a rule value. A
    lookup walks the keys from largest to smallest and returns the rule of
    the first key whose every component is met. For example, keys can be
    (width, length) pairs: the lookup returns the rule for a wire of at
    least the given width and length. A dimension can be ignored by
    passing inf.
    """

    def __call__(self, *key):
        """
        Return the rule value for the given key tuple.

        With no arguments, return the rule of the smallest key.
        """
        if not key:
            smallest = sorted(self.table.keys())[0]
            return self.table[smallest]
        # Largest keys first, so the most specific matching rule wins.
        for candidate in sorted(self.table.keys(), reverse=True):
            if self.match(key, candidate):
                return self.table[candidate]

    def match(self, key1, key2):
        """
        Return True iff key1 >= key2 element-wise.

        (i.e. False as soon as any component of key1 is below key2's.)
        """
        debug.check(len(key1) == len(key2), "Comparing invalid key lengths.")
        return all(a >= b for a, b in zip(key1, key2))
| [
2,
4091,
38559,
24290,
329,
15665,
1321,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
12,
23344,
3310,
658,
286,
262,
2059,
286,
3442,
290,
383,
5926,
198,
2,
286,
3310,
658,
329,
262,
10433,
36694,
290,
19663,
5535,
198,
2,
357,
... | 2.481422 | 619 |
# (C) Copyright 2018-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Test support, providing the ability to run the event loop from within tests.
"""
from traits.api import Bool, HasStrictTraits
from traits_futures.asyncio.event_loop import AsyncioEventLoop
#: Maximum timeout for blocking calls, in seconds. A successful test should
#: never hit this timeout - it's there to prevent a failing test from hanging
#: forever and blocking the rest of the test suite.
SAFETY_TIMEOUT = 5.0
class _HasBool(HasStrictTraits):
    """
    Simple HasTraits class with a single mutable trait.

    Used in tests that need something mutable and observable.
    """

    #: Simple boolean flag, initially False; flipping it notifies observers.
    flag = Bool(False)
class TestAssistant:
    """
    Convenience mixin class for tests that need the event loop.

    Intended to be mixed in alongside unittest.TestCase when a test must
    drive the event loop as part of its body. Most of the heavy lifting is
    delegated to a toolkit-specific EventLoopHelper.
    """

    #: Factory for the event loop: a zero-argument callable returning an
    #: IEventLoop instance. Override in subclasses to target a different
    #: toolkit.
    event_loop_factory = AsyncioEventLoop

    def run_until(self, object, trait, condition, timeout=SAFETY_TIMEOUT):
        """
        Run the event loop until *condition* holds, or until timeout.

        The condition is re-evaluated, with *object* as its argument, every
        time *trait* changes.

        Parameters
        ----------
        object : traits.has_traits.HasTraits
            Object whose trait we monitor.
        trait : str
            Name of the trait to monitor for changes.
        condition
            Single-argument callable returning a boolean; called with
            *object* as its only input.
        timeout : float, optional
            Seconds to allow before timing out with an exception. The
            (somewhat arbitrary) default is 5 seconds.

        Raises
        ------
        RuntimeError
            If the timeout is reached, regardless of whether the condition
            holds at that point.
        """
        self._event_loop_helper.run_until(object, trait, condition, timeout)

    def exercise_event_loop(self):
        """
        Exercise the event loop.

        Queues one trivial task and spins the loop until it completes, so
        tasks already sitting in the queue get a chance to run. There is no
        guarantee that *every* pending task executes: use this for "nothing
        bad happens" style checks, never for tests that require pending
        tasks to be processed.
        """
        marker = _HasBool()
        self._event_loop_helper.setattr_soon(marker, "flag", True)
        self.run_until(marker, "flag", lambda obj: obj.flag)
| [
2,
357,
34,
8,
15069,
2864,
12,
1238,
2481,
2039,
28895,
11,
3457,
1539,
9533,
11,
15326,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
3788,
318,
2810,
1231,
18215,
739,
262,
2846,
286,
262,
347,
10305,
198,
2,
5964,
3017,
... | 2.999136 | 1,158 |
import matplotlib.pyplot as plt
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628
] | 3 | 11 |
# From https://github.com/snap-stanford/GraphRNN/blob/1ef475d957414d7c0bf8c778a1d44cb52dd7829b/data.py
import torch
import torchvision as tv
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
from random import shuffle
import networkx as nx
import pickle as pkl
import scipy.sparse as sp
import logging
import random
import shutil
import os
import time
from .graphrnn_utils import *
# load ENZYMES and PROTEIN and DD dataset
def Graph_load_batch(min_num_nodes = 20, max_num_nodes = 1000, name = 'ENZYMES',node_attributes = True,graph_labels=True):
    '''
    load many graphs, e.g. enzymes

    :param min_num_nodes: drop graphs with fewer nodes than this
    :param max_num_nodes: drop graphs with more nodes than this
    :param name: dataset directory name under 'dataset/' (e.g. ENZYMES, DD)
    :param node_attributes: if True, also load per-node attribute vectors
    :param graph_labels: if True, attach a class label to each graph
    :return: a list of graphs
    '''
    print('Loading graph dataset: '+str(name))
    G = nx.Graph()
    # load data
    # One global edge list (_A), plus per-node and per-graph annotation files.
    path = 'dataset/'+name+'/'
    data_adj = np.loadtxt(path+name+'_A.txt', delimiter=',').astype(int)
    if node_attributes:
        data_node_att = np.loadtxt(path+name+'_node_attributes.txt', delimiter=',')
    data_node_label = np.loadtxt(path+name+'_node_labels.txt', delimiter=',').astype(int)
    data_graph_indicator = np.loadtxt(path+name+'_graph_indicator.txt', delimiter=',').astype(int)
    if graph_labels:
        data_graph_labels = np.loadtxt(path+name+'_graph_labels.txt', delimiter=',').astype(int)
    data_tuple = list(map(tuple, data_adj))
    # print(len(data_tuple))
    # print(data_tuple[0])
    # add edges
    G.add_edges_from(data_tuple)
    # add node attributes
    # Node ids in the files are 1-based, hence the i+1 below.
    for i in range(data_node_label.shape[0]):
        if node_attributes:
            G.add_node(i+1, feature = data_node_att[i])
        G.add_node(i+1, label = data_node_label[i])
    G.remove_nodes_from(list(nx.isolates(G)))
    # print(G.number_of_nodes())
    # print(G.number_of_edges())
    # split into graphs
    graph_num = data_graph_indicator.max()
    node_list = np.arange(data_graph_indicator.shape[0])+1
    graphs = []
    max_nodes = 0
    for i in range(graph_num):
        # find the nodes for each graph
        nodes = node_list[data_graph_indicator==i+1]
        G_sub = G.subgraph(nodes)
        if graph_labels:
            G_sub.graph['label'] = data_graph_labels[i]
        # print('nodes', G_sub.number_of_nodes())
        # print('edges', G_sub.number_of_edges())
        # print('label', G_sub.graph)
        # Keep only graphs inside the requested size window.
        if G_sub.number_of_nodes()>=min_num_nodes and G_sub.number_of_nodes()<=max_num_nodes:
            graphs.append(G_sub)
            if G_sub.number_of_nodes() > max_nodes:
                max_nodes = G_sub.number_of_nodes()
        # print(G_sub.number_of_nodes(), 'i', i)
    # print('Graph dataset name: {}, total graph num: {}'.format(name, len(graphs)))
    # logging.warning('Graphs loaded, total num: {}'.format(len(graphs)))
    print('Loaded')
    return graphs
# load cora, citeseer and pubmed dataset
def Graph_load(dataset = 'cora'):
    '''
    Load a single graph dataset

    :param dataset: dataset name ('cora', 'citeseer' or 'pubmed')
    :return: (sparse adjacency matrix, sparse feature matrix, networkx graph)
    '''
    names = ['x', 'tx', 'allx', 'graph']
    objects = []
    for i in range(len(names)):
        # latin1 encoding -- presumably Python-2-era pickles; TODO confirm.
        load = pkl.load(open("dataset/ind.{}.{}".format(dataset, names[i]), 'rb'), encoding='latin1')
        # print('loaded')
        objects.append(load)
        # print(load)
    x, tx, allx, graph = tuple(objects)
    test_idx_reorder = parse_index_file("dataset/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
    features = sp.vstack((allx, tx)).tolil()
    # Put the test features back into their original (unshuffled) positions.
    features[test_idx_reorder, :] = features[test_idx_range, :]
    G = nx.from_dict_of_lists(graph)
    adj = nx.adjacency_matrix(G)
    return adj, features, G
######### code test ########
# adj, features,G = Graph_load()
# print(adj)
# print(G.number_of_nodes(), G.number_of_edges())
# _,_,G = Graph_load(dataset='citeseer')
# G = max((G.subgraph(c) for c in nx.connected_components(G)), key=len)
# G = nx.convert_node_labels_to_integers(G)
#
# count = 0
# max_node = 0
# for i in range(G.number_of_nodes()):
# G_ego = nx.ego_graph(G, i, radius=3)
# # draw_graph(G_ego,prefix='test'+str(i))
# m = G_ego.number_of_nodes()
# if m>max_node:
# max_node = m
# if m>=50:
# print(i, G_ego.number_of_nodes(), G_ego.number_of_edges())
# count += 1
# print('count', count)
# print('max_node', max_node)
def bfs_seq(G, start_id):
    '''
    get a bfs node sequence

    :param G: networkx graph
    :param start_id: node to start the traversal from
    :return: list of node ids in BFS order
    '''
    successors = dict(nx.bfs_successors(G, start_id))
    order = [start_id]
    frontier = [start_id]
    while frontier:
        next_frontier = []
        for node in frontier:
            children = successors.get(node)
            if children is not None:
                # NOTE: children must keep their BFS order -- do not permute!
                next_frontier.extend(children)
        order.extend(next_frontier)
        frontier = next_frontier
    return order
def encode_adj(adj, max_prev_node=10, is_full = False):
    '''
    Encode an adjacency matrix row by row, keeping at most max_prev_node
    previous-neighbour entries per row (reversed, right-aligned).

    :param adj: n*n matrix; rows are time steps, columns input dimensions
    :param max_prev_node: how many previous columns to keep per row
    :param is_full: if True, keep all n-1 previous columns
    :return: (n-1)*max_prev_node encoded matrix
    '''
    if is_full:
        max_prev_node = adj.shape[0] - 1
    # Keep only the strict lower triangle, then drop row 0 / last column.
    lower = np.tril(adj, k=-1)
    n = lower.shape[0]
    lower = lower[1:n, 0:n - 1]
    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        src_begin = max(0, row - max_prev_node + 1)
        src_end = row + 1
        dst_end = max_prev_node
        dst_begin = dst_end - (src_end - src_begin)
        encoded[row, dst_begin:dst_end] = lower[row, src_begin:src_end]
        encoded[row, :] = encoded[row, ::-1]  # reverse order
    return encoded
def decode_adj(adj_output):
    '''
    Recover the full symmetric adjacency matrix from its encoded form.

    note: here adj_output has shape (n-1)*m
    '''
    m = adj_output.shape[1]
    k = adj_output.shape[0]
    adj = np.zeros((k, k))
    for i in range(k):
        src_begin = max(0, i - m + 1)
        src_end = i + 1
        dst_begin = m + max(0, i - m + 1) - (i + 1)
        dst_end = m
        # Rows were stored reversed; undo that while copying back.
        adj[i, src_begin:src_end] = adj_output[i, ::-1][dst_begin:dst_end]
    full = np.zeros((k + 1, k + 1))
    full[1:k + 1, 0:k] = np.tril(adj, 0)
    return full + full.T
def encode_adj_flexible(adj):
    '''
    Encode adj into a list of variable-length rows (lossless).

    :param adj: adj matrix
    :return: list of 1-D slices, one per row of the strict lower triangle
    '''
    lower = np.tril(adj, k=-1)
    n = lower.shape[0]
    lower = lower[1:n, 0:n - 1]
    rows = []
    begin = 0
    for i in range(lower.shape[0]):
        end = i + 1
        piece = lower[i, begin:end]
        rows.append(piece)
        # NOTE: assumes every row has at least one earlier neighbour;
        # np.amin raises on an empty nonzero set.
        first_nz = np.amin(np.nonzero(piece)[0])
        begin = end - len(piece) + first_nz
    return rows
def decode_adj_flexible(adj_output):
    '''
    Inverse of encode_adj_flexible; lossless for any adj matrix.

    :param adj_output: list of per-row adjacency slices
    :return: full symmetric adjacency matrix
    '''
    k = len(adj_output)
    adj = np.zeros((k, k))
    for i, row in enumerate(adj_output):
        end = i + 1
        adj[i, end - len(row):end] = row
    full = np.zeros((k + 1, k + 1))
    full[1:k + 1, 0:k] = np.tril(adj, 0)
    return full + full.T
def encode_adj_full(adj):
    '''
    Return an (n-1)*(n-1)*2 tensor: channel 0 holds each row's adjacency
    slice in reverse order, channel 1 flags which entries are valid
    (a 0 in channel 1 acts as the stop token).

    :param adj: adj matrix
    :return: (encoded tensor, per-row sequence lengths)
    '''
    lower = np.tril(adj, k=-1)
    n = lower.shape[0]
    lower = lower[1:n, 0:n - 1]
    out = np.zeros((lower.shape[0], lower.shape[1], 2))
    lengths = np.zeros(lower.shape[0])
    for row in range(lower.shape[0]):
        nz = np.nonzero(lower[row, :])[0]
        begin = np.amin(nz)
        end = row + 1
        piece = lower[row, begin:end]
        out[row, 0:piece.shape[0], 0] = piece[::-1]  # adjacency, reverse order
        out[row, 0:piece.shape[0], 1] = 1            # validity mask
        lengths[row] = piece.shape[0]
    return out, lengths
def decode_adj_full(adj_output):
    '''
    Rebuild a symmetric adjacency matrix from encode_adj_full output.

    :param adj_output: (n-1)*(n-1)*2 tensor (channel 1 marks valid entries)
    :return: n*n adjacency matrix
    '''
    adj = np.zeros((adj_output.shape[0] + 1, adj_output.shape[1] + 1))
    for row in range(adj_output.shape[0]):
        valid = np.nonzero(adj_output[row, :, 1])[0]  # valid sequence positions
        last = np.amax(valid)
        piece = adj_output[row, 0:last + 1, 0]
        end = row + 1
        begin = end - last - 1
        adj[row + 1, begin:end] = piece[::-1]  # stored reversed; undo
    return adj + adj.T
########## use pytorch dataloader
########## use pytorch dataloader
# dataset = Graph_sequence_sampler_pytorch_nobfs(graphs)
# print(dataset[1]['x'])
# print(dataset[1]['y'])
# print(dataset[1]['len'])
########## use pytorch dataloader
########## use pytorch dataloader
# graphs = [nx.barabasi_albert_graph(20,3)]
# graphs = [nx.grid_2d_graph(4,4)]
# dataset = Graph_sequence_sampler_pytorch_nll(graphs)
############## below are codes not used in current version
############## they are based on pytorch default data loader, we should consider reimplement them in current datasets, since they are more efficient
# normal version
class Graph_sequence_sampler_truncate():
'''
the output will truncate according to the max_prev_node
'''
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='DD',node_attributes=False)
# dataset = Graph_sequence_sampler_truncate([nx.karate_club_graph()])
# max_prev_nodes = dataset.calc_max_prev_node(iter=10000)
# print(max_prev_nodes)
# x,y,len = dataset.sample()
# print('x',x)
# print('y',y)
# print(len)
# only output y_batch (which is needed in batch version of new model)
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='PROTEINS_full')
# print(max_num_nodes)
# G = nx.ladder_graph(100)
# # G1 = nx.karate_club_graph()
# # G2 = nx.connected_caveman_graph(4,5)
# G_list = [G]
# dataset = Graph_sequence_sampler_fast(graphs, batch_size=128, max_node_num=max_num_nodes, max_prev_node=30)
# for i in range(5):
# time0 = time.time()
# y = dataset.sample()
# time1 = time.time()
# print(i,'time', time1 - time0)
# output size is flexible (using list to represent), batch size is 1
# G = nx.ladder_graph(5)
# # G = nx.grid_2d_graph(20,20)
# # G = nx.ladder_graph(200)
# graphs = [G]
#
# graphs, max_num_nodes = Graph_load_batch(min_num_nodes=6, name='ENZYMES')
# sampler = Graph_sequence_sampler_flexible(graphs)
#
# y_max_all = []
# for i in range(10000):
# y_raw,adj_copy = sampler.sample()
# y_max = max(len(y_raw[i]) for i in range(len(y_raw)))
# y_max_all.append(y_max)
# # print('max bfs node',y_max)
# print('max', max(y_max_all))
# print(y[1])
# print(Variable(torch.FloatTensor(y[1])).cuda(CUDA))
########### potential use: an encoder along with the GraphRNN decoder
# preprocess the adjacency matrix
# truncate the output seqence to save representation, and allowing for infinite generation
# now having a list of graphs
# generate own synthetic dataset
# G = Graph_synthetic(10)
# return adj and features from a single graph
class GraphDataset_adj(torch.utils.data.Dataset):
"""Graph Dataset"""
# G = nx.karate_club_graph()
# dataset = GraphDataset_adj(G)
# train_loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=1)
# for data in train_loader:
# print(data)
# return adj and features from a list of graphs
class GraphDataset_adj_batch(torch.utils.data.Dataset):
"""Graph Dataset"""
# return adj and features from a list of graphs, batch size = 1, so that graphs can have various size each time
class GraphDataset_adj_batch_1(torch.utils.data.Dataset):
"""Graph Dataset"""
# get one node at a time, for a single graph
class GraphDataset(torch.utils.data.Dataset):
"""Graph Dataset"""
| [
2,
3574,
3740,
1378,
12567,
13,
785,
14,
45380,
12,
14192,
3841,
14,
37065,
49,
6144,
14,
2436,
672,
14,
16,
891,
32576,
67,
24,
3553,
37309,
67,
22,
66,
15,
19881,
23,
66,
39761,
64,
16,
67,
2598,
21101,
4309,
1860,
3695,
1959,
... | 2.288804 | 5,734 |
import contextlib
import json
from dataclasses import dataclass
from argo_workflow_tools.dsl import building_mode_context as context
from argo_workflow_tools.dsl.input_definition import InputDefinition
from argo_workflow_tools.dsl.workflow_template_collector import (
push_condition,
pop_condition,
)
@dataclass
@dataclass
| [
11748,
4732,
8019,
198,
11748,
33918,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
1822,
78,
62,
1818,
11125,
62,
31391,
13,
67,
6649,
1330,
2615,
62,
14171,
62,
22866,
355,
4732,
198,
6738,
1822,
78,
62,
1818,
1112... | 3.12037 | 108 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Tue Jul 19 11:50:08 2011 +0200
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""This example shows how to use the Iris Flower (Fisher's) Dataset to create
3-class classifier based on Neural Networks (Multi-Layer Perceptrons - MLP).
"""
from __future__ import print_function
import os
import sys
import bob.io
import bob.db
import bob.measure
import bob.learn.mlp
import bob.learn.activation
import optparse
import tempfile #for package tests
import numpy
def choose_matplotlib_iteractive_backend():
    """Little logic to get interactive plotting right on OSX and Linux"""
    import platform
    import matplotlib
    backend = 'macosx' if platform.system().lower() == 'darwin' else 'GTKAgg'
    matplotlib.use(backend)
def generate_testdata(data, target):
    """Concatenates all data in a single 2D array. Examples are encoded
    row-wise, features column-wise; targets are laid out the same way.
    """
    total = sum(len(d) for d in data)
    samples = numpy.zeros((total, 4), 'float64')
    labels = numpy.zeros((total, target[0].shape[0]), 'float64')
    offset = 0
    for cls_index, cls_data in enumerate(data):
        block = len(cls_data)
        samples[offset:offset + block, :] = numpy.vstack(cls_data)
        # Every example of this class shares the same target row.
        labels[offset:offset + block, :] = target[cls_index]
        offset += block
    return samples, labels
def create_machine(data, training_steps):
    """Creates the machine given the training data

    :param data: dict with keys 'setosa', 'versicolor', 'virginica', each
        holding that class's 4-feature examples
    :param training_steps: number of back-propagation iterations to run
    :return: list of MLP snapshots -- the initial random machine plus one
        copy per training step (useful for plotting the evolution)
    """
    mlp = bob.learn.mlp.MLP((4, 4, len(data)))  # 4 inputs, 4 hidden, one output per class
    mlp.hidden_activation = bob.learn.activation.HyperbolicTangent()
    mlp.output_activation = bob.learn.activation.HyperbolicTangent()
    mlp.randomize() #reset weights and biases to a value between -0.1 and +0.1
    BATCH = 50
    trainer = bob.learn.mlp.MLPBackPropTrainer(BATCH, bob.learn.mlp.SquareError(mlp.output_activation), mlp)
    trainer.trainBiases = True #this is the default, but just to clarify!
    trainer.momentum = 0.1 #some momenta
    targets = [ #we choose the approximate Fisher response!
        numpy.array([+1., -1., -1.]), #setosa
        numpy.array([-1., +1., -1.]), #versicolor
        numpy.array([-1., -1., +1.]), #virginica
        ]
    # Associate the data to targets, by setting the arrayset order explicitly
    datalist = [data['setosa'], data['versicolor'], data['virginica']]
    # All data, as 2 x 2D arrays containing data and targets
    AllData, AllTargets = generate_testdata(datalist, targets)
    # A helper to select and shuffle the data
    S = bob.learn.mlp.DataShuffler(datalist, targets)
    # We now iterate for several steps, look for the convergence
    retval = [bob.learn.mlp.MLP(mlp)]
    for k in range(training_steps):
        input, target = S(BATCH)
        # We use "train_" which is unchecked and faster. Use train() if you want
        # checks! See the MLPBackPropTrainer documentation for details on this
        # before choosing the wrong approach.
        trainer.train_(mlp, input, target)
        print("|RMSE| @%d:" % (k,), end=' ')
        print(numpy.linalg.norm(bob.measure.rmse(mlp(AllData), AllTargets)))
        retval.append(bob.learn.mlp.MLP(mlp))
    return retval #all machines => nice plotting!
def process_data(machine, data):
    """Pass each class's data through the trained machine.

    :return: dict mapping each class name to the machine's output for it
    """
    return {cls: machine.forward(data[cls]) for cls in data.keys()}
def plot(output):
    """Plots each of the outputs, with the classes separated by colors.

    :param output: dict mapping class name -> 2D score array (one row per
        example, one column per output neuron)
    """
    import matplotlib.pyplot as mpl
    # histo[i][k]: scores of output neuron i for examples of class k
    histo = [{}, {}, {}]
    for k in output.keys():
        for i in range(len(histo)):
            histo[i][k] = numpy.vstack(output[k])[:,i]
    order = ['setosa', 'versicolor', 'virginica']
    color = ['green', 'blue', 'red']
    FAR = []
    FRR = []
    THRES = []
    # Calculates separability
    for i, O in enumerate(order):
        positives = histo[i][O].copy() #make it C-style contiguous
        negatives = numpy.hstack([histo[i][k] for k in order if k != O])
        # note: threshold a posteriori! (don't do this at home, kids ;-)
        thres = bob.measure.eer_threshold(negatives, positives)
        far, frr = bob.measure.farfrr(negatives, positives, thres)
        FAR.append(far)
        FRR.append(frr)
        THRES.append(thres)
    # Plots the class histograms
    plot_counter = 0
    for O, C in zip(order, color):
        for k in range(len(histo)):
            plot_counter += 1
            mpl.subplot(len(histo), len(order), plot_counter)
            # NOTE(review): label is always 'Setosa' -- looks like a
            # copy/paste leftover; confirm before relying on the legend.
            mpl.hist(histo[k][O], bins=20, color=C, range=(-1,+1), label='Setosa', alpha=0.5)
            # Dashed vertical line at this output's EER threshold.
            mpl.vlines((THRES[k],), 0, 60, colors=('red',), linestyles=('--',))
            mpl.axis([-1.1,+1.1,0,60])
            mpl.grid(True)
            if k == 0: mpl.ylabel("Data %s" % O.capitalize())
            if O == order[-1]: mpl.xlabel("Output %s" % order[k].capitalize())
            if O == order[0]: mpl.title("EER = %.1f%%" % (100*(FAR[k] + FRR[k])/2))
def fig2bzarray(fig):
    """
    @brief Convert a Matplotlib figure to a 3D blitz array with RGB channels and
    return it
    @param fig a matplotlib figure
    @return a blitz 3D array of RGB values (channels, height, width)
    """
    import numpy
    # draw the renderer
    fig.canvas.draw()
    # Get the RGB buffer from the figure, re-shape it adequately.
    # numpy.fromstring is deprecated for binary input; frombuffer is the
    # supported equivalent (read-only view, so reshape instead of mutating).
    w,h = fig.canvas.get_width_height()
    buf = numpy.frombuffer(fig.canvas.tostring_rgb(), dtype=numpy.uint8)
    buf = buf.reshape(h, w, 3)
    buf = numpy.transpose(buf, (2,0,1))
    # numpy.array copies into a fresh writable array, as before.
    return numpy.array(buf)
def makemovie(machines, data, filename=None):
    """Plots each of the outputs, with the classes separated by colors.

    If *filename* is None the animation is shown interactively; otherwise
    one video frame is written per machine snapshot.

    :param machines: sequence of MLP snapshots (one frame each)
    :param data: dict of class name -> example arrays, fed to every machine
    :param filename: optional output video path
    """
    if not filename:
        choose_matplotlib_iteractive_backend()
    else:
        import matplotlib
        if not hasattr(matplotlib, 'backends'): matplotlib.use('Agg')
    import matplotlib.pyplot as mpl
    output = None
    orows = 0
    ocols = 0
    if not filename: #start interactive plot animation
        mpl.ion()
    else:
        # test output size
        # Render one reference frame to discover the canvas size.
        # int(2*(x/2)) -- presumably to keep the dimensions even for the
        # video codec; TODO confirm.
        processed = process_data(machines[0], data)
        plot(processed)
        refimage = fig2bzarray(mpl.gcf())
        orows = int(2*(refimage.shape[1]/2))
        ocols = int(2*(refimage.shape[2]/2))
        output = bob.io.VideoWriter(filename, orows, ocols, 5) #5 Hz
        print("Saving %d frames to %s" % (len(machines), filename))
    for i, k in enumerate(machines):
        # test output size
        processed = process_data(k, data)
        mpl.clf()
        plot(processed)
        mpl.suptitle("Fisher Iris DB / MLP Training step %d" % i)
        if not filename: mpl.draw() #updates ion drawing
        else:
            image = fig2bzarray(mpl.gcf())
            output.append(image[:,:orows,:ocols])
            sys.stdout.write('.')
            sys.stdout.flush()
    if filename:
        sys.stdout.write('\n')
        sys.stdout.flush()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
1058,
198,
2,
10948,
1052,
73,
418,
1279,
49078,
13,
272,
73,
418,
31,
19830,
499,
13,
354,
29,
198,
2,
30030,
5979,
678,
... | 2.613429 | 2,517 |
"""
Servicio videos.
"""
from tornado.web import RequestHandler
from tornado.gen import sleep
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from time import time
import os
from subprocess import call
rootFolder = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
# load placeholder image for video widget
with open('src/assets/placeholder.jpeg', 'br') as t:
cat_jpg = t.read()
class servicio_videos(RequestHandler):
    """
    Get data from drones and serves it to clients if they are allowed to.
    """

    # Latest JPEG chunk per drone id, shared by all handler instances.
    frames = {}
    # Per-drone (last_post_time, 0) bookkeeping used by the watchdog;
    # the second tuple element appears unused in this chunk.
    timestamps = {}

    def build_chunk(self, dt):
        """Build a video chunk, if not video, return placeholder image.

        :param dt: drone id, or None/falsy to force the placeholder image
        """
        # if drone is not broadcasting, send a placeholder chunk instead
        if not dt:
            chunk = cat_jpg
        else:
            chunk = self.frames[dt]
        # multipart/x-mixed-replace framing for browser <img> streaming
        return b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + chunk + b'\r\n'

    async def get(self, dt):
        """Show video stream to users.
        """
        self.set_header('content-type', "multipart/x-mixed-replace; boundary=frame")
        while True:
            chunk = self.build_chunk(dt if dt in self.frames else None)
            self.write(chunk)
            self.flush()
            # if drone is broadcasting, send new pic every 0.2 sec, otherwise
            # send updates every 2 seconds
            await sleep(0.2 if dt in self.frames else 2)

    async def post(self, dt):
        """
        Get info from drones and save it for streaming.
        """
        if dt not in servicio_videos.frames:
            # First contact from this drone: check if stream is ok.
            IOLoop.current().spawn_callback(self.monitoreo_servicio, dt)
            # build directory to save images to make video later
            if not os.path.exists(dt):
                os.mkdir(dt)
        # Refresh the watchdog timestamp on every post.
        servicio_videos.timestamps[dt] = time(), 0
        new_chunk = self.request.body
        # save new chunk to be consumed
        servicio_videos.frames[dt] = new_chunk
        # save chunk for later use as file to generate video
        # NOTE(review): send_chunk_to_almacenaje is not defined in this
        # chunk -- confirm it exists elsewhere on this class.
        await self.send_chunk_to_almacenaje(dt, new_chunk)
        self.write(dt)

    async def monitoreo_servicio(self, dt):
        """
        Close the drone connection if the server gets no new information
        for roughly 10 seconds.
        """
        while dt in servicio_videos.timestamps:
            # Drop the drone's state once its last post is older than 9s.
            if time() - servicio_videos.timestamps[dt][0] > 9:
                servicio_videos.timestamps.pop(dt)
                servicio_videos.frames.pop(dt)
            await sleep(3)
| [
37811,
198,
11838,
46441,
5861,
13,
198,
37811,
198,
6738,
33718,
13,
12384,
1330,
19390,
25060,
198,
6738,
33718,
13,
5235,
1330,
3993,
198,
6738,
33718,
13,
1669,
11224,
1330,
314,
3535,
11224,
198,
6738,
33718,
13,
4023,
16366,
1330,
... | 2.328533 | 1,111 |
import os
import sys
import subprocess
import importlib.util as il
spec = il.spec_from_file_location("config", snakemake.params.config)
config = il.module_from_spec(spec)
sys.modules[spec.name] = config
spec.loader.exec_module(config)
sys.path.append(snakemake.config['args']['mcc_path'])
import scripts.mccutils as mccutils
import scripts.output as output
if __name__ == "__main__":
main() | [
11748,
28686,
198,
11748,
25064,
198,
11748,
850,
14681,
198,
11748,
1330,
8019,
13,
22602,
355,
4229,
198,
16684,
796,
4229,
13,
16684,
62,
6738,
62,
7753,
62,
24886,
7203,
11250,
1600,
17522,
15883,
13,
37266,
13,
11250,
8,
198,
11250... | 2.76 | 150 |
"""The clicksend_tts component."""
| [
37811,
464,
25785,
437,
62,
83,
912,
7515,
526,
15931,
198
] | 3.181818 | 11 |
import datetime
import json
from datetime import datetime
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.template import loader
from django.utils import translation
from django.views.generic import TemplateView
from rest_framework import generics, mixins, status, viewsets
from rest_framework.authentication import (SessionAuthentication,
TokenAuthentication)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from registry.models import Activity, Authorization, Contact, Operator, Aircraft, Pilot, Test, TestValidity
from registry.serializers import (ContactSerializer, OperatorSerializer, PilotSerializer,
PrivilagedContactSerializer, PrivilagedPilotSerializer,
PrivilagedOperatorSerializer, AircraftSerializer, AircraftESNSerializer)
from django.http import JsonResponse
from rest_framework.decorators import api_view
from six.moves.urllib import request as req
from functools import wraps
class OperatorList(mixins.ListModelMixin,
generics.GenericAPIView):
"""
List all operators, or create a new operator.
"""
queryset = Operator.objects.all()
serializer_class = OperatorSerializer
class OperatorDetail(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Operator instance.
"""
queryset = Operator.objects.all()
serializer_class = OperatorSerializer
class OperatorDetailPrivilaged(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Operator instance.
"""
queryset = Operator.objects.all()
serializer_class = PrivilagedOperatorSerializer
class OperatorAircraft(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Operator instance.
"""
queryset = Aircraft.objects.all()
serializer_class = AircraftSerializer
class ContactList(mixins.ListModelMixin,
generics.GenericAPIView):
"""
List all contacts in the database
"""
queryset = Contact.objects.all()
serializer_class = ContactSerializer
class ContactDetail(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Contact instance.
"""
queryset = Contact.objects.all()
serializer_class = ContactSerializer
class ContactDetailPrivilaged(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Contact instance.
"""
queryset = Contact.objects.all()
serializer_class = PrivilagedContactSerializer
class PilotList(mixins.ListModelMixin,
generics.GenericAPIView):
"""
List all pilots in the database
"""
queryset = Pilot.objects.all()
serializer_class = PilotSerializer
class PilotDetail(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Pilot instance.
"""
queryset = Pilot.objects.all()
serializer_class = PilotSerializer
class PilotDetailPrivilaged(mixins.RetrieveModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete a Pilot instance.
"""
queryset = Pilot.objects.all()
serializer_class = PrivilagedPilotSerializer
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
2448,
3411,
21306,
798,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
6738,
... | 2.862126 | 1,204 |
# Generated by Django 2.1.7 on 2019-11-23 10:11
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
1157,
12,
1954,
838,
25,
1157,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python
import argparse
import csv
import itertools
import logging
import os
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
if __name__ == '__main__':
parser = argparse.ArgumentParser("""
The objective of this script is to transform a given row of a CSV into an HTML, that could be printed via a browser Example.
csv_to_html.py file.csv --output output.html --row 1
""")
parser.add_argument("--input", help="Input file to process")
parser.add_argument("--template", help="Template file to process")
parser.add_argument("--output", help="If not defined, output will appear on console")
parser.add_argument("--row", type=int, help="If not defined default is 1", default=1)
parser.add_argument("--debug", default="ERROR")
parser.add_argument("--no_gui", default=False, action='store_true')
args = vars(parser.parse_args())
LOGGER.setLevel(args['debug'])
if args['no_gui']:
generate_html(args["input"], args["template"], args["output"], args["row"])
else:
launch_gui()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
340,
861,
10141,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
38... | 2.982143 | 392 |
# Write your code here
import math
import numpy as np
import itertools
if __name__ == "__main__":
n = int(input())
l = []
while n > 0 :
x,y = map(int,input().split())
l.append((x,y))
n -= 1
dict = {}
for i in l :
if i in dict :
dict[i] += 1
else :
dict[i] = 1
for i,j in sorted(dict.items()) :
print(i[0],i[1],j)
"""
for key,values in dict.items() :
print(key[0],key[1],values)
"""
| [
2,
19430,
534,
2438,
994,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
340,
861,
10141,
220,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
... | 1.875912 | 274 |
#!/usr/bin/env python3
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.realpath(__file__))))
from pythonutils import gen_diffs
if __name__ == '__main__':
import argparse
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
6738,
28686,
1330,
3108,
198,
198,
17597,
13,
6978,
13,
33295,
7,
6978,
13,
15908,
3672,
7,
6978,
13,
15908,
3672,
7,
6978,
13,
5305,
6978,
7,
834,
7753,
... | 2.773333 | 75 |
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# setrans is a tool for analyzing process transitions in SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
import sepolicy
| [
2,
15069,
357,
34,
8,
2321,
2297,
10983,
198,
2,
766,
2393,
705,
34,
3185,
45761,
6,
329,
779,
290,
18215,
1321,
198,
2,
198,
2,
900,
26084,
318,
257,
2891,
329,
22712,
1429,
27188,
287,
311,
3698,
259,
2821,
2450,
198,
2,
198,
... | 3.169435 | 301 |
#! /usr/bin/env python
# generates a dictionary in json format, with the
import json
from collections import Counter
import click
@click.command()
@click.argument("input-file", type=click.Path(exists=True, dir_okay=False))
@click.argument("output-file", type=click.Path(exists=False, dir_okay=False))
@click.option("--char-dict/--no-char-dict", default=False)
if __name__ == "__main__":
get_dict()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
18616,
257,
22155,
287,
33918,
5794,
11,
351,
262,
198,
198,
11748,
33918,
198,
6738,
17268,
1330,
15034,
198,
198,
11748,
3904,
628,
198,
31,
12976,
13,
21812,
3419,
198,
... | 2.921429 | 140 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This script is used to migrate a global backend service
(EXTERNAL/INTERNAL-SELF-MANAGED) from its legacy network to
the target subnet.
"""
from vm_network_migration.handler_helper.selfLink_executor import SelfLinkExecutor
from vm_network_migration.modules.backend_service_modules.global_backend_service import \
GlobalBackendService
from vm_network_migration.utils import initializer
from vm_network_migration.handlers.compute_engine_resource_migration import ComputeEngineResourceMigration
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.686007 | 293 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from dev0s.classes.response import *
from dev0s.classes.defaults import objects
from dev0s.classes.response import response as _response_
# pip imports.
from bs4 import BeautifulSoup as bs4
import urllib
import requests as __requests__
# the requests class.
# initialized classes.
requests = Requests()
#
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
17944,
13,
198,
6738,
1614,
15,
82,
13,
37724,
13,
26209,
1330,
1635,
198,
6738,
1614,
15,
82,
13,
3772... | 3.016393 | 122 |
#!/usr/bin/env python
"""Extracts messages from CAN Bus interface and save to file"""
import os
import time
import threading
import queue
import zipfile
import can
from can import Message
# List of OBD-II parameter Ids to query
PIDS = {
"vehicle_speed": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0xD, 0x55, 0x55, 0x55, 0x55, 0x55],
),
"engine_load": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0x4, 0x55, 0x55, 0x55, 0x55, 0x55],
),
"coolant_temp": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0x5, 0x55, 0x55, 0x55, 0x55, 0x55],
),
"engine_rpm": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0xC, 0x55, 0x55, 0x55, 0x55, 0x55],
),
"throttle_position": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0x11, 0x55, 0x55, 0x55, 0x55, 0x55],
),
"ambient_air_temperature": Message(
arbitration_id=0x7DF,
extended_id=False,
data=[0x2, 0x1, 0x46, 0x55, 0x55, 0x55, 0x55, 0x55],
),
}
# Intermediate format is: "timestamp.nnnn ID DATA"
CANBUS_DATA_FORMAT = "{} {:02X} {}"
def bus_request(bus, pids, run_event):
"""Request parameters of interest on the bus every 20ms, bus_response reads the responses"""
while run_event.is_set():
for i in pids:
try:
bus.send(pids[i], timeout=0.02)
except can.interfaces.kvaser.canlib.CANLIBError:
bus.flush_tx_buffer()
print("error")
# Pause 50ms between queries
time.sleep(0.05)
def bus_response(bus, q):
"""
Continiously read the CAN Bus and queues entries of interest,
filtered to OBD-II class messages
"""
for msg in bus:
# Only log common OBD-II parameters:
if msg.arbitration_id == 0x7E8:
q.put(
CANBUS_DATA_FORMAT.format(
time.time(), msg.arbitration_id, msg.data.hex().upper()
)
)
def persist_data(q, run_event):
"""Read data from queue and persist to local file"""
total_events = 0
f = open("events.txt", "a", buffering=512)
while run_event.is_set():
try:
event = q.get(False)
f.write(f"{event}\n")
total_events += 1
if total_events % 200 == 0:
print(f"read and written {total_events} events")
except queue.Empty:
# No work to process, continue
pass
f.close()
# Common elements used by all extract methods
message_queue = queue.Queue()
# Connect to data source
# This is specific to the Kvaser Leaf Light v2 data logger,
# replace with specifics for your CAN Bus device
bus = can.Bus(interface="kvaser", channel=0, receive_own_messages=True)
# Setup threads to interact with CAN Bus, read data, and persist to data store
run_event = threading.Event()
run_event.set()
worker_canbus_request = threading.Thread(
target=bus_request, args=[bus, PIDS, run_event]
)
worker_canbus_response = threading.Thread(
target=bus_response, args=[bus, message_queue]
)
worker_persist = threading.Thread(target=persist_data, args=[message_queue, run_event])
# Start workers in reverse order, so messages aren't missed
worker_persist.start()
worker_canbus_response.start()
worker_canbus_request.start()
try:
while True:
# Until keyboard interrupt
pass
except KeyboardInterrupt:
print("Closing threads")
run_event.clear()
worker_canbus_request.join()
worker_persist.join()
print("Threads successfully closed")
os._exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
11627,
974,
82,
6218,
422,
15628,
5869,
7071,
290,
3613,
284,
2393,
37811,
198,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
4704,
278,
198,
11748,
16834,
198,
11748,
19974,
77... | 2.236244 | 1,672 |
try:
from psycopg2cffi._impl._libpq import ffi, lib as libpq
except ImportError:
from psycopg2cffi._impl._build_libpq import ffi, C_SOURCE, C_SOURCE_KWARGS
libpq = ffi.verify(C_SOURCE, **C_SOURCE_KWARGS)
PG_VERSION = libpq._PG_VERSION
| [
28311,
25,
198,
220,
220,
220,
422,
17331,
22163,
70,
17,
66,
487,
72,
13557,
23928,
13557,
8019,
79,
80,
1330,
277,
12463,
11,
9195,
355,
9195,
79,
80,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
17331,
22163,
70,
17,
... | 2.254545 | 110 |
import re
from sqlalchemy import text
from yuuhpizzakebab import db
from yuuhpizzakebab.topping.models import Topping
class Pizza():
"""The pizza class.
variables:
id - id of the pizza
name - name of the pizza
price - price of the pizza in USD
image_url - image of the pizza
toppings - list of toppings included in the pizza
"""
@staticmethod
@staticmethod
@staticmethod
| [
11748,
302,
198,
198,
6738,
44161,
282,
26599,
1330,
2420,
198,
6738,
331,
84,
7456,
79,
6457,
539,
65,
397,
1330,
20613,
198,
198,
6738,
331,
84,
7456,
79,
6457,
539,
65,
397,
13,
1462,
2105,
13,
27530,
1330,
1675,
2105,
628,
628,
... | 2.860927 | 151 |
from .handlers import (DefaultHandler, MessageHandler, RegexHandler,
StartswithHandler)
| [
6738,
764,
4993,
8116,
1330,
357,
19463,
25060,
11,
16000,
25060,
11,
797,
25636,
25060,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
7253,
2032,
342,
25060,
... | 2.488889 | 45 |
import json
import pytest
from model_mommy import mommy
from rest_framework import status
from unittest.mock import Mock
from usaspending_api.awards.models import (
TransactionNormalized,
TransactionFABS,
TransactionFPDS,
Subaward,
BrokerSubaward,
)
from usaspending_api.awards.v2.lookups.lookups import all_subaward_types, award_type_mapping
from usaspending_api.common.helpers.generic_helper import generate_test_db_connection_string
from usaspending_api.download.filestreaming import download_generation
from usaspending_api.download.lookups import JOB_STATUS
from usaspending_api.etl.award_helpers import update_awards
@pytest.fixture
@pytest.mark.skip
def test_download_transactions_v2_endpoint(client, award_data):
"""test the transaction endpoint."""
resp = client.post(
"/api/v2/bulk_download/transactions",
content_type="application/json",
data=json.dumps({"filters": {}, "columns": {}}),
)
assert resp.status_code == status.HTTP_200_OK
assert ".zip" in resp.json()["file_url"]
@pytest.mark.skip
def test_download_awards_v2_endpoint(client, award_data):
"""test the awards endpoint."""
resp = client.post(
"/api/v2/bulk_download/awards", content_type="application/json", data=json.dumps({"filters": {}, "columns": []})
)
assert resp.status_code == status.HTTP_200_OK
assert ".zip" in resp.json()["file_url"]
@pytest.mark.skip
def test_download_transactions_v2_status_endpoint(client, award_data):
"""Test the transaction status endpoint."""
dl_resp = client.post(
"/api/v2/bulk_download/transactions",
content_type="application/json",
data=json.dumps({"filters": {}, "columns": []}),
)
resp = client.get("/api/v2/download/status/?file_name={}".format(dl_resp.json()["file_name"]))
assert resp.status_code == status.HTTP_200_OK
assert resp.json()["total_rows"] == 3
assert resp.json()["total_columns"] > 100
@pytest.mark.django_db
def test_download_status_nonexistent_file_404(client):
"""Requesting status of nonexistent file should produce HTTP 404"""
resp = client.get("/api/v2/bulk_download/status/?file_name=there_is_no_such_file.zip")
assert resp.status_code == status.HTTP_404_NOT_FOUND
def test_list_agencies(client, award_data):
"""Test transaction list agencies endpoint"""
resp = client.post(
"/api/v2/bulk_download/list_agencies",
content_type="application/json",
data=json.dumps({"type": "award_agencies"}),
)
assert resp.status_code == status.HTTP_200_OK
assert resp.data == {
"agencies": {
"cfo_agencies": [],
"other_agencies": [
{"name": "Bureau of Stuff", "toptier_agency_id": 2, "toptier_code": "101"},
{"name": "Bureau of Things", "toptier_agency_id": 1, "toptier_code": "100"},
],
},
"sub_agencies": [],
}
resp = client.post(
"/api/v2/bulk_download/list_agencies",
content_type="application/json",
data=json.dumps({"type": "award_agencies", "agency": 2}),
)
assert resp.status_code == status.HTTP_200_OK
assert resp.data == {"agencies": [], "sub_agencies": [{"subtier_agency_name": "SubBureau of Stuff"}]}
| [
11748,
33918,
198,
11748,
12972,
9288,
198,
198,
6738,
2746,
62,
32542,
1820,
1330,
1995,
1820,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
198,
6738,
514,
5126,
1571,
62,
15042,
13... | 2.5233 | 1,309 |
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from utils import *
import numpy as np
import tensorflow as tf
def content_cost(a_C, a_G):
"""
Computes the content cost
Arguments:
a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G
Returns:
J_content -- scalar
"""
# Retrieve dimensions from a_G
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape a_C and a_G
a_C_unrolled = tf.transpose(tf.reshape(a_C, [m, n_H*n_W, n_C]))
a_G_unrolled = tf.transpose(tf.reshape(a_G, [m, n_H*n_W, n_C]))
# compute the cost
J_content = (1/(4*n_H*n_W*n_C))*(tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled,a_G_unrolled))))
return J_content
def gram_matrix(A):
"""
Argument:
A -- matrix of shape (n_C, n_H*n_W)
Returns:
GA -- Gram matrix of A, of shape (n_C, n_C)
"""
GA = tf.matmul(A, A, transpose_b=True)
return GA
def layer_style_cost(a_S, a_G):
"""
Arguments:
a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S
a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G
Returns:
J_style_layer -- tensor representing a scalar value
"""
# Retrieve dimensions from a_G
m, n_H, n_W, n_C = a_G.get_shape().as_list()
# Reshape the images to have them of shape (n_C, n_H*n_W)
a_S = tf.transpose(tf.reshape(a_S, [n_H*n_W, n_C]))
a_G = tf.transpose(tf.reshape(a_G, [n_H*n_W, n_C]))
# Computing gram_matrices for both images S and G
GS = gram_matrix(a_S)
GG = gram_matrix(a_G)
# Computing the loss
J_style_layer = (1/(4*(n_H*n_W)*(n_H*n_W)*n_C*n_C))*(tf.reduce_sum(tf.reduce_sum(tf.square(tf.subtract(GS,GG)),1)))
return J_style_layer
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)]
def style_cost(model, STYLE_LAYERS):
"""
Computes the overall style cost from several chosen layers
Arguments:
model -- our tensorflow model
STYLE_LAYERS -- A python list containing:
- the names of the layers we would like to extract style from
- a coefficient for each of them
Returns:
J_style -- tensor representing a scalar value
"""
# initialize the overall style cost
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
# Select the output tensor of the currently selected layer
out = model[layer_name]
# Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
a_S = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute style_cost for the current layer
J_style_layer = layer_style_cost(a_S, a_G)
# Add coeff * J_style_layer of this layer to overall style cost
J_style += coeff * J_style_layer
return J_style
def total_cost(J_content, J_style, alpha = 10, beta = 40):
"""
Computes the total cost function
Arguments:
J_content -- content cost coded above
J_style -- style cost coded above
alpha -- hyperparameter weighting the importance of the content cost
beta -- hyperparameter weighting the importance of the style cost
Returns:
J -- total cost
"""
J_total = alpha*J_content+beta*J_style
return J_total
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
# Load, reshape, and normalize the "content" image:
img = Image.open("images/cat.jpg")
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 400
img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.ANTIALIAS)
img.save('images/cat_small.jpg')
content_image = scipy.misc.imread("images/cat_small.jpg")
content_image = reshape_and_normalize_image(content_image)
# Load, reshape and normalize the "style" image:
img = Image.open("images/dali.jpg")
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 400
img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.ANTIALIAS)
img.save('images/dali_small.jpg')
style_image = scipy.misc.imread("images/dali_small.jpg")
style_image = reshape_and_normalize_image(style_image)
# Now, we initialize the "generated" image as a noisy image created from the content_image.
#By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image,
#this will help the content of the "generated" image more rapidly match the content of the "content" image.
generated_image = generate_noise_image(content_image)
imshow(generated_image[0])
# Load the VGG16 model
model = load_vgg_model("pretrained_model/imagenet-vgg-verydeep-19.mat")
# Assign the content image to be the input of the VGG model
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = style_cost(model, STYLE_LAYERS)
# Compute the total cost
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
# define optimizer
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step
train_step = optimizer.minimize(J)
model_nn(sess, generated_image)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
629,
541,
88,
13,
952,
198,
11748,
629,
541,
88,
13,
44374,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
545,
12860,
19... | 2.522576 | 2,547 |
# Generated by Django 2.0.2 on 2018-07-21 16:00
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
17,
319,
2864,
12,
2998,
12,
2481,
1467,
25,
405,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
6... | 2.818182 | 44 |
# The vowel substrings in the word codewarriors are o,e,a,io.
# The longest of these has a length of 2. Given a lowercase string that has
# alphabetic characters only (both vowels and consonants) and no spaces,
# return the length of the longest vowel substring. Vowels are any of aeiou.
# Good luck!
# If you like substring Katas, please try:
# exercise ==> https://www.codewars.com/kata/59c5f4e9d751df43cf000035/train/python
# ----------- sample tests------------
# Test.it("Basic tests")
# Test.assert_equals(solve("codewarriors"),2)
# Test.assert_equals(solve("suoidea"),3)
# Test.assert_equals(solve("ultrarevolutionariees"),3)
# Test.assert_equals(solve("strengthlessnesses"),1)
# Test.assert_equals(solve("cuboideonavicuare"),2)
# Test.assert_equals(solve("chrononhotonthuooaos"),5)
# Test.assert_equals(solve("iiihoovaeaaaoougjyaw"),8) | [
2,
383,
48617,
850,
37336,
287,
262,
1573,
14873,
413,
283,
8657,
389,
267,
11,
68,
11,
64,
11,
952,
13,
198,
2,
220,
383,
14069,
286,
777,
468,
257,
4129,
286,
362,
13,
11259,
257,
2793,
7442,
4731,
326,
468,
220,
198,
2,
220,
... | 2.746795 | 312 |
# calls the functions from linalg/umath_linalg.c.src via cffi rather than cpyext
# As opposed to the numpy version, this version leaves broadcasting to the responsibility
# of the pypy extended frompyfunc, so the _umath_linag_capi functions are always called
# with the final arguments, no broadcasting needed.
import os, sys
from ._umath_linalg_build import all_four, three
from ._umath_linalg_cffi import ffi, lib
lib.init_constants()
import numpy as np
# dtype has not been imported yet. Fake it.
from numpy.core.multiarray import dtype
nt = Dummy()
nt.int32 = dtype('int32')
nt.int8 = dtype('int8')
nt.float32 = dtype('float32')
nt.float64 = dtype('float64')
nt.complex64 = dtype('complex64')
nt.complex128 = dtype('complex128')
from numpy.core.umath import frompyfunc
__version__ = '0.1.4'
VOIDP = ffi.cast('void *', 0)
npy_clear_floatstatus = lib._npy_clear_floatstatus
npy_set_floatstatus_invalid = lib._npy_set_floatstatus_invalid
# --------------------------------------------------------------------------
# Determinants
FLOAT_slogdet = wrap_slogdet(nt.float32, nt.float32,
lib.FLOAT_slogdet)
DOUBLE_slogdet = wrap_slogdet(nt.float64, nt.float64,
lib.DOUBLE_slogdet)
CFLOAT_slogdet = wrap_slogdet(nt.complex64, nt.float32,
lib.CFLOAT_slogdet)
CDOUBLE_slogdet = wrap_slogdet(nt.complex128, nt.float64,
lib.CDOUBLE_slogdet)
FLOAT_det = wrap_det(nt.float32, lib.FLOAT_det)
DOUBLE_det = wrap_det(nt.float64, lib.DOUBLE_det)
CFLOAT_det = wrap_det(nt.complex64, lib.CFLOAT_det)
CDOUBLE_det = wrap_det(nt.complex128, lib.CDOUBLE_det)
slogdet = frompyfunc([FLOAT_slogdet, DOUBLE_slogdet, CFLOAT_slogdet, CDOUBLE_slogdet],
1, 2, dtypes=[nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64,
nt.complex64, nt.complex64, nt.float32,
nt.complex128, nt.complex128, nt.float64],
signature='(m,m)->(),()', name='slogdet', stack_inputs=False,
doc="slogdet on the last two dimensions and broadcast on the rest. \n"\
"Results in two arrays, one with sign and the other with log of the"\
" determinants. \n"\
" \"(m,m)->(),()\" \n",
)
det = frompyfunc([FLOAT_det, DOUBLE_det, CFLOAT_det, CDOUBLE_det],
1, 1, dtypes=[nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
doc="det on the last two dimensions and broadcast"\
" on the rest. \n \"(m,m)->()\" \n",
signature='(m,m)->()', name='det', stack_inputs=False,
)
# --------------------------------------------------------------------------
# Eigh family
eigh_lo_funcs = \
[wrap_1inVoutMout(getattr(lib, f + 'eighlo')) for f in all_four]
eigh_up_funcs = \
[wrap_1inVoutMout(getattr(lib, f + 'eighup')) for f in all_four]
eig_shlo_funcs = \
[wrap_1inVout(getattr(lib, f + 'eigvalshlo')) for f in all_four]
eig_shup_funcs = \
[wrap_1inVout(getattr(lib, f + 'eigvalshup')) for f in all_four]
eigh_lo = frompyfunc(eigh_lo_funcs, 1, 2, dtypes=[ \
nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64,
nt.complex64, nt.float32, nt.complex64,
nt.complex128, nt.float64, nt.complex128],
signature='(m,m)->(m),(m,m)', name='eigh_lo', stack_inputs=True,
doc = "eigh on the last two dimension and broadcast to the rest, using"\
" lower triangle \n"\
"Results in a vector of eigenvalues and a matrix with the"\
"eigenvectors. \n"\
" \"(m,m)->(m),(m,m)\" \n",
)
eigh_up = frompyfunc(eigh_up_funcs, 1, 2, dtypes=[ \
nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64,
nt.complex64, nt.float32, nt.complex64,
nt.complex128, nt.float64, nt.complex128],
signature='(m,m)->(m),(m,m)', name='eigh_up', stack_inputs=True,
doc = "eigh on the last two dimension and broadcast to the rest, using"\
" upper triangle \n"\
"Results in a vector of eigenvalues and a matrix with the"\
"eigenvectors. \n"\
" \"(m,m)->(m),(m,m)\" \n",
)
eigvalsh_lo = frompyfunc(eig_shlo_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
signature='(m,m)->(m)', name='eigvaslh_lo', stack_inputs=True,
doc = "eigh on the last two dimension and broadcast to the rest, using"\
" lower triangle \n"\
"Results in a vector of eigenvalues. \n"\
" \"(m,m)->(m)\" \n",
)
eigvalsh_up = frompyfunc(eig_shup_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
signature='(m,m)->(m)', name='eigvaslh_up', stack_inputs=True,
doc = "eigh on the last two dimension and broadcast to the rest, using"\
" upper triangle \n"\
"Results in a vector of eigenvalues. \n"\
" \"(m,m)->(m)\" \n",
)
# --------------------------------------------------------------------------
# Solve family (includes inv)
solve_funcs = \
[wrap_solve(getattr(lib, f + 'solve')) for f in all_four]
solve1_funcs = \
[wrap_solve1(getattr(lib, f + 'solve1')) for f in all_four]
inv_funcs = \
[wrap_1in1out(getattr(lib, f + 'inv')) for f in all_four]
solve = frompyfunc(solve_funcs, 2, 1, dtypes=[ \
nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64,
nt.complex64, nt.complex64, nt.complex64,
nt.complex128, nt.complex128, nt.complex128],
signature='(m,m),(m,n)->(m,n)', name='solve', stack_inputs=True,
doc = "solve the system a x = b, on the last two dimensions, broadcast"\
" to the rest. \n"\
"Results in a matrices with the solutions. \n"\
" \"(m,m),(m,n)->(m,n)\" \n",
)
solve1 = frompyfunc(solve1_funcs, 2, 1, dtypes=[ \
nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64,
nt.complex64, nt.complex64, nt.complex64,
nt.complex128, nt.complex128, nt.complex128],
signature='(m,m),(m)->(m)', name='solve1', stack_inputs=True,
doc = "solve the system a x = b, for b being a vector, broadcast in"\
" the outer dimensions. \n"\
"Results in the vectors with the solutions. \n"\
" \"(m,m),(m)->(m)\" \n",
)
inv = frompyfunc(inv_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.complex64,
nt.complex128, nt.complex128],
signature='(m,m)->(m,m)', name='inv', stack_inputs=True,
doc="compute the inverse of the last two dimensions and broadcast "\
" to the rest. \n"\
"Results in the inverse matrices. \n"\
" \"(m,m)->(m,m)\" \n",
)
# --------------------------------------------------------------------------
# Cholesky decomposition
cholesky_lo_funcs = [wrap_1in1out(getattr(lib, f + 'cholesky_lo')) for f in all_four]
cholesky_lo = frompyfunc(cholesky_lo_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
signature='(m,m)->(m,m)', name='cholesky_lo', stack_inputs=True,
doc = "cholesky decomposition of hermitian positive-definite matrices. \n"\
"Broadcast to all outer dimensions. \n"\
" \"(m,m)->(m,m)\" \n",
)
# --------------------------------------------------------------------------
# eig family
# There are problems with eig in complex single precision.
# That kernel is disabled
eig_funcs = [wrap_1inVoutMout(getattr(lib, f + 'eig')) for f in three]
eigval_funcs = [wrap_1inVout(getattr(lib, f + 'eigvals')) for f in three]
eig = frompyfunc(eig_funcs, 1, 2, dtypes=[ \
nt.float32, nt.complex64, nt.complex64,
nt.float64, nt.complex128, nt.complex128,
nt.complex128, nt.complex128, nt.complex128],
signature='(m,m)->(m),(m,m)', name='eig', stack_inputs=True,
doc = "eig on the last two dimension and broadcast to the rest. \n"\
"Results in a vector with the eigenvalues and a matrix with the"\
" eigenvectors. \n"\
" \"(m,m)->(m),(m,m)\" \n",
)
eigvals = frompyfunc(eigval_funcs, 1, 1, dtypes=[ \
nt.float32, nt.complex64,
nt.float64, nt.complex128,
nt.complex128, nt.complex128],
signature='(m,m)->(m)', name='eig', stack_inputs=True,
doc = "eig on the last two dimension and broadcast to the rest. \n"\
"Results in a vector of eigenvalues. \n"\
" \"(m,m)->(m)\" \n",
)
# --------------------------------------------------------------------------
# SVD family of singular value decomposition
svd_m_funcs = [wrap_1inVout(getattr(lib, f + 'svd_N')) for f in all_four]
svd_n_funcs = [wrap_1inVout(getattr(lib, f + 'svd_N')) for f in all_four]
svd_m_s_funcs = [wrap_1inMoutVoutMout(getattr(lib, f + 'svd_S')) for f in all_four]
svd_n_s_funcs = [wrap_1inMoutVoutMout(getattr(lib, f + 'svd_S')) for f in all_four]
svd_m_f_funcs = [wrap_1inMoutVoutMout(getattr(lib, f + 'svd_A')) for f in all_four]
svd_n_f_funcs = [wrap_1inMoutVoutMout(getattr(lib, f + 'svd_A')) for f in all_four]
svd_m = frompyfunc(svd_m_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
signature='(m,n)->(m)', name='svd_m', stack_inputs=True,
doc = "svd when n>=m. ",
)
svd_n = frompyfunc(svd_n_funcs, 1, 1, dtypes=[ \
nt.float32, nt.float32,
nt.float64, nt.float64,
nt.complex64, nt.float32,
nt.complex128, nt.float64],
signature='(m,n)->(n)', name='svd_n', stack_inputs=True,
doc = "svd when n<=m. ",
)
svd_1_3_types =[ nt.float32, nt.float32, nt.float32, nt.float32,
nt.float64, nt.float64, nt.float64, nt.float64,
nt.complex64, nt.complex64, nt.float32, nt.complex64,
nt.complex128, nt.complex128, nt.float64, nt.complex128]
svd_m_s = frompyfunc(svd_m_s_funcs, 1, 3, dtypes=svd_1_3_types,
signature='(m,n)->(m,n),(m),(m,n)', name='svd_m_s', stack_inputs=True,
doc = "svd when m>=n. ",
)
svd_n_s = frompyfunc(svd_n_s_funcs, 1, 3, dtypes=svd_1_3_types,
signature='(m,n)->(m,n),(n),(n,n)', name='svd_n_s', stack_inputs=True,
doc = "svd when m>=n. ",
)
svd_m_f = frompyfunc(svd_m_f_funcs, 1, 3, dtypes=svd_1_3_types,
signature='(m,n)->(m,m),(m),(n,n)', name='svd_m_f', stack_inputs=True,
doc = "svd when m>=n. ",
)
svd_n_f = frompyfunc(svd_n_f_funcs, 1, 3, dtypes=svd_1_3_types,
signature='(m,n)->(m,m),(n),(n,n)', name='svd_n_f', stack_inputs=True,
doc = "svd when m>=n. ",
)
| [
2,
3848,
262,
5499,
422,
300,
1292,
70,
14,
388,
776,
62,
75,
1292,
70,
13,
66,
13,
10677,
2884,
269,
487,
72,
2138,
621,
269,
9078,
2302,
198,
2,
1081,
6886,
284,
262,
299,
32152,
2196,
11,
428,
2196,
5667,
22978,
284,
262,
579... | 1.697388 | 8,116 |
import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
N = ni()
S = [ns() for _ in range(N)]
dd = []
for i in range(N):
d = defaultdict(int)
for c in S[i]:
d[c] += 1
dd.append(d)
ans = []
for i in range(26):
c = chr(ord('a') + i)
min_v = INF
for j in range(N):
d = dd[j]
min_v = min(min_v, d[c])
ans.append(c * min_v)
print("".join(ans))
| [
11748,
25064,
198,
11748,
10688,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
12429,
718,
8,
198,
19282,
259,
796,
25064,
13,
19282,
259,
198,
198,
1268,
37,
796,
12178,
10... | 2.156364 | 275 |
from .gconv_origin import ConvTemporalGraphical,ConvTemporalGraphicalBatchA,ConvTemporalGraphicalTwoA
from .graph import Graph | [
6738,
764,
70,
42946,
62,
47103,
1330,
34872,
12966,
35738,
37065,
605,
11,
3103,
85,
12966,
35738,
37065,
605,
33,
963,
32,
11,
3103,
85,
12966,
35738,
37065,
605,
7571,
32,
198,
6738,
764,
34960,
1330,
29681
] | 3.405405 | 37 |
import cv2
__all__ = ["read_bgr_image", "read_rgb_image"]
| [
11748,
269,
85,
17,
198,
198,
834,
439,
834,
796,
14631,
961,
62,
65,
2164,
62,
9060,
1600,
366,
961,
62,
81,
22296,
62,
9060,
8973,
628,
198
] | 2.178571 | 28 |
# -*- coding: utf-8 -*-
import pytest
from comport.department.models import Department, Extractor
from comport.data.models import IncidentsUpdated, UseOfForceIncidentLMPD
from testclient.JSON_test_client import JSONTestClient
@pytest.mark.usefixtures('db')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
6738,
552,
419,
13,
10378,
1823,
13,
27530,
1330,
2732,
11,
29677,
273,
198,
6738,
552,
419,
13,
7890,
13,
27530,
1330,
3457,
3231,
17354,
11,
... | 3.225 | 80 |
"""Public section, including homepage and signup."""
# Copyright 2020 Pax Syriana Foundation. Licensed under the Apache License, Version 2.0
#
#
from flask import Blueprint
from flask_login import login_required
from via_common.multiprocess.logger_manager import LoggerManager
from via_cms.util.helper import get_locale
from via_cms.util.helper import render_extensions
from via_cms.util.helper import role_required
from via_cms.viewmodel.vm_user import get_user_list
logger = LoggerManager.get_logger('dashboard_user')
bp = Blueprint('private.user.dashboard_user', __name__, url_prefix='/private/admin/dashboard',
static_folder="../static")
@bp.route("/user", methods=["GET", "POST"])
@login_required
@role_required(['admin'])
def dashboard_user(page=None):
"""
"""
page = int(page) if page else 0 # TODO !!!! page + ValueError
_page_size = 100 # TODO: selectable on html
if not page or page <= 0:
next_page = 0
prev_page = 1
current = True
else:
next_page = page - 1
prev_page = page + 1
current = False
user_list = get_user_list(_page_size, page)
return render_extensions("private/user/dashboard_user.html", lang=get_locale(), user_list=user_list,
next_page=next_page, prev_page=prev_page,
current=current)
| [
37811,
15202,
2665,
11,
1390,
34940,
290,
1051,
929,
526,
15931,
198,
2,
220,
15069,
12131,
48008,
3030,
2271,
5693,
13,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
198,
2,
628,
198,
6738,
42903,
1330,
39932,
198,
... | 2.590226 | 532 |
won_bet = True
big_win = True
if won_bet or big_win:
print("You can now stop betting!")
won_bet = False
big_win = True
if won_bet or big_win:
print("You can now stop betting!")
won_bet = True
big_win = False
if won_bet or big_win:
print("You can now stop betting!")
won_bet = False
big_win = False
if won_bet or big_win:
print("You can now stop betting!") | [
26502,
62,
11181,
796,
6407,
198,
14261,
62,
5404,
796,
6407,
220,
198,
198,
361,
220,
1839,
62,
11181,
393,
1263,
62,
5404,
25,
198,
197,
4798,
7203,
1639,
460,
783,
2245,
22908,
2474,
8,
628,
197,
26502,
62,
11181,
796,
10352,
198... | 2.604167 | 144 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
PyTorch implementation of LeNet5
'''
import torch.nn as nn
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
20519,
15884,
354,
7822,
286,
1004,
7934,
20,
198,
7061,
6,
198,
198,
11748,
28034,
13,
20471,
355... | 2.333333 | 48 |
#!/usr/bin/env python
# TODO: openbmc/openbmc#2994 remove python 2 support
try: # python 2
import gobject
except ImportError: # python 3
from gi.repository import GObject as gobject
import dbus
import dbus.service
import dbus.mainloop.glib
from obmc.dbuslib.bindings import get_dbus, DbusProperties, DbusObjectManager
DBUS_NAME = 'org.openbmc.control.Chassis'
OBJ_NAME = '/org/openbmc/control/chassis0'
CONTROL_INTF = 'org.openbmc.Control'
MACHINE_ID = '/etc/machine-id'
POWER_OFF = 0
POWER_ON = 1
BOOTED = 100
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = get_dbus()
obj = ChassisControlObject(bus, OBJ_NAME)
mainloop = gobject.MainLoop()
obj.unmask_signals()
name = dbus.service.BusName(DBUS_NAME, bus)
print("Running ChassisControlService")
mainloop.run()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
16926,
46,
25,
1280,
20475,
66,
14,
9654,
20475,
66,
2,
1959,
5824,
4781,
21015,
362,
1104,
198,
28311,
25,
220,
1303,
21015,
362,
198,
220,
220,
220,
1330,
48484,
752,
198,
... | 2.501377 | 363 |
#Programa que leia o cateto oposto e o adjacente de um triangulo retangulo
#e mostre o comprimento da hipotenusa
cat_o = float(input("comprimento do cateto oposto: "))
cat_a = float(input("comprimento do cateto adjacente: "))
c_hip = (cat_o ** 2 + cat_a ** 2) ** (1/2)
print(f" A hipotenusa mede {c_hip:.2f}")
import math
ca = float(input("Comprimento do cateto adjacente: "))
co = float(input("Comprimento do cateto oposto: "))
hipt = math.hypot(ca, co)
print(f"A hipotenusa mede {hipt:.2f}") | [
2,
15167,
64,
8358,
443,
544,
267,
3797,
27206,
1034,
455,
78,
304,
267,
15909,
68,
390,
23781,
1333,
648,
43348,
1005,
648,
43348,
198,
2,
68,
749,
260,
267,
552,
3036,
50217,
12379,
10359,
313,
268,
22064,
198,
198,
9246,
62,
78,
... | 2.38756 | 209 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2021 TU Wien.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Pytest configuration.
See https://pytest-invenio.readthedocs.io/ for documentation on which test
fixtures are available.
"""
# Monkey patch Werkzeug 2.1, needed to import flask_security.login_user
# Flask-Login uses the safe_str_cmp method which has been removed in Werkzeug
# 2.1. Flask-Login v0.6.0 (yet to be released at the time of writing) fixes the
# issue. Once we depend on Flask-Login v0.6.0 as the minimal version in
# Flask-Security-Invenio/Invenio-Accounts we can remove this patch again.
try:
# Werkzeug <2.1
from werkzeug import security
security.safe_str_cmp
except AttributeError:
# Werkzeug >=2.1
import hmac
from werkzeug import security
security.safe_str_cmp = hmac.compare_digest
import pytest
from flask_principal import Identity, Need, UserNeed
from flask_security import login_user
from flask_security.utils import hash_password
from invenio_access.permissions import ActionUsers, any_user, system_process
from invenio_access.proxies import current_access
from invenio_accounts.proxies import current_datastore
from invenio_accounts.testutils import login_user_via_session
from invenio_app.factory import create_api as _create_api
from invenio_cache import current_cache
from invenio_vocabularies.records.api import Vocabulary
from invenio_vocabularies.records.models import VocabularyType
pytest_plugins = ("celery.contrib.pytest", )
@pytest.fixture(scope="module")
def h():
"""Accept JSON headers."""
return {"accept": "application/json"}
@pytest.fixture(scope="module")
def extra_entry_points():
"""Extra entry points to load the mock_module features."""
return {
'invenio_db.models': [
'mock_module = mock_module.models',
],
'invenio_jsonschemas.schemas': [
'mock_module = mock_module.jsonschemas',
],
'invenio_search.mappings': [
'records = mock_module.mappings',
]
}
@pytest.fixture(scope='module')
def app_config(app_config):
"""Mimic an instance's configuration."""
app_config["JSONSCHEMAS_HOST"] = 'localhost'
app_config["BABEL_DEFAULT_LOCALE"] = 'en'
app_config["I18N_LANGUAGES"] = [('da', 'Danish')]
app_config['RECORDS_REFRESOLVER_CLS'] = \
"invenio_records.resolver.InvenioRefResolver"
app_config['RECORDS_REFRESOLVER_STORE'] = \
"invenio_jsonschemas.proxies.current_refresolver_store"
return app_config
@pytest.fixture(scope="module")
def create_app(instance_path, entry_points):
"""Application factory fixture."""
return _create_api
@pytest.fixture(scope="module")
def identity_simple():
"""Simple identity fixture."""
i = Identity(1)
i.provides.add(UserNeed(1))
i.provides.add(Need(method="system_role", value="any_user"))
return i
@pytest.fixture(scope='module')
def identity():
"""Simple identity to interact with the service."""
i = Identity(1)
i.provides.add(UserNeed(1))
i.provides.add(any_user)
i.provides.add(system_process)
return i
@pytest.fixture(scope='module')
def service(app):
"""Vocabularies service object."""
return app.extensions['invenio-vocabularies'].service
@pytest.fixture()
def lang_type(db):
"""Get a language vocabulary type."""
v = VocabularyType.create(id='languages', pid_type='lng')
db.session.commit()
return v
@pytest.fixture(scope='function')
def lang_data():
"""Example data."""
return {
'id': 'eng',
'title': {'en': 'English', 'da': 'Engelsk'},
'description': {
'en': 'English description',
'da': 'Engelsk beskrivelse'
},
'icon': 'file-o',
'props': {
'akey': 'avalue',
},
'tags': ['recommended'],
'type': 'languages',
}
@pytest.fixture()
def lang_data2(lang_data):
"""Example data for testing invalid cases."""
data = dict(lang_data)
data['id'] = 'new'
return data
@pytest.fixture()
def example_record(db, identity, service, example_data):
"""Example record."""
vocabulary_type_languages = VocabularyType(name="languages")
vocabulary_type_licenses = VocabularyType(name="licenses")
db.session.add(vocabulary_type_languages)
db.session.add(vocabulary_type_licenses)
db.session.commit()
record = service.create(
identity=identity,
data=dict(
**example_data,
vocabulary_type_id=vocabulary_type_languages.id
),
)
Vocabulary.index.refresh() # Refresh the index
return record
@pytest.fixture(scope='function')
def lang_data_many(lang_type, lic_type, lang_data, service, identity):
"""Create many language vocabulary."""
lang_ids = ['fr', 'tr', 'gr', 'ger', 'es']
data = dict(lang_data)
for lang_id in lang_ids:
data['id'] = lang_id
service.create(identity, data)
Vocabulary.index.refresh() # Refresh the index
return lang_ids
@pytest.fixture()
def user(app, db):
"""Create example user."""
with db.session.begin_nested():
datastore = app.extensions["security"].datastore
_user = datastore.create_user(
email="info@inveniosoftware.org",
password=hash_password("password"),
active=True
)
db.session.commit()
return _user
@pytest.fixture()
def role(app, db):
"""Create some roles."""
with db.session.begin_nested():
datastore = app.extensions["security"].datastore
role = datastore.create_role(name="admin", description="admin role")
db.session.commit()
return role
@pytest.fixture()
def client_with_credentials(db, client, user, role):
"""Log in a user to the client."""
current_datastore.add_role_to_user(user, role)
action = current_access.actions["superuser-access"]
db.session.add(ActionUsers.allow(action, user_id=user.id))
login_user(user, remember=True)
login_user_via_session(client, email=user.email)
return client
# FIXME: https://github.com/inveniosoftware/pytest-invenio/issues/30
# Without this, success of test depends on the tests order
@pytest.fixture()
def cache():
"""Empty cache."""
try:
yield current_cache
finally:
current_cache.clear()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
12131,
327,
28778,
13,
198,
2,
15069,
357,
34,
8,
33448,
309,
52,
370,
2013,
13,
198,
2,
198,
2,
554,
574,
952,
12,
53,
420,
397,
... | 2.539875 | 2,558 |
import os
import sys
libPath = os.path.abspath('casper/cp/spexpr')
sys.path.append(libPath)
import cputil
import util
print "/* THIS FILE WAS AUTOGENERATED FROM explicit.cpp.py */"
print "#include <casper/cp/spexpr/expr.h>"
print "namespace Casper {"
print "namespace Detail {"
util.printViews(False)
util.printPost2(False)
util.printRef(False)
cputil.printExprWrapper(False,"int")
util.printExprWrapper(False)
print "}"
print "}"
| [
198,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
8019,
15235,
796,
28686,
13,
6978,
13,
397,
2777,
776,
10786,
66,
32981,
14,
13155,
14,
4125,
87,
1050,
11537,
198,
17597,
13,
6978,
13,
33295,
7,
8019,
15235,
8,
198,
198,
11748,
... | 2.670732 | 164 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
parse_duration,
int_or_none,
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
302,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
26791,
1330,
357,
198,
220,
220,
220,
5004... | 2.772727 | 66 |
#!/usr/bin/env python3.8
# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
from typing import List
from assembly import FileEntry, FilePath, ImageAssemblyConfig, PackageManifest
from serialization import json_load
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
2,
15069,
33160,
383,
376,
37533,
544,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
... | 3.487805 | 123 |
from flask import Flask
from flask.templating import render_template
from models import provinsi,ambilTanggal,dunia,indonesia,harian_indo,india,india_global,turki,turki_global,us,us_global
application = Flask(__name__)
# Route untuk halaman utama
@application.route('/')
# Route untuk halaman awal
@application.route('/post')
# Route untuk halaman Indonesia
@application.route('/post1')
# Route untuk halaman Global
@application.route('/post2')
# Route untuk halaman Indonesia Harian
@application.route('/post3')
# Route untuk halaman India
@application.route('/post4')
# Route untuk halaman Turki
@application.route('/post5')
# Route untuk halaman United States
@application.route('/post6')
if __name__ == '__main__' :
application.run(debug=True)
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
13,
11498,
489,
803,
1330,
8543,
62,
28243,
198,
6738,
4981,
1330,
899,
1040,
72,
11,
4131,
346,
43909,
13528,
11,
67,
39934,
11,
521,
1952,
544,
11,
71,
3699,
62,
521,
78,
11,
521,
544,
... | 3.027888 | 251 |
from python_framework import Service, ServiceMethod
from domain import LoginConstants
from enumeration.AuthenticationStatus import AuthenticationStatus
from dto import QRCodeDto
@Service()
| [
6738,
21015,
62,
30604,
1330,
4809,
11,
4809,
17410,
198,
198,
6738,
7386,
1330,
23093,
34184,
1187,
198,
6738,
27056,
341,
13,
47649,
3299,
19580,
1330,
48191,
19580,
198,
6738,
288,
1462,
1330,
1195,
7397,
1098,
35,
1462,
198,
198,
31... | 4.340909 | 44 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0118-Pascals-Triangle.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-15
=================================================================="""
import sys
import time
from typing import List
# import collections
"""
LeetCode - 0118 - (Easy) - Pascal's Triangle
https://leetcode.com/problems/pascals-triangle/
Description & Requirement:
Given an integer numRows, return the first numRows of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example 1:
Input: numRows = 5
Output: [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]
Example 2:
Input: numRows = 1
Output: [[1]]
Constraints:
1 <= numRows <= 30
"""
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
23926,
28,
198,
31,
16775,
1058,
978,
42289,
62,
40728,
42990,
56,
259,
14,
3123,
316,
10669,
12,
3237,
12,
46344,
... | 2.887538 | 329 |
# coding: utf-8
from caty.testutil import *
from caty.jsontools.path.JsonPathLexer import *
from StringIO import StringIO
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
3797,
88,
13,
9288,
22602,
1330,
1635,
198,
6738,
3797,
88,
13,
8457,
756,
10141,
13,
6978,
13,
41,
1559,
15235,
45117,
263,
1330,
1635,
198,
6738,
10903,
9399,
1330,
10903,
9399,
628,
198
... | 2.883721 | 43 |
# Generated by Django 3.1.6 on 2021-02-24 04:06
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2999,
12,
1731,
8702,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
setup(
name="DriveLink",
version=read("VERSION").strip(),
author="Chris Dusold",
author_email="DriveLink@ChrisDusold.com",
description=("A set of memory conserving data structures."),
license=read("LICENSE"),
keywords="memory",
url="http://drivelink.rtfd.org/",
packages=['drivelink', 'drivelink.hash', 'tests'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent", # Hopefully.
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
],
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
34030,
2163,
284,
1100,
262,
20832,
11682,
2393,
13,
198,
2,
16718,
329,
262,
890,
62,
11213,
13,
220,
632,
338,
3621,
11,
780,
783,
352,
8,
356,
423,
257,
1353,
... | 2.824324 | 370 |
# example of error estimation with numerical derivatives and using
# Richardson extrapolation to reduce the leading order error.
import math
# function we are differentiating
# analytic derivative (for comparison)
# difference equation
# desired tolerance -- be careful not to go too close to machine
# epsilon, or else roundoff error will rule
tol = 1.e-7
# starting h for differencing
h = 0.125
# point where we want the derivative
x0 = 1.0
err = 100.0
# initial derivative
d0 = diff(x0, h, fun)
print "h, d, rel err, analytic rel err"
while (err > tol):
d1 = diff(x0, h/2, fun)
# relative error between the h and h/2 estimates
err = abs(d1 - d0)/d1
# combination of h and h/2 estimates to eliminate leading error
# term
d = (4*d1-d0)/3.0
print h, d, err, abs(d - fprime(x0))/fprime(x0)
d0 = d1
h = h/2
| [
2,
1672,
286,
4049,
31850,
351,
29052,
28486,
290,
1262,
198,
2,
21679,
36804,
21417,
284,
4646,
262,
3756,
1502,
4049,
13,
198,
198,
11748,
10688,
628,
198,
2,
2163,
356,
389,
1180,
26336,
628,
198,
2,
49166,
27255,
357,
1640,
7208,
... | 2.789137 | 313 |
"""
Provides unified interface for all Architect commands. Each command should live
in a separate module and define an "arguments" variable which should contain the
command's arguments and a "run" function which implements the command's behaviour.
"""
import os
import sys
import pkgutil
import argparse
from .. import __version__, orms
from ..exceptions import (
BaseArchitectError,
CommandNotProvidedError,
CommandError,
CommandArgumentError
)
commands = {}
for _, name, __ in pkgutil.iter_modules([os.path.dirname(__file__)]):
commands[name] = {'module': __import__(name, globals(), level=1)}
sys.path.append(os.getcwd())
def main():
"""
Initialization function for all commands.
"""
parser = ArgumentParser(prog='architect')
parser.add_argument('-v', '--version', action='version', version='Architect {0}'.format(__version__))
subparsers = parser.add_subparsers(title='commands', help='run one of the commands for additional functionality')
for command in commands:
commands[command]['parser'] = subparsers.add_parser(
command,
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=100))
for argument in commands[command]['module'].arguments:
for names, options in argument.items():
commands[command]['parser'].add_argument(*names, **options)
commands[command]['parser'].set_defaults(func=commands[command]['module'].run)
args = parser.parse_args()
# Starting from Python 3.3 the check for empty arguments was removed
# from argparse for some strange reason, so we have to emulate it here
try:
command = args.func.__module__.split('.')[-1]
except AttributeError:
parser.error('too few arguments')
else:
orms.init()
try:
commands[command]['parser'].result(args.func(vars(args)))
except BaseArchitectError as e:
commands[command]['parser'].error(str(e))
| [
37811,
198,
15946,
1460,
22706,
7071,
329,
477,
17340,
9729,
13,
5501,
3141,
815,
2107,
198,
259,
257,
4553,
8265,
290,
8160,
281,
366,
853,
2886,
1,
7885,
543,
815,
3994,
262,
198,
21812,
338,
7159,
290,
257,
366,
5143,
1,
2163,
54... | 2.820728 | 714 |
# Libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, Dataset
import torchvision
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
import os
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
# Parameters for model
# Hyper parameters
num_epochs = 10
num_classes = 2
batch_size = 100
learning_rate = 0.002
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
data_dir = '/mnt/d/project/AI.Health.kaggle/'
label_dir = './data/'
train_path = data_dir+'train/'
test_path = data_dir+'test/'
train_label_path = label_dir+'train_labels.csv'
test_label_path = label_dir+'sample_submission.csv'
labels = pd.read_csv(train_label_path)
sub = pd.read_csv(test_label_path)
# Splitting data into train and val
train, val = train_test_split(labels, stratify=labels.label, test_size=0.2)
print(len(train), len(val))
trans_train = transforms.Compose([transforms.ToPILImage(),
transforms.Pad(64, padding_mode='reflect'),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(20),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
trans_valid = transforms.Compose([transforms.ToPILImage(),
transforms.Pad(64, padding_mode='reflect'),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
dataset_train = MyDataset(df_data=train, data_dir=train_path, transform=trans_train)
dataset_valid = MyDataset(df_data=val, data_dir=train_path, transform=trans_valid)
loader_train = DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True, num_workers=0)
loader_valid = DataLoader(dataset=dataset_valid, batch_size=batch_size//2, shuffle=False, num_workers=0)
model = SimpleCNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adamax(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(loader_train)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(loader_train):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 2 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
correct = 0
total = 0
for images, labels in loader_valid:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
dataset_valid = MyDataset(df_data=sub, data_dir=test_path, transform=trans_valid)
loader_test = DataLoader(dataset=dataset_valid, batch_size=32, shuffle=False, num_workers=0)
model.eval()
preds = []
for batch_i, (data, target) in enumerate(loader_test):
data, target = data.cuda(), target.cuda()
output = model(data)
pr = output[:, 1].detach().cpu().numpy()
for i in pr:
preds.append(i)
print(sub.shape, len(preds))
sub['label'] = preds
sub.to_csv('s.csv', index=False)
| [
2,
46267,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
309,
22854,
27354,
292,
316,
11,
6060,
17401,... | 2.22822 | 1,871 |
from waldur_core.core import WaldurExtension
| [
6738,
266,
1940,
333,
62,
7295,
13,
7295,
1330,
24261,
333,
11627,
3004,
628
] | 3.285714 | 14 |
from src.operation.execution import Execution
from selenium.common.exceptions import (
ElementClickInterceptedException,
ElementNotInteractableException,
NoSuchElementException,
)
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
class TestExecution(Execution):
"""
Sub-class of `Execution()` for executing testing steps
"""
@property
def logic_args(self):
"""Retrieve inline args and input for running"""
return self.exe_data['exe_teststep_arg']
### Preparation Functions ###
def _locators(self):
"""Fix name for selenium and provide a path for that locator
Outputs:
------
`(locator, path)` --
"""
path = self.exe_data['exe_teststep_source']
locator = self.exe_data['exe_teststep_selector'].lower()
if locator in ['class', 'tag']:
return f'{locator} name', path, self.driver
elif locator == 'css':
return 'css selector', path, self.driver
else:
return locator, path, self.driver
def _single_element(self):
"""Use to locate single web element"""
locator, path, driver = self._locators()
try:
self.element_exist = self.driver.find_element(locator, path)
except NoSuchElementException:
print('> LOCATOR: NO SUCH ELEMENT.')
def _group_elements(self):
"""Use to locate GROUPED web elements by INDEX"""
locator, path, driver = self._locators()
value = self.exe_data['exe_teststep_data'].lower()
choice = 0 # if entry don't have choice, assume to select first element
if value in ['false', 'no']:
choice = int(1)
elif value not in ['nan', 'true', 'yes']:
choice = int(value) # specific element index
try:
self.element_exist = driver.find_elements(locator, path)[choice]
except IndexError:
# checkbox is to not click
if choice != 1:
self.cache.log_input(error_msg='web element out of reach')
except NoSuchElementException:
self.cache.log_input(error_msg='web element does not exist')
def _text_elements(self):
"""Locate GROUPED web elements by STRING"""
locator, path, driver = self._locators()
value = self.exe_data['exe_teststep_data']
# locate buttons
# driver.find_element_by_link_text
buttons = driver.find_elements(locator, path)
# element not found
if len(buttons) == 0:
self.cache.log_input(error_msg='web element does not exist')
# check button text
# stop loading when text is found
match = False
for index, button in enumerate(buttons):
### debugging ###
print(f"Button{index} Name: {button.text}")
if button.text == value:
### debugging ###
print(f"====>{button.text} == {value}")
match = True
break
# text not found
if not match:
self.cache.log_input(error_msg=f'No BUTTONS cointain {value}')
else:
self.element_exist = buttons[index]
### Logic behind performing actions. Generalized different cases with similar behaviours ###
def _button_clicker(self):
"""Handle clicking button, e.g. real/ shadow button"""
element = self.element_exist
driver = self.driver
try:
assert element is not None
element.click() # ordinary clicking
# handle shadow button
except (ElementClickInterceptedException, ElementNotInteractableException):
js_command = 'arguments[0].click();'
driver.execute_script(js_command, element)
# except ElementNotInteractableException:
# element.submit()
except AssertionError:
print("> LOCATOR: Button does not exist")
def _input_writer(self):
"""Inject `exe_teststep_data` into input fields"""
# initiate
input_value = self.exe_data['exe_teststep_data']
element = self.element_exist
driver = self.driver
# don't type anything if value is 'nan'
if input_value == 'nan':
input_value = ''
# inject value by trying different methods
try:
element.send_keys(input_value)
# input fields is likely to be a span fields rather than input box
except ElementNotInteractableException:
js_command = f'arguments[0].innerText = {input_value};'
driver.execute_script(js_command, element)
### Actions Block ###
def click_alert(self):
"""
Click something on the ALERT BOX (default=accept)
inline-log:
------
`--accept` -- accept ALERT BOX
`--reject` -- reject ALERT BOX
"""
self.driver_wait.until(EC.alert_is_present())
how = self._logic_setup(default='accept')
alert_box = self.driver.switch_to.alert_box
# default is accept
if 'accept' in how:
alert_box.accept()
elif 'reject' in how:
alert_box.reject()
sleep(1) # ?
return None
def checkout(self):
"""
Check out whether a web-element should exist or not
args:
------
`--jumpto(value={Yes, No, Key}, i={0,1,..., n-th})` -- i-th determines which the exact ptr should be. \n
`--skipby(value={Yes, No, Key}, d={1,2,...})` -- d-th determines the relative position ptr should skip. \n
If value = Yes, and checkout element exist, jumpto i-th row of the blueprint \n
If value = No, and checkout element NOT exist, jumpto i-th row of the blueprint \n
If value = Key, it will lookup the {Yes, No} in run_value and apply the above conditions.
"""
### initiate ###
print("checkout start")
locator, path, driver = self._locators()
# modify the implicit wait time for performance
driver.implicitly_wait(2)
how = self._logic_setup(default='checkout')
checkout_list = driver.find_elements(locator, path) # This should be a []
checkout_num = len(checkout_list)
### conduct Checkout ###
if checkout_num != 0:
self.element_exist = checkout_list
### run inline ###
if 'checkout' not in how:
key = how[0]
attr = self._logic_attr(key, 'all')
# determine whether the checking is from given by users
# or it is part of the runflow
gate = (
self.exe_data['exe_teststep_data']
if attr['condition'] == 'Key'
else attr['condition']
)
# possible cases that run this logic
yes_exist = gate == 'Yes' and checkout_num != 0
no_not_exist = gate == 'No' and checkout_num == 0
print(f"> gate={gate}, len={checkout_num}")
if yes_exist or no_not_exist: # value = Yes & Exist ==> jump
self.cache.cache_add(**{key: attr['input']})
print("checkout done")
def click_button(self):
"""
method = click_button
logic: {
'--click': Ordinary click,
'--submit': Form submission click
}
"""
self._single_element()
how = self._logic_setup(default="click")
try:
if 'click' in how:
self._button_clicker()
elif 'submit' in how:
self._button_clicker()
# self.driver_wait.until(
# lambda x: self.driver.execute_script("return document.readyState") == 'complete'
# )
sleep(3)
self.cache.check_proceed()
except NoSuchElementException:
pass
def click_checkbox(self):
"""Click a CHECKBOX"""
print("> click checkbox")
self._group_elements()
try:
self._button_clicker()
sleep(0.5)
# if self.element_exist:
# self._button_clicker()
# time.sleep(0.5)
except NoSuchElementException:
pass
print("> click None")
def click_radio(self):
"""Click a RADIO button: radio define as any index-based buttons"""
self._group_elements()
element = self.element_exist
if element:
self._button_clicker()
def click_named_button(self):
"""Click a BUTTON WITH NAME"""
self._text_elements()
if self.element_exist:
self._button_clicker()
def counter(self):
"""Counter on the looping"""
# trigger this counter
counter_name = f'counter_{self.exe_data["exe_teststep_index"]}'
prev = self.cache._prev
# check if this counter_name already exist in cache
new_counter = counter_name not in self.cache._prev
how = self._logic_setup(default='default')
# initialize counter value
if 'set' in how:
attr = self._logic_attr(logic_name='set', attr='all')
goto = attr['condition']
count = int(attr['input'])
elif 'default' in how:
goto = 0
count = 1
# action for new_counter
if new_counter:
count -= 1
self.cache.cache_add(**{counter_name: count}, jumpto=goto)
# action for existing counter
else:
# a completed counter -> skip
if int(prev[counter_name] == 0):
pass
# reduce count value
else:
count = int(prev[counter_name]) - 1
self.cache.cache_add(**{counter_name: count}, jumpto=goto)
def date_picker(self):
"""Pick update from DATEPICKER using date format"""
self._single_element()
element = self.element_exist
try:
locator, path, driver = self._locators()
value = self.exe_data['exe_teststep_data']
js_template = 'document.{method}("{path}").value = "{value}";'
js_command = ''
self.element_exist.send_keys(value, Keys.TAB)
sleep(1)
except ElementNotInteractableException:
pass
# print(value)
# # js get id
# if locator == 'id':
# js_command = js_template.format(method='getElementById', path=path, value=value)
# # css query
# elif locator == 'css selector':
# js_command = js_template.format(method='querySelector', path=path, value=value)
# # execute command
# driver.execute_script(js_command)
# self.element_exist.submit()
def screencap(self, file_name):
"""Take a full screenshot"""
if file_name == '':
file_name = self.exe_data['exe_teststep_data']
img_where = '/'
sleep(0.5)
img_name = f'{img_where}{self.ref_id}_{file_name}.png'
self.cache.log_input(tc=self.ref_id, output=f'IMAGE:{img_name}')
def write_input(self):
"""Input value into INPUT FIELDS"""
self._single_element()
if self.element_exist:
self._input_writer()
# def goto_frame(self):
# """Goto a iFRAME"""
# locator, path, driver = self._locator()
# time.sleep(1)
# self.wait.until(EC.frame_to_be_available_and_switch_to_it)
# driver.switch_to.default_content()
# driver.switch_to.frame(path)
def scrap(self):
"""
Scrap some info from a particular tag
"""
### initiate ###
self._single_element()
args = self.exe_data['exe_teststep_key']
naming = ''
have_name = self._logic_setup(default='nameless')
text = ''
### define variable naming ###
if 'name' in have_name:
# retreive and set variable naming
naming = self._logic_attr(logic_name='name', attr='condition')
varname = f"{'<' + naming + '>' if naming != '' else ''}"
if self.element_exist:
# define expression components
comp = args.split('%')
comp.remove('')
### Input validation ###
if len(comp) > 3:
# Incorrect syntax (too many components)
self.cache.log_input(
error_msg=f"UNKNOWN EXPRESSION: %inner_tag OR %inner_tag%attr%attr_val OR empty"
)
print(f"> ERROR: {args} is an unknown syntax")
return None
### Scrapping start ###
soup_tag = comp[0]
inner_html = self.element_exist.get_attribute('innerHTML')
soup = BeautifulSoup(inner_html, features='html.parser')
if len(comp) == 3:
# Syntax looks like (%tag%attr%attr_val): Narrowly extracting specific text
# Syntax for BS4, e.g. span, {'class': 'some-class-val'}
# Use when a single innerHTML has multiple elements inside, e.g. div ~ {#span1, #span2, ...}
soup_dict = {comp[1]: comp[2]}
text_list = [
tag.get_text() for tag in soup.find_all(soup_tag, soup_dict)
]
elif len(comp) == 1:
# Syntax looks like (%tag): Broadly extracting all text
# Use to find text inside to whole innerHTML
text_list = [tag.get_text() for tag in soup.find_all(soup_tag)]
elif len(comp) == 0:
# No inputs, Use to find text inside the whole HTML
text_list = [self.element_exist.text]
# Result formatting
text = '|'.join(text_list)
else:
# web-element does not exist
print("> Element does not exist...")
pass
output = f"TEXT{varname}:{text}"
self.cache.log_input(tc=self.ref_id, output=output)
self.cache.cache_add(text=output) # add to cache for validation if needed
def goto(self):
"webdriver goto a specific object"
### initiate ###
goto = self._logic_setup(default='url')
locator, path, driver = self._locators()
### GOTO URL ###
if 'url' in goto:
url = self.exe_data['exe_teststep_data']
assert url[0:4] == 'http', "'url' should start with 'http' or 'https'"
driver.get(url)
print(f"> {self.ref_id} travelling to: '{url}'")
### GOTO iFRAME ###
elif 'iframe' in goto:
assert path != '', "'--iframe' requires a 'path'"
sleep(1)
self.driver_wait.until(EC.frame_to_be_available_and_switch_to_it)
driver.switch_to.default_content()
driver.switch_to.frame(path)
### GOTO BACK ###
elif 'back' in goto:
print("> Returning to last page...")
driver.back()
### Unknown args ###
else:
self.cache.log_input(error_msg=f"UNKNOWN ARGS: {goto}")
def unload_file(self):
"""upload a file to UPLOAD"""
from os import getcwd
self._single_element()
file_location = (
getcwd() + '\\resources\\input\\' + self.exe_data['exe_teststep_data']
)
element = self.element_exist
element.send_keys(file_location)
def waiting(self):
"""Force webdriver to wait for n-seconds"""
### initiate ###
val = self._logic_setup(default='default')
if 'default' in val:
sec = 5
# arg --for
elif 'for' in val:
sec = int(self._logic_attr(logic_name='for', attr='condition'))
sleep(sec)
| [
6738,
12351,
13,
27184,
13,
18558,
1009,
1330,
37497,
198,
6738,
384,
11925,
1505,
13,
11321,
13,
1069,
11755,
1330,
357,
198,
220,
220,
220,
11703,
8164,
9492,
984,
276,
16922,
11,
198,
220,
220,
220,
11703,
3673,
9492,
529,
540,
169... | 2.167323 | 7,363 |
import numpy as np
import torch
class AverageMeter:
    """
    Computes and stores the average and current value.

    NOTE(review): only the docstring is present in this chunk; the
    implementation (e.g. reset/update) is presumably defined elsewhere.
    """
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
628,
198,
4871,
13475,
44,
2357,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3082,
1769,
290,
7000,
262,
2811,
290,
1459,
1988,
198,
220,
220,
220,
37227,
628,
198
] | 3.15 | 40 |
"""Push notification handling."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.util.uuid import random_uuid_hex
PUSH_CONFIRM_TIMEOUT = 10 # seconds
class PushChannel:
    """Class that represents a push channel.

    Delivers notifications over a live local connection, optionally
    tracking per-message confirmations with a timeout fallback.
    """
    def __init__(
        self,
        hass: HomeAssistant,
        webhook_id: str,
        support_confirm: bool,
        send_message: Callable[[dict], None],
        on_teardown: Callable[[], None],
    ) -> None:
        """Initialize a local push channel.

        :param support_confirm: whether the client acknowledges messages
        :param send_message: delivers one payload over the local channel
        :param on_teardown: invoked exactly once when the channel closes
        """
        self.hass = hass
        self.webhook_id = webhook_id
        self.support_confirm = support_confirm
        self._send_message = send_message
        self.on_teardown = on_teardown
        # confirm_id -> {"unsub_scheduled_push_failed", "handle_push_failed"}
        self.pending_confirms: dict[str, dict] = {}
    @callback
    def async_send_notification(self, data, fallback_send):
        """Send a push notification.

        Without confirmation support the payload is fire-and-forget.
        Otherwise it is tracked under a fresh confirm id and delivered via
        `fallback_send` when not confirmed within PUSH_CONFIRM_TIMEOUT.
        """
        if not self.support_confirm:
            self._send_message(data)
            return
        confirm_id = random_uuid_hex()
        data["hass_confirm_id"] = confirm_id
        async def handle_push_failed(_=None):
            """Handle a failed local push notification."""
            # Remove this handler from the pending dict
            # If it didn't exist we hit a race condition between call_later and another
            # push failing and tearing down the connection.
            if self.pending_confirms.pop(confirm_id, None) is None:
                return
            # Drop local channel if it's still open
            if self.on_teardown is not None:
                await self.async_teardown()
            await fallback_send(data)
        self.pending_confirms[confirm_id] = {
            # Timer handle; calling it cancels the scheduled failure path.
            "unsub_scheduled_push_failed": async_call_later(
                self.hass, PUSH_CONFIRM_TIMEOUT, handle_push_failed
            ),
            "handle_push_failed": handle_push_failed,
        }
        self._send_message(data)
    @callback
    def async_confirm_notification(self, confirm_id) -> bool:
        """Confirm a push notification.

        Returns if confirmation successful.
        """
        if confirm_id not in self.pending_confirms:
            return False
        # Popping the entry and calling the unsub cancels the failure timer.
        self.pending_confirms.pop(confirm_id)["unsub_scheduled_push_failed"]()
        return True
    async def async_teardown(self):
        """Tear down this channel."""
        # Tear down is in progress
        if self.on_teardown is None:
            return
        self.on_teardown()
        # Marks teardown as done so re-entrant calls return early above.
        self.on_teardown = None
        # Route every still-pending message through its failure path so it
        # is re-sent via the fallback transport.
        cancel_pending_local_tasks = [
            actions["handle_push_failed"]()
            for actions in self.pending_confirms.values()
        ]
        if cancel_pending_local_tasks:
            await asyncio.gather(*cancel_pending_local_tasks)
| [
37811,
49222,
14483,
9041,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
6738,
17268,
13,
39305,
1330,
4889,
540,
198,
198,
6738,
1363,
562,
10167,
13,
7295,
1330,
5995,
48902,
11,
23838,
198,
... | 2.28831 | 1,266 |
import logging
import re
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Set, Optional, Generic, TypeVar, Tuple
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from fidesops.graph.config import ROOT_COLLECTION_ADDRESS, CollectionAddress
from fidesops.graph.traversal import TraversalNode, Row
from fidesops.models.policy import Policy
from fidesops.util.collection_util import append
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
T = TypeVar("T")
class QueryConfig(Generic[T], ABC):
    """A wrapper around a resource-type dependent query object that can generate runnable queries and string representations.

    NOTE(review): relies on a ``self.node`` (TraversalNode) attribute set
    by an ``__init__`` not shown in this chunk.
    """
    class QueryToken:
        """A placeholder token for query output"""
    @property
    def fields(self) -> List[str]:
        """Fields of interest from this traversal node."""
        return [f.name for f in self.node.node.collection.fields]
    def update_fields(self, policy: Policy) -> List[str]:
        """List of update-able field names, i.e. fields whose data
        categories match (or fall under) the policy's erasure targets."""
        def exists_child(
            field_categories: List[str], policy_categories: List[str]
        ) -> bool:
            """A not very efficient check for any policy category that matches one of the field categories or a prefix of it."""
            if field_categories is None or len(field_categories) == 0:
                return False
            for policy_category in policy_categories:
                for field_category in field_categories:
                    # prefix match: "user.provided" matches "user.provided.email"
                    if field_category.startswith(policy_category):
                        return True
            return False
        policy_categories = policy.get_erasure_target_categories()
        return [
            f.name
            for f in self.node.node.collection.fields
            if exists_child(f.data_categories, policy_categories)
        ]
    @property
    def primary_keys(self) -> List[str]:
        """List of fields marked as primary keys"""
        return [f.name for f in self.node.node.collection.fields if f.primary_key]
    @property
    def query_keys(self) -> Set[str]:
        """
        All of the possible keys that we can query for possible filter values.
        These are keys that are the ends of incoming edges.
        """
        return set(map(lambda edge: edge.f2.field, self.node.incoming_edges()))
    def filter_values(self, input_data: Dict[str, List[Any]]) -> Dict[str, Any]:
        """
        Return a filtered list of key/value sets of data items that are both in
        the list of incoming edge fields, and contain data in the input data set
        """
        return {
            key: value
            for (key, value) in input_data.items()
            if key in self.query_keys
            # only non-empty lists with no None entries are usable filters
            and isinstance(value, list)
            and len(value)
            and None not in value
        }
    def query_sources(self) -> Dict[str, List[CollectionAddress]]:
        """Display the input sources for each query key"""
        data: Dict[str, List[CollectionAddress]] = {}
        for edge in self.node.incoming_edges():
            append(data, edge.f2.field, edge.f1.collection_address())
        return data
    def display_query_data(self) -> Dict[str, Any]:
        """Data to represent a display (dry-run) query. Since we don't know
        what data is available, just generate a query where the input identity
        values are assumed to be present and singular and all other values that
        may be multiple are represented by a pair [?,?]"""
        data = {}
        t = QueryConfig.QueryToken()
        for k, v in self.query_sources().items():
            if len(v) == 1 and v[0] == ROOT_COLLECTION_ADDRESS:
                data[k] = [t]
            else:
                data[k] = [
                    t,
                    QueryConfig.QueryToken(),
                ]  # intentionally want a second instance so that set does not collapse into 1 value
        return data
    @abstractmethod
    def generate_query(
        self, input_data: Dict[str, List[Any]], policy: Optional[Policy]
    ) -> Optional[T]:
        """Generate a retrieval query. If there is no data to be queried
        (for example, if the policy identifies no fields to be queried)
        returns None"""
    @abstractmethod
    def query_to_str(self, t: T, input_data: Dict[str, List[Any]]) -> str:
        """Convert query to string"""
    @abstractmethod
    def dry_run_query(self) -> Optional[str]:
        """dry run query for display"""
    @abstractmethod
    def generate_update_stmt(self, row: Row, policy: Optional[Policy]) -> Optional[T]:
        """Generate an update statement. If there is no data to be updated
        (for example, if the policy identifies no fields to be updated)
        returns None"""
class SQLQueryConfig(QueryConfig[TextClause]):
    """Query config that translates parameters into SQL statements."""

    def generate_query(
        self, input_data: Dict[str, List[Any]], policy: Optional[Policy] = None
    ) -> Optional[TextClause]:
        """Generate a retrieval query.

        Builds ``SELECT <fields> FROM <collection> WHERE k = :k OR ...``
        from the filterable input values. Returns None (and logs a
        warning) when there is no usable filter data.
        """
        filtered_data = self.filter_values(input_data)
        if filtered_data:
            clauses = []
            query_data: Dict[str, Tuple[Any, ...]] = {}
            field_list = ",".join(self.fields)
            for field_name, data in filtered_data.items():
                if len(data) == 1:
                    clauses.append(f"{field_name} = :{field_name}")
                    query_data[field_name] = (data[0],)
                elif len(data) > 1:
                    clauses.append(f"{field_name} IN :{field_name}")
                    query_data[field_name] = tuple(set(data))
                else:
                    # if there's no data, create no clause
                    pass
            if len(clauses) > 0:
                query_str = f"SELECT {field_list} FROM {self.node.node.collection.name} WHERE {' OR '.join(clauses)}"
                return text(query_str).params(query_data)
        logger.warning(
            f"There is not enough data to generate a valid query for {self.node.address}"
        )
        return None

    def generate_update_stmt(
        self, row: Row, policy: Optional[Policy] = None
    ) -> Optional[TextClause]:
        """Generate a SQL update statement in the form of a TextClause.

        Update values default to None (erasure); primary-key values are
        taken from the given row to target exactly one record.
        """
        update_fields = self.update_fields(policy)
        update_value_map = {k: None for k in update_fields}
        update_clauses = [f"{k} = :{k}" for k in update_fields]
        pk_clauses = [f"{k} = :{k}" for k in self.primary_keys]
        for pk in self.primary_keys:
            update_value_map[pk] = row[pk]
        valid = len(pk_clauses) > 0 and len(update_clauses) > 0
        if not valid:
            logger.warning(
                f"There is not enough data to generate a valid update statement for {self.node.address}"
            )
            return None
        # Bugfix: primary-key clauses must be ANDed together; joining them
        # with "," produced invalid SQL for composite primary keys.
        query_str = f"UPDATE {self.node.address.collection} SET {','.join(update_clauses)} WHERE {' AND '.join(pk_clauses)}"
        logger.info("query = %s, params = %s", query_str, update_value_map)
        return text(query_str).params(update_value_map)

    def query_to_str(self, t: TextClause, input_data: Dict[str, List[Any]]) -> str:
        """string representation of a query for logging/dry-run"""
        # NOTE(review): transform_param is defined elsewhere in this module.
        query_str = str(t)
        for k, v in input_data.items():
            if len(v) == 1:
                query_str = re.sub(f"= :{k}", f"= {transform_param(v[0])}", query_str)
            elif len(v) > 0:
                query_str = re.sub(f"IN :{k}", f"IN { tuple(set(v)) }", query_str)
        return query_str
MongoStatement = Tuple[Dict[str, Any], Dict[str, Any]]
"""A mongo query is expressed in the form of 2 dicts, the first of which represents
the query object(s) and the second of which represents fields to return.
e.g. 'collection.find({k1:v1, k2:v2},{f1:1, f2:1 ... })'. This is returned as
a tuple ({k1:v1, k2:v2},{f1:1, f2:1 ... }).
An update statement takes the form
collection.update_one({k1:v1},{k2:v2}...}, {$set: {f1:fv1, f2:fv2 ... }}, upsert=False).
This is returned as a tuple
({k1:v1},{k2:v2}...}, {f1:fv1, f2: fv2 ... }
"""
class MongoQueryConfig(QueryConfig[MongoStatement]):
    """Query config that translates parameters into mongo statements"""
    def generate_update_stmt(
        self, row: Row, policy: Optional[Policy] = None
    ) -> Optional[MongoStatement]:
        """Generate a Mongo update statement as (filter, {"$set": updates})
        components. Returns None (and logs a warning) when there are no
        primary keys or no updatable fields."""
        update_fields = self.update_fields(policy)
        # fields to erase: each updated to None
        update_clauses = {k: None for k in update_fields}
        # filter document built from the row's primary-key values
        pk_clauses = {k: row[k] for k in self.primary_keys}
        valid = len(pk_clauses) > 0 and len(update_clauses) > 0
        if not valid:
            logger.warning(
                f"There is not enough data to generate a valid update for {self.node.address}"
            )
            return None
        return pk_clauses, {"$set": update_clauses}
    def query_to_str(self, t: MongoStatement, input_data: Dict[str, List[Any]]) -> str:
        """string representation of a query for logging/dry-run"""
        query_data, field_list = t
        db_name = self.node.address.dataset
        collection_name = self.node.address.collection
        return f"db.{db_name}.{collection_name}.find({query_data}, {field_list})"
| [
11748,
18931,
198,
11748,
302,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
11,
7343,
11,
5345,
11,
32233,
11,
42044,
11,
5994,
19852,
11,
309,
29291,
198,
198,
6738,
44161,
282,
26599,... | 2.36691 | 3,971 |
import numpy as np
import random
import logging
import pandas as pd
if __name__ == '__main__':
    # Raise the root logger to INFO so module loggers emit visibly.
    logging.getLogger().setLevel(logging.INFO)
    # Timestamped "<time> - <logger> - <level> - <msg>" console format.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(__name__)
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
18931,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
18931,
13,
1136,
11187,
1362,
22446,
... | 2.58871 | 124 |
from django import forms
from expensesTracker.app.models import Profile, Expense
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
9307,
35694,
13,
1324,
13,
27530,
1330,
13118,
11,
5518,
1072,
628,
198
] | 4 | 21 |
__author__ = 'Teruaki Enoto'
__version__ = '0.02'
__date__ = '2018 November 26'
"""
HISTORY
2018-11-26 transfered from giantradiopulse library
2018-10-09 modified by T.Enoto
2018-10-01 created by T.Enoto
"""
import os
import pandas as pd
import astropy.io.fits as fits
class GootTimeIntervalTextFile(GootTimeInterval):
    """Represents a GootTimeInterval in the text format defined by Terasawa.

    :param file_path: path to a file to be opened.
    """
class GiantRadioPulseTextFile(GiantRadioPulse):
    """Represents a GiantRadioPulse in the text format defined by Terasawa.

    :param file_path: path to a file to be opened.
    """
| [
834,
9800,
834,
220,
796,
705,
15156,
84,
8182,
412,
1662,
78,
6,
198,
834,
9641,
834,
796,
705,
15,
13,
2999,
6,
198,
834,
4475,
834,
220,
220,
220,
796,
705,
7908,
3389,
2608,
6,
198,
37811,
198,
39,
42480,
198,
7908,
12,
1157... | 2.885321 | 218 |
#!/usr/local/bin/sage -python
from sage.all import *
# Modulus for the elliptic-curve group (as used below, not assumed prime).
n = 199843247
# NOTE(review): `done` is never used below.
done = 0
# For each curve y^2 = x^3 + b*x - b over Z_n, the point P = (1, 1) lies on
# the curve for every b (1 + b - b = 1).  Each inner step multiplies the
# running point by d and replaces P with the product, so the printed point
# is d! * P — hence the "!P" label in the emitted LaTeX rows.
for b in range(1, int(sqrt(199843247) + 1)):
    E = EllipticCurve(Integers(n),[0,0,0,b,-b])
    P = E(1,1)
    for d in range(2, 21):
        # try:
        Q = d*P
        print(d,"!P &= (",Q[0],"," ,Q[1],")\\\\")
        P = Q
        # except:
        #     T = P
        #     for i in range(1, int(log(d,2)) + 1):
        #         try:
        #             print(
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
82,
496,
532,
29412,
198,
6738,
35021,
13,
439,
1330,
1635,
198,
198,
77,
796,
7795,
3559,
23753,
198,
28060,
796,
657,
198,
198,
1640,
275,
287,
2837,
7,
16,
11,
493,
7,
31166,
17034,
7,
... | 1.641577 | 279 |
#1 -)SAY "HELLO, WORLD!" WITH PYTHON
# Bind the greeting to a name, then print it.
a = "Hello, World!"
print(a)
#2 -)PYTHON IF - ELSE
# !/bin/python3
import math
import os
import random
import re
import sys
# For all condition writing if or elif
#2 -)PYTHON IF - ELSE
# Read n and classify it per the "Weird / Not Weird" rules.
# (Even n below 2 intentionally prints nothing, as before.)
if __name__ == '__main__':
    n = int(input().strip())
    if n % 2 == 1:        # odd
        print("Weird")
    elif 2 <= n <= 5:     # even, in [2, 5]
        print("Not Weird")
    elif 6 <= n <= 20:    # even, in [6, 20]
        print("Weird")
    elif n > 20:          # even, above 20
        print("Not Weird")

#3 -)ARITHMETIC OPERATORS
# Read two ints and print sum, difference and product, one per line.
if __name__ == '__main__':
    a = int(input())
    b = int(input())
    print(a + b, a - b, a * b, sep="\n")

#4 -)PYTHON: DIVISION
# Integer division first, then float division.
if __name__ == '__main__':
    a = int(input())
    b = int(input())
    print(a // b, a / b, sep='\n')

#5 -)LOOPS
# Print the square of each i in [0, n).
if __name__ == '__main__':
    n = int(input())
    for i in range(n):
        print(i * i)
#6 -)WRITE A FUNCTION
# Fix: `is_leap` was called below but never defined in this file; the
# implementation follows the described rules.
def is_leap(year):
    """Return True for Gregorian leap years.

    A year is a leap year when divisible by 4, except century years,
    which must also be divisible by 400.
    """
    if year % 4 != 0:
        return False
    if year % 100 == 0:
        return year % 400 == 0
    return True

year = int(input())
print(is_leap(year))
#7 -)PRINT FUNCTION
# Read n and print 1..n with no separators (e.g. n=3 -> "123").
if __name__ == '__main__':
    n = int(input())
    numbers = [i + 1 for i in range(n)]
    for value in numbers:
        print(value, end="")
| [
198,
2,
16,
532,
8,
27358,
366,
13909,
3069,
46,
11,
29564,
2474,
13315,
350,
56,
4221,
1340,
198,
198,
64,
796,
366,
15496,
11,
2159,
2474,
198,
4798,
7,
64,
8,
198,
198,
2,
17,
532,
8,
47,
56,
4221,
1340,
16876,
532,
17852,
... | 2.510086 | 694 |
from pydantic import BaseModel
from .models import UnprocessableEntity
from .utils import get_model_key, parse_code
class Response:
    """
    response object

    :param codes: list of HTTP status code, format('HTTP_[0-9]{3}'), e.g. 'HTTP_200'
    :param code_models: dict of <HTTP status code>: <`pydantic.BaseModel`> or None

    NOTE(review): ``self.codes``/``self.code_models`` are set by an
    ``__init__`` not shown in this chunk.
    """
    def has_model(self):
        """
        :returns: boolean -- does this response has models or not
        """
        return bool(self.code_models)
    def find_model(self, code):
        """
        :param code: ``r'\\d{3}'``
        """
        return self.code_models.get(f"HTTP_{code}")
    @property
    def models(self):
        """
        :returns: dict_values -- all the models in this response
        """
        return self.code_models.values()
    def generate_spec(self):
        """
        generate the spec for responses

        :returns: JSON
        """
        responses = {}
        # codes without models get only a default description
        for code in self.codes:
            responses[parse_code(code)] = {"description": DEFAULT_CODE_DESC[code]}
        # codes with models additionally get a schema $ref and examples
        for code, model in self.code_models.items():
            model_name = get_model_key(model=model)
            responses[parse_code(code)] = {
                "description": DEFAULT_CODE_DESC[code],
                "content": {
                    "application/json": {
                        "schema": {"$ref": f"#/components/schemas/{model_name}"},
                        "examples": model.schema().get("examples", {}),
                    }
                },
            }
        return responses
# according to https://tools.ietf.org/html/rfc2616#section-10
# https://tools.ietf.org/html/rfc7231#section-6.1
# https://developer.mozilla.org/sv-SE/docs/Web/HTTP/Status
# Standard reason phrase for every supported HTTP status code.
DEFAULT_CODE_DESC = {
    # Information 1xx
    "HTTP_100": "Continue",
    "HTTP_101": "Switching Protocols",
    # Successful 2xx
    "HTTP_200": "OK",
    "HTTP_201": "Created",
    "HTTP_202": "Accepted",
    "HTTP_203": "Non-Authoritative Information",
    "HTTP_204": "No Content",
    "HTTP_205": "Reset Content",
    "HTTP_206": "Partial Content",
    # Redirection 3xx
    "HTTP_300": "Multiple Choices",
    "HTTP_301": "Moved Permanently",
    "HTTP_302": "Found",
    "HTTP_303": "See Other",
    "HTTP_304": "Not Modified",
    "HTTP_305": "Use Proxy",
    "HTTP_306": "(Unused)",
    "HTTP_307": "Temporary Redirect",
    "HTTP_308": "Permanent Redirect",
    # Client Error 4xx
    "HTTP_400": "Bad Request",
    "HTTP_401": "Unauthorized",
    "HTTP_402": "Payment Required",
    "HTTP_403": "Forbidden",
    "HTTP_404": "Not Found",
    "HTTP_405": "Method Not Allowed",
    "HTTP_406": "Not Acceptable",
    "HTTP_407": "Proxy Authentication Required",
    "HTTP_408": "Request Timeout",
    "HTTP_409": "Conflict",
    "HTTP_410": "Gone",
    "HTTP_411": "Length Required",
    "HTTP_412": "Precondition Failed",
    "HTTP_413": "Request Entity Too Large",
    "HTTP_414": "Request-URI Too Long",
    "HTTP_415": "Unsupported Media Type",
    "HTTP_416": "Requested Range Not Satisfiable",
    "HTTP_417": "Expectation Failed",
    "HTTP_418": "I'm a teapot",
    "HTTP_421": "Misdirected Request",
    "HTTP_422": "Unprocessable Entity",
    "HTTP_423": "Locked",
    "HTTP_424": "Failed Dependency",
    "HTTP_425": "Too Early",
    "HTTP_426": "Upgrade Required",
    "HTTP_428": "Precondition Required",
    "HTTP_429": "Too Many Requests",
    "HTTP_431": "Request Header Fields Too Large",
    "HTTP_451": "Unavailable For Legal Reasons",
    # Server Error 5xx
    "HTTP_500": "Internal Server Error",
    "HTTP_501": "Not Implemented",
    "HTTP_502": "Bad Gateway",
    "HTTP_503": "Service Unavailable",
    "HTTP_504": "Gateway Timeout",
    "HTTP_505": "HTTP Version Not Supported",
    # Fixed reason phrases per the IANA status-code registry:
    "HTTP_506": "Variant Also Negotiates",
    "HTTP_507": "Insufficient Storage",
    "HTTP_508": "Loop Detected",
    "HTTP_511": "Network Authentication Required",
}
| [
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
198,
6738,
764,
27530,
1330,
791,
14681,
540,
32398,
198,
6738,
764,
26791,
1330,
651,
62,
19849,
62,
2539,
11,
21136,
62,
8189,
628,
198,
4871,
18261,
25,
198,
220,
220,
220,
37227,
198,... | 2.339893 | 1,677 |
# Read a number and print its factorial.
# Fixes: the original mixed Python-2 `print n` statements with py3-style
# input(), and printed the result twice for n == 1.
n = int(input("number: "))
if n < 0:
    print("sorry..factorial does not exist")
elif n == 0:
    print("the factorial of 0 is 1")
else:
    factorial = 1
    for i in range(2, n + 1):
        factorial *= i
    print(factorial)
| [
77,
28,
600,
7,
15414,
7203,
17618,
25,
366,
4008,
198,
361,
299,
855,
16,
25,
198,
197,
4798,
299,
198,
361,
299,
27,
15,
25,
198,
197,
4798,
7203,
41599,
492,
22584,
5132,
857,
407,
2152,
4943,
198,
417,
361,
299,
855,
15,
25,... | 2.176471 | 85 |
#!/usr/bin/env python
# if the __init__.py is imported directly, import the
# folowing classes
from .ger import Booger
from .geyman import Boogeyman, OogieBoogie
# purposely do not define Bookbag here
#from .kbag import Bookbag | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
611,
262,
11593,
15003,
834,
13,
9078,
318,
17392,
3264,
11,
1330,
262,
198,
2,
5955,
7855,
6097,
198,
6738,
764,
1362,
1330,
3248,
519,
263,
198,
6738,
764,
39608,
805,
1330... | 3.271429 | 70 |
import threading
from sqlalchemy import (
Column,
String,
Integer
)
from . import SESSION, BASE
# save user ids in whitelists
# save warn msg ids
# save warn counts
# Create the tables on import if they do not already exist.
# NOTE(review): PMTABLE, MSGID and DISAPPROVE are not defined in this
# chunk; presumably SQLAlchemy models declared on BASE elsewhere — confirm.
PMTABLE.__table__.create(checkfirst=True)
MSGID.__table__.create(checkfirst=True)
DISAPPROVE.__table__.create(checkfirst=True)
# Lock used to serialise concurrent writes to these tables.
INSERTION_LOCK = threading.RLock()
# add message id of a user
# get warn message id
# add user id to whitelist
# remove user id from whitelist
# get whitelist (approved)
# warn table func
# get warn func
# del warn func
| [
11748,
4704,
278,
198,
198,
6738,
44161,
282,
26599,
1330,
357,
198,
197,
39470,
11,
220,
198,
197,
10100,
11,
220,
198,
197,
46541,
198,
8,
198,
198,
6738,
764,
1330,
311,
47621,
11,
49688,
628,
628,
198,
2,
3613,
2836,
220,
2340,
... | 2.847716 | 197 |
############ Calculator-building practice ###########
# NOTE(review): `calcul` is not defined in this chunk; presumably
# defined or imported elsewhere — confirm.
calcul()
7804,
4242,
220,
166,
111,
226,
168,
224,
108,
166,
116,
108,
31619,
100,
234,
167,
241,
97,
166,
116,
108,
23821,
245,
108,
168,
232,
113,
1303,
7804,
2235,
198,
198,
9948,
3129,
3419
] | 1.285714 | 35 |
import socket

# Minimal UDP greeter: replies "Hello, <payload>!" to every datagram.
s= socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Bind to localhost:9999 (UDP is connectionless; no listen/accept needed).
s.bind(('127.0.0.1',9999))
print('Bind UDP on 9999')
while True:
    # recvfrom returns (payload_bytes, (host, port)) for each datagram.
    data,addr=s.recvfrom(1024)
    print('Received from %s:%s' %addr)
    s.sendto(b'Hello, %s!' % data, addr)
| [
11748,
17802,
198,
198,
82,
28,
17802,
13,
44971,
7,
44971,
13,
8579,
62,
1268,
2767,
11,
44971,
13,
50,
11290,
62,
35,
10761,
2390,
8,
198,
82,
13,
21653,
7,
10786,
16799,
13,
15,
13,
15,
13,
16,
3256,
24214,
4008,
198,
4798,
1... | 2.189189 | 111 |
from argparse import Namespace
import csv
from typing import List, Optional
import numpy as np
import torch
from tqdm import tqdm
from .predict import predict
from chemprop.data import MoleculeDataset
from chemprop.data.utils import get_data, get_data_from_smiles
from chemprop.utils import load_args, load_checkpoint, load_scalers
def create_fingerprints(args: Namespace, smiles: List[str] = None) -> List[Optional[List[float]]]:
    """
    Create fingerprint vectors for the specified molecules. If smiles is provided, makes predictions on smiles. Otherwise makes predictions on args.test_data.

    :param args: Arguments.
    :param smiles: Smiles to make predictions on.
    :return: A list of fingerprint vectors (list of floats), with None for invalid SMILES.
    """
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
    print('Loading training args')
    # Scalers and training-time arguments come from the first checkpoint.
    scaler, features_scaler = load_scalers(args.checkpoint_paths[0])
    train_args = load_args(args.checkpoint_paths[0])
    # Update args with training arguments
    # (existing attributes on args take precedence over the checkpoint's)
    for key, value in vars(train_args).items():
        if not hasattr(args, key):
            setattr(args, key, value)
    print('Loading data')
    if smiles is not None:
        test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=False, args=args)
    else:
        test_data = get_data(path=args.test_path, args=args, use_compound_names=args.use_compound_names, skip_invalid_smiles=False)
    print('Validating SMILES')
    # Keep only molecules RDKit could parse; remember their positions so
    # results can be re-aligned with the full input later.
    valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
    full_data = test_data
    test_data = MoleculeDataset([test_data[i] for i in valid_indices])
    # Edge case if empty list of smiles is provided
    if len(test_data) == 0:
        return [None] * len(full_data)
    if args.use_compound_names:
        compound_names = test_data.compound_names()
    print(f'Test size = {len(test_data):,}')
    # Normalize features
    if train_args.features_scaling:
        test_data.normalize_features(features_scaler)
    print(f'Encoding smiles into a fingerprint vector from a single model')
    # Load model
    model = load_checkpoint(args.checkpoint_paths[0], current_args=args, cuda=args.cuda)
    # NOTE(review): the spectral mask is dropped so predict() returns raw
    # hidden-layer encodings rather than masked spectra — confirm intent.
    if hasattr(model,'spectral_mask'):
        delattr(model,'spectral_mask')
    model_preds = predict(
        model=model,
        args=args,
        data=test_data,
        batch_size=args.batch_size,
    )
    # Save predictions
    assert len(test_data) == len(model_preds)
    print(f'Saving predictions to {args.preds_path}')
    # Put Nones for invalid smiles
    full_preds = [None] * len(full_data)
    for i, si in enumerate(valid_indices):
        full_preds[si] = model_preds[i]
    model_preds = full_preds
    test_smiles = full_data.smiles()
    # Write predictions
    # CSV columns: [compound_names,] smiles, fp1..fp<hidden_size>
    with open(args.preds_path, 'w') as f:
        writer = csv.writer(f)
        header = []
        if args.use_compound_names:
            header.append('compound_names')
        header.extend(['smiles'])
        header.extend(['fp{}'.format(x) for x in range(1,args.hidden_size+1)])
        writer.writerow(header)
        for i in range(len(model_preds)):
            row = []
            if args.use_compound_names:
                row.append(compound_names[i])
            row.append(test_smiles[i])
            if model_preds[i] is not None:
                row.extend(model_preds[i][:args.hidden_size])
            else:
                # invalid molecule: emit empty fingerprint columns
                row.extend([''] * args.hidden_size)
            writer.writerow(row)
    return model_preds
| [
6738,
1822,
29572,
1330,
28531,
10223,
198,
11748,
269,
21370,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
764,
79... | 2.441522 | 1,445 |
import numpy as np
import tensorflow as tf
import gym
from datetime import datetime
import time
import roboschool
def mlp(x, hidden_layers, output_layer, activation=tf.tanh, last_activation=None):
    '''
    Multi-layer perceptron built on top of tensor `x`.

    `hidden_layers` lists the width of each hidden dense layer; the final
    dense layer has `output_layer` units and uses `last_activation`.
    '''
    for width in hidden_layers:
        x = tf.layers.dense(x, units=width, activation=activation)
    return tf.layers.dense(x, units=output_layer, activation=last_activation)
def softmax_entropy(logits):
    '''
    Entropy of the softmax distribution defined by `logits`.
    '''
    probs = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    return -tf.reduce_sum(probs * log_probs, axis=-1)
def clipped_surrogate_obj(new_p, old_p, adv, eps):
    '''
    PPO clipped surrogate objective (negated so it can be minimised).
    '''
    ratio = tf.exp(new_p - old_p)  # pi(a|s) / pi_old(a|s)
    clipped_ratio = tf.clip_by_value(ratio, 1 - eps, 1 + eps)
    return -tf.reduce_mean(tf.minimum(ratio * adv, clipped_ratio * adv))
def GAE(rews, v, v_last, gamma=0.99, lam=0.95):
    '''
    Generalized Advantage Estimation over a single trajectory.
    '''
    assert len(rews) == len(v)
    values = np.append(v, v_last)
    # TD residuals: r_t + gamma * V(s_{t+1}) - V(s_t)
    deltas = np.array(rews) + gamma * values[1:] - values[:-1]
    # advantages are the (gamma * lam)-discounted sums of the residuals
    return discounted_rewards(deltas, 0, gamma * lam)
def discounted_rewards(rews, last_sv, gamma):
    '''
    Discounted reward to go, bootstrapped with `last_sv`.

    Parameters:
    ----------
    rews: list of rewards
    last_sv: value of the last state (bootstrap value)
    gamma: discount value

    Returns a float32 array where rtg[i] = rews[i] + gamma * rtg[i+1],
    with rtg[-1] = rews[-1] + gamma * last_sv.
    '''
    rtg = np.zeros_like(rews, dtype=np.float32)
    # Robustness fix: the old code assigned rtg[-1] unconditionally and
    # raised IndexError on an empty reward list; a single backward pass
    # handles that case and matches the original float32 read-back exactly.
    running = last_sv
    for i in reversed(range(len(rews))):
        rtg[i] = rews[i] + gamma * running
        running = rtg[i]
    return rtg
class StructEnv(gym.Wrapper):
    '''
    Gym Wrapper to store information like number of steps and total reward of the last episode.

    NOTE(review): only the docstring is visible in this chunk; the
    overridden methods are presumably defined elsewhere.
    '''
class Buffer():
    '''
    Class to store the experience from a unique policy.

    NOTE(review): relies on self.ob/self.adv/self.rtg/self.ac and the
    self.gamma/self.lam hyperparameters being initialised by an
    __init__ not shown in this chunk.
    '''
    def store(self, temp_traj, last_sv):
        '''
        Add temp_traj values to the buffers and compute the advantage and reward to go

        Parameters:
        -----------
        temp_traj: list where each element is a list that contains: observation, reward, action, state-value
        last_sv: value of the last state (Used to Bootstrap)
        '''
        # store only if there are temporary trajectories
        if len(temp_traj) > 0:
            # column 0 = observations
            self.ob.extend(temp_traj[:,0])
            # reward-to-go bootstrapped with the final state value
            rtg = discounted_rewards(temp_traj[:,1], last_sv, self.gamma)
            # GAE advantages from rewards (col 1) and state values (col 3)
            self.adv.extend(GAE(temp_traj[:,1], temp_traj[:,3], last_sv, self.gamma, self.lam))
            self.rtg.extend(rtg)
            # column 2 = actions
            self.ac.extend(temp_traj[:,2])
def gaussian_log_likelihood(x, mean, log_std):
'''
Gaussian Log Likelihood
'''
log_p = -0.5 *((x-mean)**2 / (tf.exp(log_std)**2+1e-9) + 2*log_std + np.log(2*np.pi))
return tf.reduce_sum(log_p, axis=-1)
def PPO(env_name, hidden_sizes=[32], cr_lr=5e-3, ac_lr=5e-3, num_epochs=50, minibatch_size=5000, gamma=0.99, lam=0.95, number_envs=1, eps=0.1,
actor_iter=5, critic_iter=10, steps_per_env=100, action_type='Discrete'):
'''
Proximal Policy Optimization
Parameters:
-----------
env_name: Name of the environment
hidden_size: list of the number of hidden units for each layer
ac_lr: actor learning rate
cr_lr: critic learning rate
num_epochs: number of training epochs
minibatch_size: Batch size used to train the critic and actor
gamma: discount factor
lam: lambda parameter for computing the GAE
number_envs: number of parallel synchronous environments
# NB: it isn't distributed across multiple CPUs
eps: Clip threshold. Max deviation from previous policy.
actor_iter: Number of SGD iterations on the actor per epoch
critic_iter: NUmber of SGD iterations on the critic per epoch
steps_per_env: number of steps per environment
# NB: the total number of steps per epoch will be: steps_per_env*number_envs
action_type: class name of the action space: Either "Discrete' or "Box"
'''
tf.reset_default_graph()
# Create some environments to collect the trajectories
envs = [StructEnv(gym.make(env_name)) for _ in range(number_envs)]
obs_dim = envs[0].observation_space.shape
# Placeholders
if action_type == 'Discrete':
act_dim = envs[0].action_space.n
act_ph = tf.placeholder(shape=(None,), dtype=tf.int32, name='act')
elif action_type == 'Box':
low_action_space = envs[0].action_space.low
high_action_space = envs[0].action_space.high
act_dim = envs[0].action_space.shape[0]
act_ph = tf.placeholder(shape=(None,act_dim), dtype=tf.float32, name='act')
obs_ph = tf.placeholder(shape=(None, obs_dim[0]), dtype=tf.float32, name='obs')
ret_ph = tf.placeholder(shape=(None,), dtype=tf.float32, name='ret')
adv_ph = tf.placeholder(shape=(None,), dtype=tf.float32, name='adv')
old_p_log_ph = tf.placeholder(shape=(None,), dtype=tf.float32, name='old_p_log')
# Computational graph for the policy in case of a continuous action space
if action_type == 'Discrete':
with tf.variable_scope('actor_nn'):
p_logits = mlp(obs_ph, hidden_sizes, act_dim, tf.nn.relu, last_activation=tf.tanh)
act_smp = tf.squeeze(tf.random.multinomial(p_logits, 1))
act_onehot = tf.one_hot(act_ph, depth=act_dim)
p_log = tf.reduce_sum(act_onehot * tf.nn.log_softmax(p_logits), axis=-1)
# Computational graph for the policy in case of a continuous action space
else:
with tf.variable_scope('actor_nn'):
p_logits = mlp(obs_ph, hidden_sizes, act_dim, tf.tanh, last_activation=tf.tanh)
log_std = tf.get_variable(name='log_std', initializer=np.zeros(act_dim, dtype=np.float32)-0.5)
# Add noise to the mean values predicted
# The noise is proportional to the standard deviation
p_noisy = p_logits + tf.random_normal(tf.shape(p_logits), 0, 1) * tf.exp(log_std)
# Clip the noisy actions
act_smp = tf.clip_by_value(p_noisy, low_action_space, high_action_space)
# Compute the gaussian log likelihood
p_log = gaussian_log_likelihood(act_ph, p_logits, log_std)
# Nerual nework value function approximizer
with tf.variable_scope('critic_nn'):
s_values = mlp(obs_ph, hidden_sizes, 1, tf.tanh, last_activation=None)
s_values = tf.squeeze(s_values)
# PPO loss function
p_loss = clipped_surrogate_obj(p_log, old_p_log_ph, adv_ph, eps)
# MSE loss function
v_loss = tf.reduce_mean((ret_ph - s_values)**2)
# policy optimizer
p_opt = tf.train.AdamOptimizer(ac_lr).minimize(p_loss)
# value function optimizer
v_opt = tf.train.AdamOptimizer(cr_lr).minimize(v_loss)
# Time
now = datetime.now()
clock_time = "{}_{}.{}.{}".format(now.day, now.hour, now.minute, now.second)
print('Time:', clock_time)
# Set scalars and hisograms for TensorBoard
tf.summary.scalar('p_loss', p_loss, collections=['train'])
tf.summary.scalar('v_loss', v_loss, collections=['train'])
tf.summary.scalar('s_values_m', tf.reduce_mean(s_values), collections=['train'])
if action_type == 'Box':
tf.summary.scalar('p_std', tf.reduce_mean(tf.exp(log_std)), collections=['train'])
tf.summary.histogram('log_std',log_std, collections=['train'])
tf.summary.histogram('p_log', p_log, collections=['train'])
tf.summary.histogram('p_logits', p_logits, collections=['train'])
tf.summary.histogram('s_values', s_values, collections=['train'])
tf.summary.histogram('adv_ph',adv_ph, collections=['train'])
scalar_summary = tf.summary.merge_all('train')
# .. summary to run before the optimization steps
tf.summary.scalar('old_v_loss', v_loss, collections=['pre_train'])
tf.summary.scalar('old_p_loss', p_loss, collections=['pre_train'])
pre_scalar_summary = tf.summary.merge_all('pre_train')
hyp_str = '-bs_'+str(minibatch_size)+'-envs_'+str(number_envs)+'-ac_lr_'+str(ac_lr)+'-cr_lr'+str(cr_lr)+'-act_it_'+str(actor_iter)+'-crit_it_'+str(critic_iter)
file_writer = tf.summary.FileWriter('log_dir/'+env_name+'/PPO_'+clock_time+'_'+hyp_str, tf.get_default_graph())
# create a session
sess = tf.Session()
# initialize the variables
sess.run(tf.global_variables_initializer())
# variable to store the total number of steps
step_count = 0
print('Env batch size:',steps_per_env, ' Batch size:',steps_per_env*number_envs)
for ep in range(num_epochs):
# Create the buffer that will contain the trajectories (full or partial)
# run with the last policy
buffer = Buffer(gamma, lam)
# lists to store rewards and length of the trajectories completed
batch_rew = []
batch_len = []
# Execute in serial the environments, storing temporarily the trajectories.
for env in envs:
temp_buf = []
#iterate over a fixed number of steps
for _ in range(steps_per_env):
# run the policy
act, val = sess.run([act_smp, s_values], feed_dict={obs_ph:[env.n_obs]})
act = np.squeeze(act)
# take a step in the environment
obs2, rew, done, _ = env.step(act)
# add the new transition to the temporary buffer
temp_buf.append([env.n_obs.copy(), rew, act, np.squeeze(val)])
env.n_obs = obs2.copy()
step_count += 1
if done:
# Store the full trajectory in the buffer
# (the value of the last state is 0 as the trajectory is completed)
buffer.store(np.array(temp_buf), 0)
# Empty temporary buffer
temp_buf = []
batch_rew.append(env.get_episode_reward())
batch_len.append(env.get_episode_length())
# reset the environment
env.reset()
# Bootstrap with the estimated state value of the next state!
last_v = sess.run(s_values, feed_dict={obs_ph:[env.n_obs]})
buffer.store(np.array(temp_buf), np.squeeze(last_v))
# Gather the entire batch from the buffer
# NB: all the batch is used and deleted after the optimization. That is because PPO is on-policy
obs_batch, act_batch, adv_batch, rtg_batch = buffer.get_batch()
old_p_log = sess.run(p_log, feed_dict={obs_ph:obs_batch, act_ph:act_batch, adv_ph:adv_batch, ret_ph:rtg_batch})
old_p_batch = np.array(old_p_log)
summary = sess.run(pre_scalar_summary, feed_dict={obs_ph:obs_batch, act_ph:act_batch, adv_ph:adv_batch, ret_ph:rtg_batch, old_p_log_ph:old_p_batch})
file_writer.add_summary(summary, step_count)
lb = len(buffer)
shuffled_batch = np.arange(lb)
# Policy optimization steps
for _ in range(actor_iter):
# shuffle the batch on every iteration
np.random.shuffle(shuffled_batch)
for idx in range(0,lb, minibatch_size):
minib = shuffled_batch[idx:min(idx+minibatch_size,lb)]
sess.run(p_opt, feed_dict={obs_ph:obs_batch[minib], act_ph:act_batch[minib], adv_ph:adv_batch[minib], old_p_log_ph:old_p_batch[minib]})
# Value function optimization steps
for _ in range(critic_iter):
# shuffle the batch on every iteration
np.random.shuffle(shuffled_batch)
for idx in range(0,lb, minibatch_size):
minib = shuffled_batch[idx:min(idx+minibatch_size,lb)]
sess.run(v_opt, feed_dict={obs_ph:obs_batch[minib], ret_ph:rtg_batch[minib]})
# print some statistics and run the summary for visualizing it on TB
if len(batch_rew) > 0:
train_summary = sess.run(scalar_summary, feed_dict={obs_ph:obs_batch, act_ph:act_batch, adv_ph:adv_batch,
old_p_log_ph:old_p_batch, ret_ph:rtg_batch})
file_writer.add_summary(train_summary, step_count)
summary = tf.Summary()
summary.value.add(tag='supplementary/performance', simple_value=np.mean(batch_rew))
summary.value.add(tag='supplementary/len', simple_value=np.mean(batch_len))
file_writer.add_summary(summary, step_count)
file_writer.flush()
print('Ep:%d Rew:%.2f -- Step:%d' % (ep, np.mean(batch_rew), step_count))
# closing environments..
for env in envs:
env.close()
# Close the writer
file_writer.close()
if __name__ == '__main__':
PPO('RoboschoolWalker2d-v1', hidden_sizes=[64,64], cr_lr=5e-4, ac_lr=2e-4, gamma=0.99, lam=0.95, steps_per_env=5000,
number_envs=1, eps=0.15, actor_iter=6, critic_iter=10, action_type='Box', num_epochs=5000, minibatch_size=256)
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11550,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,
11748,
3857,
418,
1251,
198,
198,
4299,
25962,
79,
7,
87,
11,
7104,
62,
7... | 2.223952 | 5,845 |
# Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import json
import logging
import os
import re
from django.apps import apps
from django.contrib.gis.db import models
from django.db.models.signals import post_delete
from django.db.models.signals import post_save
from django.dispatch import receiver
from idgo_admin.ckan_module import CkanHandler
from idgo_admin.ckan_module import CkanUserHandler
from idgo_admin.datagis import drop_table
from idgo_admin.managers import RasterLayerManager
from idgo_admin.managers import VectorLayerManager
from idgo_admin.mra_client import MraBaseError
from idgo_admin.mra_client import MRAHandler
from idgo_admin import OWS_URL_PATTERN
from idgo_admin import CKAN_STORAGE_PATH
from idgo_admin import MAPSERV_STORAGE_PATH
from idgo_admin import DEFAULTS_VALUES
logger = logging.getLogger('idgo_admin')
# Signaux
# =======
@receiver(post_save, sender=Layer)
@receiver(post_delete, sender=Layer)
| [
2,
15069,
357,
66,
8,
2177,
12,
1238,
2481,
21227,
469,
78,
12,
25574,
5823,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
... | 3.300216 | 463 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
import tensorflow as tf
a = tf.constant(10)
b = tf.constant(20)
# 使用with可以自动关闭Session()
with tf.Session() as sess:
ret = sess.run(a + b)
print("ret:", ret)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
1772,
25,
1263,
6513,
692,
16115,
628,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
64,
796,
48700,
13,
9... | 1.97479 | 119 |
from base import find_root
from gaussian import truncated_gaussian
from chi import truncated_chi, truncated_chi2
from T import truncated_T
from F import truncated_F
| [
6738,
2779,
1330,
1064,
62,
15763,
198,
198,
6738,
31986,
31562,
1330,
40122,
515,
62,
4908,
31562,
198,
6738,
33166,
1330,
40122,
515,
62,
11072,
11,
40122,
515,
62,
11072,
17,
198,
6738,
309,
1330,
40122,
515,
62,
51,
198,
6738,
376... | 3.458333 | 48 |
import <app_pkg_name>
from <app_pkg_name>.bin import zlogger
app = <app_pkg_name>.init_app()
zlogger.startLogger("<app_pkg_name>")
if __name__ == "__main__":
'''
TODO: populate dummy data, setup zlogger
'''
zlogger.log( "run.py" f"starting {__name__}" )
app.run(debug=True) | [
11748,
1279,
1324,
62,
35339,
62,
3672,
29,
198,
6738,
1279,
1324,
62,
35339,
62,
3672,
28401,
8800,
1330,
1976,
6404,
1362,
220,
198,
198,
1324,
796,
1279,
1324,
62,
35339,
62,
3672,
28401,
15003,
62,
1324,
3419,
220,
198,
198,
89,
... | 2.237037 | 135 |
import numpy as np
class FFT_DIP:
"""
need to complete the implementation having issues with
how to keep size, in order to avoid zero padding
def bluestein(self, vector, inverse=False):
transformed_vector = np.covolve()
return vector
"""
| [
11748,
299,
32152,
355,
45941,
198,
198,
4871,
376,
9792,
62,
35,
4061,
25,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
761,
284,
1844,
262,
7822,
1719,
2428,
351,
198,
220,
220,
220,
703,
284,
1394,
2546,
11,
287,
1502,
284,
3... | 2.85567 | 97 |
import os
from setuptools import setup
AUTHORS = ('Sebastian Krieger, Nabil Freij, Alexey Brazhe, '
'Christopher Torrence, Gilbert P. Compo and contributors')
setup(
name='pycwt',
version='0.3.0a22',
author=AUTHORS,
author_email='sebastian@nublia.com',
description=('Continuous wavelet transform module for Python.'),
license='BSD',
url='https://github.com/regeirk/pycwt',
packages=['pycwt'],
install_requires=['numpy', 'scipy', 'matplotlib', 'tqdm'],
long_description=read('README.rst'),
keywords=['wavelet', 'spectral analysis', 'signal processing',
'data science', 'timeseries'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Intended Audience :: Science/Research'
],
)
| [
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
628,
198,
198,
32,
24318,
20673,
796,
19203,
50,
1765,
459,
666,
509,
5034,
1362,
11,
399,
14991,
4848,
2926,
11,
4422,
2959,
41414,
258,
11,
705,
198,
220,
220,
220,
220,
220,
... | 2.581509 | 411 |
import torch
from models.convnext import *
from utils import get_params_groups, create_lr_scheduler
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=100, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=16, help="number of batch of each epoch")
parser.add_argument("--num_classes", type=int, default=2, help="number of classes")
opt = parser.parse_args()
train(opt)
| [
11748,
28034,
198,
6738,
4981,
13,
42946,
19545,
1330,
1635,
198,
6738,
3384,
4487,
1330,
651,
62,
37266,
62,
24432,
11,
2251,
62,
14050,
62,
1416,
704,
18173,
198,
11748,
1822,
29572,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
... | 2.988439 | 173 |
# """ 104: Crie um programa que tenha a função leiaInt(), que vai funcionar de forma
# semelhante 'a função input() do Python, só que fazendo a validação para aceitar apenas um valor numérico.
# Ex: n = leiaInt('Digite um n: ')"""
#
#
# def leia_int(num):
# n = str(input(num))
# while True:
# if not n.isnumeric():
# print('Erro! digite um numero valido')
# n = str(input(num))
# else:
# break
# return n
#
#
# n = leia_int('Difite um numero:')
# print(f'Você acabou de digitar o numero {n}')
n = leia_int('Difite um numero:')
print(f'Você acabou de digitar o numero {n}')
| [
2,
37227,
14436,
25,
327,
5034,
23781,
1430,
64,
8358,
3478,
3099,
257,
1257,
16175,
28749,
443,
544,
5317,
22784,
8358,
410,
1872,
25439,
295,
283,
390,
1296,
64,
198,
2,
5026,
417,
71,
12427,
705,
64,
1257,
16175,
28749,
5128,
3419,... | 2.071197 | 309 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
| [
2,
220,
15069,
33448,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
220,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
628
] | 3.285714 | 35 |
#!/usr/bin/env python
"""
Code for loading the contents of VCF files into the vardb database.
Use one transaction for whole file, and prompts user before committing.
Adds annotation if supplied annotation is different than what is already in db.
Can use specific annotation parsers to split e.g. allele specific annotation.
"""
import re
import logging
from sqlalchemy import tuple_
from datalayer import queries
from vardb.util import vcfiterator
from vardb.deposit.importers import get_allele_from_record
from vardb.datamodel import sample, user, gene, assessment, allele
from .deposit_from_vcf import DepositFromVCF
log = logging.getLogger(__name__)
VALID_PREFILTER_KEYS = set(
[
"non_multiallelic",
"hi_frequency",
"position_not_nearby",
"no_classification",
"low_mapping_quality",
]
)
class BlockIterator(object):
"""
Generates "blocks" of potentially multiallelic records from a batch of records.
Due to the nature of decomposed + normalized variants, we need to be careful how we
process the data. Variants belonging to the same sample's genotype can be on different positions
after decomposition. Example:
Normal:
10 ATT A,AG,ATC 0/1 1/2 2/3
After decompose/normalize:
10 ATT A 0/1 1/. ./.
11 TT G ./. ./1 1/.
12 T C ./. ./. ./1
For a Genotype, we want to keep connection to both Alleles (using Genotype.secondallele_id).
For each sample, if the variant genotype is:
- '0/0' we don't have any variant, ignore it.
- '0/1' or '1/1' we can import Genotype directly using a single Allele.
- '1/.' we need to wait for next './1' entry in order to connect the second Allele.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
10669,
329,
11046,
262,
10154,
286,
569,
22495,
3696,
656,
262,
410,
446,
65,
6831,
13,
198,
198,
11041,
530,
8611,
329,
2187,
2393,
11,
290,
36454,
2836,
878,
17222,
13,
... | 2.855263 | 608 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Delete point. Synchonize with layer and file
-------------------
begin : 2018-07-11
git sha : $Format:%H$
author : M.-D. Van Damme
***************************************************************************/
"""
from qgis.gui import QgsMapTool
from qgis.core import QgsMapLayer
from PyQt4.QtGui import QCursor
from PyQt4.QtCore import Qt
import math
import sys
import util_layer
import util_io
import util_table
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
14,
17174,
17174,
4557,
8162,
198,
220,
198,
23520,
966,
13,
16065,
354,
261,
1096,
351,
7679,
290,
2393,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.563786 | 243 |
__all__=['index'] | [
834,
439,
834,
28,
17816,
9630,
20520
] | 2.428571 | 7 |
from uuid import uuid4
import time
import docker
import pytest
@pytest.fixture(scope='session')
def root_directory(request):
"""Return the project root directory so the docker API can locate the Dockerfile"""
return str(request.config.rootdir)
@pytest.fixture(scope='session')
def session_uuid() -> str:
"""Return a unique uuid string to provide label to identify the image build for this session"""
return str(uuid4())
@pytest.fixture(scope='package', autouse=True)
| [
6738,
334,
27112,
1330,
334,
27112,
19,
198,
11748,
640,
198,
11748,
36253,
198,
11748,
12972,
9288,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
29891,
11537,
198,
4299,
6808,
62,
34945,
7,
25927,
2599,
198,
220,
220... | 3.275168 | 149 |
# Generated by Django 3.1.13 on 2021-07-29 13:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1485,
319,
33448,
12,
2998,
12,
1959,
1511,
25,
2920,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
... | 3.038462 | 52 |
import unittest
import unittest.mock
from programy.storage.entities.braintree import BraintreeStore
| [
11748,
555,
715,
395,
198,
11748,
555,
715,
395,
13,
76,
735,
198,
198,
6738,
1430,
88,
13,
35350,
13,
298,
871,
13,
1671,
2913,
631,
1330,
1709,
2913,
631,
22658,
198
] | 3.15625 | 32 |
import os
import pytorch_lightning as pl
from dgl.nn.pytorch import GATConv
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from torch.nn import BCEWithLogitsLoss, ModuleList
from torch.nn.functional import elu
from torch.optim import Adam
from torchmetrics import F1
from project.datasets.PPI.ppi_dgl_data_module import PPIDGLDataModule
from project.utils.utils import collect_args, process_args, construct_wandb_pl_logger
class LitGAT(pl.LightningModule):
"""A GAT-based GNN."""
def __init__(self, node_feat: int = 5, hidden_dim: int = 5, num_classes: int = 2,
num_hidden_layers: int = 0, lr: float = 0.01, num_epochs: int = 50):
"""Initialize all the parameters for a LitGAT GNN."""
super().__init__()
self.save_hyperparameters()
# Build the network
self.node_feat = node_feat
self.hidden_dim = hidden_dim
self.num_classes = num_classes
self.num_hidden_layers = num_hidden_layers
self.lr = lr
self.num_epochs = num_epochs
# Assemble the layers of the network
self.conv_block = self.build_gnn_model()
# Declare loss function(s) for training, validation, and testing
self.bce = BCEWithLogitsLoss(reduction='mean')
self.train_f1 = F1(num_classes=self.num_classes)
self.val_f1 = F1(num_classes=self.num_classes)
self.test_f1 = F1(num_classes=self.num_classes)
def build_gnn_model(self):
"""Define the layers of a LitGAT GNN."""
# Marshal all GNN layers
# Input projection (no residual)
heads = [4, 4, 6]
conv_block = [GATConv(in_feats=self.node_feat, out_feats=self.hidden_dim, num_heads=heads[0], activation=elu)]
# Hidden layers
for l in range(1, self.num_hidden_layers):
# Due to multi-head, the in_dim = num_hidden * num_heads
conv_block.append(
GATConv(self.hidden_dim * heads[l - 1], self.hidden_dim, heads[l], residual=True, activation=elu))
# Output projection
conv_block.append(GATConv(
self.hidden_dim * heads[-2], self.num_classes, heads[-1], residual=True))
return ModuleList(conv_block)
# ---------------------
# Training
# ---------------------
def gnn_forward(self, graph, feats):
"""Make a forward pass through the entire network."""
for i in range(self.num_hidden_layers):
feats = self.conv_block[i](graph, feats).flatten(1)
# Output projection
logits = self.conv_block[-1](graph, feats).mean(1)
return logits
def forward(self, graph, feats):
"""Make a forward pass through the entire network."""
# Forward propagate with both GNNs
logits = self.gnn_forward(graph, feats)
# Return network prediction
return logits.squeeze()
def training_step(self, batch, batch_idx):
"""Lightning calls this inside the training loop."""
graphs, labels = batch
# Make a forward pass through the network for an entire batch of training graph pairs
logits = self(graphs, graphs.ndata['feat'])
# Compute prediction
preds = logits
# Calculate the batch loss
bce = self.bce(logits, labels) # Calculate BCE of a single batch
# Log training step metric(s)
self.log('train_bce', bce, sync_dist=True)
self.log('train_f1', self.train_f1(preds, labels), sync_dist=True)
return {'loss': bce}
def validation_step(self, batch, batch_idx):
"""Lightning calls this inside the validation loop."""
graphs, labels = batch
# Make a forward pass through the network for an entire batch of validation graph pairs
logits = self(graphs, graphs.ndata['feat'])
# Compute prediction
preds = logits
# Calculate the batch loss
bce = self.bce(logits, labels) # Calculate BCE of a single batch
# Log validation step metric(s)
self.log('val_bce', bce, sync_dist=True)
self.log('val_f1', self.val_f1(preds, labels), sync_dist=True)
return {'loss': bce}
def test_step(self, batch, batch_idx):
"""Lightning calls this inside the testing loop."""
graphs, labels = batch
# Make a forward pass through the network for an entire batch of testing graph pairs
logits = self(graphs, graphs.ndata['feat'])
# Compute prediction
preds = logits
# Calculate the batch loss
bce = self.bce(logits, labels) # Calculate BCE of a single batch
# Log testing step metric(s)
self.log('test_bce', bce, sync_dist=True)
self.log('test_f1', self.test_f1(preds, labels), sync_dist=True)
return {'loss': bce}
# ---------------------
# Training Setup
# ---------------------
def configure_optimizers(self):
"""Called to configure the trainer's optimizer(s)."""
optimizer = Adam(self.parameters(), lr=self.lr)
return optimizer
if __name__ == '__main__':
cli_main()
| [
11748,
28686,
198,
198,
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
6738,
288,
4743,
13,
20471,
13,
9078,
13165,
354,
1330,
402,
1404,
3103,
85,
198,
6738,
12972,
13165,
354,
62,
2971,
768,
13,
13345,
10146,
1330,
9104,
97... | 2.44175 | 2,103 |
#MIT License
#
#Copyright (c) 2017 Tom van Ommeren
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from collections import OrderedDict
from .util import * | [
2,
36393,
13789,
198,
2,
198,
2,
15269,
357,
66,
8,
2177,
4186,
5719,
440,
3020,
14226,
198,
2,
198,
2,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
1659,
428,
3788,
290,
3917,
... | 3.711974 | 309 |
import json
import string
import time
import pandas as pd
from datetime import timedelta, datetime
from random import randint, uniform, choice, randrange, choices
def random_time_range(start, end, file_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, file_format))
etime = time.mktime(time.strptime(end, file_format))
ptime = stime + prop * (etime - stime)
return time.strftime(file_format, time.localtime(ptime))
def random_date(start, end):
"""
This function will return a random datetime between two datetime
objects.
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return start + timedelta(seconds=random_second)
# output = random_permits(n_rows=3)
# print(output)
# data = json.loads(output)
# print(data)
# df = pd.DataFrame(data)
# print(df.columns)
| [
11748,
33918,
198,
11748,
4731,
198,
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
4818,
8079,
1330,
28805,
12514,
11,
4818,
8079,
198,
6738,
4738,
1330,
43720,
600,
11,
8187,
11,
3572,
11,
43720,
9521,
11,
7747,
6... | 3.01699 | 412 |
from time import time
import joblib
import matplotlib.pyplot as plt
import numpy as np
from sklearn import decomposition
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from yellowbrick.classifier import ConfusionMatrix
from yellowbrick.classifier import PrecisionRecallCurve
from yellowbrick.classifier import ROCAUC
from optimus.helpers.columns import parse_columns
from optimus.helpers.converter import format_dict
from optimus.helpers.core import val_to_list
from optimus.infer import is_numeric
| [
6738,
640,
1330,
640,
198,
11748,
1693,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
26969,
9150,
198,
6738,
1341,
35720,
1330,
20731,
198,
6738,
1... | 3.642308 | 260 |
from investmentGame.Order import Order
#from investmentGame.Portfolio import Portfolio
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from investmentGame.db import Base
#User(name='Jeroen', age=26, balance=20)#, password='Welcome'
#u.transaction('buy', 'market_order', 20, 1) | [
6738,
4896,
8777,
13,
18743,
1330,
8284,
198,
2,
6738,
4896,
8777,
13,
13924,
13652,
1330,
4347,
13652,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
11,
41146,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
... | 3.522936 | 109 |
import re
import sys
JIRA_ID_REGEX = re.compile(r"[A-Z]+-\d+")
MISSING_JIRA_ID_MSG = """
Commit message is missing [JIRA task id].
Include [JIRA task id] in commit message, like so:
#################################
ABC-123 this is my commit message
#################################
where ABC-123 is a sample [JIRA task id].
For more details check:
https://confluence.atlassian.com/adminjiracloud/integrating-with-development-tools-776636216.html
"""
def commit_msg_hook(commit_msg_filepath: str) -> None:
"""Scans for valid jira task id in commit message
https://pre-commit.com/#pre-commit-for-commit-messages"""
with open(commit_msg_filepath) as commit_msg:
if not jira_id_in_commit_msg(commit_msg.read()):
sys.exit(MISSING_JIRA_ID_MSG)
if __name__ == "__main__":
commit_msg_hook(sys.argv[1])
| [
11748,
302,
198,
11748,
25064,
198,
198,
41,
40,
3861,
62,
2389,
62,
31553,
6369,
796,
302,
13,
5589,
576,
7,
81,
17912,
32,
12,
57,
48688,
12,
59,
67,
10,
4943,
198,
198,
44,
16744,
2751,
62,
41,
40,
3861,
62,
2389,
62,
5653,
... | 2.659306 | 317 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# invenio-records-lom is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Permission-config classes for LOMRecordService-objects."""
from invenio_records_permissions.generators import AnyUser
from invenio_records_permissions.policies.records import RecordPermissionPolicy
class LOMRecordPermissionPolicy(RecordPermissionPolicy):
"""Flask-principal style permissions for LOM record services.
Note that the invenio_access.Permission class always adds ``superuser-access``,
so admin-Identities are always allowed to take any action.
"""
# TODO: settle permissions
can_create = [AnyUser()]
can_publish = [AnyUser()]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
402,
3247,
2059,
286,
8987,
13,
198,
2,
198,
2,
287,
574,
952,
12,
8344,
3669,
12,
75,
296,
318,
1479,
3788,
26,
345,
460,
1... | 3.236 | 250 |
# Leia a distancia da viagem e calcule o valor da passagem
d = float(input('Informe a distância da sua viagem: '))
if d <= 200:
p = d * 0.5
else:
p = d * 0.45
print('Para uma viagem de {}, você pagará R$ {:.2f}'.format(d, p))
| [
2,
41212,
257,
1233,
1192,
544,
12379,
25357,
363,
368,
304,
2386,
23172,
267,
1188,
273,
12379,
1208,
363,
368,
198,
198,
67,
796,
12178,
7,
15414,
10786,
818,
687,
68,
257,
1233,
22940,
10782,
544,
12379,
424,
64,
25357,
363,
368,
... | 2.155963 | 109 |
from typing import List, Optional, Union
from vgdb.evaluator import Evaluator
from vgdb.lexer import Lexer
from vgdb.parser import Parser
from vgdb.statement import CreateTable, Insert, Select
from vgdb.table import Table
| [
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
198,
198,
6738,
410,
70,
9945,
13,
18206,
84,
1352,
1330,
26439,
84,
1352,
198,
6738,
410,
70,
9945,
13,
2588,
263,
1330,
17210,
263,
198,
6738,
410,
70,
9945,
13,
48610,
1330,
23042,
26... | 3.343284 | 67 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@File : test_summary_abnormal_input.py
@Author:
@Date : 2019-08-5
@Desc : test summary function of abnormal input
"""
import logging
import os
import numpy as np
from mindspore.common.tensor import Tensor
from mindspore.train.summary.summary_record import SummaryRecord
CUR_DIR = os.getcwd()
SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/"
log = logging.getLogger("test")
log.setLevel(level=logging.ERROR)
def get_test_data(step):
""" get_test_data """
test_data_list = []
tag1 = "x1[:Scalar]"
tag2 = "x2[:Scalar]"
np1 = np.array(step + 1).astype(np.float32)
np2 = np.array(step + 2).astype(np.float32)
dict1 = {}
dict1["name"] = tag1
dict1["data"] = Tensor(np1)
dict2 = {}
dict2["name"] = tag2
dict2["data"] = Tensor(np2)
test_data_list.append(dict1)
test_data_list.append(dict2)
return test_data_list
# Test: call method on parse graph code
| [
2,
15069,
12131,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.985102 | 537 |
# -*- coding: utf-8 -*-
import _locale
_locale._getdefaultlocale = (lambda *args: ['zh_CN', 'utf8'])
import io
import os
import sys
import six
import signal
from tccli.log import init
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
try:
reload(sys) # Python 2.7
sys.setdefaultencoding('utf8')
except NameError:
try:
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
from importlib import reload # Python 3.4+
reload(sys)
except ImportError:
from imp import reload # Python 3.0 - 3.3
reload(sys)
from tccli.command import CLICommand
from tencentcloud import __version__ as sdkVersion
from tccli import __version__
from tccli.exceptions import UnknownArgumentError, ConfigurationError, NoCredentialsError, NoRegionError, ClientError
from tccli.error_msg import USAGE
log = init('tccli.main')
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4808,
17946,
1000,
198,
62,
17946,
1000,
13557,
1136,
12286,
17946,
1000,
796,
357,
50033,
1635,
22046,
25,
37250,
23548,
62,
44175,
3256,
705,
40477,
23,
6,
... | 2.701408 | 355 |
from product_app.models import Book
| [
6738,
1720,
62,
1324,
13,
27530,
1330,
4897,
198
] | 4 | 9 |
from .user import User # noqa
from .profiles import Profile # noqa
| [
6738,
764,
7220,
1330,
11787,
220,
1303,
645,
20402,
198,
6738,
764,
5577,
2915,
1330,
13118,
220,
1303,
645,
20402,
198
] | 3.285714 | 21 |
#!/home/jason/oss/asus-rt-n14uhp-mrtg/tmp/ve_asus-rt-n14uhp-mrtg/bin/python3.4
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
2,
48443,
11195,
14,
73,
888,
14,
793,
14,
40895,
12,
17034,
12,
77,
1415,
7456,
79,
12,
76,
17034,
70,
14,
22065,
14,
303,
62,
40895,
12,
17034,
12,
77,
1415,
7456,
79,
12,
76,
17034,
70,
14,
8800,
14,
29412,
18,
13,
19,
198,... | 2.3125 | 80 |
from __future__ import print_function
import sys
import re
########################################################
############### PRODUCERS ##############################
########################################################
class Producer(object):
""" Log producer API which sends messages to be logged
to a 'consumer' object, which then prints them to stdout,
stderr, files, etc.
"""
Message = Message # to allow later customization
keywords2consumer = {}
default = Producer('default')
Producer.keywords2consumer['default'] = default_consumer
########################################################
############### CONSUMERS ##############################
########################################################
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
11748,
302,
628,
198,
29113,
14468,
7804,
198,
7804,
4242,
21017,
4810,
3727,
9598,
4877,
1303,
14468,
7804,
4242,
2,
198,
29113,
14468,
7804,
628,
198,
4871,
30436,
... | 4.47093 | 172 |