content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
class ExportPatternTable(object,IEnumerable[KeyValuePair[ExportPatternKey,ExportPatternInfo]],IEnumerable,IDisposable):
"""
A table supporting a mapping of FillPatterns in Revit to pattern names that will be set
in the target export format.
ExportPatternTable()
"""
def Add(self,exportPatternKey,exportPatternInfo):
"""
Add(self: ExportPatternTable,exportPatternKey: ExportPatternKey,exportPatternInfo: ExportPatternInfo)
Inserts a (key,info) pair into Export pattern table.
exportPatternKey: The export pattern key to be added.
exportPatternInfo: The export pattern info to be added.
"""
pass
def Clear(self):
"""
Clear(self: ExportPatternTable)
Removes all contents stored in the table.
"""
pass
def ContainsKey(self,exportpatternKey):
"""
ContainsKey(self: ExportPatternTable,exportpatternKey: ExportPatternKey) -> bool
Checks whether a pattern key exists in the table.
exportpatternKey: The export pattern Key.
Returns: True if the pattern key exists in the table.
"""
pass
def Dispose(self):
""" Dispose(self: ExportPatternTable) """
pass
def GetEnumerator(self):
"""
GetEnumerator(self: ExportPatternTable) -> IEnumerator[KeyValuePair[ExportPatternKey,ExportPatternInfo]]
Returns an enumerator that iterates through a collection.
Returns: An IEnumerator object that can be used to iterate through the collection.
"""
pass
def GetExportPatternInfo(self,exportPatternKey):
"""
GetExportPatternInfo(self: ExportPatternTable,exportPatternKey: ExportPatternKey) -> ExportPatternInfo
Gets a copy of the pattern info associated to the input pattern key.
exportPatternKey: The export pattern Key.
Returns: Return the patternInfo for this key.
"""
pass
def GetKeys(self):
"""
GetKeys(self: ExportPatternTable) -> IList[ExportPatternKey]
Gets all the keys stored in the map.
Returns: Return the key array.
"""
pass
def GetPatternTableIterator(self):
"""
GetPatternTableIterator(self: ExportPatternTable) -> ExportPatternTableIterator
Returns a PatternTableIterator that iterates through the collection.
Returns: A PatternTableIterator object that can be used to iterate through key-value
pairs in the collection.
"""
pass
def GetValues(self):
"""
GetValues(self: ExportPatternTable) -> IList[ExportPatternInfo]
Returns all the values stored in the map.
Returns: Return the info array.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: ExportPatternTable,disposing: bool) """
pass
def Remove(self,exportPatternKey):
"""
Remove(self: ExportPatternTable,exportPatternKey: ExportPatternKey)
Removes the pair (key,info) by pattern key.
exportPatternKey: The export pattern key.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
""" __contains__[KeyValuePair[ExportPatternKey,ExportPatternInfo]](enumerable: IEnumerable[KeyValuePair[ExportPatternKey,ExportPatternInfo]],value: KeyValuePair[ExportPatternKey,ExportPatternInfo]) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Count of the items contained in the collection.
Get: Count(self: ExportPatternTable) -> int
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: ExportPatternTable) -> bool
"""
| [
4871,
36472,
47546,
10962,
7,
15252,
11,
40,
4834,
30831,
58,
9218,
11395,
47,
958,
58,
43834,
47546,
9218,
11,
43834,
47546,
12360,
60,
4357,
40,
4834,
30831,
11,
2389,
271,
1930,
540,
2599,
201,
198,
37227,
201,
198,
317,
3084,
6493... | 2.912678 | 1,546 |
# Your MyCircularQueue object will be instantiated and called as such:
obj = CircularQueue(3)
obj.enQueue(6)
param_2 = obj.deQueue()
obj.enQueue(5)
param_2 = obj.deQueue()
# obj.enQueue(3)
# param_3 = obj.Front()
# param_4 = obj.Rear()
print(obj, obj.Front(), obj.Rear()) | [
198,
2,
3406,
2011,
31560,
934,
34991,
2134,
481,
307,
9113,
12931,
290,
1444,
355,
884,
25,
198,
26801,
796,
7672,
934,
34991,
7,
18,
8,
628,
198,
26801,
13,
268,
34991,
7,
21,
8,
628,
198,
17143,
62,
17,
796,
26181,
13,
2934,
... | 2.536364 | 110 |
# 基于PyTorch复现并修正后的马金TD方法
# 修正:叶子节点选择错误;SGD->Adagrad
import sys
import numpy as np
from models import TD_RvNN_Ma
import time
import random
from torch import optim
import datetime
from evaluate import *
from config import *
from data_io import loadLabel
from get_args import _ma_args
# str = index:wordfreq index:wordfreq
################################# loas data ###################################
##################################### MAIN ####################################
# 1. load tree & word & index & label
print("Step1:processing data")
tree_train, word_train, index_train, leaf_idxs_train, y_train, \
tree_test, word_test, index_test, leaf_idxs_test, y_test = \
loadData(TD_RvNN_TFIDF_path, train_path, test_path, label15_path, _ma_args.train_threshold)
print()
# 2. ini RNN model
print('Step2:build model')
t0 = time.time()
model = TD_RvNN_Ma.RvNN(_ma_args.vocab_dim, _ma_args.random_dim, _ma_args.class_num)
model.to(device)
t1 = time.time()
print('Recursive model established,', (t1 - t0) / 60, 's\n')
# 3. looping SGD
print('Step3:start training')
optimizer = optim.Adagrad(model.parameters(), lr=0.05)
losses_5, losses = [], []
num_examples_seen = 0
indexs = list(range(len(y_train)))
highest_acc = 0
for epoch in range(1, _ma_args.epoch + 1):
# one SGD
random.shuffle(indexs)
for cnt, i in enumerate(indexs):
pred_y, loss = model.forward(torch.Tensor(word_train[i]).cuda(device),
torch.LongTensor(index_train[i]).cuda(device).long(),
torch.LongTensor(tree_train[i]).cuda(device),
torch.Tensor(leaf_idxs_train[i]).cuda(device).long(),
torch.Tensor(y_train[i]).cuda(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.data.cpu().tolist())
num_examples_seen += 1
if (cnt + 1) % 100 == 0:
# print("iteration:%d/%d" % (cnt, len(indexs)))
break
# print("epoch=%d: idx=%d, loss=%f" % (epoch, i, np.mean(losses)))
# cal loss & evaluate
if epoch % 1 == 0:
losses_5.append((num_examples_seen, np.mean(losses)))
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# print("%s: Loss after num_examples_seen=%d epoch=%d: %f" % (time, num_examples_seen, epoch, np.mean(losses)))
sys.stdout.flush()
prediction = []
for j in range(len(y_test)):
prediction.append(
model.predict_up(torch.Tensor(word_test[j]).cuda(device),
torch.Tensor(index_test[j]).cuda(device).long(),
torch.Tensor(tree_test[j]).cuda(device).long(),
torch.Tensor(leaf_idxs_test[j]).cuda(device).long())
.cpu().data.numpy().tolist())
# print("predictions:", prediction)
res = evaluation_4class(prediction, y_test)
highest_acc = max(highest_acc, res[1])
# print('results:', res)
# print()
sys.stdout.flush()
# Adjust the learning rate if loss increases
# if len(losses_5) > 1 and losses_5[-1][1] > losses_5[-2][1]:
# lr = lr * 0.5
# print("Setting learning rate to %.12f" % lr)
# sys.stdout.flush()
sys.stdout.flush()
losses = []
print('最高acc:', highest_acc)
| [
2,
10263,
253,
118,
12859,
236,
20519,
15884,
354,
13783,
235,
163,
236,
108,
33176,
114,
46479,
106,
29826,
96,
28938,
236,
21410,
165,
102,
105,
34932,
239,
21016,
43095,
37345,
243,
201,
198,
2,
220,
46479,
106,
29826,
96,
171,
120... | 2.010245 | 1,757 |
import youtube
from praw.models import Comment
"""
These methods are used for detecting the current state of a post (e.g., claimed,
unclaimed, done) and the surrounding context based on the specific comment data
given.
This helps reduce the amount of data we _have_ to store in Redis.
"""
| [
11748,
35116,
198,
6738,
279,
1831,
13,
27530,
1330,
18957,
198,
198,
37811,
198,
4711,
5050,
389,
973,
329,
31521,
262,
1459,
1181,
286,
257,
1281,
357,
68,
13,
70,
1539,
4752,
11,
198,
403,
12795,
11,
1760,
8,
290,
262,
7346,
4732... | 3.857143 | 77 |
from xlsxwriter import Workbook
from parselib import Matrix
from util import measure_between
MIN_CELL_WIDTH = 3
MIN_CELL_HEIGHT = 12
MUTED_RED = '#63BE7B'
MUTED_YELLOW = '#F7B97B'
MUTED_GREEN = '#EF676A'
| [
6738,
2124,
7278,
87,
16002,
1330,
5521,
2070,
198,
6738,
1582,
741,
571,
1330,
24936,
198,
6738,
7736,
1330,
3953,
62,
23395,
198,
198,
23678,
62,
5222,
3069,
62,
54,
2389,
4221,
796,
513,
198,
23678,
62,
5222,
3069,
62,
13909,
9947,... | 2.314607 | 89 |
import gzip
import base64
import simplejson as json
import pandas as pd
from copy import deepcopy as copy
from .util import Util
class BuilderParams:
"""
Validating input parameters and building jsons for gridOptions and gridData
-----
Attributes:
obj: a Grid object whose attributes will be checked and built by BuilderParams.
"""
def __init__(self,
obj):
"""
"""
self.obj = obj
def valid(self):
"""
Checks if the values for the given entries are valid.
"""
msg = 'width must be an int (number of pixels) or a string'
assert (isinstance(self.obj.width_in, int)
or isinstance(self.obj.width_in, str)), msg
msg = 'height must be an int (number of pixels)'
assert isinstance(self.obj.height_in, int), msg
li_theme = [
'ag-theme-balham',
'ag-theme-balham-dark',
'ag-theme-material',
'ag-theme-fresh',
'ag-theme-dark',
'ag-theme-blue',
'ag-theme-bootstrap',
'ag-theme-excel', # custom style added by ipyaggrid
]
msg = 'theme must be one of {}'.format(li_theme)
assert self.obj.theme in li_theme, msg
msg = 'css_rules must be a string'
assert isinstance(self.obj.css_rules, str), msg
msg = 'quick_filter must be a boolean'
assert isinstance(self.obj.quick_filter, bool), msg
msg = 'export_csv must be a boolean'
assert isinstance(self.obj.export_csv, bool), msg
msg = 'export_excel must be a boolean'
assert isinstance(self.obj.export_excel, bool), msg
msg = 'license must be a string'
assert isinstance(self.obj.license, str), msg
msg = 'hide_grid must be a boolean'
assert isinstance(self.obj.hide_grid, bool), msg
msg = 'keep_multiindex must be a boolean'
assert isinstance(self.obj.keep_multiindex, bool), msg
li_export = ['disabled', 'auto', 'buttons']
msg = 'export_mode must be one of {}'.format(li_export)
assert self.obj.export_mode in li_export, msg
li_fit = ['', 'size_to_fit', 'auto']
msg = 'columns_fit must be one of {}'.format(li_fit)
assert self.obj.columns_fit in li_fit, msg
msg = 'center must be a boolean'
assert isinstance(self.obj.center, bool), msg
msg = 'show_toggle_delete must be a boolean'
assert isinstance(self.obj.show_toggle_delete, bool), msg
msg = 'show_toggle_edit must be a boolean'
assert isinstance(self.obj.show_toggle_edit, bool), msg
msg = 'sync_on_edit must be a boolean'
assert isinstance(self.obj.sync_on_edit, bool), msg
msg = 'sync_grid must be a boolean'
assert isinstance(self.obj.sync_grid, bool), msg
msg = 'js_helpers_custom must be a str'
assert isinstance(self.obj.js_helpers_custom, str), msg
msg = 'js_pre_helpers must be a list of str'
assert isinstance(self.obj.js_pre_helpers, list), msg
if isinstance(self.obj.js_pre_helpers, list):
msg = 'each element of js_pre_helpers must be a str'
for e in self.obj.js_pre_helpers:
assert isinstance(e, str), msg
self.obj.js_pre_helpers = self.obj.js_pre_helpers
msg = 'js_pre_grid must be a list of str'
assert isinstance(self.obj.js_pre_grid, list), msg
if isinstance(self.obj.js_pre_grid, list):
msg = 'each element of js_pre_grid must be a str'
for e in self.obj.js_pre_grid:
assert isinstance(e, str), msg
self.obj.js_pre_grid = self.obj.js_pre_grid
msg = 'js_post_grid must be a list of str'
assert isinstance(self.obj.js_post_grid, list), msg
if isinstance(self.obj.js_post_grid, list):
msg = 'each element of js_post_grid must be a str'
for e in self.obj.js_post_grid:
assert isinstance(e, str), msg
self.obj.js_post_grid = self.obj.js_post_grid
if self.obj.menu_in is None:
self.obj.menu_in = {}
msg = 'menu must be a dict'
assert isinstance(self.obj.menu_in, dict), msg
if 'buttons' in self.obj.menu_in:
msg = 'menu["buttons"] must be a list'
assert isinstance(self.obj.menu_in['buttons'], list), msg
msg = 'each element of menu["buttons"] must be a dict with "name" as key and string as value'
for e in self.obj.menu_in['buttons']:
assert 'name' in e, msg
assert isinstance(e['name'], str), msg
if not ('action' in e):
e['action'] = ''
msgbis = 'the action of a button must be of type str'
assert isinstance(e['action'], str), msgbis
if 'inputs' in self.obj.menu_in:
msg = 'menu["inputs"] must be a list'
assert isinstance(self.obj.menu_in['inputs'], list), msg
li_names = ['Dropdown Menu', 'Quick Filter',
'Toggle Edit', 'Toggle Delete']
msg = 'each element of menu["inputs"] must have a "name" key with its value in {}'.format(
li_names)
for e in self.obj.menu_in['inputs']:
assert 'name' in e, msg
assert e['name'] in li_names, msg
msg = 'grid_data must be a list or a dataframe'
assert isinstance(self.obj.grid_data_in,
(list, pd.core.frame.DataFrame)), msg
if isinstance(self.obj.grid_data_in, list):
msg = 'each element of grid_data must be a dict'
for e in self.obj.grid_data_in:
assert isinstance(e, dict), msg
self.obj._is_df = False
else:
self.obj._is_df = True
msg = 'both grid_options and grid_options_multi cannot be set'
assert ((self.obj.grid_options == {}) or
(self.obj.grid_options_multi == [])), msg
msg = 'one exactly of grid_options or grid_options_multi mut be set'
assert not((self.obj.grid_options == {}) and
(self.obj.grid_options_multi == [])), msg
if self.obj.grid_options == {}:
self.obj._is_grid_options_multi = True
else:
self.obj._is_grid_options_multi = False
if self.obj.grid_options != {}:
msg = 'grid_options must be a dict'
assert isinstance(self.obj.grid_options, dict), msg
if self.obj.grid_options_multi != []:
msg = 'grid_options_multi must be a list or a tuple'
assert isinstance(self.obj.grid_options_multi, (list, tuple)), msg
msg1 = 'each element of grid_options_multi must be a list or tuple of length 2'
msg2 = 'in each grid_options_multi element of length 2, the first one must be a string'
msg3 = 'in each grid_options_multi element of length 3, the second one must be a dict'
for e in self.obj.grid_options_multi:
assert isinstance(e, (list, tuple)) and len(e) == 2, msg1
assert isinstance(e[0], str), msg2
assert isinstance(e[1], dict), msg3
def build(self):
"""
Builds the parameters of the Grid.
"""
# Manage width
if isinstance(self.obj.width_in, int):
self.obj.width = str(self.obj.width_in) + 'px'
else:
self.obj.width = self.obj.width_in
# manage multi options
self.obj.is_multi = True if self.obj.grid_options_multi != [] else False
# Manage height
if self.obj.height_in == 0:
self.obj.height_in = 350
self.obj.height = str(self.obj.height_in) + 'px'
if self.obj.theme == 'ag-theme-excel':
self.obj.grid_options['suppressColumnVirtualisation'] = True
# licence
# self.obj.license = Util.encode_b64(self.obj.license)
# css rules down
self.obj.css_rules_down = Util.build_css_rules(self.obj.css_rules)
if self.obj.is_multi:
grid_options_multi_2 = []
for name, options in self.obj.grid_options_multi:
self.obj._grid_data_down, options_2 = self.preprocess_input(
self.obj.grid_data_in,
options,
index=self.obj.index,
keep_multiindex=self.obj.keep_multiindex)
grid_options_multi_2.append((name, options_2))
self.obj.grid_options_multi_json = grid_options_multi_2
else:
self.obj._grid_data_down, self.obj.grid_options = self.preprocess_input(
self.obj.grid_data_in,
self.obj.grid_options,
index=self.obj.index,
keep_multiindex=self.obj.keep_multiindex)
if self.obj.is_multi:
self.obj._grid_options_multi_down = Util.build_options(
{'data': self.obj.grid_options_multi}, True)
else:
self.obj._grid_options_mono_down = Util.build_options(
self.obj.grid_options, False)
# js builtin helpers
self.obj._js_helpers_builtin = Util.load_file(
'js', 'helpersBuiltin.js')
# js custom helpers
if self.obj.js_helpers_custom == '':
self.js_helpers_custom = 'helpersCustom = {}'
def preprocess_input(self,
grid_data,
grid_options,
index,
keep_multiindex):
"""
"""
if self.obj.show_toggle_edit:
func_update_edit = """function(params){
return window.agGridOptions["""+str(self.obj._id)+"""].editableCustom;
}"""
if 'defaultColDef' in grid_options:
grid_options['defaultColDef']['editable'] = func_update_edit
else:
grid_options['defaultColDef'] = {'editable': func_update_edit}
if self.obj.paste_from_excel:
grid_options['processDataFromClipboard'] = """function(params){
const { data } = params;
console.log(data);
if (data.length <= 1) {
return null;
}
const cols = data[0];
const colDefs = [];
cols.forEach(col => {
const field = col.toLowerCase().replace(' ', '-');
colDefs.push({ headerName: col, field });
});
const new_dat = [];
const l = colDefs.length;
for (let i = 1; i < data.length; i += 1) {
const row = data[i];
const new_row = {};
if (row.length === l) {
for (let j = 0; j < row.length; j += 1) {
new_row[colDefs[j].field] = row[j];
}
new_dat.push(new_row);
}
}
gridOptions.api.setColumnDefs(colDefs);
gridOptions.api.setRowData(new_dat);
if (view.model.get('columns_fit') === 'size_to_fit') {
gridOptions.api.sizeColumnsToFit();
} else if (view.model.get('columns_fit') === 'auto') {
const allColumnIds = [];
gridOptions.columnApi.getAllColumns().forEach(column => {
allColumnIds.push(column.colId);
});
gridOptions.columnApi.autoSizeColumns(allColumnIds);
}
return null;
}
"""
# Setup menu, buttons, inputs, and manages menu CSS.
menu_in = copy(self.obj.menu_in)
menu = {'buttons': [], 'inputs': []}
if not ('buttons' in menu_in):
menu_in['buttons'] = []
if not ('inputs' in menu_in):
menu_in['inputs'] = []
Util.setup_menu(self.obj, menu_in, menu, grid_options)
self.obj.menu = menu
if Util.is_df(grid_data):
self.obj._is_df = True
else:
self.obj._is_df = False
if Util.is_multiindex_df(grid_data):
grid_data_2, grid_options_2 = Util.prepare_multiindex_df(
grid_data,
grid_options,
index=index,
keep_multiindex=keep_multiindex)
elif Util.is_df(grid_data):
grid_data_2, grid_options_2 = Util.prepare_singleindex_df(
grid_data,
grid_options,
index=index)
else:
grid_data_2, grid_options_2 = grid_data, grid_options
grid_options_2 = Util.update_columnTypes(
grid_options_2)
return Util.build_data(grid_data_2), grid_options_2
def to_dict(self):
"""
"""
d = copy(self.__dict__)
d = {k: v for k, v in d.items() if v is not None}
return d
def pprint(self, indent=2):
"""
"""
d = json.dumps(self.to_dict(),
sort_keys=True,
indent=indent)
print(d)
| [
198,
11748,
308,
13344,
198,
11748,
2779,
2414,
198,
198,
11748,
2829,
17752,
355,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
4866,
1330,
2769,
30073,
355,
4866,
198,
198,
6738,
764,
22602,
1330,
7273,
346,
628,
198,
... | 2.001199 | 6,670 |
"""
Space : O(1)
Time : O(n)
"""
| [
37811,
198,
14106,
220,
220,
1058,
440,
7,
16,
8,
198,
7575,
220,
220,
220,
1058,
440,
7,
77,
8,
198,
37811,
628
] | 1.695652 | 23 |
"""Init data for QuAcc"""
from ase.atoms import Atoms
from ase.io.jsonio import decode, encode
from quacc._version import __version__
from quacc.settings import QuaccSettings
Atoms.as_dict = atoms_as_dict
Atoms.from_dict = atoms_from_dict
SETTINGS = QuaccSettings()
| [
37811,
31768,
1366,
329,
2264,
17320,
37811,
198,
6738,
257,
325,
13,
265,
3150,
1330,
1629,
3150,
198,
6738,
257,
325,
13,
952,
13,
17752,
952,
1330,
36899,
11,
37773,
198,
198,
6738,
627,
4134,
13557,
9641,
1330,
11593,
9641,
834,
1... | 3.044944 | 89 |
from facePoints import facePoints
model_path = "shape_predictor_68_face_landmarks.dat"
frontal_face_detector = dlib.get_frontal_face_detector()
face_landmark_detector = dlib.shape_predictor(model_path)
mouth_cascade = cv2.CascadeClassifier('models/face_detect/haarcascade_mcs_mouth.xml') | [
6738,
1986,
40710,
1330,
1986,
40710,
198,
198,
19849,
62,
6978,
796,
366,
43358,
62,
79,
17407,
273,
62,
3104,
62,
2550,
62,
1044,
14306,
13,
19608,
1,
198,
198,
8534,
282,
62,
2550,
62,
15255,
9250,
796,
288,
8019,
13,
1136,
62,
... | 2.71028 | 107 |
# Module: core
# Date: 20th December 2014
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Core Module
Crates server component, load plugins and handles process signals.
"""
from logging import getLogger
from signal import SIGINT, SIGHUP, SIGTERM
from circuits import handler, BaseComponent
from .server import Server
from .plugins import Plugins
| [
2,
19937,
25,
220,
220,
4755,
198,
2,
7536,
25,
220,
220,
220,
220,
1160,
400,
3426,
1946,
198,
2,
6434,
25,
220,
220,
3700,
19790,
11,
386,
6404,
291,
379,
1790,
21170,
5013,
16605,
2010,
16605,
35851,
628,
198,
37811,
14055,
19937... | 3.628571 | 105 |
(
activity_df
.sort_values('start_date_local')
.assign(time_gap=lambda df: df['start_date_local'].diff())
.sort_values('time_gap', ascending=False)
.head(10)
)
| [
7,
198,
220,
220,
220,
3842,
62,
7568,
198,
220,
220,
220,
764,
30619,
62,
27160,
10786,
9688,
62,
4475,
62,
12001,
11537,
198,
220,
220,
220,
764,
562,
570,
7,
2435,
62,
43554,
28,
50033,
47764,
25,
47764,
17816,
9688,
62,
4475,
... | 2.307692 | 78 |
from tkinter import*
cal = Tk()
cal.title("Skiy - Calculator")
cal.minsize(width=360, height=500)
cal.maxsize(width=360, height=500)
cal.iconbitmap('C:/Users/Okeke Divine-Vessel/Desktop/Desktop/Programmer/Icons/Cal.ico')
operator=""
text_Input = StringVar()
txtDisplay = Entry(cal,font=('arial', 20,'bold'), textvariable=text_Input, bd=30, insertwidth=4,
bg="powder blue", justify='right').grid(columnspan=4)
btn7=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="7",bg="powder blue",command=lambda:btnClick(7) ).grid(row=1,column=0)
btn8=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="8", bg="powder blue",command=lambda:btnClick(8)).grid(row=1,column=1)
btn9=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="9", bg="powder blue",command=lambda:btnClick(9)).grid(row=1,column=2)
Addition=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 19,'bold'),
text="+", bg="powder blue",command=lambda:btnClick("+")).grid(row=1,column=3)
# ===============================================================================
btn4=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="4", bg="powder blue",command=lambda:btnClick(4)).grid(row=2,column=0)
btn5=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="5", bg="powder blue",command=lambda:btnClick(5)).grid(row=2,column=1)
btn6=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="6", bg="powder blue",command=lambda:btnClick(6)).grid(row=2,column=2)
Subtraction=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="-", bg="powder blue",command=lambda:btnClick("-")).grid(row=2,column=3)
# ===============================================================================
btn1=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="1", bg="powder blue",command=lambda:btnClick(1)).grid(row=3,column=0)
btn2=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="2", bg="powder blue",command=lambda:btnClick(2)).grid(row=3,column=1)
btn3=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="3", bg="powder blue",command=lambda:btnClick(3)).grid(row=3,column=2)
Mutiply=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 19,'bold'),
text="x", bg="powder blue",command=lambda:btnClick("*")).grid(row=3,column=3)
# ===============================================================================
btnClear=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="C", bg="powder blue", command=btnClearDisplay).grid(row=4,column=1)
btn0=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="0", bg="powder blue",command=lambda:btnClick(0)).grid(row=4,column=0)
btnEquals=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="=", bg="powder blue", command=btnEqualsInput).grid(row=4,column=2)
Division=Button(cal,padx=16,pady=16,bd=8, fg="black",font=('arial', 20,'bold'),
text="/", bg="powder blue",command=lambda:btnClick("/")).grid(row=4,column=3)
cal.mainloop() | [
6738,
256,
74,
3849,
1330,
9,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
9948,
796,
309,
74,
3419,
201,
198,
9948,
13,
7839,
7203,
50,
4106,
88,
532,
43597,
4943,
201,
198,
9948,
13,
42951,
1096,
7,
10394,
28,
15277,
11,
... | 2.379884 | 1,382 |
#! /usr/bin/env python3.7
# -*- coding: utf-8 -*-
# Copyright 2020 the HERA Project
# Licensed under the MIT License
import numpy as np
from pyuvdata import UVData, utils
import argparse
import sys
import os
import shutil
# Parse arguments
a = argparse.ArgumentParser(
description='Script for "fixing" H3C files to ensure that data is unflagged, nsamples = 1, and the uvws are correct given the antenna antenna_positions.'
)
a.add_argument("infile", help="Path to input pyuvdata-readable data file.")
a.add_argument("outfile", help="Path to output uvh5 data file.")
args = a.parse_args()
# Read data
uv = UVData()
uv.read(args.infile)
# Fix flags and nsamples
uv.flag_array = np.zeros_like(uv.flag_array)
uv.nsample_array = np.ones_like(uv.nsample_array)
# Fix uvw array using atenna postiions and array location
unique_times = np.unique(uv.time_array)
for ind, jd in enumerate(unique_times):
inds = np.where(uv.time_array == jd)[0]
ant_uvw = utils.phase_uvw(
uv.telescope_location_lat_lon_alt[1],
uv.telescope_location_lat_lon_alt[0],
uv.antenna_positions,
)
ant_sort = np.argsort(uv.antenna_numbers)
ant1_index = np.searchsorted(uv.antenna_numbers[ant_sort], uv.ant_1_array[inds])
ant2_index = np.searchsorted(uv.antenna_numbers[ant_sort], uv.ant_2_array[inds])
uv.uvw_array[inds] = (
ant_uvw[ant_sort][ant2_index, :] - ant_uvw[ant_sort][ant1_index, :]
)
# Update history
uv.history += f'\n\nData fixed to unflag all integrations, set nsamples to 1, and the correct uvw_array using the command:\n{" ".join(sys.argv)}\n\n'
# Write results to disk, deleting infile if the infile and outfiles are the same (used for when the infile is a softlink)
if args.infile == args.outfile:
try:
os.remove(args.infile)
except IsADirectoryError:
shutil.rmtree(args.infile)
uv.write_uvh5(args.outfile)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
262,
24906,
32,
4935,
198,
2,
49962,
739,
262,
17168,
13789,
198,
198,
11748,
299,
3... | 2.514667 | 750 |
from dataset.tf_data_handler import tf_data_handler
import tensorflow as tf
from config import Config
import time
from model.model import vgg_crnn
import numpy as np
from tools.utils import ctc_decode,cacualte_acc
data_handler_obj = tf_data_handler()
test_loader = data_handler_obj.get_data_loader(Config.test_anno, batch_size=Config.train_batch_size,img_root=Config.img_root)
# model = crnn()
# model = build_model(Config.dict_size)
model = vgg_crnn()
# model.build(input_shape=(None,32,None,1))
model.load_weights("/home/ethony/workstation/my_crnn_tf2/checkpoint/200W/epoch_0_model")
# model_path = "/home/ethony/workstation/my_crnn_tf2/checkpoint/epoch_0_model"
# model = tf.keras.models.load_model(model_path)
# model.build(input_shape=(None, 32, None, 1))
img_tensor = tf.convert_to_tensor(np.random.uniform(-1,1,size=(2,32,320,1)),dtype = tf.float32)
for i in range(10):
start_time = time.time()
pred = model(img_tensor)
print("cost time : {0}".format(time.time() - start_time))
| [
198,
6738,
27039,
13,
27110,
62,
7890,
62,
30281,
1330,
48700,
62,
7890,
62,
30281,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
4566,
1330,
17056,
198,
11748,
640,
198,
6738,
2746,
13,
19849,
1330,
410,
1130,
62,
6098,
20471,
... | 2.613577 | 383 |
# encoding: utf-8
"""
lxml custom element classes for shape-related XML elements.
"""
from __future__ import absolute_import
from .. import parse_xml
from ..ns import nsdecls
from .shared import BaseShapeElement
from ..simpletypes import ST_DrawingElementId, XsdUnsignedInt
from ..xmlchemy import (
BaseOxmlElement, OneAndOnlyOne, RequiredAttribute, ZeroOrOne
)
class CT_Connection(BaseShapeElement):
"""
A `a:stCxn` or `a:endCxn` element specifying a connection between
an end-point of a connector and a shape connection point.
"""
id = RequiredAttribute('id', ST_DrawingElementId)
idx = RequiredAttribute('idx', XsdUnsignedInt)
class CT_Connector(BaseShapeElement):
"""
A line/connector shape ``<p:cxnSp>`` element
"""
_tag_seq = (
'p:nvCxnSpPr', 'p:spPr', 'p:style', 'p:extLst'
)
nvCxnSpPr = OneAndOnlyOne('p:nvCxnSpPr')
spPr = OneAndOnlyOne('p:spPr')
del _tag_seq
@classmethod
def new_cxnSp(cls, id_, name, prst, x, y, cx, cy, flipH, flipV):
"""
Return a new ``<p:cxnSp>`` element tree configured as a base
connector.
"""
tmpl = cls._cxnSp_tmpl()
flip = (
(' flipH="1"' if flipH else '') + (' flipV="1"' if flipV else '')
)
xml = tmpl.format(**{
'nsdecls': nsdecls('a', 'p'),
'id': id_,
'name': name,
'x': x,
'y': y,
'cx': cx,
'cy': cy,
'prst': prst,
'flip': flip,
})
return parse_xml(xml)
@staticmethod
class CT_ConnectorNonVisual(BaseOxmlElement):
"""
``<p:nvCxnSpPr>`` element, container for the non-visual properties of
a connector, such as name, id, etc.
"""
cNvPr = OneAndOnlyOne('p:cNvPr')
cNvCxnSpPr = OneAndOnlyOne('p:cNvCxnSpPr')
nvPr = OneAndOnlyOne('p:nvPr')
class CT_NonVisualConnectorProperties(BaseOxmlElement):
"""
`p:cNvCxnSpPr` element, container for the non-visual properties specific
to a connector shape, such as connections and connector locking.
"""
_tag_seq = (
'a:cxnSpLocks', 'a:stCxn', 'a:endCxn', 'a:extLst'
)
stCxn = ZeroOrOne('a:stCxn', successors=_tag_seq[2:])
endCxn = ZeroOrOne('a:endCxn', successors=_tag_seq[3:])
del _tag_seq
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
75,
19875,
2183,
5002,
6097,
329,
5485,
12,
5363,
23735,
4847,
13,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
11485,
1330,
21136,
62,
1... | 2.129613 | 1,111 |
#!/usr/bin/python
# -*- coding: utf8 -*-
import numpy as np
import pandas as pd
import collections
import pandas_ml as pdml
from pandas_ml.confusion_matrix.stats import binom_interval, class_agreement, prop_test
class ConfusionMatrixAbstract(object):
    """
    Abstract base class for confusion matrices.

    You shouldn't instantiate this class directly.
    You might instantiate ConfusionMatrix or BinaryConfusionMatrix classes.

    Subclasses are expected to provide ``self._df_confusion`` (a pandas
    DataFrame of counts with actual classes as index and predicted classes
    as columns), ``self._len``, ``self.true_name``, ``self.pred_name`` and
    the ``y_true()`` / ``y_pred()`` accessors.
    """
    TRUE_NAME = 'Actual'
    PRED_NAME = 'Predicted'

    @property
    def classes(self):
        """
        Returns classes (property).
        """
        return(self._classes())

    def _classes(self, df=None):
        """
        Returns classes (method).

        :param df: optional DataFrame to read the classes from;
            defaults to ``self.to_dataframe()``.
        """
        if df is None:
            df = self.to_dataframe()
        # Index.union replaces the `|` operator, which is deprecated for
        # set operations on pandas Index objects.
        idx_classes = df.columns.union(df.index).copy()
        idx_classes.name = 'Classes'
        return(idx_classes)

    def to_dataframe(self, normalized=False, calc_sum=False,
                     sum_label='__all__'):
        """
        Returns the confusion matrix as a pandas DataFrame.

        :param normalized: if True, divide each row by its row sum.
        :param calc_sum: if True, append a summary row and column
            labelled `sum_label`.
        :param sum_label: label of the appended summary row/column.
        """
        if normalized:
            a = self._df_confusion.values.astype('float')
            a = a.astype('float') / a.sum(axis=1)[:, np.newaxis]
            df = pd.DataFrame(a,
                              index=self._df_confusion.index.copy(),
                              columns=self._df_confusion.columns.copy())
        else:
            df = self._df_confusion
        if calc_sum:
            df = df.copy()
            df[sum_label] = df.sum(axis=1)
            df = pd.concat([df, pd.DataFrame(df.sum(axis=0), columns=[sum_label]).T])
        # NOTE: when normalized and calc_sum are both False this sets the
        # index name on the stored frame itself (kept for compatibility).
        df.index.name = self.true_name
        return(df)

    @property
    def true(self):
        """
        Returns sum of actual (true) values for each class.
        """
        s = self.to_dataframe().sum(axis=1)
        s.name = self.true_name
        return(s)

    @property
    def pred(self):
        """
        Returns sum of predicted values for each class.
        """
        s = self.to_dataframe().sum(axis=0)
        s.name = self.pred_name
        return(s)

    def to_array(self, normalized=False, sum=False):
        """
        Returns the confusion matrix as a Numpy array.

        `sum` shadows the builtin but is kept for backward compatibility;
        it is forwarded to `to_dataframe` as `calc_sum`.
        """
        return(self.to_dataframe(normalized, sum).values)

    def toarray(self, *args, **kwargs):
        """
        See to_array.
        """
        return(self.to_array(*args, **kwargs))

    def len(self):
        """
        Returns len of a confusion matrix.
        For example: 3 means that this is a 3x3 (3 rows, 3 columns) matrix.
        """
        return(self._len)

    def sum(self):
        """
        Returns sum of a confusion matrix.
        Also called "population".
        It should be the number of elements of either y_true or y_pred.
        """
        return(self.to_dataframe().sum().sum())

    @property
    def population(self):
        """
        See also sum.
        """
        return(self.sum())

    @property
    def title(self):
        """
        Returns title.
        """
        if self.is_binary:
            return("Binary confusion matrix")
        else:
            return("Confusion matrix")

    def plot(self, normalized=False, backend='matplotlib',
             ax=None, max_colors=10, **kwargs):
        """
        Plots confusion matrix.

        :param normalized: plot row-normalized values instead of counts.
        :param backend: either 'matplotlib' or 'seaborn'.
        :param ax: unused for the matplotlib backend (a new figure is
            always created); kept for interface compatibility.
        :param max_colors: threshold above which a continuous colorbar
            is drawn.
        :return: the axes holding the plot.
        :raises ValueError: if `backend` is not supported.
        """
        df = self.to_dataframe(normalized)

        try:
            cmap = kwargs['cmap']
        except KeyError:
            import matplotlib.pyplot as plt
            cmap = plt.cm.gray_r

        title = self.title
        if normalized:
            title += " (normalized)"

        if backend == 'matplotlib':
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(figsize=(9, 8))
            plt.imshow(df, cmap=cmap, interpolation='nearest')  # imshow / matshow
            ax.set_title(title)

            tick_marks_col = np.arange(len(df.columns))
            tick_marks_idx = tick_marks_col.copy()
            ax.set_yticks(tick_marks_idx)
            ax.set_xticks(tick_marks_col)
            ax.set_xticklabels(df.columns, rotation=45, ha='right')
            ax.set_yticklabels(df.index)

            ax.set_ylabel(df.index.name)
            ax.set_xlabel(df.columns.name)

            # Only draw a continuous colorbar when the value range is too
            # wide for a discrete scale to make sense.
            N_max = self.max()
            if N_max > max_colors:
                plt.colorbar()
            return ax
        elif backend == 'seaborn':
            import seaborn as sns
            ax = sns.heatmap(df, **kwargs)
            return ax
        else:
            msg = "'backend' must be either 'matplotlib' or 'seaborn'"
            raise ValueError(msg)

    def binarize(self, select):
        """Returns a binary confusion matrix from a confusion matrix,
        treating the class(es) in `select` as the positive class."""
        if not isinstance(select, collections.abc.Iterable):
            # `collections.Iterable` was removed in Python 3.10; also wrap
            # scalar labels in a list instead of a 0-d numpy array, which
            # does not support membership tests.
            select = [select]

        y_true_bin = self.y_true().map(lambda x: x in select)
        y_pred_bin = self.y_pred().map(lambda x: x in select)

        from pandas_ml.confusion_matrix.bcm import BinaryConfusionMatrix
        binary_cm = BinaryConfusionMatrix(y_true_bin, y_pred_bin)

        return(binary_cm)

    def enlarge(self, select):
        """
        Enlarges confusion matrix with new classes.
        It should add empty rows and columns.
        """
        if not isinstance(select, collections.abc.Iterable):
            idx_new_cls = pd.Index([select])
        else:
            idx_new_cls = pd.Index(select)
        new_idx = self._df_confusion.index.union(idx_new_cls)
        new_idx.name = self.true_name
        new_col = self._df_confusion.columns.union(idx_new_cls)
        new_col.name = self.pred_name
        # reindex (not .loc) because modern pandas raises KeyError when
        # .loc is used with labels that are not present yet.
        self._df_confusion = self._df_confusion.reindex(columns=new_col)
        # ToFix: row enlargement still missing; reindexing rows used to
        # raise KeyError: 'the label [True] is not in the [index]'.

    @property
    def stats_overall(self):
        """
        Returns an OrderedDict with overall statistics.
        """
        df = self._df_confusion

        d_stats = collections.OrderedDict()

        d_class_agreement = class_agreement(df)

        key = 'Accuracy'
        try:
            d_stats[key] = d_class_agreement['diag']  # 0.35
        except KeyError:
            d_stats[key] = np.nan

        key = '95% CI'
        try:
            d_stats[key] = binom_interval(np.sum(np.diag(df)), df.sum().sum())  # (0.1539, 0.5922)
        except Exception:  # noqa -- best effort: the interval is optional
            d_stats[key] = np.nan

        d_prop_test = prop_test(df)

        d_stats['No Information Rate'] = 'ToDo'  # 0.8
        d_stats['P-Value [Acc > NIR]'] = d_prop_test['p.value']  # 1
        d_stats['Kappa'] = d_class_agreement['kappa']  # 0.078
        d_stats['Mcnemar\'s Test P-Value'] = 'ToDo'  # np.nan

        return(d_stats)

    @property
    def stats_class(self):
        """
        Returns a DataFrame with class statistics.
        """
        df = pd.DataFrame(columns=self.classes)

        # ToDo: avoid these for loops.
        for cls in self.classes:
            binary_cm = self.binarize(cls)
            binary_cm_stats = binary_cm.stats()
            for key, value in binary_cm_stats.items():
                df.loc[key, cls] = value

        # Maps the short statistic keys to human readable row labels.
        d_name = {
            'population': 'Population',
            'P': 'P: Condition positive',
            'N': 'N: Condition negative',
            'PositiveTest': 'Test outcome positive',
            'NegativeTest': 'Test outcome negative',
            'TP': 'TP: True Positive',
            'TN': 'TN: True Negative',
            'FP': 'FP: False Positive',
            'FN': 'FN: False Negative',
            'TPR': 'TPR: (Sensitivity, hit rate, recall)',  # True Positive Rate
            'TNR': 'TNR=SPC: (Specificity)',  # True Negative Rate
            'PPV': 'PPV: Pos Pred Value (Precision)',
            'NPV': 'NPV: Neg Pred Value',
            'prevalence': 'Prevalence',
            # 'xxx': 'xxx: Detection Rate',
            # 'xxx': 'xxx: Detection Prevalence',
            # 'xxx': 'xxx: Balanced Accuracy',
            'FPR': 'FPR: False-out',
            'FDR': 'FDR: False Discovery Rate',
            'FNR': 'FNR: Miss Rate',
            'ACC': 'ACC: Accuracy',
            'F1_score': 'F1 score',
            'MCC': 'MCC: Matthews correlation coefficient',
            'informedness': 'Informedness',
            'markedness': 'Markedness',
            'LRP': 'LR+: Positive likelihood ratio',
            'LRN': 'LR-: Negative likelihood ratio',
            'DOR': 'DOR: Diagnostic odds ratio',
            'FOR': 'FOR: False omission rate',
        }

        df.index = df.index.map(lambda id: self._name_from_dict(id, d_name))
        return(df)

    def stats(self, lst_stats=None):
        """
        Return an OrderedDict with statistics.

        `lst_stats` is accepted for interface compatibility but is
        currently ignored (all statistics are always computed).
        """
        d_stats = collections.OrderedDict()
        d_stats['cm'] = self
        d_stats['overall'] = self.stats_overall
        d_stats['class'] = self.stats_class
        return(d_stats)

    def _name_from_dict(self, key, d_name):
        """
        Returns name (value in dict d_name
        or key if key doesn't exist in d_name).
        """
        try:
            return(d_name[key])
        except (KeyError, TypeError):
            return(key)

    def _str_dict(self, d, line_feed_key_val='\n',
                  line_feed_stats='\n\n', d_name=None):
        """
        Return a string representation of a dictionary.
        """
        s = ""
        for i, (key, val) in enumerate(d.items()):
            name = self._name_from_dict(key, d_name)
            if i != 0:
                s = s + line_feed_stats
            s = s + "%s:%s%s" % (name, line_feed_key_val, val)
        return(s)

    def _str_stats(self, lst_stats=None):
        """
        Returns a string representation of statistics.
        """
        d_stats_name = {
            "cm": "Confusion Matrix",
            "overall": "Overall Statistics",
            "class": "Class Statistics",
        }

        stats = self.stats(lst_stats)

        d_stats_str = collections.OrderedDict([
            ("cm", str(stats['cm'])),
            ("overall", self._str_dict(
                stats['overall'],
                line_feed_key_val=' ', line_feed_stats='\n')),
            ("class", str(stats['class'])),
        ])

        s = self._str_dict(
            d_stats_str, line_feed_key_val='\n\n',
            line_feed_stats='\n\n\n', d_name=d_stats_name)
        return(s)

    def print_stats(self, lst_stats=None):
        """
        Prints statistics.
        """
        print(self._str_stats(lst_stats))

    def get(self, actual=None, predicted=None):
        """
        Get confusion matrix value for a given
        actual class and a given predicted class.
        If only one parameter is given (actual or predicted)
        we get confusion matrix value for actual=actual and predicted=actual.
        """
        if actual is None:
            actual = predicted
        if predicted is None:
            predicted = actual
        return(self.to_dataframe().loc[actual, predicted])

    def max(self):
        """
        Returns max value of confusion matrix.
        """
        return(self.to_dataframe().max().max())

    def min(self):
        """
        Returns min value of confusion matrix.
        """
        return(self.to_dataframe().min().min())

    @property
    def is_binary(self):
        """Return False."""
        return(False)

    @property
    def classification_report(self):
        """
        Returns a DataFrame with classification report.
        """
        columns = np.array(['precision', 'recall', 'F1_score', 'support'])
        index = self.classes
        df = pd.DataFrame(index=index, columns=columns)

        for cls in self.classes:
            binary_cm = self.binarize(cls)
            for stat in columns:
                df.loc[cls, stat] = getattr(binary_cm, stat)

        total_support = df.support.sum()
        # Support-weighted average of every column except 'support'.
        df.loc['__avg / total__', :] = (df[df.columns[:-1]].transpose() * df.support).sum(axis=1) / df.support.sum()
        df.loc['__avg / total__', 'support'] = total_support
        return(df)

    def _avg_stat(self, stat):
        """
        Binarizes confusion matrix
        and returns (weighted) average statistics.
        """
        s_values = pd.Series(index=self.classes)
        for cls in self.classes:
            binary_cm = self.binarize(cls)
            v = getattr(binary_cm, stat)
            s_values[cls] = v
        # Weight each per-class statistic by the class's true frequency.
        value = (s_values * self.true).sum() / self.population
        return(value)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
17268,
198,
198,
11748,
19798,
292,
62,
4029... | 2.02647 | 6,649 |
from django.conf.urls import url
from . import views
# Route table for this app: maps ^register$ to the register view.
# NOTE(review): django.conf.urls.url is deprecated since Django 2.0 and
# removed in Django 4.0; migrate to django.urls.re_path when upgrading.
urlpatterns = [
    url(r'^register$', views.register),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
220,
198,
6738,
764,
1330,
5009,
198,
220,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
30238,
3,
3256,
5009,
13,
30238,
828,
198,
60,
198
... | 2.613636 | 44 |
# @Time: 2022/3/31 11:32
# @Author: chang liu
# @Email: chang_liu_tamu@gmail.com
# @File:20-Handling-Errors.py
# NOTE(review): this file is a stripped FastAPI "Handling Errors" tutorial
# outline -- every decorator below is missing its decorated function body,
# so the module is NOT syntactically valid Python as it stands.
# Use HTTPException
from fastapi import FastAPI, HTTPException
app = FastAPI()
items = {"foo": "The Foo Wrestlers"}
@app.get("/items/{item_id}")
#raise an exception
# Add custom headers
@app.get("/items-header/{item_id}")
# Install custom exception handlers
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
app = FastAPI()
# NOTE(review): UnicornException is referenced but never defined here.
@app.exception_handler(UnicornException)
@app.get("/unicorns/{name}")
# Override the default exception handlers
from fastapi import FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.responses import PlainTextResponse
from starlette.exceptions import HTTPException as StarletteHTTPException
app = FastAPI()
@app.exception_handler(StarletteHTTPException)
@app.exception_handler(RequestValidationError)
@app.get("/items/{item_id}")
# Use the RequestValidationError body
from fastapi import FastAPI, Request, status
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from pydantic import BaseModel
app = FastAPI()
@app.exception_handler(RequestValidationError)
@app.post("/items/")
# Re-use FastAPI's exception handlers
from fastapi import FastAPI, HTTPException
from fastapi.exception_handlers import (
    http_exception_handler,
    request_validation_exception_handler,
)
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException
app = FastAPI()
@app.exception_handler(StarletteHTTPException)
@app.exception_handler(RequestValidationError)
@app.get("/items/{item_id}")
'''
FastAPI's HTTPException vs Starlette's HTTPException¶
FastAPI has its own HTTPException.
And FastAPI's HTTPException error class inherits from Starlette's HTTPException error class.
The only difference, is that FastAPI's HTTPException allows you to add headers to be included in the response.
'''
2,
2488,
7575,
25,
33160,
14,
18,
14,
3132,
1367,
25,
2624,
198,
2,
2488,
13838,
25,
1488,
7649,
84,
198,
2,
2488,
15333,
25,
1488,
62,
4528,
84,
62,
83,
321,
84,
31,
14816,
13,
785,
198,
2,
2488,
8979,
25,
1238,
12,
12885,
13... | 3.352657 | 621 |
# Error handling for matrix2latex.py,
# todo: don't yell at errors, fix them!
# To clean the code, error handling is moved to small functions
# "expected %s to be a str, got %s" % (key, type(value))
# assert length == n,\
# "Error: %g of %g alignments given '%s'\n" % (length, n, value)
# assert isinstance(value, list),\
# "Expected %s to be a list, got %s" % (key, type(value))
# for e in value:
# assertStr(e, "%s element" % key)
| [
2,
13047,
9041,
329,
17593,
17,
17660,
87,
13,
9078,
11,
198,
2,
284,
4598,
25,
836,
470,
28946,
379,
8563,
11,
4259,
606,
0,
198,
2,
1675,
3424,
262,
2438,
11,
4049,
9041,
318,
3888,
284,
1402,
5499,
198,
2,
220,
220,
220,
220,... | 2.349515 | 206 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" builder模块
建造者模块
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
37811,
27098,
162,
101,
94,
161,
251,
245,
201,
198,
161,
119,
118,
34460,
254,
38519,
162,
101,
... | 1.425926 | 54 |
##===----------------- _memory.pyx - dpctl module -------*- Cython -*------===##
##
## Data Parallel Control (dpCtl)
##
## Copyright 2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##===----------------------------------------------------------------------===##
##
## \file
## This top-level dpctl module.
##
##===----------------------------------------------------------------------===##
"""
Data Parallel Control (dpCtl)
dpCtl provides a lightweight Python abstraction over DPC++/SYCL and
OpenCL runtime objects. The DPC++ runtime wrapper objects can be
accessed by importing dpctl. The OpenCL runtime wrapper objects can be
accessed by importing dpctl.ocldrv. The library is in an early-beta
stage of development and not yet ready for production usage.
dpCtl's intended usage is as a common SYCL interoperability layer for
different Python libraries and applications. The OpenCL support inside
dpCtl is slated to be deprecated and then removed in future releases
of the library.
Currently, only a small subset of DPC++ runtime objects are exposed
through the dpctl module. The main API classes are defined in the _sycl_core.pyx file.
Please use `pydoc dpctl._sycl_core` to look at the current API for dpctl.
Please use `pydoc dpctl.ocldrv` to look at the current API for dpctl.ocldrv.
"""
__author__ = "Intel Corp."
from ._sycl_core import *
from ._version import get_versions
def get_include():
    """
    Return the directory that contains the dpCtl *.h header files.

    Extension modules that need to be compiled against dpCtl should use
    this function to locate the appropriate include directory.
    """
    import os.path
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, "include")
# Resolve the package version (presumably versioneer-generated -- confirm
# against ._version), then drop the helper from the public namespace.
__version__ = get_versions()["version"]
del get_versions
| [
2235,
18604,
1783,
12,
4808,
31673,
13,
9078,
87,
532,
288,
79,
34168,
8265,
35656,
9,
12,
327,
7535,
532,
9,
23031,
18604,
2235,
198,
2235,
198,
2235,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 3.522189 | 676 |
# Panel-group registration constants -- presumably consumed by a dashboard
# plugin loader (looks like OpenStack Horizon; confirm against the project).
PANEL_GROUP = 'policy'
# Display name of the panel group.
PANEL_GROUP_NAME = 'Policy'
# Slug of the dashboard the panel group is attached to.
PANEL_GROUP_DASHBOARD = 'admin'
| [
47,
1565,
3698,
62,
46846,
796,
705,
30586,
6,
198,
47,
1565,
3698,
62,
46846,
62,
20608,
796,
705,
36727,
6,
198,
47,
1565,
3698,
62,
46846,
62,
35,
11211,
8202,
9795,
796,
705,
28482,
6,
198
] | 2.243243 | 37 |
# -*- coding: utf-8 -*-
"""Untitled1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1E7m0EoAUNm8hn-rtemotVovsJVUjqLhS
"""
from torchvision.transforms import Normalize
import skimage.io
from skimage.transform import resize
import glob
from sklearn.metrics import roc_auc_score
from torchvision import transforms
import matplotlib.pyplot as plt
from torchvision import datasets
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from time import time
from sklearn.metrics import roc_curve, auc, precision_score,recall_score
import numpy as np
from PIL import Image
from facenet_pytorch import MTCNN
import numpy as np
import glob
from sklearn.metrics import roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
# Hyper-parameters and data configuration for a real/fake face classifier
# trained on a Kaggle dataset.
img_size = (220,220)
batch_size = 32
test_batch_size = 32
mean = [0, 0, 0]
std = [1, 1, 1]
lr=0.050
wd=0.0
decay_step=1
lr_decay=0.96
log_interval=200
num_epochs =11
epoch = 0
lam=1
# Class counts used to derive inverse-frequency class weights.
neg=len(glob.glob('/kaggle/working/train/real/*.jpg'))
pos=len(glob.glob('/kaggle/working/train/fake/*.jpg'))
total=neg+pos
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
# With mean=[0,0,0] and std=[1,1,1] the Normalize step is a no-op.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])
data_train_dir='/kaggle/working/train'
data_valid_dir='/kaggle/working/test'
dataset_train = datasets.ImageFolder(data_train_dir, transform=transform)
dataset_valid = datasets.ImageFolder(data_valid_dir, transform=transform)
train_loader = torch.utils.data.DataLoader(
    dataset_train,
    batch_size=batch_size, shuffle=True, num_workers=2,pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
    dataset_valid,
    batch_size=test_batch_size, shuffle=False
)
# NOTE(review): everything below only runs when CUDA is available, so on a
# CPU-only machine the training loop raises NameError (optimizer etc. are
# never defined). Also, `classifier`, `noise_net`, `net` and `run_epoch`
# are not defined anywhere in the visible part of this file.
if torch.cuda.is_available():
    device = torch.device('cuda')
    # NOTE(review): BCELoss(reduce=...) and L1Loss(size_average=...) use
    # kwargs deprecated in favor of `reduction=` in modern PyTorch.
    loss_criterion = nn.BCELoss(reduce=False)
    loss_criterion=loss_criterion.cuda()
    loss_criterion2 = nn.L1Loss(size_average=True)
    loss_criterion2=loss_criterion2.cuda()
    classifier=classifier.cuda()
    noise_net=noise_net.cuda()
    params = list(classifier.parameters()) + list(net.parameters())
    optimizer = optim.Adam(params, lr=lr, weight_decay=wd)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=decay_step, gamma=lr_decay)
# Main train/validate loop; one scheduler step per epoch.
while epoch < num_epochs:
    train_metrics = run_epoch('train', train_loader, 1, optimizer, epoch=epoch, loss_criterion=loss_criterion)
    valid_metrics = run_epoch('valid', valid_loader, 1, epoch=epoch, loss_criterion=loss_criterion)
    print('Train Metrics : {}, Validation Metrics : {}'.format(str(train_metrics), str(valid_metrics)))
    print(epoch)
    epoch += 1
    lr_scheduler.step()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
46332,
16,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
1378,
403... | 2.589354 | 1,052 |
import typing
from flask import current_app
from werkzeug.local import LocalProxy
from flask_currency.model import CurrencyMixin
# Lazy proxy resolving to the currency extension registered on the
# currently-active Flask application.
curr = LocalProxy(lambda: current_app.extensions["currency"])
| [
11748,
19720,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
266,
9587,
2736,
1018,
13,
12001,
1330,
10714,
44148,
198,
198,
6738,
42903,
62,
34415,
13,
19849,
1330,
20113,
35608,
259,
198,
198,
22019,
81,
796,
10714,
44148,
7,... | 3.611111 | 54 |
import pygame
import random
import time
import sys
class GameRound:
"""Round handler class"""
@staticmethod
| [
11748,
12972,
6057,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
25064,
198,
198,
4871,
3776,
22685,
25,
198,
220,
220,
220,
37227,
22685,
21360,
1398,
37811,
628,
220,
220,
220,
2488,
12708,
24396,
198
] | 3.371429 | 35 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: v.stone@163.com
"""
RobotFramework Listener 的使用指南:
http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#listener-interface
Allure RobotFramework 插件的 GitHub 仓库:
https://github.com/allure-framework/allure-python/tree/master/allure-robotframework
Allure RobotFramework 插件的 RF Listener 源码
https://github.com/allure-framework/allure-python/blob/master/allure-robotframework/src/listener/robot_listener.py
这里直接复制上述监听器内容,加以修改使得在 CLI 界面上也能看到日志
"""
import os
import allure_commons
from allure_commons.lifecycle import AllureLifecycle
from allure_commons.logger import AllureFileLogger
from allure_robotframework.allure_listener import AllureListener
from allure_robotframework.types import RobotKeywordType
# Default directory (relative to the working directory) where Allure
# result files are written.
DEFAULT_OUTPUT_PATH = os.path.join('output', 'allure-results')


# noinspection PyPep8Naming
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
410,
13,
6440,
31,
24136,
13,
785,
198,
37811,
198,
14350,
313,
21055,
6433,
7343,
877,
13328,
248,
... | 2.315789 | 380 |
# Script entry point.
# NOTE(review): `main` is not defined in the visible part of this file.
if __name__ == '__main__':
    main()
| [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.166667 | 18 |
import torch
from difflr.models import LinearClassifierDSC
from difflr.data import CIFARDataset
from difflr import CONFIG
from difflr.experiments import Tuner
import time
from difflr.utils import check_gpu
# Fail early if no GPU is available (behavior defined in difflr.utils).
check_gpu()
# Run the real experiment, not a dry run.
CONFIG.DRY_RUN = False
# NOTE(review): `main` is not defined in the visible part of this file.
if __name__ == '__main__':
    main()
| [
11748,
28034,
198,
6738,
814,
14050,
13,
27530,
1330,
44800,
9487,
7483,
5258,
34,
198,
6738,
814,
14050,
13,
7890,
1330,
327,
5064,
9795,
265,
292,
316,
198,
6738,
814,
14050,
1330,
25626,
198,
6738,
814,
14050,
13,
23100,
6800,
1330,
... | 3.031915 | 94 |
import weakref
# Dictionary that keeps only weak references to its values.
d = weakref.WeakValueDictionary()
# NOTE(review): `A` is not defined in the visible part of this file.
# No strong reference to the A() instance is kept, so it may be collected
# immediately and the printed key list may be empty.
d['a'] = A()
print(list(d.keys()))
| [
11748,
4939,
5420,
628,
198,
198,
67,
796,
4939,
5420,
13,
44898,
11395,
35,
14188,
3419,
198,
67,
17816,
64,
20520,
796,
317,
3419,
198,
4798,
7,
4868,
7,
67,
13,
13083,
3419,
4008,
198
] | 2.485714 | 35 |
import simplejson
import httplib2
import wsgi_intercept
from wsgi_intercept import httplib2_intercept
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.model.user import User
from test.fixtures import make_test_env, make_fake_space, get_auth
def test_mutual_subscription():
    """
    Subscription should not result in the same bag showing up more than once.
    """
    cookie = get_auth('cdent', 'bar')
    response, content = add_subscription('fnd', 'cdent', cookie=cookie)
    assert response['status'] == '204'

    recipe = store.get(Recipe('cdent_public'))
    bag_names = [bag for bag, _filter in recipe.get_recipe()]
    assert len(bag_names) == len(set(bag_names))
def test_unsubscribe():
    """
    Remove a space from a subscription list.

    XXX What happens when there's additional bags (other than public
    and private) from the recipe of the subscribed space and the subscribed
    space has changed since we first subscribed? How do we know what to
    remove from the recipe?

    XXX And what happens with subscription in general if a space is
    subscribed to another space that then goes away? (A non-existent bag
    in a recipe will cause an error.)
    """
    def check_recipe_lengths(public_length, private_length):
        # Both fnd recipes must have exactly the expected number of entries.
        for name, expected in (('fnd_public', public_length),
                               ('fnd_private', private_length)):
            fetched = store.get(Recipe(name))
            assert len(fetched.get_recipe()) == expected

    # A normal unsubscription succeeds.
    response, content = remove_subscription('psd', 'fnd')
    assert response['status'] == '204'
    check_recipe_lengths(8, 9)

    # Unsubscribing a non-existent space is rejected and changes nothing.
    response, content = remove_subscription('spanner', 'fnd')
    assert response['status'] == '409'
    assert 'Invalid content for unsubscription' in content
    check_recipe_lengths(8, 9)

    # A space may not unsubscribe from itself.
    response, content = remove_subscription('fnd', 'fnd')
    assert response['status'] == '409'
    assert 'Attempt to unsubscribe self' in content
    check_recipe_lengths(8, 9)

    # Mutual subscriptions: cdent is subscribed to fnd and vice versa.
    # Removing cdent must only drop the cdent bags, never fnd's own bags.
    # The matching is done on bag.name only, not on [bag, filter], so this
    # is not perfect -- mirrored from the implementation's limitation.
    response, content = remove_subscription('cdent', 'fnd')
    assert response['status'] == '204'
    check_recipe_lengths(7, 8)
| [
11748,
2829,
17752,
198,
11748,
1841,
489,
571,
17,
198,
11748,
266,
82,
12397,
62,
3849,
984,
198,
198,
6738,
266,
82,
12397,
62,
3849,
984,
1330,
1841,
489,
571,
17,
62,
3849,
984,
198,
198,
6738,
256,
1638,
306,
12384,
13,
19849,... | 2.936406 | 1,085 |
import numpy as np
from sklearn.utils import check_random_state
from .gaussian_likelihood_fast import compute_gaussian_likelihood
from .distributions import spherical_normal_log_pdf
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
35720,
13,
26791,
1330,
2198,
62,
25120,
62,
5219,
198,
6738,
764,
4908,
31562,
62,
2339,
11935,
62,
7217,
1330,
24061,
62,
4908,
31562,
62,
2339,
11935,
198,
6738,
764,
17080,
2455,
... | 3.528302 | 53 |
from .events import EventScheduler, Priority, event_handler
| [
6738,
764,
31534,
1330,
8558,
50,
1740,
18173,
11,
34416,
11,
1785,
62,
30281,
198
] | 4 | 15 |
# -*- encoding: utf-8 -*-
__author__ = 'krolev'
from read_conll_data import *
# from Perceptron import Perceptron
from Perceptron3 import Perceptron3
from codecs import open
from matrice_confusion import ConfusionMatrix
import yaml
import codecs
from read_conll_data import *
def TaggingCorpora(language="de", universal=False, outputName="sortie.yaml"):
    """
    This function is just a shortcut. It tags the corpus and saves the
    guessed sentences to a YAML file so the output can be used as input
    to the confusion matrix.

    The commented-out parts are the conditions that were used to merge
    classes within each corpus.

    :param language: string identifying the corpus language
    :param universal: boolean selecting universal vs. fine-grained tags
    :param outputName: name of the output file
    :return: None
    """
    perceptron = Perceptron3()
    if universal:
        (train_data, dev_data, test_data) = get_universal_treebank_data(language)
        ## The commented block below lists the tag merges applied to the training corpus
        # if language == "fr":
        #     for i in range(len(train_data)):
        #         for k in range(len(train_data[i][1])):
        #             if train_data[i][1][k]=="X":
        #                 train_data[i][1][k]="NOUN"
    else:
        (train_data, dev_data, test_data) = get_tiger_corpus_data()
        ## The commented block below lists the tag merges applied to the training corpus
        # for i in range(len(train_data)):
        #     for k in range(len(train_data[i][1])):
        #         if train_data[i][1][k]=="PTKANT":
        #             train_data[i][1][k]="PROAV"
        #         elif train_data[i][1][k]=="VVIMP":
        #             train_data[i][1][k]="NE"
        #         elif train_data[i][1][k]=="PTKA":
        #             train_data[i][1][k]="APPR"
        #         elif train_data[i][1][k]=="FM":
        #             train_data[i][1][k]="NE"
        #         if train_data[i][1][k]=="XY":
        #             train_data[i][1][k]="NE"
    # Train for 10 iterations, then average the weights (averaged perceptron).
    perceptron.train(train_data, dev_data, 10)
    perceptron.average_weights()
    print("Résultats sur le corpus de test : {0}".format(perceptron.evaluate(test_data)))
    # Dump the tagged sentences so the confusion-matrix step can reload them.
    datas = perceptron.tag(test_data)
    yaml.dump(datas, open(outputName, 'w', 'utf-8'), allow_unicode=True, default_flow_style=False)
################################
#
# To run the lines in the main block below, simply comment/uncomment
# the different Perceptron instances.
# There are three instances, i.e. three blocks.
#
#
#
################################
if __name__ == '__main__':

    # Tagging the German CoNLL corpus (universal tagset):
    ## Training on the corpus
    TaggingCorpora(language="de", universal=True, outputName="deutsch_univTagSet3.yaml")
    ## Inspecting the confusion matrix
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; prefer yaml.safe_load.
    with codecs.open("deutsch_univTagSet3.yaml","r","utf-8") as fichier:
        guessed_data = yaml.load(fichier)
    (train_data, dev_data, test_data) = get_universal_treebank_data("de")
    matriceConfusion = ConfusionMatrix(guessed_data,test_data)
    ### str(matriceConfusion) renders the current state of the matrix
    with codecs.open("deutsch_univTagSet3.csv","w","utf-8") as sortiee:
        sortiee.write(str(matriceConfusion))
    print()
    ### confused_classes() uses a tolerance threshold to list the
    ### elements that could potentially be merged
    for element in matriceConfusion.confused_classes():
        print(element)

    # Tagging the French CoNLL corpus (universal tagset):
    ## Training on the corpus
    TaggingCorpora(language="fr", universal=True, outputName="french_univTagSet3.yaml")
    ## Inspecting the confusion matrix
    with codecs.open("french_univTagSet3.yaml","r","utf-8") as fichier:
        guessed_data = yaml.load(fichier)
    (train_data, dev_data, test_data) = get_universal_treebank_data("fr")
    matriceConfusion = ConfusionMatrix(guessed_data,test_data)
    ### str(matriceConfusion) renders the current state of the matrix
    with codecs.open("french_univTagSet3.csv","w","utf-8") as sortiee:
        sortiee.write(str(matriceConfusion))
    print()
    ### confused_classes() uses a tolerance threshold to list the
    ### elements that could potentially be merged
    for element in matriceConfusion.confused_classes():
        print(element)

    # Tagging the TIGER corpus with the rich tagset:
    ## Training on the corpus
    TaggingCorpora(outputName="tiger_corpus_richTagSet2.yaml")
    with codecs.open("tiger_corpus_richTagSet2.yaml","r","utf-8") as fichier:
        guessed_data = yaml.load(fichier)
    (train_data, dev_data, test_data) = get_tiger_corpus_data()
    matriceConfusion = ConfusionMatrix(guessed_data, test_data)
    ### str(matriceConfusion) renders the current state of the matrix
    with codecs.open("tiger_corpus_richTagSet2.matrix","w","utf-8") as sortiee:
        sortiee.write(str(matriceConfusion))
    print()
    ### confused_classes() uses a tolerance threshold to list the
    ### elements that could potentially be merged
    for element in matriceConfusion.confused_classes():
        print(element)
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
834,
9800,
834,
796,
705,
74,
305,
2768,
6,
201,
198,
201,
198,
6738,
1100,
62,
1102,
297,
62,
7890,
1330,
1635,
201,
198,
2,
422,
2448,
984,
1313,
1330,
2448,
... | 2.311313 | 2,316 |
# Generated by Django 3.0.4 on 2020-07-01 12:30
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
2998,
12,
486,
1105,
25,
1270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import unittest
from unittest.mock import Mock, patch
from fplanalyzer.definitions import MIN_GAMEWEEK, MAX_GAMEWEEK
from fplanalyzer.fplrequests import getPlayerAndCaptainNumbers
from fplanalyzer.fpl_api import is_gameweek_valid, get_current_gameweek, get_league_users
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
11,
8529,
198,
198,
6738,
277,
11578,
3400,
9107,
13,
4299,
50101,
1330,
20625,
62,
38,
2390,
6217,
33823,
11,
25882,
62,
38,
2390,
6217,
33823,
198,
6738,
27... | 2.914894 | 94 |
from __future__ import print_function
import argparse
import math
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from tqdm import tqdm
from models import LCV_ours_sub3
# Command-line configuration for 360SD-Net training.
# NOTE(review): the help string 'maxium disparity' has a typo ('maximum');
# left untouched because help text is runtime-visible output.
parser = argparse.ArgumentParser(description='360SD-Net')
parser.add_argument('--maxdisp', type=int, default=68, help='maxium disparity')
parser.add_argument('--model', default='360SDNet', help='select model')
parser.add_argument('--datapath', default='data/MP3D/train/', help='datapath')
parser.add_argument('--datapath_val',
                    default='data/MP3D/val/',
                    help='datapath for validation')
parser.add_argument('--epochs',
                    type=int,
                    default=500,
                    help='number of epochs to train')
parser.add_argument('--start_decay',
                    type=int,
                    default=400,
                    help='number of epoch for lr to start decay')
parser.add_argument('--start_learn',
                    type=int,
                    default=50,
                    help='number of epoch for LCV to start learn')
parser.add_argument('--batch',
                    type=int,
                    default=16,
                    help='number of batch to train')
parser.add_argument('--checkpoint', default=None, help='load checkpoint path')
parser.add_argument('--save_checkpoint',
                    default='./checkpoints',
                    help='save checkpoint path')
parser.add_argument('--tensorboard_path',
                    default='./logs',
                    help='tensorboard path')
parser.add_argument('--no-cuda',
                    action='store_true',
                    default=False,
                    help='disables CUDA training')
parser.add_argument('--real',
                    action='store_true',
                    default=False,
                    help='adapt to real world images')
parser.add_argument('--SF3D',
                    action='store_true',
                    default=False,
                    help='read stanford3D data')
parser.add_argument('--seed',
                    type=int,
                    default=1,
                    metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# tensorboard Path -----------------------
# Suffix the log directory so runs on different data do not collide.
writer_path = args.tensorboard_path
if args.SF3D:
    writer_path += '_SF3D'
if args.real:
    writer_path += '_real'
writer = SummaryWriter(writer_path)
# -----------------------------------------
# import dataloader ------------------------------
# NOTE(review): imports depend on parsed args, hence they live mid-file.
from dataloader import filename_loader as lt
if args.real:
    from dataloader import grayscale_Loader as DA
    print("Real World image loaded!!!")
else:
    from dataloader import RGB_Loader as DA
    print("Synthetic data image loaded!!!")
# -------------------------------------------------
# Random Seed -----------------------------
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# ------------------------------------------
# Create Angle info ------------------------------------------------
# Y angle
# Per-row vertical angle map for a 512x1024 equirectangular image,
# replicated across the 1024 columns.
angle_y = np.array([(i - 0.5) / 512 * 180 for i in range(256, -256, -1)])
angle_ys = np.tile(angle_y[:, np.newaxis, np.newaxis], (1, 1024, 1))
equi_info = angle_ys
# -------------------------------------------------------------------
# Load Data ---------------------------------------------------------
train_up_img, train_down_img, train_up_disp, valid_up_img, valid_down_img, valid_up_disp = lt.dataloader(
    args.datapath, args.datapath_val)
Equi_infos = equi_info
TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFolder(
    Equi_infos, train_up_img, train_down_img, train_up_disp, True),
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=8,
                                             drop_last=False)
ValidImgLoader = torch.utils.data.DataLoader(DA.myImageFolder(
    Equi_infos, valid_up_img, valid_down_img, valid_up_disp, False),
                                             batch_size=args.batch,
                                             shuffle=False,
                                             num_workers=4,
                                             drop_last=False)
# -----------------------------------------------------------------------------------------
# Load model ----------------------------------------------
if args.model == '360SDNet':
    model = LCV_ours_sub3(args.maxdisp)
else:
    raise NotImplementedError('Model Not Implemented!!!')
# ----------------------------------------------------------
# assign initial value of filter cost volume ---------------------------------
init_array = np.zeros((1, 1, 7, 1)) # 7 of filter
init_array[:, :, 3, :] = 28. / 540
init_array[:, :, 2, :] = 512. / 540
model.forF.forfilter1.weight = torch.nn.Parameter(torch.Tensor(init_array))
# -----------------------------------------------------------------------------
# Multi_GPU for model ----------------------------
if args.cuda:
model = nn.DataParallel(model)
model.cuda()
# -------------------------------------------------
# Load Checkpoint -------------------------------
start_epoch = 0
if args.checkpoint is not None:
state_dict = torch.load(args.checkpoint)
model.load_state_dict(state_dict['state_dict'])
start_epoch = state_dict['epoch']
# load pretrain from MP3D for SF3D
if start_epoch == 50 and args.SF3D:
start_epoch = 0
print("MP3D pretrained 50 epoch for SF3D Loaded!!!")
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
# --------------------------------------------------
# Optimizer ----------
optimizer = optim.Adam(model.parameters(), lr=0.01, betas=(0.9, 0.999))
# ---------------------
# Freeze Unfreeze Function
# freeze_layer ----------------------
# if use nn.DataParallel(model), model.module.filtercost
# else use model.filtercost
freeze_layer(model.module.forF.forfilter1)
# Unfreeze_layer --------------------
# ------------------------------------
# Train Function -------------------
# Valid Function -----------------------
# Adjust Learning Rate
# Disparity to Depth Function
# Main Function ----------------------------------
# ----------------------------------------------------------------------------
if __name__ == '__main__':
main()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
... | 2.698947 | 2,375 |
import sys
import os
from Dice3DS import dom3ds
import zlib
import StringIO
import struct
if __name__ == '__main__':
main() | [
11748,
25064,
198,
11748,
28686,
198,
6738,
34381,
18,
5258,
1330,
2401,
18,
9310,
198,
11748,
1976,
8019,
198,
11748,
10903,
9399,
198,
11748,
2878,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3... | 3.073171 | 41 |
from types import SimpleNamespace
import numpy as np
import pytest
from ..cwgan import (
ConditionalMinibatch, ConditionalProber,
RandomChoiceSampler, NaiveRandomChoiceSampler,
DEFAULT_PARAMS,
)
from .test_conditional_prober import mock_model
def access_all_attrs(obj):
""" Access all attributes of `obj`, mostly for smoke tests. """
for name in dir(obj):
getattr(obj, name)
@pytest.mark.parametrize('num_models, probes_per_model, num_bandwidths', [
(1, 3, 5),
(2, 3, 5),
])
@pytest.mark.parametrize('num_models, probes_per_model', [
(1, 3),
(2, 3),
(5, 7),
(8, 64),
])
@pytest.mark.parametrize('sampler_class', [
RandomChoiceSampler,
NaiveRandomChoiceSampler,
])
@pytest.mark.parametrize('seed', [0])
# @pytest.mark.parametrize('seed', list(range(100)))
| [
6738,
3858,
1330,
17427,
36690,
10223,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
11485,
66,
86,
1030,
1330,
357,
198,
220,
220,
220,
9724,
1859,
9452,
571,
963,
11,
9724,
1859,
2964,
527,
11,
19... | 2.455621 | 338 |
import discord
from discord.ext import commands
from main import MY_COLOR, PREFIX #we're using our main colour & prefix that we declared in our main.py file
#this is how to set up a basic cog
| [
11748,
36446,
201,
198,
6738,
36446,
13,
2302,
1330,
9729,
201,
198,
6738,
1388,
1330,
17615,
62,
46786,
11,
22814,
47084,
1303,
732,
821,
1262,
674,
1388,
9568,
1222,
21231,
326,
356,
6875,
287,
674,
1388,
13,
9078,
2393,
201,
198,
2... | 3.6 | 55 |
# Copyright (c) 2017-2020 Wenyi Tang.
# Author: Wenyi Tang
# Email: wenyitang@outlook.com
# Update: 2020 - 2 - 16
import os
import unittest
if not os.getcwd().endswith('Tests'):
os.chdir('Tests')
import numpy as np
from PIL import Image
from VSR.Backend.TF.Util import Vgg
URL = 'data/set5_x2/img_001_SRF_2_LR.png'
image_boy = np.asarray(Image.open(URL))
if __name__ == '__main__':
unittest.main()
| [
2,
220,
15069,
357,
66,
8,
2177,
12,
42334,
370,
28558,
72,
18816,
13,
198,
2,
220,
6434,
25,
370,
28558,
72,
18816,
198,
2,
220,
9570,
25,
266,
28558,
270,
648,
31,
448,
5460,
13,
785,
198,
2,
220,
10133,
25,
12131,
532,
362,
... | 2.338983 | 177 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Main file of pyCFG2Chomsky. """
import sys
from cfg import ContextFreeGrammar as CFG
if __name__ == '__main__':
myCFG = CFG(sys.stdin)
print "End of input."
myCFG.transform_to_Chomsky()
# print "\nChomsky Normal Form Grammar\nRules:"
myCFG.print_rules()
# print "Terminals:"
# for term in myCFG.terminals:
# print term
# print "Nonterminals:"
# print myCFG.nonterminals
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
8774,
2393,
286,
12972,
22495,
38,
17,
1925,
37093,
13,
37227,
198,
198,
11748,
25064,
198,
198,
6738,
3021... | 2.363636 | 198 |
import unittest
import solution
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
11748,
4610,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.59375 | 32 |
import os
main = "cmd/enode/main.go"
print get_version()
| [
11748,
28686,
198,
198,
12417,
796,
366,
28758,
14,
268,
1098,
14,
12417,
13,
2188,
1,
198,
198,
4798,
651,
62,
9641,
3419,
198
] | 2.458333 | 24 |
import discord
import psycopg2 as dbSQL
from discord.ext import commands
from datetime import datetime
from datetime import timedelta
import stackprinter as sp
from bin import zb
from bin import zb_config
_var = zb_config
class InactiveCog(commands.Cog):
""" Forecast, kick, ban, exclude inactive members """
# Hidden means it won't show up on the default help.
@commands.command(name='ialist', hidden=True)
async def server_setup(self, ctx, *args):
"""Command to show inactive members"""
try:
# Ensures only bot owner or user with perms can use command
if zb.is_trusted(ctx,4):
async with ctx.channel.typing():
# Lists current Server Access Roles
sql = """ SELECT name, created_at, real_user_id
FROM (
SELECT DISTINCT ON (m.int_user_id)
m.int_user_id, created_at, u.name, u.real_user_id, m.guild_id
FROM (
SELECT t1.* FROM (
SELECT int_user_id, guild_id, max(created_at) AS MaxCreated
FROM messages
GROUP BY int_user_id, guild_id) t2
JOIN messages t1 on t2.int_user_id = t1.int_user_id
AND t2.guild_id = t1.guild_id
AND t2.MaxCreated = t1.created_at
) m
LEFT JOIN users u ON u.int_user_id = m.int_user_id
ORDER BY m.int_user_id ASC
) s
WHERE guild_id = {0}
AND NOT created_at > now() - INTERVAL '60 day'
AND real_user_id in {1}
AND NOT real_user_id in {2}
ORDER BY created_at ASC; """
ignore = zb.get_member_with_role(ctx,
zb.get_trusted_roles(ctx,_var.maxRoleRanks))
sql = sql.format(str(ctx.guild.id),
zb.sql_list(zb.get_members_ids(ctx)),
zb.sql_list(ignore))
data, rows, string = zb.sql_query(sql)
if rows == 0:
await ctx.send(f'**There are** {0} **inactive members**')
return
# Get members
lst = []
members = []
unMembers = []
now = datetime.utcnow()
i = 0
while i < rows:
undecided = ''
unRole = ctx.guild.get_role(517850437626363925)
member = ctx.guild.get_member(data[i][2])
if unRole in member.roles:
undecided = '❁-'
unMembers.append(member)
diff = (now - data[i][1]).days
lst.append(f'{diff} days -{undecided} {member}')
members.append(member)
# increment loop
i+=1
title = f'**There are** {rows} **inactive members**'
pages, embeds = await zb.build_embed_print(self,ctx,lst,title)
# Starting page
try:
page = int(args[0])
if page < 1:
page = 1
elif page > pages:
page = pages
except:
page = 1
# Purge members
purge = False
try:
check = str(args[0]).lower()
if check == 'purge':
purge = True
except:
pass
if zb.is_trusted(ctx,2):
await ctx.send(f'Purging {len(members)} members.',
delete_after=15)
for member in members:
await member.kick(reason='Purged for inactivity')
elif zb.is_trusted(ctx,3):
await ctx.send(f'Purging {len(unMembers)} members.',
delete_after=15)
for member in unMembers:
await member.kick(reason='Purged for inactivity')
if purge and zb.is_trusted(ctx,3):
await ctx.send('Purged inactive members',
delete_after=15)
await ctx.message.delete()
return
initialEmbed = embeds[page-1]
await zb.print_embed_nav(self,ctx,initialEmbed,embeds,pages,page,'')
except Exception as e:
await zb.bot_errors(ctx,sp.format(e))
| [
11748,
36446,
198,
11748,
17331,
22163,
70,
17,
355,
20613,
17861,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
8931,
1050,
3849,
355,
599,
198,
... | 1.585512 | 3,327 |
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SpanContext encapsulates the current context within the request's trace."""
import logging
import random
import re
import uuid
from opencensus.trace import trace_options
_INVALID_TRACE_ID = '0' * 32
_INVALID_SPAN_ID = 0
_TRACE_HEADER_KEY = 'X_CLOUD_TRACE_CONTEXT'
_TRACE_ID_FORMAT = '[0-9a-f]{32}?'
# Default options, enable tracing
DEFAULT_OPTIONS = 1
# Default trace options
DEFAULT = trace_options.TraceOptions(DEFAULT_OPTIONS)
class SpanContext(object):
"""SpanContext includes 3 fields: traceId, spanId, and an trace_options flag
which indicates whether or not the request is being traced. It contains the
current context to be propagated to the child spans.
:type trace_id: str
:param trace_id: (Optional) Trace_id is a 32 digits uuid for the trace.
If not given, will generate one automatically.
:type span_id: int
:param span_id: (Optional) Identifier for the span, unique within a trace.
:type trace_options: :class: `~opencensus.trace.trace_options.TraceOptions`
:param trace_options: (Optional) TraceOptions indicates 8 trace options.
:type from_header: bool
:param from_header: (Optional) Indicates whether the trace context is
generated from request header.
"""
def __str__(self):
"""Returns a string form of the SpanContext. This is the format of
the Trace Context Header and should be forwarded to downstream
requests as the X-Cloud-Trace-Context header.
:rtype: str
:returns: String form of the SpanContext.
"""
enabled = self.trace_options.enabled
header = '{}/{};o={}'.format(
self.trace_id,
self.span_id,
int(enabled))
return header
def check_span_id(self, span_id):
"""Check the type of span_id to ensure it is int. If it is not int,
first try to convert it to int, if failed to convert, then log a
warning message and set the span_id to None.
:type span_id: int
:param span_id: Identifier for the span, unique within a trace.
:rtype: int
:returns: Span_id for the current span.
"""
if span_id is None:
return None
if span_id == 0:
logging.warning(
'Span_id {} is invalid, cannot be zero.'.format(span_id))
self.from_header = False
return None
if not isinstance(span_id, int):
try:
span_id = int(span_id)
except (TypeError, ValueError):
logging.warning(
'The type of span_id should be int, got {}.'.format(
span_id.__class__.__name__))
self.from_header = False
span_id = None
return span_id
def check_trace_id(self, trace_id):
"""Check the format of the trace_id to ensure it is 32-character hex
value representing a 128-bit number. Also the trace_id cannot be zero.
:type trace_id: str
:param trace_id:
:rtype: str
:returns: Trace_id for the current context.
"""
assert isinstance(trace_id, str)
if trace_id is _INVALID_TRACE_ID:
logging.warning(
'Trace_id {} is invalid (cannot be all zero), '
'generate a new one.'.format(trace_id))
self.from_header = False
return generate_trace_id()
trace_id_pattern = re.compile(_TRACE_ID_FORMAT)
match = trace_id_pattern.match(trace_id)
if match:
return trace_id
else:
logging.warning(
'Trace_id {} does not the match the required format,'
'generate a new one instead.'.format(trace_id))
self.from_header = False
return generate_trace_id()
def generate_span_id():
"""Return the random generated span ID for a span. Must be 16 digits
as Stackdriver Trace V2 API only accepts 16 digits span ID.
:rtype: int
:returns: Identifier for the span. Must be a 64-bit integer other
than 0 and unique within a trace.
"""
span_id = random.randint(10**15, 10**16 - 1)
return span_id
def generate_trace_id():
"""Generate a trace_id randomly.
:rtype: str
:returns: 32 digit randomly generated trace ID.
"""
trace_id = uuid.uuid4().hex
return trace_id
| [
2,
15069,
2177,
11,
4946,
34,
7314,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 2.469118 | 2,040 |
print(dumb()) | [
4798,
7,
67,
2178,
28955
] | 2.6 | 5 |
# Python dictionary is like map in Java
if __name__ == "__main__":
createDictionary()
| [
2,
11361,
22155,
318,
588,
3975,
287,
7349,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
2251,
35,
14188,
3419,
201,
198
] | 2.589744 | 39 |
from matplotlib import pyplot as plt
import os
import pandas as pd
import shutil
from PACKAGES.process import *
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
PROJECT_DIR="C:/Users/brice/Data_science_project"
data_dir=PROJECT_DIR+'/DATA/'
training_dir=PROJECT_DIR+'/DATA/training_data/'
#read the back cover book's
book_cover=cv2.imread(data_dir+'books/book0.jpg')
#reduce the image size in order to show it
book_cover_reduced=resize(book_cover,'down',8,8)[0]
#split the book(we're going to keep the bottom zone of the b)
book_cover_bottom=book_cover_reduced[7*int(book_cover_reduced.shape[0]/10):,:]
cv2.imshow('Basic Image',book_cover_bottom)
cv2.waitKey(0)
# we remove the color
book_cover_gray = cv2.cvtColor(book_cover_bottom, cv2.COLOR_BGR2GRAY)
book_cover_gray=cv2.fastNlMeansDenoising(book_cover_gray,None,10,7,21)
#countours detection with the auto_canny
edgeds = auto_canny(book_cover_gray)
#defining a filtering kernel
kernels = cv2.getStructuringElement(cv2.MORPH_RECT, (15,15))
#filling the gaps to put together the pixels that are near to each other
closed = cv2.morphologyEx(edgeds, cv2.MORPH_CLOSE, kernels)
closed = cv2.dilate(closed, None, iterations = 3)
cv2.imshow('Basic Image',closed)
cv2.waitKey(0)
#we increase the size of the image to capture the isbn number zone
book_cover_bottom_increased,Rx,Ry=resize(book_cover_bottom.copy(),'up',3,5)
#here we capture the isbn number zone and store it as a "jpg" image in a folder (isbn_zone)
isbn_zone=draw_countours(book_cover_bottom_increased,book_cover_bottom,'code_zone',15,15,Rx,Ry)
#we want to safely create a folder to store the isbn digits
digit_dir = data_dir+'/isbn_digits'
try:
shutil.rmtree(digit_dir)
except OSError as e:
print("Error: %s : %s" % (digit_dir, e.strerror))
isbn_zone_image=cv2.imread(data_dir+"isbn_zone/code_zone.jpg")
cv2.imshow('top',isbn_zone_image)
cv2.waitKey(0)
#we increase the isbn zone image in other to capture the digits
isbn_zone_image=cv2.resize(isbn_zone_image,(isbn_zone_image.shape[1]*4,isbn_zone_image.shape[0]*2))
isbn_zone_gray = cv2.cvtColor(isbn_zone_image, cv2.COLOR_BGR2GRAY)
#we turn the image into binary so that we can catch the digits
isbn_zone_binary = cv2.adaptiveThreshold(isbn_zone_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY_INV,15,5)
#we draw the digits edges
isbn_zone_edged = auto_canny(isbn_zone_binary)
kernels = cv2.getStructuringElement(cv2.MORPH_RECT, (3,2))
closed = cv2.morphologyEx(isbn_zone_binary, cv2.MORPH_CLOSE, kernels,iterations=1)
#getting the countours
cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
#getting the areas of all objects(we hope all digits) founded in the isbn_zone
all_areas=[cv2.contourArea(rect) for rect in cnts]
#as the isbn length is 13, we don't want to get more of 13 objects
#beside, it's possible to have have some noises
if len(all_areas)<15:
gray=resize(isbn_zone_gray,'up',2,3)[0]
isbn_zone_image_,Rx,Ry=resize(isbn_zone_image.copy(),'up',2,3)
isbn_zone_binary = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY_INV,255,30)
edgeds = auto_canny(isbn_zone_binary)
isbn_zone_binary = cv2.dilate(isbn_zone_binary, None, iterations = 1)
kernels = cv2.getStructuringElement(cv2.MORPH_RECT, (4,7))
closeds = cv2.morphologyEx(isbn_zone_binary, cv2.MORPH_CROSS, kernels,iterations=2)
cnts = cv2.findContours(closeds.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
all_areas=[cv2.contourArea(rect) for rect in cnts]
min_area,max_area=100,5000
min_p,max_p=0.1,2
else :
isbn_zone_image_,Rx,Ry=isbn_zone_image.copy(),1,1
min_area,max_area=10,8000
min_p,max_p=0.1,2
areas_digits=pd.Series([],dtype='float64')
good_contours=[]
#As we maybe caught a noise instead of digit, we want reject it
for i,cnt in enumerate(cnts):
#we collect the positions, width and height of digits (objects caught respecting the previous rules)
x,y,w,h = cv2.boundingRect(cnt)
if (w/h <= max_p) & (w/h >= min_p) & (h/isbn_zone_image.shape[0] < 0.3) & (y > 2*h/3) & (w*h > min_area) & (w*h < max_area) :
good_contours.append(cnt)
#Let's collect the digits as we just made selection
for i,cnt in enumerate(good_contours[:13]):
x,y,w,h = cv2.boundingRect(cnt)
x_,y_,w_,h_=int(x/Rx),int(y/Ry),int(w/Rx),int(h/Ry)
#we draw the countours on a copy of isbnzone image
cv2.rectangle(isbn_zone_image_,(x,y),(x+w,y+h),(0,0,255),1)
#we save the digits naming them after theirs x positions
cv2.imwrite(data_dir+"isbn_digits/"+str(x)+".jpg",isbn_zone_image[y_-3:y_+h_+3,x_-3:x_+w_+3])
#------------------------------------------MACHINE LEARNING-----------------------------------------#
#In this part we'll compare 4 machine learning algorithms
#loading train data (more informations about these data on the Training data folder)
images=np.loadtxt(training_dir+"/flattened_images.txt", np.float32)
classes=np.loadtxt(training_dir+"/classifications.txt", np.float32)
#turn values in strings
classes_str=np.array([chr(int(c)) for c in classes])
#labels to use with KNN algorithm of OpenCV
labels=classes.reshape((classes.size, 1))
#labels to use with Sklean algorithms
classes=[chr(int(c)) for c in classes]
print("Images Shape : {}".format(np.shape(images)))
print("Shape of labels for Sklearn : {}".format(np.shape(classes)))
print("Shape of labels for OpenCV: {}".format(np.shape(labels)))
#instanciation of models
ovr_clf = OneVsRestClassifier(estimator=SVC(random_state=0,C=1))
svm_clf=SVC(gamma=0.0001,C=1)
knn_clf = KNeighborsClassifier(n_neighbors=1)
kNearest_clf = cv2.ml.KNearest_create()
#model fitting
ovr_clf.fit(images,classes)
svm_clf.fit(images,classes)
knn_clf.fit(images,classes)
kNearest_clf.train(images, cv2.ml.ROW_SAMPLE,labels)
#with all the previous models trained, we'll predict the digits values from their respective images
# collect test data (digits stored in isbn_digits)
digit_files = [c for c in os.listdir(data_dir + "isbn_digits") if len(c) < 10]
# we sort the digits by x position to get the right order
digit_files = sorted(digit_files, key=get_pos, reverse=False)
# creating list of values to store the predictions
OVR_List_Results, SVM_List_Results, KNN_List_Results, KNN_Cv_List_Results, \
OVR_List, SVM_List, KNN_List, KNN_Cv_List = [], [], [], [], [], [], [], []
for i, digit in enumerate(digit_files, 1):
# reading isbn digits
im = cv2.imread(data_dir + "isbn_digits/" + digit)
try:
imgROI, w, h, imgResised = get_ROI_to_predict(im, 20, 30)
# we set the size of digits images to the same values as the train data images
imROIResized = resize(imgROI, 't', w, h)[0]
# we extract the features from the Region Of Interest
imROIToPredict = imROIResized.reshape((1, w * h))
# we keep the image without the ROI exctraction so that we can compare the results
im_non_processed = imgResised.reshape((1, w * h))
# show the 13 digits to predict
plt.subplot(1, 13, i)
plt.imshow(imgResised, cmap='Greys')
plt.axis('off')
# predictions with KNN from OpenCV
retval, Results, neigh_resp, dists = kNearest_clf.findNearest(np.float32(imROIToPredict), k=1)
retval, Results_non_processed, neigh_resp, dists = kNearest_clf.findNearest(np.float32(im_non_processed), k=1)
# we collect results
KNN_Cv_List_Results.append(str(chr(int(Results[0][0]))))
KNN_Cv_List.append(str(chr(int(Results_non_processed[0][0]))))
# prediction with Sklearn models
OVR_List_Results.append(str(ovr_clf.predict(imROIToPredict)[0]))
SVM_List_Results.append(str(svm_clf.predict(imROIToPredict)[0]))
KNN_List_Results.append(str(knn_clf.predict(imROIToPredict)[0]))
# we collect results
OVR_List.append(str(ovr_clf.predict(im_non_processed)[0]))
SVM_List.append(str(svm_clf.predict(im_non_processed)[0]))
KNN_List.append(str(knn_clf.predict(im_non_processed)[0]))
except:
print("Error related to images")
print('Digits to predict:')
plt.show()
print("Prediction results WITH ROI extraction")
print(' KNN OpenCV : |{}|'.format('|'.join(KNN_Cv_List_Results)))
print(' OneVsRest Sklearn: |{}|'.format('|'.join(OVR_List_Results)))
print(' KNN Sklearn : |{}|'.format('|'.join(KNN_List_Results)))
print(' SVM Sklearn: |{}|'.format('|'.join(SVM_List_Results)))
print("Prediction results WITHOUT ROI extraction")
print(' KNN OpenCV : |{}|'.format('|'.join(KNN_Cv_List)))
print(' OneVsRest Sklearn: |{}|'.format('|'.join(OVR_List)))
print(' KNN Sklearn : |{}|'.format('|'.join(KNN_List)))
print(' SVM Sklearn: |{}|'.format('|'.join(SVM_List))) | [
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4423,
346,
198,
6738,
47035,
25552,
13,
14681,
1330,
1635,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,
311,... | 2.401403 | 3,707 |
from web3 import Web3, Account
from solc import compile_files, link_code, compile_source
import json
import os
import time
import chainUtil as chain
import DS
account_list = [
'0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1', # user, initializer
'0xFFcf8FDEE72ac11b5c542428B35EEF5769C409f0', # attribute ins.
'0x22d491Bde2303f2f43325b2108D26f1eAbA1e32b', # attribute ins.
'0xE11BA2b4D45Eaed5996Cd0823791E0C93114882d', # track ins.
'0xd03ea8624C8C5987235048901fB614fDcA89b117', # track ins.
'0x95cED938F7991cd0dFcb48F0a06a40FA1aF46EBC', # track ins.
'0x3E5e9111Ae8eB78Fe1CC3bb8915d5D461F3Ef9A9', # extra, backup account
]
user_num = 1
attri_num = 2
track_num = 3
# use truffle to complie all the smart contracts
# os.system("truffle complie")
# Connect to blockchain in localhost:8545, the default port of truffle
w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
if w3.isConnected() is False:
raise Exception('error in connecting')
AttriChain_file = open('./build/contracts/AttriChain.json', 'r', encoding='utf-8')
AttriChain_json = json.load(AttriChain_file)
AttriChain = w3.eth.contract(abi=AttriChain_json['abi'], bytecode=AttriChain_json['bytecode'])
# w3.parity.personal.unlockAccount(account_list[0], '', '')
tx_hash = AttriChain.constructor().transact({'from': w3.eth.accounts[0]})
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
AttriChain = w3.eth.contract(address=tx_receipt.contractAdress, abi=AttriChain_json['abi'])
# ------------------ chain init --------------------
upk, usk = [], []
apk, ask = [], []
epk, esk, esvk = 0, [], []
# user init
for i in range(user_num):
_upk, _usk = DS.keyGen()
upk.append(_upk)
usk.append(_usk)
# attri ins. init
for i in range(attri_num):
_apk, _ask = DS.keyGen()
apk.append(_apk)
ask.append(_ask)
# -------------------- request ------------------------
uid = 0
attri_a = b'a1' # attribution
# ----------------- Authentication ---------------------
# Assemble message,
# the first 271 byte is the public key,
# then is the attribute information, they divided by '|||'
message = upk[0] + b'|||' + attri_a
cred = DS.sign(ask[0], message)
# TODO:
# test_file = open('../build/contracts/test.json', 'r', encoding='utf-8')
# test_json = json.load(test_file)
# test = w3.eth.contract(abi=test_json['abi'], bytecode=test_json['bytecode'])
# tx_hash = test.constructor().transact({'from': w3.eth.accounts[0]})
# tx_receipt = ''
# while not tx_receipt:
# tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
# time.sleep(0.1)
# tx_hash = contract_instance.functions.request(0, 0).transact({'from': w3.eth.accounts[0]})
# tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
| [
6738,
3992,
18,
1330,
5313,
18,
11,
10781,
198,
6738,
1540,
66,
1330,
17632,
62,
16624,
11,
2792,
62,
8189,
11,
17632,
62,
10459,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
6333,
18274,
346,
355,
6333,
198,
117... | 2.456984 | 1,081 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-04-15 04:27
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
23,
319,
12131,
12,
3023,
12,
1314,
8702,
25,
1983,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
"""
Copyright (c) 2020 Ansys Inc. """
######## IMPORTS ########
# General purpose imports
import os,sys
sys.path.append("C:\\Program Files\\Lumerical\\v211\\api\\python\\")
import numpy as np
import json
from lumjson import LumEncoder, LumDecoder
import lumapi
# Optimization global parameters
height = 220e-9
etch_depth = 80e-9
y0 = 0
n_grates = 25
params_size = 2*n_grates
indexSi = 3.47668
indexSiO2 = 1.44401
base_sim_3D = "C:\\Users\\HWANGTAESEUNG\\Documents\\Inverse design of grating coupler (2D) (1)\\pid_grating_coupler_3D_TE_base.fsp"
params_file = "C:\\Users\\HWANGTAESEUNG\\Documents\\Inverse design of grating coupler (2D) (1)\\pid_optim_final.json"
sim_3D_file = "C:\\Users\\HWANGTAESEUNG\\Documents\\Inverse design of grating coupler (2D) (1)\\pid_grating_coupler_3D.fsp"
gds_file = "C:\\Users\\HWANGTAESEUNG\\Documents\\Inverse design of grating coupler (2D) (1)\\pid_grating_coupler_3d.gds"
cur_path = os.path.dirname(os.path.realpath(__file__))
if __name__ == "__main__":
with open(params_file) as fh:
initial_params = json.load(fh, cls=LumDecoder)["initial_params"]
with lumapi.FDTD(filename = os.path.join(cur_path, base_sim_3D), hide = False) as fdtd:
focusing_grating(initial_params, fdtd)
fdtd.save(os.path.join(cur_path, sim_3D_file))
gds_export_script(fdtd, gds_file) | [
37811,
198,
220,
220,
220,
15069,
357,
66,
8,
12131,
1052,
17597,
3457,
13,
37227,
198,
198,
7804,
30023,
33002,
46424,
21017,
198,
2,
3611,
4007,
17944,
198,
11748,
28686,
11,
17597,
198,
17597,
13,
6978,
13,
33295,
7203,
34,
25,
685... | 2.383803 | 568 |
from .marccd import MarCCD
| [
6738,
764,
3876,
535,
67,
1330,
1526,
4093,
35,
198
] | 2.7 | 10 |
#!/usr/bin/env python3
"""
Record game.
Authors:
LICENCE:
"""
import os
from pathlib import Path
from time import sleep
import gym
import numpy as np
from gym.wrappers.monitoring.video_recorder import VideoRecorder
import games
class Record:
"""Record a game from the environment."""
def __init__(
self,
game: games.Game_type,
record: bool = False,
store_path: Path = None,
) -> None:
"""Ctor."""
self.game = game
self.env = gym.make(game.name)
self.env.reset()
self.env.render(mode="human")
self.record = record
self.store_path = store_path
if self.store_path is None:
self.store_path = Path("temp/trial")
if not os.path.exists(self.store_path):
os.mkdir(self.store_path)
if self.record:
self.video = VideoRecorder(
self.env,
str(self.store_path / "video.mp4"),
)
self.env.viewer.window.push_handlers(game.keyboard)
self.actions = []
self.states = []
def close(self) -> None:
"""Close game."""
self.env.close()
if self.record:
self.video.close()
def record_game(self) -> None:
"""Record a game for a given env."""
isopen = True
action_state_path = self.store_path / "action_state"
while isopen:
self.env.reset()
total_reward = 0.0
steps = 0
restart = False
while True:
action = self.game.get_action()
state, reward, done, _ = self.env.step(action)
self.actions.append(action)
self.states.append(state)
total_reward += reward
if steps % 200 == 0 or done:
print("\naction {:+0.2f}".format(action))
print(f"step {steps} total_reward {total_reward:+0.2f}")
steps += 1
if self.record:
self.video.capture_frame()
isopen = self.env.render(mode="human")
if done or restart or not isopen:
break
sleep(0.08)
if self.record:
np.savez_compressed(
action_state_path,
actions=np.array(self.actions, dtype=np.uint8),
states=np.array(self.states, dtype=np.uint8),
)
def test() -> None:
"""Test record on Enduro-v4."""
recenv = Record(games.Enduro(), record=True)
recenv.record_game()
recenv.close()
if __name__ == "__main__":
test()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
23739,
983,
13,
198,
198,
30515,
669,
25,
198,
43,
2149,
18310,
25,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
640,
1330,
3993,
... | 1.949265 | 1,360 |
from loguru import logger
from sklearn.svm import SVC
import mlflow
def create_svc_model(svc_hyperparameters):
    """
    Description
    -----------
    Creates a Support Vector Classifier model for use in a scikit-learn pipeline
    based upon the input svc_hyperparameters.
    The hyperparameters are also tracked as parameters in MLFlow.

    Parameters
    ----------
    svc_hyperparameters: dict
        The hyperparameters for the model; must contain the keys
        model_name, model_type, C, kernel, probability, max_iter and cv.

    Returns
    -------
    model: sklearn.svm.SVC
        The scikit-learn SVC model
    model_name: str
        The name of the model
    cv: int
        The number of cross-validation folds to perform when evaluating the
        model

    Raises
    ------
    Exception: Exception
        Generic exception for logging

    Examples
    --------
    model, model_name, cv = create_svc_model(
        svc_hyperparameters=dict(
            model_name="svc",
            model_type="SVC",
            C=0.2,
            kernel="rbf",
            probability=True,
            max_iter=-1,
            cv=5,
        )
    )
    """
    logger.info("Running create_svc_model")
    try:
        # Create the model
        model = SVC(
            C=svc_hyperparameters["C"],
            kernel=svc_hyperparameters["kernel"],
            probability=svc_hyperparameters["probability"],
            max_iter=svc_hyperparameters["max_iter"],
        )
        # Track every supplied hyperparameter in MLFlow; looping over a
        # fixed key list keeps the logged set consistent and avoids the
        # copy-paste drift of one log_param call per key.
        for param in ("model_name", "model_type", "C", "kernel",
                      "probability", "max_iter", "cv"):
            mlflow.log_param(param, svc_hyperparameters[param])
        return model, svc_hyperparameters["model_name"], svc_hyperparameters["cv"]
    except Exception:
        # NOTE(review): this swallows the error and implicitly returns
        # None, so callers unpacking the result will fail with a
        # TypeError — consider re-raising after logging.
        logger.exception("Error running create_svc_model()")
| [
6738,
2604,
14717,
1330,
49706,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,
311,
15922,
198,
11748,
285,
1652,
9319,
628,
198,
4299,
2251,
62,
21370,
66,
62,
19849,
7,
21370,
66,
62,
49229,
17143,
7307,
2599,
198,
220,
220,
220,
3722... | 2.276302 | 941 |
########################################################################
# CoC API Calls
########################################################################
import requests
import json
from .models import WarInfo, ClanInfo, LeagueInfo
| [
29113,
29113,
7804,
198,
2,
41055,
7824,
27592,
198,
29113,
29113,
7804,
198,
11748,
7007,
198,
11748,
33918,
198,
198,
6738,
764,
27530,
1330,
1810,
12360,
11,
22467,
12360,
11,
4041,
12360,
628
] | 7.333333 | 33 |
r"""
Cartesian Products
"""
# ****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.enumerated_sets import EnumeratedSets
from sage.sets.set_from_iterator import EnumeratedSetFromIterator
import sage.misc.prandom as rnd
from sage.misc.mrange import xmrange_iter, _is_finite, _len
from .ranker import unrank
from sage.rings.infinity import infinity
class CartesianProduct_iters(EnumeratedSetFromIterator):
    r"""
    Cartesian product of finite sets.

    This class will soon be deprecated (see :trac:`18411` and :trac:`19195`).
    One should instead use the functorial construction
    :class:`cartesian_product <sage.categories.cartesian_product.CartesianProductFunctor>`.

    The main differences in behavior are:

    - construction: ``CartesianProduct`` takes as many argument as
      there are factors whereas ``cartesian_product`` takes a single
      list (or iterable) of factors;

    - representation of elements: elements are represented by plain
      Python list for ``CartesianProduct`` versus a custom element
      class for ``cartesian_product``;

    - membership testing: because of the above, plain Python lists are
      not considered as elements of a ``cartesian_product``.

    All of these is illustrated in the examples below.

    EXAMPLES::

        sage: F1 = ['a', 'b']
        sage: F2 = [1, 2, 3, 4]
        sage: F3 = Permutations(3)
        sage: from sage.combinat.cartesian_product import CartesianProduct_iters
        sage: C = CartesianProduct_iters(F1, F2, F3)
        sage: c = cartesian_product([F1, F2, F3])

        sage: type(C.an_element())
        <... 'list'>
        sage: type(c.an_element())
        <class 'sage.sets.cartesian_product.CartesianProduct_with_category.element_class'>

        sage: l = ['a', 1, Permutation([3,2,1])]
        sage: l in C
        True
        sage: l in c
        False

        sage: elt = c(l)
        sage: elt
        ('a', 1, [3, 2, 1])
        sage: elt in c
        True
        sage: elt.parent() is c
        True
    """

    def __init__(self, *iters):
        """
        TESTS::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: cp = CartesianProduct_iters([1,2],[3,4]); cp
            Cartesian product of [1, 2], [3, 4]
            sage: loads(dumps(cp)) == cp
            True
            sage: TestSuite(cp).run(skip='_test_an_element')

        Check that :trac:`24558` is fixed::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: from sage.sets.set_from_iterator import EnumeratedSetFromIterator
            sage: I = EnumeratedSetFromIterator(Integers)
            sage: CartesianProduct_iters(I, I)
            Cartesian product of {0, 1, -1, 2, -2, ...}, {0, 1, -1, 2, -2, ...}
        """
        self.iters = iters
        self._mrange = xmrange_iter(iters)
        category = EnumeratedSets()
        try:
            category = category.Finite() if self.is_finite() else category.Infinite()
        except ValueError:  # Unable to determine if it is finite or not
            pass
        name = "Cartesian product of " + ", ".join(map(str, self.iters))
        # The original passed the undefined name ``iterfunc`` here, raising
        # a NameError on construction; the intended iterator factory is this
        # class's own __iterate__ method.
        EnumeratedSetFromIterator.__init__(self, self.__iterate__,
                                           name=name,
                                           category=category,
                                           cache=False)

    def __contains__(self, x):
        """
        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: cp = CartesianProduct_iters([1,2],[3,4])
            sage: [1,3] in cp
            True
            sage: [1,2] in cp
            False
            sage: [1, 3, 1] in cp
            False

        Note that it differs with the behavior of Cartesian products::

            sage: cp = cartesian_product([[1,2], [3,4]])
            sage: [1,3] in cp
            False
        """
        try:
            # Element must have one component per factor, each contained
            # in the corresponding factor.
            return len(x) == len(self.iters) and all(x[i] in self.iters[i] for i in range(len(self.iters)))
        except (TypeError, IndexError):
            return False

    def __reduce__(self):
        r"""
        Support for pickle.

        TESTS::

            sage: cp = cartesian_product([[1,2],range(9)])
            sage: loads(dumps(cp)) == cp
            True
        """
        # self.iters is already a tuple, so it serves directly as the
        # *args tuple for reconstruction.
        return (self.__class__, (self.iters))

    def __repr__(self):
        """
        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: CartesianProduct_iters(list(range(2)), list(range(3)))
            Cartesian product of [0, 1], [0, 1, 2]
        """
        return "Cartesian product of " + ", ".join(map(str, self.iters))

    def cardinality(self):
        r"""
        Returns the number of elements in the Cartesian product of
        everything in \*iters.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: CartesianProduct_iters(range(2), range(3)).cardinality()
            6
            sage: CartesianProduct_iters(range(2), range(3)).cardinality()
            6
            sage: CartesianProduct_iters(range(2), range(3), range(4)).cardinality()
            24

        This works correctly for infinite objects::

            sage: CartesianProduct_iters(ZZ, QQ).cardinality()
            +Infinity
            sage: CartesianProduct_iters(ZZ, []).cardinality()
            0
        """
        return self._mrange.cardinality()

    def __len__(self):
        """
        Return the number of elements of the Cartesian product.

        OUTPUT:

        An ``int``, the number of elements in the Cartesian product. If the
        number of elements is infinite or does not fit into a python ``int``, a
        ``TypeError`` is raised.

        .. SEEALSO::

            :meth:`cardinality`

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: C = CartesianProduct_iters(range(3), range(4))
            sage: len(C)
            12
            sage: C = CartesianProduct_iters(ZZ, QQ)
            sage: len(C)
            Traceback (most recent call last):
            ...
            TypeError: cardinality does not fit into a Python int
            sage: C = CartesianProduct_iters(ZZ, [])
            sage: len(C)
            0
        """
        return len(self._mrange)

    def list(self):
        """
        Return a list of the elements of the Cartesian product.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: CartesianProduct_iters(range(3), range(3)).list()
            [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
            sage: CartesianProduct_iters('dog', 'cat').list()
            [['d', 'c'],
             ['d', 'a'],
             ['d', 't'],
             ['o', 'c'],
             ['o', 'a'],
             ['o', 't'],
             ['g', 'c'],
             ['g', 'a'],
             ['g', 't']]
        """
        return [e for e in self]

    def __iterate__(self):
        r"""
        An iterator for the elements in the Cartesian product of the
        iterables \*iters.

        From Recipe 19.9 in the Python Cookbook by Alex Martelli and David
        Ascher.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: [e for e in CartesianProduct_iters(range(3), range(3))]
            [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
            sage: [e for e in CartesianProduct_iters('dog', 'cat')]
            [['d', 'c'],
             ['d', 'a'],
             ['d', 't'],
             ['o', 'c'],
             ['o', 'a'],
             ['o', 't'],
             ['g', 'c'],
             ['g', 'a'],
             ['g', 't']]
        """
        return iter(self._mrange)

    def is_finite(self):
        """
        The Cartesian product is finite if all of its inputs are
        finite, or if any input is empty.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: CartesianProduct_iters(ZZ, []).is_finite()
            True
            sage: CartesianProduct_iters(4,4).is_finite()
            Traceback (most recent call last):
            ...
            ValueError: Unable to determine whether this product is finite
        """
        finites = [_is_finite(L, fallback=None) for L in self.iters]
        if any(f is None for f in finites):
            raise ValueError("Unable to determine whether this product is finite")
        if all(f is True for f in finites):
            return True
        # Some factor is infinite; the product is still finite (empty)
        # if any factor is empty.
        lens = [_len(L) for L in self.iters]
        if any(l == 0 for l in lens):
            return True
        return False

    def unrank(self, x):
        """
        For finite Cartesian products, we can reduce unrank to the
        constituent iterators.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: C = CartesianProduct_iters(range(1000), range(1000), range(1000))
            sage: C[238792368]
            [238, 792, 368]

        Check for :trac:`15919`::

            sage: FF = IntegerModRing(29)
            sage: C = CartesianProduct_iters(FF, FF, FF)
            sage: C.unrank(0)
            [0, 0, 0]
        """
        try:
            lens = [_len(it) for it in self.iters]
        except (TypeError, AttributeError):
            # Fall back to the generic linear-scan unrank imported from the
            # ranker module.  (The original called
            # ``CartesianProduct_iters.unrank(self, x)`` here, i.e. this very
            # method, which recursed forever.)  Inside this body the bare
            # name ``unrank`` resolves to the module-level function.
            return unrank(self, x)
        positions = []
        # Decompose x in mixed radix; lens[0] is treated as the least
        # significant digit, then positions is reversed so that the first
        # factor varies slowest, matching iteration order.
        for n in lens:
            if n is infinity:
                return unrank(self, x)
            if n == 0:
                raise IndexError("Cartesian Product is empty")
            positions.append(x % n)
            x = x // n
        if x != 0:
            raise IndexError("x larger than the size of the Cartesian Product")
        positions.reverse()
        return [unrank(L, i) for L, i in zip(self.iters, positions)]

    def random_element(self):
        r"""
        Returns a random element from the Cartesian product of \*iters.

        EXAMPLES::

            sage: from sage.combinat.cartesian_product import CartesianProduct_iters
            sage: c = CartesianProduct_iters('dog', 'cat').random_element()
            sage: c in CartesianProduct_iters('dog', 'cat')
            True
        """
        return [rnd.choice(_) for _ in self.iters]
| [
81,
37811,
198,
43476,
35610,
18675,
198,
37811,
198,
2,
41906,
17174,
46068,
198,
2,
220,
220,
220,
220,
220,
220,
15069,
357,
34,
8,
4343,
4995,
27667,
1279,
76,
71,
33807,
31,
14816,
13,
785,
22330,
198,
2,
198,
2,
220,
4307,
6... | 2.118509 | 5,257 |
import unittest
import tempfile
import os
import shutil
from spark_python_jobs.main import SampleJob
from pyspark.sql import SparkSession
from unittest.mock import MagicMock
from pyspark.sql import functions as F
# Entry point: discover and run unittest.TestCase classes in this module.
# NOTE(review): no test cases are defined here yet, so this currently
# runs zero tests.
if __name__ == "__main__":
    unittest.main()
11748,
555,
715,
395,
198,
11748,
20218,
7753,
198,
11748,
28686,
198,
11748,
4423,
346,
628,
198,
198,
6738,
9009,
62,
29412,
62,
43863,
13,
12417,
1330,
27565,
33308,
198,
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
198,
673... | 3.142857 | 84 |
import unittest
import norwegian_numbers.util as internal
import norwegian_numbers as nn
# MOD10CD tests
# MOD11CD tests
# KID tests
# Account number tests
# Organisation number tests
# Birth number tests
# Entry point: run the unittest test discovery for this module.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
4249,
20684,
62,
77,
17024,
13,
22602,
355,
5387,
198,
11748,
4249,
20684,
62,
77,
17024,
355,
299,
77,
628,
198,
220,
220,
220,
1303,
19164,
940,
8610,
5254,
628,
220,
220,
220,
1303,
19164,
1157,
... | 2.878788 | 99 |
"""
**apps_authenticator**
will handle internal application api calls
**NOTE:**
this authenticator runs on the api side will have access to all the data classes
"""
__developer__ = "mobius-crypt"
__email__ = "mobiusndou@gmail.com"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
__licence__ = "MIT"
import functools
import hmac
from typing import Optional, Callable, List
import requests
from flask import request
from config import config_instance
from config.exceptions import UnAuthenticatedError, error_codes, status_codes
from config.use_context import use_context
from security.users_authenticator import decode_auth_token
from utils import is_development
def verify_app_id(app_id: str, domain: str) -> bool:
    """
    **verify_app_id**
        given a micro-services domain name check if the app_id is the same as the app_id
        created

    :param app_id: application id embedded in the caller's auth token
    :param domain: base URL of the micro-service being verified against
    :return: True when the remote service confirms both the app_id and the shared secret
    :raises UnAuthenticatedError: on a non-OK/non-JSON response or a falsy status
    """
    _endpoint: str = '_ipn/micro-services/verify-app-id'
    _url: str = f"{domain}{_endpoint}"
    _secret_key: str = config_instance.SECRET_KEY
    _kwargs: dict = dict(domain=domain, app_id=app_id, SECRET_KEY=_secret_key)
    # Synchronous round-trip to the service that issued the app id.
    _result: requests.Response = requests.post(url=_url, json=_kwargs)
    _ok_codes: List[int] = [
        status_codes.successfully_updated_code, status_codes.status_ok_code]
    if _result.status_code in _ok_codes and _result.headers['content-type'] == 'application/json':
        json_data: dict = _result.json()
    else:
        # NOTE(review): debug_verify_app_id is not among this module's
        # visible imports — confirm where it is defined.
        debug_verify_app_id(_result, domain)
        message: str = 'Application not authenticated'
        raise UnAuthenticatedError(description=message)
    if not json_data.get('status'):
        # TODO if app not authenticated adding the ip address to a black list may be a good idea
        message: str = "application un-authenticated"
        raise UnAuthenticatedError(description=message)
    _payload: dict = json_data.get('payload')
    # Note: comparing a known secret_key with the returned secret_key;
    # hmac.compare_digest gives a constant-time (timing-safe) comparison.
    compare_secret_key: bool = hmac.compare_digest(
        _secret_key, _payload.get('SECRET_KEY'))
    compare_app_id: bool = hmac.compare_digest(app_id, _payload.get('app_id'))
    return json_data.get('status') and compare_secret_key and compare_app_id
@use_context
def is_app_authenticated(domain: Optional[str], secret_key: Optional[str],
                         auth_token: Optional[str]) -> bool:
    """
    **apps_authenticator**
        authenticate application to api calls

    :param domain: base URL of the calling micro-service
    :param secret_key: shared secret to compare against the one inside the token
    :param auth_token: token that decodes to "domain#secret_key#app_id"
    :return: True when the token's domain, secret and app id all check out
    """
    # decode_auth_token returns a falsy value for invalid/expired tokens.
    decoded_token = decode_auth_token(auth_token=auth_token)
    if not bool(decoded_token):
        return False
    # NOTE(review): assumes the decoded token contains exactly two '#'
    # separators; a malformed token raises ValueError here — confirm
    # upstream validation.
    _domain, _secret_key, _app_id = decoded_token.split('#')
    # Normalize to a trailing slash so comparisons are canonical.
    domain = f"{domain}/" if not domain.endswith("/") else domain
    # Local development: localhost is canonicalized to 127.0.0.1.
    if is_development() and domain == 'http://localhost:8082/':
        domain = 'http://127.0.0.1:8082/'
    # Constant-time comparisons to avoid timing side channels.
    compare_secret_key: bool = hmac.compare_digest(_secret_key, secret_key)
    compare_domain: bool = hmac.compare_digest(_domain, domain)
    return compare_secret_key and compare_domain and verify_app_id(app_id=_app_id, domain=_domain)
def handle_internal_auth(func: Callable) -> Callable:
    """
    **handle_internal_auth**
        handles authentication of internal api calls

    :param func: route handler to protect
    :return: wrapped handler that raises UnAuthenticatedError for
        unauthenticated callers
    """
    @functools.wraps(func)
    def auth_wrapper(*args, **kwargs):
        # NOTE(review): the wrapper body was missing from the original
        # source (the decorator had no function beneath it, which is a
        # SyntaxError).  Reconstructed: pull the credentials from the
        # request headers and delegate to is_app_authenticated — confirm
        # the exact header names against the calling micro-services.
        domain: Optional[str] = request.headers.get('domain')
        secret_key: Optional[str] = request.headers.get('SECRET-KEY')
        auth_token: Optional[str] = request.headers.get('AUTH-TOKEN')
        if is_app_authenticated(domain=domain, secret_key=secret_key,
                                auth_token=auth_token):
            return func(*args, **kwargs)
        message: str = 'Application not authenticated'
        raise UnAuthenticatedError(description=message)

    return auth_wrapper
def verify_cron_job(cron_domain: str, secret_key: str) -> bool:
    """verify if the executor of the cron job is authorized"""
    # Constant-time comparisons against the configured cron credentials.
    domain_ok: bool = hmac.compare_digest(
        cron_domain, config_instance.CRON_DOMAIN)
    secret_ok: bool = hmac.compare_digest(
        secret_key, config_instance.CRON_SECRET)
    return domain_ok and secret_ok
def handle_cron_auth(func: Callable) -> Callable:
    """authenticate cron job execution routes

    :param func: cron route handler to protect
    :return: wrapped handler that raises UnAuthenticatedError for
        unauthorized executors
    """
    @functools.wraps(func)
    def auth_wrapper(*args, **kwargs):
        # NOTE(review): the wrapper body was missing from the original
        # source (decorator with no function beneath it).  Reconstructed:
        # read the cron credentials from the request headers and check
        # them with verify_cron_job — confirm the exact header names
        # against the cron scheduler configuration.
        cron_domain: Optional[str] = request.headers.get('domain')
        secret_key: Optional[str] = request.headers.get('SECRET-KEY')
        if verify_cron_job(cron_domain=cron_domain, secret_key=secret_key):
            return func(*args, **kwargs)
        message: str = 'cron job not authorized'
        raise UnAuthenticatedError(description=message)

    return auth_wrapper
def is_domain_authorised(domain) -> bool:
    """
    Check whether ``domain`` is authorised to call this API.

    NOTE(review): unimplemented stub — it currently always returns None
    (falsy), not the annotated bool.

    :param domain: domain name to check
    :return: implementation pending
    """
    pass
| [
37811,
201,
198,
220,
220,
220,
12429,
18211,
62,
41299,
26407,
1174,
201,
198,
220,
220,
220,
220,
220,
220,
220,
481,
5412,
5387,
3586,
40391,
3848,
201,
198,
201,
198,
220,
220,
220,
12429,
16580,
25,
1174,
201,
198,
220,
220,
22... | 2.50116 | 1,724 |
# Strip the parentheses from the literal; str.translate removes both
# characters in a single pass instead of two chained replace() calls.
bosString = "asdfasdf(2014)".translate(str.maketrans("", "", "()"))
print(bosString)
39565,
10100,
796,
366,
292,
7568,
292,
7568,
7,
4967,
16725,
198,
39565,
10100,
796,
37284,
10100,
13,
33491,
7203,
7,
1600,
366,
4943,
198,
39565,
10100,
796,
37284,
10100,
13,
33491,
7,
4943,
1600,
366,
4943,
198,
198,
4798,
7,
395... | 2.818182 | 44 |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
# Enumeration ("keuzelijst") of cover-locking mechanisms; generated code —
# the Dutch value strings below are wire-format data and must not change.
class KlDekselVergrendeling(KeuzelijstField):
    """Manieren waarop het deksel is vergrendeld."""
    # (English: ways in which the cover is locked.)
    naam = 'KlDekselVergrendeling'
    label = 'Dekselvergrendeling'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlDekselVergrendeling'
    definition = 'Manieren waarop het deksel is vergrendeld.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlDekselVergrendeling'
    # Allowed values: bouten = hex-head stainless bolts, haken = hooks,
    # inbus = stainless bolts with an Allen (hex-socket) nut.
    options = {
        'bouten': KeuzelijstWaarde(invulwaarde='bouten',
                                   label='bouten',
                                   definitie='Het deksel is vergrendeld met inox schroefbouten met zeskantmoer',
                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlDekselVergrendeling/bouten'),
        'haken': KeuzelijstWaarde(invulwaarde='haken',
                                  label='haken',
                                  definitie='Het deksel is met haken vergrendeld',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlDekselVergrendeling/haken'),
        'inbus': KeuzelijstWaarde(invulwaarde='inbus',
                                  label='inbus',
                                  definitie='Het deksel is vergrendeld met inox schroefbouten met inbusmoer',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlDekselVergrendeling/inbus')
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
27354,
265,
9497,
13,
8896,
10277,
417,
2926,
301,
15878,
1330,
3873,
10277,
417,
2926,
301,
15878,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43... | 1.885906 | 894 |
from tensorflow.keras import Sequential,Model
from mpunet.logging import ScreenLogger
from mpunet.utils.conv_arithmetics import compute_receptive_fields
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, BatchNormalization, Cropping2D, \
Concatenate, Conv2D, MaxPooling2D, \
UpSampling2D, Reshape, ZeroPadding2D,\
Dense, Conv2DTranspose, Activation
import numpy as np
from tensorflow import keras
import tensorflow as tf
## Nested block
class UNet2Plus_deep(Model):
"""
2D UNet implementation with batch normalization and complexity factor adj.
See original paper at http://arxiv.org/abs/1505.04597
"""
| [
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
24604,
1843,
11,
17633,
198,
6738,
29034,
403,
316,
13,
6404,
2667,
1330,
15216,
11187,
1362,
198,
6738,
29034,
403,
316,
13,
26791,
13,
42946,
62,
283,
342,
27757,
1330,
24061,
62,
260,
2... | 2.249337 | 377 |
# Forms using wtforms
from wtforms import Form, StringField, SelectField, validators, PasswordField, TextAreaField
from wtforms_validators import Alpha, AlphaNumeric, Integer
from wtforms.fields.html5 import EmailField, DateField
from datetime import date
# Customer Register Form
# Customer Login Form
# Admin Login Form
# FAQ Creation Form
| [
2,
39196,
1262,
266,
83,
23914,
198,
6738,
266,
83,
23914,
1330,
5178,
11,
10903,
15878,
11,
9683,
15878,
11,
4938,
2024,
11,
30275,
15878,
11,
8255,
30547,
15878,
198,
6738,
266,
83,
23914,
62,
12102,
2024,
1330,
12995,
11,
12995,
45... | 3.763441 | 93 |
from __future__ import division, print_function, absolute_import
import numpy as np
from skimage import util
def load(path, dtype=np.float64):
    """
    Loads an image from file.

    Parameters
    ----------
    path : str
        Path to image file.
    dtype : np.dtype
        Defaults to ``np.float64``, which means the image will be returned as a
        float with values between 0 and 1. If ``np.uint8`` is specified, the
        values will be between 0 and 255 and no conversion cost will be
        incurred.
    """
    import skimage.io
    raw = skimage.io.imread(path)
    # uint8 is returned untouched — no scaling cost.
    if dtype == np.uint8:
        return raw
    # Any float flavor is scaled from [0, 255] down to [0, 1].
    if dtype in (np.float16, np.float32, np.float64):
        return raw.astype(dtype) / 255
    raise ValueError('Unsupported dtype')
def save(path, im):
    """
    Saves an image to file.

    If the image is type float, it will assume to have values in [0, 1].

    Parameters
    ----------
    path : str
        Path to which the image will be saved.
    im : ndarray (image)
        Image.
    """
    from PIL import Image
    if im.dtype == np.uint8:
        converted = im
    else:
        # Float image assumed in [0, 1]: scale up to byte range.
        converted = (im * 255).astype(np.uint8)
    Image.fromarray(converted).save(path)
def center_crop(img, size, value=0.0):
"""Center crop with padding (using `value`) if necessary"""
new_img = np.full(size + img.shape[2:], value, dtype=img.dtype)
dest = [0, 0]
source = [0, 0]
ss = [0, 0]
for i in range(2):
if img.shape[i] < size[i]:
diff = size[i] - img.shape[i]
dest[i] = diff // 2
source[i] = 0
ss[i] = img.shape[i]
else:
diff = img.shape[i] - size[i]
source[i] = diff // 2
ss[i] = size[i]
new_img[dest[0]:dest[0]+ss[0], dest[1]:dest[1]+ss[1]] = \
img[source[0]:source[0]+ss[0], source[1]:source[1]+ss[1]]
return new_img
def center_crop_reflect(img, size):
    """Center crop with mirror padding if necessary.

    Pads each of the first two axes symmetrically up to ``size`` using
    reflection, then delegates to :func:`center_crop`.
    """
    pad0 = max(0, size[0] - img.shape[0])
    pad1 = max(0, size[1] - img.shape[1])
    pad_width = ((pad0 // 2, pad0 - pad0 // 2), (pad1 // 2, pad1 - pad1 // 2))
    if img.ndim == 3:
        # Do not pad the channel axis.
        pad_width = pad_width + ((0, 0),)
    # skimage.util.pad was a deprecated alias of np.pad and was removed in
    # scikit-image 0.19; np.pad has identical semantics here.
    padded = np.pad(img, pad_width, mode='reflect')
    return center_crop(padded, size)
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
1330,
7736,
628,
198,
4299,
3440,
7,
6978,
11,
288,
4906,
28,
37659,
13,
22468,
2414,
2599,
198,
... | 2.181034 | 1,044 |
# coding: utf-8
import pprint
import re
import six
class TemplateInfo:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values should be masked when dumped/logged;
    # empty for this model.
    sensitive_list = []

    # Python attribute name -> declared OpenAPI type, consumed by the SDK's
    # generic (de)serializer.
    openapi_types = {
        'id': 'str',
        'title': 'str',
        'description': 'str',
        'productshorts': 'list[str]',
        'products': 'list[TemplateProductExt]',
        'topic': 'list[TopicCategory]',
        'creator_id': 'str',
        'creator': 'str',
        'nickname': 'str',
        'score': 'int',
        'label': 'str',
        'store': 'int',
        'store_info': 'str',
        'status': 'int',
        'view_count': 'int',
        'usage_count': 'int',
        'created_at': 'str',
        'updated_at': 'str',
        'published_at': 'str',
        'favorite_state': 'int',
        'tags': 'list[TagInfo]',
        'type': 'int',
        'is_static': 'int',
        'maintainers': 'list[str]',
        'pipeline_template': 'PipelineTemplateInfo',
        'platform_source': 'int',
        'references': 'list[Reference]',
        'properties': 'object',
        'dependencies': 'list[object]',
        'dependency_type': 'str',
        'forum_id': 'int',
        'file_size': 'int',
        'deployment': 'object',
        'update_id': 'str',
        'is_support_cloudide': 'bool'
    }

    # Python attribute name -> JSON field name on the wire (identity here).
    attribute_map = {
        'id': 'id',
        'title': 'title',
        'description': 'description',
        'productshorts': 'productshorts',
        'products': 'products',
        'topic': 'topic',
        'creator_id': 'creator_id',
        'creator': 'creator',
        'nickname': 'nickname',
        'score': 'score',
        'label': 'label',
        'store': 'store',
        'store_info': 'store_info',
        'status': 'status',
        'view_count': 'view_count',
        'usage_count': 'usage_count',
        'created_at': 'created_at',
        'updated_at': 'updated_at',
        'published_at': 'published_at',
        'favorite_state': 'favorite_state',
        'tags': 'tags',
        'type': 'type',
        'is_static': 'is_static',
        'maintainers': 'maintainers',
        'pipeline_template': 'pipeline_template',
        'platform_source': 'platform_source',
        'references': 'references',
        'properties': 'properties',
        'dependencies': 'dependencies',
        'dependency_type': 'dependency_type',
        'forum_id': 'forum_id',
        'file_size': 'file_size',
        'deployment': 'deployment',
        'update_id': 'update_id',
        'is_support_cloudide': 'is_support_cloudide'
    }
def __init__(self, id=None, title=None, description=None, productshorts=None, products=None, topic=None, creator_id=None, creator=None, nickname=None, score=None, label=None, store=None, store_info=None, status=None, view_count=None, usage_count=None, created_at=None, updated_at=None, published_at=None, favorite_state=None, tags=None, type=None, is_static=None, maintainers=None, pipeline_template=None, platform_source=None, references=None, properties=None, dependencies=None, dependency_type=None, forum_id=None, file_size=None, deployment=None, update_id=None, is_support_cloudide=None):
    """TemplateInfo - a model defined in huaweicloud sdk

    All fields are optional; only arguments passed as non-None are
    assigned (through their property setters), mirroring partial JSON
    payloads returned by the API.
    """
    # Backing fields for the generated properties below.
    self._id = None
    self._title = None
    self._description = None
    self._productshorts = None
    self._products = None
    self._topic = None
    self._creator_id = None
    self._creator = None
    self._nickname = None
    self._score = None
    self._label = None
    self._store = None
    self._store_info = None
    self._status = None
    self._view_count = None
    self._usage_count = None
    self._created_at = None
    self._updated_at = None
    self._published_at = None
    self._favorite_state = None
    self._tags = None
    self._type = None
    self._is_static = None
    self._maintainers = None
    self._pipeline_template = None
    self._platform_source = None
    self._references = None
    self._properties = None
    self._dependencies = None
    self._dependency_type = None
    self._forum_id = None
    self._file_size = None
    self._deployment = None
    self._update_id = None
    self._is_support_cloudide = None
    # Used by the SDK for polymorphic deserialization; none for this model.
    self.discriminator = None
    if id is not None:
        self.id = id
    if title is not None:
        self.title = title
    if description is not None:
        self.description = description
    if productshorts is not None:
        self.productshorts = productshorts
    if products is not None:
        self.products = products
    if topic is not None:
        self.topic = topic
    if creator_id is not None:
        self.creator_id = creator_id
    if creator is not None:
        self.creator = creator
    if nickname is not None:
        self.nickname = nickname
    if score is not None:
        self.score = score
    if label is not None:
        self.label = label
    if store is not None:
        self.store = store
    if store_info is not None:
        self.store_info = store_info
    if status is not None:
        self.status = status
    if view_count is not None:
        self.view_count = view_count
    if usage_count is not None:
        self.usage_count = usage_count
    if created_at is not None:
        self.created_at = created_at
    if updated_at is not None:
        self.updated_at = updated_at
    if published_at is not None:
        self.published_at = published_at
    if favorite_state is not None:
        self.favorite_state = favorite_state
    if tags is not None:
        self.tags = tags
    if type is not None:
        self.type = type
    if is_static is not None:
        self.is_static = is_static
    if maintainers is not None:
        self.maintainers = maintainers
    if pipeline_template is not None:
        self.pipeline_template = pipeline_template
    if platform_source is not None:
        self.platform_source = platform_source
    if references is not None:
        self.references = references
    if properties is not None:
        self.properties = properties
    if dependencies is not None:
        self.dependencies = dependencies
    if dependency_type is not None:
        self.dependency_type = dependency_type
    if forum_id is not None:
        self.forum_id = forum_id
    if file_size is not None:
        self.file_size = file_size
    if deployment is not None:
        self.deployment = deployment
    if update_id is not None:
        self.update_id = update_id
    if is_support_cloudide is not None:
        self.is_support_cloudide = is_support_cloudide
@property
def id(self):
    """Gets the id of this TemplateInfo.

    Template id

    :return: The id of this TemplateInfo.
    :rtype: str
    """
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this TemplateInfo.

    Template id

    :param id: The id of this TemplateInfo.
    :type: str
    """
    self._id = id

@property
def title(self):
    """Gets the title of this TemplateInfo.

    Template name

    :return: The title of this TemplateInfo.
    :rtype: str
    """
    return self._title

@title.setter
def title(self, title):
    """Sets the title of this TemplateInfo.

    Template name

    :param title: The title of this TemplateInfo.
    :type: str
    """
    self._title = title

@property
def description(self):
    """Gets the description of this TemplateInfo.

    Template description

    :return: The description of this TemplateInfo.
    :rtype: str
    """
    return self._description

@description.setter
def description(self, description):
    """Sets the description of this TemplateInfo.

    Template description

    :param description: The description of this TemplateInfo.
    :type: str
    """
    self._description = description

@property
def productshorts(self):
    """Gets the productshorts of this TemplateInfo.

    All cloud services associated with the template (product short names)

    :return: The productshorts of this TemplateInfo.
    :rtype: list[str]
    """
    return self._productshorts

@productshorts.setter
def productshorts(self, productshorts):
    """Sets the productshorts of this TemplateInfo.

    All cloud services associated with the template (product short names)

    :param productshorts: The productshorts of this TemplateInfo.
    :type: list[str]
    """
    self._productshorts = productshorts

@property
def products(self):
    """Gets the products of this TemplateInfo.

    Cloud products associated with the template

    :return: The products of this TemplateInfo.
    :rtype: list[TemplateProductExt]
    """
    return self._products

@products.setter
def products(self, products):
    """Sets the products of this TemplateInfo.

    Cloud products associated with the template

    :param products: The products of this TemplateInfo.
    :type: list[TemplateProductExt]
    """
    self._products = products

@property
def topic(self):
    """Gets the topic of this TemplateInfo.

    Template topics

    :return: The topic of this TemplateInfo.
    :rtype: list[TopicCategory]
    """
    return self._topic

@topic.setter
def topic(self, topic):
    """Sets the topic of this TemplateInfo.

    Template topics

    :param topic: The topic of this TemplateInfo.
    :type: list[TopicCategory]
    """
    self._topic = topic
@property
def creator_id(self):
    """Gets the creator_id of this TemplateInfo.

    Template creator id

    :return: The creator_id of this TemplateInfo.
    :rtype: str
    """
    return self._creator_id

@creator_id.setter
def creator_id(self, creator_id):
    """Sets the creator_id of this TemplateInfo.

    Template creator id

    :param creator_id: The creator_id of this TemplateInfo.
    :type: str
    """
    self._creator_id = creator_id

@property
def creator(self):
    """Gets the creator of this TemplateInfo.

    Template creator (the alias is returned if one exists)

    :return: The creator of this TemplateInfo.
    :rtype: str
    """
    return self._creator

@creator.setter
def creator(self, creator):
    """Sets the creator of this TemplateInfo.

    Template creator (the alias is returned if one exists)

    :param creator: The creator of this TemplateInfo.
    :type: str
    """
    self._creator = creator

@property
def nickname(self):
    """Gets the nickname of this TemplateInfo.

    Template creator's nickname (the alias is returned if one exists)

    :return: The nickname of this TemplateInfo.
    :rtype: str
    """
    return self._nickname

@nickname.setter
def nickname(self, nickname):
    """Sets the nickname of this TemplateInfo.

    Template creator's nickname (the alias is returned if one exists)

    :param nickname: The nickname of this TemplateInfo.
    :type: str
    """
    self._nickname = nickname

@property
def score(self):
    """Gets the score of this TemplateInfo.

    Template score (number of likes)

    :return: The score of this TemplateInfo.
    :rtype: int
    """
    return self._score

@score.setter
def score(self, score):
    """Sets the score of this TemplateInfo.

    Template score (number of likes)

    :param score: The score of this TemplateInfo.
    :type: int
    """
    self._score = score

@property
def label(self):
    """Gets the label of this TemplateInfo.

    Template label (new, hot, etc.)

    :return: The label of this TemplateInfo.
    :rtype: str
    """
    return self._label

@label.setter
def label(self, label):
    """Sets the label of this TemplateInfo.

    Template label (new, hot, etc.)

    :param label: The label of this TemplateInfo.
    :type: str
    """
    self._label = label

@property
def store(self):
    """Gets the store of this TemplateInfo.

    Code storage location

    :return: The store of this TemplateInfo.
    :rtype: int
    """
    return self._store

@store.setter
def store(self, store):
    """Sets the store of this TemplateInfo.

    Code storage location

    :param store: The store of this TemplateInfo.
    :type: int
    """
    self._store = store
@property
def store_info(self):
"""Gets the store_info of this TemplateInfo.
获取代码模版所需的信息
:return: The store_info of this TemplateInfo.
:rtype: str
"""
return self._store_info
@store_info.setter
def store_info(self, store_info):
"""Sets the store_info of this TemplateInfo.
获取代码模版所需的信息
:param store_info: The store_info of this TemplateInfo.
:type: str
"""
self._store_info = store_info
@property
def status(self):
"""Gets the status of this TemplateInfo.
模板状态(0:审核中 1: 已上架 2: 未上架(已下架)3: 未上架(合规检查不通过)4:未上架(待上架)5:已删除)
:return: The status of this TemplateInfo.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this TemplateInfo.
模板状态(0:审核中 1: 已上架 2: 未上架(已下架)3: 未上架(合规检查不通过)4:未上架(待上架)5:已删除)
:param status: The status of this TemplateInfo.
:type: int
"""
self._status = status
@property
def view_count(self):
"""Gets the view_count of this TemplateInfo.
访问量
:return: The view_count of this TemplateInfo.
:rtype: int
"""
return self._view_count
@view_count.setter
def view_count(self, view_count):
"""Sets the view_count of this TemplateInfo.
访问量
:param view_count: The view_count of this TemplateInfo.
:type: int
"""
self._view_count = view_count
@property
def usage_count(self):
"""Gets the usage_count of this TemplateInfo.
引用量
:return: The usage_count of this TemplateInfo.
:rtype: int
"""
return self._usage_count
@usage_count.setter
def usage_count(self, usage_count):
"""Sets the usage_count of this TemplateInfo.
引用量
:param usage_count: The usage_count of this TemplateInfo.
:type: int
"""
self._usage_count = usage_count
    @property
    def created_at(self):
        """Gets the created_at of this TemplateInfo.
        Creation time.
        :return: The created_at of this TemplateInfo.
        :rtype: str
        """
        return self._created_at
    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this TemplateInfo.
        Creation time.
        :param created_at: The created_at of this TemplateInfo.
        :type: str
        """
        self._created_at = created_at
    @property
    def updated_at(self):
        """Gets the updated_at of this TemplateInfo.
        Last update time.
        :return: The updated_at of this TemplateInfo.
        :rtype: str
        """
        return self._updated_at
    @updated_at.setter
    def updated_at(self, updated_at):
        """Sets the updated_at of this TemplateInfo.
        Last update time.
        :param updated_at: The updated_at of this TemplateInfo.
        :type: str
        """
        self._updated_at = updated_at
    @property
    def published_at(self):
        """Gets the published_at of this TemplateInfo.
        Template publication (listing) time.
        :return: The published_at of this TemplateInfo.
        :rtype: str
        """
        return self._published_at
    @published_at.setter
    def published_at(self, published_at):
        """Sets the published_at of this TemplateInfo.
        Template publication (listing) time.
        :param published_at: The published_at of this TemplateInfo.
        :type: str
        """
        self._published_at = published_at
    @property
    def favorite_state(self):
        """Gets the favorite_state of this TemplateInfo.
        Like state (1: liked, 0: not liked).
        :return: The favorite_state of this TemplateInfo.
        :rtype: int
        """
        return self._favorite_state
    @favorite_state.setter
    def favorite_state(self, favorite_state):
        """Sets the favorite_state of this TemplateInfo.
        Like state (1: liked, 0: not liked).
        :param favorite_state: The favorite_state of this TemplateInfo.
        :type: int
        """
        self._favorite_state = favorite_state
    @property
    def tags(self):
        """Gets the tags of this TemplateInfo.
        Template tags.
        :return: The tags of this TemplateInfo.
        :rtype: list[TagInfo]
        """
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Sets the tags of this TemplateInfo.
        Template tags.
        :param tags: The tags of this TemplateInfo.
        :type: list[TagInfo]
        """
        self._tags = tags
    @property
    def type(self):
        """Gets the type of this TemplateInfo.
        Template type (0: doc, 1: code, 2: pipeline, 3: devops).
        :return: The type of this TemplateInfo.
        :rtype: int
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this TemplateInfo.
        Template type (0: doc, 1: code, 2: pipeline, 3: devops).
        :param type: The type of this TemplateInfo.
        :type: int
        """
        self._type = type
    @property
    def is_static(self):
        """Gets the is_static of this TemplateInfo.
        Dynamic/static code template flag (0: dynamic template "codetemplate", 1: static template "codesample").
        :return: The is_static of this TemplateInfo.
        :rtype: int
        """
        return self._is_static
    @is_static.setter
    def is_static(self, is_static):
        """Sets the is_static of this TemplateInfo.
        Dynamic/static code template flag (0: dynamic template "codetemplate", 1: static template "codesample").
        :param is_static: The is_static of this TemplateInfo.
        :type: int
        """
        self._is_static = is_static
    @property
    def maintainers(self):
        """Gets the maintainers of this TemplateInfo.
        Account names of all maintainers associated with the template.
        :return: The maintainers of this TemplateInfo.
        :rtype: list[str]
        """
        return self._maintainers
    @maintainers.setter
    def maintainers(self, maintainers):
        """Sets the maintainers of this TemplateInfo.
        Account names of all maintainers associated with the template.
        :param maintainers: The maintainers of this TemplateInfo.
        :type: list[str]
        """
        self._maintainers = maintainers
    @property
    def pipeline_template(self):
        """Gets the pipeline_template of this TemplateInfo.
        :return: The pipeline_template of this TemplateInfo.
        :rtype: PipelineTemplateInfo
        """
        return self._pipeline_template
    @pipeline_template.setter
    def pipeline_template(self, pipeline_template):
        """Sets the pipeline_template of this TemplateInfo.
        :param pipeline_template: The pipeline_template of this TemplateInfo.
        :type: PipelineTemplateInfo
        """
        self._pipeline_template = pipeline_template
    @property
    def platform_source(self):
        """Gets the platform_source of this TemplateInfo.
        Platform source (0: codelabs, 1: devstar).
        :return: The platform_source of this TemplateInfo.
        :rtype: int
        """
        return self._platform_source
    @platform_source.setter
    def platform_source(self, platform_source):
        """Sets the platform_source of this TemplateInfo.
        Platform source (0: codelabs, 1: devstar).
        :param platform_source: The platform_source of this TemplateInfo.
        :type: int
        """
        self._platform_source = platform_source
    @property
    def references(self):
        """Gets the references of this TemplateInfo.
        Related documents, samples and posts.
        :return: The references of this TemplateInfo.
        :rtype: list[Reference]
        """
        return self._references
    @references.setter
    def references(self, references):
        """Sets the references of this TemplateInfo.
        Related documents, samples and posts.
        :param references: The references of this TemplateInfo.
        :type: list[Reference]
        """
        self._references = references
    @property
    def properties(self):
        """Gets the properties of this TemplateInfo.
        List of user-defined template parameters.
        :return: The properties of this TemplateInfo.
        :rtype: object
        """
        return self._properties
    @properties.setter
    def properties(self, properties):
        """Sets the properties of this TemplateInfo.
        List of user-defined template parameters.
        :param properties: The properties of this TemplateInfo.
        :type: object
        """
        self._properties = properties
    @property
    def dependencies(self):
        """Gets the dependencies of this TemplateInfo.
        Dependency information.
        :return: The dependencies of this TemplateInfo.
        :rtype: list[object]
        """
        return self._dependencies
    @dependencies.setter
    def dependencies(self, dependencies):
        """Sets the dependencies of this TemplateInfo.
        Dependency information.
        :param dependencies: The dependencies of this TemplateInfo.
        :type: list[object]
        """
        self._dependencies = dependencies
    @property
    def dependency_type(self):
        """Gets the dependency_type of this TemplateInfo.
        Dependency type.
        :return: The dependency_type of this TemplateInfo.
        :rtype: str
        """
        return self._dependency_type
    @dependency_type.setter
    def dependency_type(self, dependency_type):
        """Sets the dependency_type of this TemplateInfo.
        Dependency type.
        :param dependency_type: The dependency_type of this TemplateInfo.
        :type: str
        """
        self._dependency_type = dependency_type
    @property
    def forum_id(self):
        """Gets the forum_id of this TemplateInfo.
        Id of the associated forum board.
        :return: The forum_id of this TemplateInfo.
        :rtype: int
        """
        return self._forum_id
    @forum_id.setter
    def forum_id(self, forum_id):
        """Sets the forum_id of this TemplateInfo.
        Id of the associated forum board.
        :param forum_id: The forum_id of this TemplateInfo.
        :type: int
        """
        self._forum_id = forum_id
    @property
    def file_size(self):
        """Gets the file_size of this TemplateInfo.
        Size of the template files after decompression (unit: KB).
        :return: The file_size of this TemplateInfo.
        :rtype: int
        """
        return self._file_size
    @file_size.setter
    def file_size(self, file_size):
        """Sets the file_size of this TemplateInfo.
        Size of the template files after decompression (unit: KB).
        :param file_size: The file_size of this TemplateInfo.
        :type: int
        """
        self._file_size = file_size
    @property
    def deployment(self):
        """Gets the deployment of this TemplateInfo.
        Deployment information.
        :return: The deployment of this TemplateInfo.
        :rtype: object
        """
        return self._deployment
    @deployment.setter
    def deployment(self, deployment):
        """Sets the deployment of this TemplateInfo.
        Deployment information.
        :param deployment: The deployment of this TemplateInfo.
        :type: object
        """
        self._deployment = deployment
    @property
    def update_id(self):
        """Gets the update_id of this TemplateInfo.
        Update-state id associated with the template.
        :return: The update_id of this TemplateInfo.
        :rtype: str
        """
        return self._update_id
    @update_id.setter
    def update_id(self, update_id):
        """Sets the update_id of this TemplateInfo.
        Update-state id associated with the template.
        :param update_id: The update_id of this TemplateInfo.
        :type: str
        """
        self._update_id = update_id
    @property
    def is_support_cloudide(self):
        """Gets the is_support_cloudide of this TemplateInfo.
        Whether running the source code in CloudIDE is supported.
        :return: The is_support_cloudide of this TemplateInfo.
        :rtype: bool
        """
        return self._is_support_cloudide
    @is_support_cloudide.setter
    def is_support_cloudide(self, is_support_cloudide):
        """Sets the is_support_cloudide of this TemplateInfo.
        Whether running the source code in CloudIDE is supported.
        :param is_support_cloudide: The is_support_cloudide of this TemplateInfo.
        :type: bool
        """
        self._is_support_cloudide = is_support_cloudide
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
279,
4798,
198,
11748,
302,
198,
198,
11748,
2237,
628,
628,
198,
198,
4871,
37350,
12360,
25,
628,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
49213,
25,
198,
220,
220,
220,
220... | 2.049728 | 12,850 |
#!/usr/bin/python3
import jk_flexdata
#
# Regression test: if this program terminates and does not get into an endless
# loop, everything is okay.
#
# Build a FlexObject whose attribute "a" is an empty list; "b" is never set.
x = jk_flexdata.FlexObject({
	"a": [
	]
})
print("****************")
# Iterating an existing (empty) attribute must yield nothing and terminate.
print(type(x.a))
for a in x.a:
	print(a)
print("****************")
# Iterating the missing attribute "b": x.b must still be iterable and the
# loop must terminate (this was the original endless-loop bug).
# NOTE(review): the exact placeholder type returned for missing attributes is
# not visible here -- confirm against jk_flexdata's documentation.
print(type(x.b))
for a in x.b:
	print(a)
print("****************")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
628,
198,
11748,
474,
74,
62,
32880,
7890,
628,
198,
198,
2,
198,
2,
770,
1332,
8794,
329,
257,
5434,
13,
632,
428,
1430,
5651,
689,
290,
857,
407,
651,
656,
281,
13079,
9052,
11,
2279,
... | 2.612245 | 147 |
# Emit the two marker lines, in order, exactly as before.
for message in ("stellaaaaa", "bennnn is gonna stay"):
    print(message)
| [
4798,
7203,
301,
695,
24794,
64,
4943,
198,
2,
9288,
198,
4798,
7203,
65,
1697,
20471,
318,
8066,
2652,
4943,
198
] | 2.666667 | 21 |
from urllib.request import urlopen
from urllib.request import Request
from urllib.parse import urlencode
import json
| [
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19390,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
198,
11748,
33918,
628
] | 3.575758 | 33 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import threading
import multiprocessing
import sys
import copy
if sys.version_info.major == 2:
import Queue
elif sys.version_info.major == 3:
import queue as Queue
else:
raise Exception("Error Python version")
import os
import logging
import collections
import json
from .error_catch import ErrorCatch, CustomException, CustomExceptionCode, ParamChecker, ParamVerify
from .operator import Op, RequestOp, ResponseOp, VirtualOp
from .channel import (ThreadChannel, ProcessChannel, ChannelData,
ChannelDataType, ChannelStopError)
from .error_catch import ProductErrCode
from .error_catch import CustomExceptionCode as ChannelDataErrcode
from .profiler import TimeProfiler, PerformanceTracer
from .util import NameGenerator, ThreadIdGenerator, PipelineProcSyncManager
from .proto import pipeline_service_pb2
_LOGGER = logging.getLogger(__name__)
class DAGExecutor(object):
    """
    DAG Executor, the service entrance of DAG.

    One DAGExecutor owns a built DAG (ops + channels), generates a unique
    data_id for every incoming RPC request, pushes the request into the DAG's
    input channel and blocks on a per-request condition variable until the
    result arrives on the output channel (delivered by a background thread).
    """
    def __init__(self, response_op, server_conf, worker_idx):
        """
        Initialize DAGExecutor.
        Args:
            response_op: Response OP
            server_conf: server conf. config.yaml
            worker_idx: DAGExecutor index, PipelineServer creates many
                DAGExecutors when _build_dag_each_worker is true.
        Returns:
            None.
        """
        build_dag_each_worker = server_conf["build_dag_each_worker"]
        server_worker_num = server_conf["worker_num"]
        dag_conf = server_conf["dag"]
        self._retry = dag_conf["retry"]
        self._server_use_profile = dag_conf["use_profile"]
        channel_size = dag_conf["channel_size"]
        channel_recv_frist_arrive = dag_conf["channel_recv_frist_arrive"]
        self._is_thread_op = dag_conf["is_thread_op"]
        tracer_conf = dag_conf["tracer"]
        tracer_interval_s = tracer_conf["interval_s"]
        self.name = "@DAGExecutor"
        self._profiler = TimeProfiler()
        self._profiler.enable(True)
        # A PerformanceTracer is only created when tracing is enabled
        # (interval of at least one second).
        self._tracer = None
        if tracer_interval_s >= 1:
            self._tracer = PerformanceTracer(
                self._is_thread_op, tracer_interval_s, server_worker_num)
        self._dag = DAG(self.name, response_op, self._server_use_profile,
                        self._is_thread_op, channel_size, build_dag_each_worker,
                        self._tracer, channel_recv_frist_arrive)
        (in_channel, out_channel, pack_rpc_func,
         unpack_rpc_func) = self._dag.build()
        self._dag.start()
        self._set_in_channel(in_channel)
        self._set_out_channel(out_channel)
        self._pack_rpc_func = pack_rpc_func
        self._unpack_rpc_func = unpack_rpc_func
        if self._tracer is not None:
            self._tracer.start()
        # generate id
        # data_id: Server Unique ID, automatically generated by the framework
        # log_id: Trace one product request, can be empty, not unique.
        # When each worker builds its own DAG, ids are interleaved across
        # workers (start at worker_idx, step by worker count) to stay unique.
        base_counter = 0
        gen_id_step = 1
        if build_dag_each_worker:
            base_counter = worker_idx
            gen_id_step = server_worker_num
        self._id_generator = ThreadIdGenerator(
            max_id=1000000000000000000,
            base_counter=base_counter,
            step=gen_id_step)
        # data_id -> condition variable / result slot; used to hand results
        # from the receiver thread back to the blocked call().
        self._cv_pool = {}
        self._cv_for_cv_pool = threading.Condition()
        self._fetch_buffer = {}
        self._recive_func = None
        self._client_profile_key = "pipeline.profile"
        self._client_profile_value = "1"
    # ErrorCatch is the framework's exception-capture decorator (imported
    # from .error_catch).
    @ErrorCatch
    def start(self):
        """
        Starting one thread for receiving data from the last channel background.
        Args:
            None
        Returns:
            None
        """
        # Daemon thread: must not keep the process alive on shutdown.
        self._recive_func = threading.Thread(
            target=DAGExecutor._recive_out_channel_func, args=(self, ))
        self._recive_func.daemon = True
        self._recive_func.start()
        _LOGGER.debug("[DAG Executor] Start recive thread")
    def stop(self):
        """
        Stopping DAG
        Args:
            None
        Returns:
            None
        """
        self._dag.stop()
        self._dag.join()
        _LOGGER.info("[DAG Executor] Stop")
    def _get_next_data_id(self):
        """
        Generate data_id incrementally and Uniquely
        Args:
            None
        Returns:
            data_id: uniq id
            cond_v: condition variable
        """
        data_id = self._id_generator.next()
        cond_v = threading.Condition()
        # Register the waiter slot before the request is pushed so the
        # receiver thread always finds an entry for this data_id.
        with self._cv_for_cv_pool:
            self._cv_pool[data_id] = cond_v
            self._fetch_buffer[data_id] = None
        return data_id, cond_v
    def _set_in_channel(self, in_channel):
        """
        Set in_channel of DAG
        Args:
            in_channel: input channel of DAG
        Returns:
            None
        """
        if not isinstance(in_channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical("[DAG Executor] Failed to set in_channel: "
                             "in_channel must be Channel type, but get {}".
                             format(type(in_channel)))
            os._exit(-1)
        self._in_channel = in_channel
        _LOGGER.info("[DAG] set in channel succ, name [{}]".format(self.name))
    def _set_out_channel(self, out_channel):
        """
        Set out_channel of DAG
        Args:
            out_channel: output channel of DAG
        Returns:
            None
        """
        if not isinstance(out_channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical("[DAG Executor] Failed to set out_channel: "
                             "must be Channel type, but get {}".format(
                                 type(out_channel)))
            os._exit(-1)
        out_channel.add_consumer(self.name)
        self._out_channel = out_channel
    def _recive_out_channel_func(self):
        """
        Receiving data from the output channel, and pushing data into
        _fetch_buffer. Function _get_channeldata_from_fetch_buffer gets
        data by retry time.
        Args:
            None
        Returns:
            None
        """
        cv = None
        while True:
            try:
                channeldata_dict = self._out_channel.front(self.name)
            except ChannelStopError:
                _LOGGER.info("[DAG Executor] Stop.")
                # Channel closed: wake every waiting caller with a
                # CLOSED_ERROR result, then exit this thread.
                with self._cv_for_cv_pool:
                    for data_id, cv in self._cv_pool.items():
                        closed_errror_data = ChannelData(
                            error_code=ChannelDataErrcode.CLOSED_ERROR.value,
                            error_info="dag closed.",
                            data_id=data_id)
                        with cv:
                            self._fetch_buffer[data_id] = closed_errror_data
                            cv.notify_all()
                break
            if len(channeldata_dict) != 1:
                _LOGGER.critical(
                    "[DAG Executor] Failed to fetch result: out_channel "
                    "cannot have multiple input ops")
                os._exit(-1)
            (_, channeldata), = channeldata_dict.items()
            if not isinstance(channeldata, ChannelData):
                _LOGGER.critical(
                    '[DAG Executor] Failed to fetch result: data in out_channel" \
                    " must be ChannelData type, but get {}'
                    .format(type(channeldata)))
                os._exit(-1)
            data_id = channeldata.id
            _LOGGER.debug("(logid={}) [recive thread] Fetched data".format(
                data_id))
            # Store the result and wake the caller blocked on this data_id.
            with self._cv_for_cv_pool:
                cond_v = self._cv_pool[data_id]
            with cond_v:
                self._fetch_buffer[data_id] = channeldata
                cond_v.notify_all()
    def _get_channeldata_from_fetch_buffer(self, data_id, cond_v):
        """
        Getting the channel data from _fetch_buffer.
        Args:
            data_id: search key
            cond_v: conditional variable
        Returns:
            ready_data: one channel data processed
        """
        ready_data = None
        with cond_v:
            with self._cv_for_cv_pool:
                if self._fetch_buffer[data_id] is not None:
                    # The requested data is already ready
                    ready_data = self._fetch_buffer[data_id]
                    self._cv_pool.pop(data_id)
                    self._fetch_buffer.pop(data_id)
            if ready_data is None:
                # Wait for data ready
                cond_v.wait()
                with self._cv_for_cv_pool:
                    ready_data = self._fetch_buffer[data_id]
                    self._cv_pool.pop(data_id)
                    self._fetch_buffer.pop(data_id)
        _LOGGER.debug("(data_id={}) [resp thread] Got data".format(data_id))
        return ready_data
    def _pack_channeldata(self, rpc_request, data_id):
        """
        Unpacking data from RPC request. and creating one channelData.
        Args:
            rpc_request: one RPC request
            data_id: data id, unique
        Returns:
            ChannelData: one channel data to be processed
        """
        dictdata = None
        log_id = None
        try:
            dictdata, log_id, prod_errcode, prod_errinfo = self._unpack_rpc_func(
                rpc_request)
        except Exception as e:
            _LOGGER.error(
                "(logid={}) Failed to parse RPC request package: {}"
                .format(data_id, e),
                exc_info=True)
            return ChannelData(
                error_code=ChannelDataErrcode.RPC_PACKAGE_ERROR.value,
                error_info="rpc package error: {}".format(e),
                data_id=data_id,
                log_id=log_id)
        else:
            # because unpack_rpc_func is rewritten by user, we need to look
            # for product_errcode in returns, and client_profile_key field
            # in rpc_request
            if prod_errcode is not None:
                # product errors occured
                _LOGGER.error("unpack_rpc_func prod_errcode:{}".format(
                    prod_errcode))
                return ChannelData(
                    error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                    error_info="",
                    prod_error_code=prod_errcode,
                    prod_error_info=prod_errinfo,
                    data_id=data_id,
                    log_id=log_id)
            # The client asks for profiling info by sending
            # "pipeline.profile": "1" in the request dict.
            profile_value = None
            profile_value = dictdata.get(self._client_profile_key)
            client_need_profile = (profile_value == self._client_profile_value)
            return ChannelData(
                datatype=ChannelDataType.DICT.value,
                dictdata=dictdata,
                data_id=data_id,
                log_id=log_id,
                client_need_profile=client_need_profile)
    def call(self, rpc_request):
        """
        DAGExcutor enterance function. There are 5 steps:
        1._get_next_data_id: Generate an incremental ID
        2._pack_channeldata: pack the channel data from request.
        3.retry loop:
            a. push channel_data into _in_channel
            b. get_channeldata_from_fetch_buffer: get results.
        4._pack_for_rpc_resp: pack RPC responses
        5.profile: generte profile string and pack into response.
        Args:
            rpc_request: one RPC request
        Returns:
            rpc_resp: one RPC response
        """
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()
        data_id, cond_v = self._get_next_data_id()
        start_call, end_call = None, None
        if not self._is_thread_op:
            start_call = self._profiler.record("call_{}#DAG-{}_0".format(
                data_id, data_id))
        else:
            start_call = self._profiler.record("call_{}#DAG_0".format(data_id))
        self._profiler.record("prepack_{}#{}_0".format(data_id, self.name))
        req_channeldata = self._pack_channeldata(rpc_request, data_id)
        self._profiler.record("prepack_{}#{}_1".format(data_id, self.name))
        log_id = req_channeldata.log_id
        _LOGGER.info("(data_id={} log_id={}) Succ Generate ID ".format(data_id,
                                                                       log_id))
        resp_channeldata = None
        # Retry only on TIMEOUT results; any other error stops the loop.
        for i in range(self._retry):
            _LOGGER.debug("(data_id={}) Pushing data into Graph engine".format(
                data_id))
            try:
                if req_channeldata is None:
                    _LOGGER.critical(
                        "(data_id={} log_id={}) req_channeldata is None"
                        .format(data_id, log_id))
                if not isinstance(self._in_channel,
                                  (ThreadChannel, ProcessChannel)):
                    _LOGGER.critical(
                        "(data_id={} log_id={})[DAG Executor] Failed to "
                        "set in_channel: in_channel must be Channel type, but get {}".
                        format(data_id, log_id, type(self._in_channel)))
                self._in_channel.push(req_channeldata, self.name)
            except ChannelStopError:
                _LOGGER.error("(data_id:{} log_id={})[DAG Executor] Stop".
                              format(data_id, log_id))
                with self._cv_for_cv_pool:
                    self._cv_pool.pop(data_id)
                return self._pack_for_rpc_resp(
                    ChannelData(
                        error_code=ChannelDataErrcode.CLOSED_ERROR.value,
                        error_info="dag closed.",
                        data_id=data_id))
            _LOGGER.debug("(data_id={} log_id={}) Wait for Graph engine...".
                          format(data_id, log_id))
            resp_channeldata = self._get_channeldata_from_fetch_buffer(data_id,
                                                                       cond_v)
            if resp_channeldata.error_code == ChannelDataErrcode.OK.value:
                _LOGGER.info("(data_id={} log_id={}) Succ predict".format(
                    data_id, log_id))
                break
            else:
                _LOGGER.error("(data_id={} log_id={}) Failed to predict: {}"
                              .format(data_id, log_id,
                                      resp_channeldata.error_info))
                if resp_channeldata.error_code != ChannelDataErrcode.TIMEOUT.value:
                    break
            if i + 1 < self._retry:
                _LOGGER.warning(
                    "(data_id={} log_id={}) DAGExecutor retry({}/{})"
                    .format(data_id, log_id, i + 1, self._retry))
        _LOGGER.debug("(data_id={} log_id={}) Packing RPC response package"
                      .format(data_id, log_id))
        self._profiler.record("postpack_{}#{}_0".format(data_id, self.name))
        rpc_resp = self._pack_for_rpc_resp(resp_channeldata)
        self._profiler.record("postpack_{}#{}_1".format(data_id, self.name))
        if not self._is_thread_op:
            end_call = self._profiler.record("call_{}#DAG-{}_1".format(data_id,
                                                                       data_id))
        else:
            end_call = self._profiler.record("call_{}#DAG_1".format(data_id))
        if self._tracer is not None:
            trace_buffer.put({
                "name": "DAG",
                "id": data_id,
                "succ":
                resp_channeldata.error_code == ChannelDataErrcode.OK.value,
                "actions": {
                    "call_{}".format(data_id): end_call - start_call,
                },
            })
        profile_str = self._profiler.gen_profile_str()
        if self._server_use_profile:
            sys.stderr.write(profile_str)
        # add profile info into rpc_resp
        if resp_channeldata.client_need_profile:
            profile_set = resp_channeldata.profile_data_set
            profile_set.add(profile_str)
            profile_value = "".join(list(profile_set))
            rpc_resp.key.append(self._client_profile_key)
            rpc_resp.value.append(profile_value)
        return rpc_resp
    def _pack_for_rpc_resp(self, channeldata):
        """
        Packing one RPC response
        Args:
            channeldata: one channel data to be packed
        Returns:
            resp: one RPC response
        """
        try:
            return self._pack_rpc_func(channeldata)
        except Exception as e:
            _LOGGER.error(
                "(logid={}) Failed to pack RPC response package: {}"
                .format(channeldata.id, e),
                exc_info=True)
            resp = pipeline_service_pb2.Response()
            resp.err_no = ChannelDataErrcode.RPC_PACKAGE_ERROR.value
            resp.err_msg = "rpc package error: {}".format(e)
            return resp
class DAG(object):
    """
    Directed Acyclic Graph(DAG) engine, builds one DAG topology.

    NOTE(review): no __init__ is visible in this chunk; attributes such as
    self._is_thread_op, self._channel_size, self._response_op,
    self._build_dag_each_worker, self._manager and self._tracer are
    presumably assigned by a constructor defined elsewhere -- confirm
    before relying on them.
    """
    @staticmethod
    def get_use_ops(response_op):
        """
        Starting from ResponseOp, recursively traverse the front OPs. Getting
        all used ops and the post op list of each op (excluding ResponseOp)
        Args:
            response_op: ResponseOp
        Returns:
            used_ops: used ops, set
            succ_ops_of_use_op: op and the next op list, dict.
        """
        unique_names = set()
        used_ops = set()
        succ_ops_of_use_op = {}  # {op_name: succ_ops}
        # Breadth-first traversal backwards over get_input_ops().
        que = Queue.Queue()
        que.put(response_op)
        while que.qsize() != 0:
            op = que.get()
            for pred_op in op.get_input_ops():
                if pred_op.name not in succ_ops_of_use_op:
                    succ_ops_of_use_op[pred_op.name] = []
                if op != response_op:
                    succ_ops_of_use_op[pred_op.name].append(op)
                if pred_op not in used_ops:
                    que.put(pred_op)
                    used_ops.add(pred_op)
                    # check the name of op is globally unique
                    if pred_op.name in unique_names:
                        _LOGGER.critical("Failed to get used Ops: the"
                                         " name of Op must be unique: {}".
                                         format(pred_op.name))
                        os._exit(-1)
                    unique_names.add(pred_op.name)
        return used_ops, succ_ops_of_use_op
    def _gen_channel(self, name_gen):
        """
        Generate one ThreadChannel or ProcessChannel.
        Args:
            name_gen: channel name
        Returns:
            channel: one channel generated
        """
        channel = None
        if self._is_thread_op:
            channel = ThreadChannel(
                name=name_gen.next(),
                maxsize=self._channel_size,
                channel_recv_frist_arrive=self._channel_recv_frist_arrive)
        else:
            channel = ProcessChannel(
                self._manager,
                name=name_gen.next(),
                maxsize=self._channel_size,
                channel_recv_frist_arrive=self._channel_recv_frist_arrive)
        _LOGGER.debug("[DAG] Generate channel: {}".format(channel.name))
        return channel
    def _gen_virtual_op(self, name_gen):
        """
        Generate one virtual Op
        Args:
            name_gen: Op name
        Returns:
            vir_op: one virtual Op object.
        """
        vir_op = VirtualOp(name=name_gen.next())
        _LOGGER.debug("[DAG] Generate virtual_op: {}".format(vir_op.name))
        return vir_op
    def _topo_sort(self, used_ops, response_op, out_degree_ops):
        """
        Topological sort of DAG, creates inverted multi-layers views.
        Args:
            used_ops: op used in DAG
            response_op: response op
            out_degree_ops: Next op list for each op, dict. the output of
                get_use_ops()
        Returns:
            dag_views: the inverted hierarchical topology list. examples:
                DAG :[A -> B -> C -> E]
                            \-> D /
                dag_views: [[E], [C, D], [B], [A]]
            last_op:the last op front of ResponseOp
        """
        out_degree_num = {
            name: len(ops)
            for name, ops in out_degree_ops.items()
        }
        que_idx = 0  # scroll queue
        # Two queues used alternately: one holds the current layer, the
        # other collects the next layer.
        ques = [Queue.Queue() for _ in range(2)]
        zero_indegree_num = 0
        for op in used_ops:
            if len(op.get_input_ops()) == 0:
                zero_indegree_num += 1
        if zero_indegree_num != 1:
            _LOGGER.critical("Failed to topo sort: DAG contains "
                             "multiple RequestOps")
            os._exit(-1)
        last_op = response_op.get_input_ops()[0]
        ques[que_idx].put(last_op)
        # topo sort to get dag_views
        dag_views = []
        sorted_op_num = 0
        while True:
            que = ques[que_idx]
            next_que = ques[(que_idx + 1) % 2]
            dag_view = []
            while que.qsize() != 0:
                op = que.get()
                dag_view.append(op)
                sorted_op_num += 1
                for pred_op in op.get_input_ops():
                    out_degree_num[pred_op.name] -= 1
                    if out_degree_num[pred_op.name] == 0:
                        next_que.put(pred_op)
            dag_views.append(dag_view)
            if next_que.qsize() == 0:
                break
            que_idx = (que_idx + 1) % 2
        # If some op was never reached, the graph has a cycle or is
        # disconnected.
        if sorted_op_num < len(used_ops):
            _LOGGER.critical("Failed to topo sort: not legal DAG")
            os._exit(-1)
        return dag_views, last_op
    def _build_dag(self, response_op):
        """
        Building DAG, the most important function in class DAG. Core steps:
        1.get_use_ops: Getting used ops, and out degree op list for each op.
        2._topo_sort: Topological sort creates inverted multi-layers views.
        3.create channels and virtual ops.
        Args:
            response_op: ResponseOp
        Returns:
            actual_ops: all OPs used in DAG, including virtual OPs
            channels: all channels used in DAG
            input_channel: the channel of first OP
            output_channel: the channel of last OP
            pack_func: pack_response_package function of response_op
            unpack_func: unpack_request_package function of request_op
        """
        if response_op is None:
            _LOGGER.critical("Failed to build DAG: ResponseOp"
                             " has not been set.")
            os._exit(-1)
        used_ops, out_degree_ops = DAG.get_use_ops(response_op)
        if not self._build_dag_each_worker:
            _LOGGER.info("================= USED OP =================")
            for op in used_ops:
                if not isinstance(op, RequestOp):
                    _LOGGER.info(op.name)
            _LOGGER.info("-------------------------------------------")
        if len(used_ops) <= 1:
            _LOGGER.critical(
                "Failed to build DAG: besides RequestOp and ResponseOp, "
                "there should be at least one Op in DAG.")
            os._exit(-1)
        if self._build_dag_each_worker:
            _LOGGER.info("Because `build_dag_each_worker` mode is used, "
                         "Auto-batching is set to the default config: "
                         "batch_size=1, auto_batching_timeout=None")
            for op in used_ops:
                op.use_default_auto_batching_config()
        dag_views, last_op = self._topo_sort(used_ops, response_op,
                                             out_degree_ops)
        dag_views = list(reversed(dag_views))
        if not self._build_dag_each_worker:
            _LOGGER.info("================== DAG ====================")
            for idx, view in enumerate(dag_views):
                _LOGGER.info("(VIEW {})".format(idx))
                for op in view:
                    _LOGGER.info("  [{}]".format(op.name))
                    for out_op in out_degree_ops[op.name]:
                        _LOGGER.info("    - {}".format(out_op.name))
            _LOGGER.info("-------------------------------------------")
        # create channels and virtual ops
        virtual_op_name_gen = NameGenerator("vir")
        channel_name_gen = NameGenerator("chl")
        virtual_ops = []
        channels = []
        input_channel = None
        actual_view = None
        for v_idx, view in enumerate(dag_views):
            if v_idx + 1 >= len(dag_views):
                break
            next_view = dag_views[v_idx + 1]
            if actual_view is None:
                actual_view = view
            actual_next_view = []
            pred_op_of_next_view_op = {}
            for op in actual_view:
                # find actual succ op in next view and create virtual op
                for succ_op in out_degree_ops[op.name]:
                    if succ_op in next_view:
                        if succ_op not in actual_next_view:
                            actual_next_view.append(succ_op)
                        if succ_op.name not in pred_op_of_next_view_op:
                            pred_op_of_next_view_op[succ_op.name] = []
                        pred_op_of_next_view_op[succ_op.name].append(op)
                    else:
                        # create virtual op
                        virtual_op = self._gen_virtual_op(virtual_op_name_gen)
                        virtual_ops.append(virtual_op)
                        out_degree_ops[virtual_op.name] = [succ_op]
                        actual_next_view.append(virtual_op)
                        pred_op_of_next_view_op[virtual_op.name] = [op]
                        virtual_op.add_virtual_pred_op(op)
            actual_view = actual_next_view
            # create channel
            processed_op = set()
            for o_idx, op in enumerate(actual_next_view):
                if op.name in processed_op:
                    continue
                channel = self._gen_channel(channel_name_gen)
                channels.append(channel)
                op.add_input_channel(channel)
                _LOGGER.info("op:{} add input channel.".format(op.name))
                pred_ops = pred_op_of_next_view_op[op.name]
                if v_idx == 0:
                    input_channel = channel
                else:
                    # if pred_op is virtual op, it will use ancestors as producers to channel
                    for pred_op in pred_ops:
                        pred_op.add_output_channel(channel)
                        _LOGGER.info("pred_op:{} add output channel".format(
                            pred_op.name))
                processed_op.add(op.name)
                # find same input op to combine channel
                for other_op in actual_next_view[o_idx + 1:]:
                    if other_op.name in processed_op:
                        continue
                    other_pred_ops = pred_op_of_next_view_op[other_op.name]
                    if len(other_pred_ops) != len(pred_ops):
                        continue
                    same_flag = True
                    for pred_op in pred_ops:
                        if pred_op not in other_pred_ops:
                            same_flag = False
                            break
                    if same_flag:
                        other_op.add_input_channel(channel)
                        processed_op.add(other_op.name)
        output_channel = self._gen_channel(channel_name_gen)
        channels.append(output_channel)
        last_op.add_output_channel(output_channel)
        _LOGGER.info("last op:{} add output channel".format(last_op.name))
        pack_func, unpack_func = None, None
        pack_func = response_op.pack_response_package
        actual_ops = virtual_ops
        for op in used_ops:
            if len(op.get_input_ops()) == 0:
                #set special features of the request op.
                #1.set unpack function.
                #2.set output channel.
                unpack_func = op.unpack_request_package
                op.add_output_channel(input_channel)
                continue
            actual_ops.append(op)
        for c in channels:
            _LOGGER.debug("Channel({}):\n\t- producers: {}\n\t- consumers: {}"
                          .format(c.name, c.get_producers(), c.get_consumers()))
        return (actual_ops, channels, input_channel, output_channel, pack_func,
                unpack_func)
    def build(self):
        """
        Interface for building one DAG outside.
        Args:
            None
        Returns:
            _input_channel: the channel of first OP
            _output_channel: the channel of last OP
            _pack_func: pack_response_package function of response_op
            _unpack_func: unpack_request_package function of request_op
        """
        (actual_ops, channels, input_channel, output_channel, pack_func,
         unpack_func) = self._build_dag(self._response_op)
        _LOGGER.info("[DAG] Succ build DAG")
        self._actual_ops = actual_ops
        self._channels = channels
        self._input_channel = input_channel
        self._output_channel = output_channel
        self._pack_func = pack_func
        self._unpack_func = unpack_func
        if self._tracer is not None:
            self._tracer.set_channels(self._channels)
        return self._input_channel, self._output_channel, self._pack_func, self._unpack_func
    def start(self):
        """
        Each OP starts a thread or process by _is_thread_op
        Args:
            None
        Returns:
            _threads_or_proces: threads or process list.
        """
        self._threads_or_proces = []
        for op in self._actual_ops:
            op.use_profiler(self._use_profile)
            op.set_tracer(self._tracer)
            if self._is_thread_op:
                self._threads_or_proces.extend(op.start_with_thread())
            else:
                self._threads_or_proces.extend(op.start_with_process())
        _LOGGER.info("[DAG] start")
        # not join yet
        return self._threads_or_proces
    def join(self):
        """
        All threads or processes join.
        Args:
            None
        Returns:
            None
        """
        for x in self._threads_or_proces:
            if x is not None:
                x.join()
    def stop(self):
        """
        Stopping and cleanning all channels.
        Args:
            None
        Returns:
            None
        """
        for chl in self._channels:
            chl.stop()
        for op in self._actual_ops:
            op.clean_input_channel()
            op.clean_output_channels()
| [
2,
220,
220,
15069,
357,
66,
8,
12131,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 1.911227 | 16,559 |
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pandas as pd
import numpy as np
import scipy
import scipy.stats
import time
from datetime import datetime
import calendar
from opmon_analyzer import constants
from opmon_analyzer.analyzer_conf import DataModelConfiguration
| [
2,
220,
383,
17168,
13789,
198,
2,
220,
15069,
357,
66,
8,
33448,
12,
35834,
5136,
329,
4225,
3575,
1799,
23555,
357,
22125,
1797,
8,
198,
2,
220,
15069,
357,
66,
8,
2177,
12,
42334,
24633,
666,
6188,
4482,
11416,
357,
49,
3539,
8... | 3.710327 | 397 |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import numpy as np
class HiddenLayer(DeepLayer):
"""
A hidden layer for a DBNN. This is based on a restricted Boltzmann machine.
"""
def __init__(self, owner, input_count, output_count):
"""
Create a hidden layer for a DBNN.
:param owner: The DBNN that this layer belongs to.
:param input_count: The number of visible units, the input.
:param output_count: The number of hidden units, the output.
"""
super(HiddenLayer, self).__init__(owner, input_count, output_count)
def binomial(self,n,p):
"""
Sample n times at probability p and return the count of how many samples were 1 (true).
:param n: The number of samples needed.
:param p: The probability of choosing 1 (true).
:return: The count of how many 1 (true)'s were sampled.
"""
if p < 0 or p > 1:
return 0
c = 0
for i in range(n):
r = np.random.rand()
if r < p:
c+=1
return c
def sigmoid(x):
"""
Compute the sigmoid (logisitic) for x.
:param x: The value to compute for.
:return: The result.
"""
return 1.0 / (1.0 + np.exp(-x))
def output(self, input, w, b):
"""
Calculate the sigmoid output for this layer.
:param input: The input values for this layer's visable.
:param w: Thw weights for this layer.
:param b: The bias value for this layer.
:return: The hidden values for this layer, the output.
"""
linear_output = 0.0
# First calculate the linear output. Similar to linear regression.
for j in range(self.input_count):
linear_output += w[j] * input[j]
linear_output += b
# Now return the signoid of the linear sum.
return HiddenLayer.sigmoid(linear_output)
def sample_h_given_v(self,v,h):
"""
Sample the hidden (h) output values, given the (v) input values. This is different than the output method
in that we are actually sampling discrete (0 or 1) values.
:param v: The visible units.
:param h: The hidden units, the count of how many times a true (1) was sampled.
"""
for i in range(self.output_count):
h[i] = self.binomial(1, self.output(v, self.weights[i], self.bias[i]))
class UnsupervisedTrainDBN:
"""
Unsupervised training for DBNN's. This class trains a single layer at a time.
"""
def __init__(self,network, level, training_input, learning_rate, k):
"""
Construct a trainer for upsupervised training for the DBNN.
:param network: The DBNN to train.
:param level: The level of the DBNN being trained.
:param training_input: The training input cases.
:param learning_rate: The learning rate.
:param k: The number of cycles to use per iteration.
"""
self.network = network
self.level = level
self.training_input = training_input
self.learning_rate = learning_rate
self.k = k
def iteration(self):
"""
Perform one iteration of unsupervised training on the DBN.
"""
layer_input = None
for row in self.training_input:
# Perform layer-wise sample up to the layer being trained.
for l in range(self.level+1):
if l == 0:
layer_input = row[:]
else:
# Get the previous input size, if we are on the first layer, this is the input count.
# Otherwise it is the input (visible) count from the previous layer.
if l == 1:
prev_layer_input_size = self.network.input_count
else:
prev_layer_input_size = self.network.layers[l - 1].input_count
# Copy the previous layer's input to a new array.
prev_layer_input = layer_input[:]
# Construct an array to hold the current layer's input
layer_input = [0.0] * self.network.layers[l].input_count
# Sample this layer's hidden neuron values (h), given previous layer's input (v).
# The output goes into layerInput, which is used in the next layer.
self.network.layers[l - 1].sample_h_given_v(prev_layer_input, layer_input)
# Perform up-down algorithm.
self.contrastive_divergence(self.network.rbm[self.level], layer_input, self.learning_rate, self.k)
def contrastive_divergence(self, rbm, input, lr, k):
"""
Perform contrastive divergence, also known as the up-down algorithm.
:param rbm: The RBM to use.
:param input: The input training pattern.
:param lr: The learning rate.
:param k: The number of cycles.
"""
# The positive gradient mean & samples (P) - Only for hidden (H)
mean_ph = [0.0] * rbm.hidden_count
sample_ph = [0.0] * rbm.hidden_count
# The negative gradient mean & samples (N) - For both visible (V) & hidden (H)
means_nv = [0.0] * rbm.visible_count
samples_nv = [0.0] * rbm.visible_count
means_nh = [0.0] * rbm.hidden_count
samples_nh = [0.0] * rbm.hidden_count
# Calculate (sample) meanPH and samplePH
self.sample_hv(rbm, input, mean_ph, sample_ph)
for step in range(self.k):
if step == 0:
self.gibbs_hvh(rbm, sample_ph, means_nv, samples_nv, means_nh, samples_nh);
else:
self.gibbs_hvh(rbm, samples_nh, means_nv, samples_nv, means_nh, samples_nh)
# Adjust the weights, based on calculated mean values.
# This uses the maximum likelihood learning rule.
for i in range(rbm.hidden_count):
for j in range(rbm.visible_count):
rbm.layer.weights[i][j] += lr *(mean_ph[i] * input[j] - means_nh[i] * samples_nv[j]) / len(input)
rbm.h_bias[i] += lr * (sample_ph[i] - means_nh[i]) / len(input)
# Adjust the biases for learning.
for i in range(rbm.visible_count):
rbm.v_bias[i] += lr * (input[i] - samples_nv[i]) / len(input)
def sample_hv(self, rbm, v0sample, mean, sample):
"""
Sample the hidden neurons (output), given the visible (input). Return the mean, and a sample, based on that
mean probability.
:param rbm The RBM to use.
:param v0Sample The input to the layer.
:param mean Output: mean value of each hidden neuron.
:param sample Output: sample, based on mean.
"""
for i in range(rbm.hidden_count):
# Find the mean.
mean[i] = self.prop_up(rbm, v0sample, rbm.layer.weights[i], rbm.h_bias[i])
# Sample, based on that mean.
sample[i] = rbm.binomial(1, mean[i])
def prop_up(self, rbm, v, w, b):
"""
Estimate the mean of a hidden neuron in an RBM. Propagate upward part, from visible to hidden.
:param rbm: The RBM to use.
:param v: The input (v), visible neurons.
:param w: The weights.
:param b: The bias.
:return: The mean.
"""
sum = 0.0
for j in range(rbm.visible_count):
sum += w[j] * v[j]
sum += b
return DeepBeliefNetwork.sigmoid(sum)
def gibbs_hvh(self, rbm, sample_h0, means_nv, samples_nv, means_nh, samples_nh):
"""
Perform Gibbs sampling. Hidden to visible to hidden.
:param rbm: The RBM to use.
:param sample_h0: The hidden samples.
:param means_nv: Output: means for the visible (v) neurons.
:param samples_nv: Output: samples for the visible (v) neurons.
:param means_nh: Output: means for the hidden (h) neurons.
:param samples_nh: Output: samples for the hidden (h) neurons.
"""
self.sample_vh(rbm, sample_h0, means_nv, samples_nv)
self.sample_hv(rbm, samples_nv, means_nh, samples_nh)
def sample_vh(self, rbm, sample_h0, mean, sample):
"""
Sample the visible (input), given the hidden neurons (output). Return the mean, and a sample, based on that
mean probability.
:param rbm: The RBM to use.
:param sample_h0: Hidden (h) samples.
:param mean: Output: Visible (v) mean.
:param sample: Output: Visible (v) sample.
"""
for i in range(rbm.visible_count):
mean[i] = self.prop_down(rbm, sample_h0, i, rbm.v_bias[i])
sample[i] = rbm.binomial(1, mean[i])
def prop_down(self, rbm, h, i, b):
"""
Estimate the mean of a visible neuron in an RBM. Propagate downward part, from hidden to visible.
:param rbm: The RBM to use.
:param h: The hidden neurons.
:param i: The visible neuron to use.
:param b: Bias value.
:return: The estimated mean
"""
sum = 0.0
for j in range(rbm.hidden_count):
sum += rbm.layer.weights[j][i] * h[j]
sum += b
return DeepBeliefNetwork.sigmoid(sum) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
220,
220,
220,
35941,
9345,
329,
27411,
198,
220,
220,
220,
14701,
513,
25,
10766,
18252,
290,
47986,
27862,
198,
220,
220,
220,
11361,
10628,
198,
220,
220,
220,
2638,
137... | 2.304328 | 4,436 |
from botocore.client import BaseClient
from ..key_store import KeyStore
from ..raw import CryptoBytes
| [
6738,
10214,
420,
382,
13,
16366,
1330,
7308,
11792,
198,
198,
6738,
11485,
2539,
62,
8095,
1330,
7383,
22658,
198,
6738,
11485,
1831,
1330,
36579,
45992,
628
] | 3.851852 | 27 |
import numpy as np
from createnodes import createnodes
from facenumbering import facenumbering
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1827,
268,
4147,
1330,
1827,
268,
4147,
198,
6738,
1777,
268,
4494,
278,
1330,
1777,
268,
4494,
278,
198
] | 3.653846 | 26 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 03:28:27 2019
@author: carl
"""
from octavvs import decomposition
if __name__ == '__main__':
decomposition.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
8621,
1596,
7643,
25,
2078,
25,
1983,
13130,
198,
198,
31,
9800,
25,
1097,
75,
198... | 2.519481 | 77 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
if __name__ == '__main__':
summary(SeNet(3), (3, 28, 28), device="cpu") | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
49736,
1330,
10638,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 2.983051 | 59 |
"""
ffs.contrib
Make it a package.
"""
| [
37811,
198,
487,
82,
13,
3642,
822,
198,
198,
12050,
340,
257,
5301,
13,
198,
37811,
198
] | 2.352941 | 17 |
import netdiff
import sys
if __name__ == "__main__":
eval_check_list = [ # list all functions allowed for eval() here
'mlag_status_from_dut',
]
args = netdiff.tools.parsers.src_dst_parser()
in_data = netdiff.read._from(args.src_format, args.src_filename)
if args.case in eval_check_list:
if not isinstance(in_data, str):
out_data = eval(args.case + f'({in_data})')
else:
out_data = eval(args.case + f'("{in_data}")')
netdiff.write.to_file(args.dst_filename, args.dst_format, out_data)
else:
sys.exit(f'ERROR: Specified case {args.case} is not supported!') | [
11748,
2010,
26069,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5418,
62,
9122,
62,
4868,
796,
685,
220,
1303,
1351,
477,
5499,
3142,
329,
5418,
3419,
994,
198,
220,
... | 2.238754 | 289 |
import sys
import os
from os import listdir
from os.path import isfile, join
mypath="./"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
o = open("data.dat", "w")
for f in sorted(onlyfiles):
if f.find("combination_of_observables")>=0 and f.find(".out")>=0:
g=f.split(".")[0]
s=(int(g.split("_")[3]))*2
m=g.split("_")[4]
l=g.split("_")[5]
fi = open(f, "r")
v0=0
v1=0
v2=0
for x in fi:
if x.find("unique pauliops")==0:
v0=x.split()[2]
if x.find("number of cliques")==0:
v1=x.split()[3]
# if x.find("groups with https")>=0:
# v2=x.split()[4]
fi.close()
o.write(f+" "+str(s)+" "+m+" "+l+" "+str(v0)+" "+str(v1)+"\n ")
#+str(v1)+" "+str(v2)+"\n")
o.close()
| [
11748,
25064,
198,
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
1820,
6978,
28,
1911,
30487,
198,
8807,
16624,
796,
685,
69,
329,
277,
287,
1351,
15908,
7,
1820,
6... | 1.700397 | 504 |
## wstat.py - workflow status summary from Parsl monitoring database
## The idea is not to replace the "sqlite3" interactive command or the
## Parsl web interface, but to complement them to create some useful
## interactive summaries specific to Parsl workflows.
## Python dependencies: sqlite3, tabulate
## T.Glanzman - Spring 2019
__version__ = "0.8.3"
pVersion='0.8.0' ## Parsl version
import sys,os
import sqlite3
from tabulate import tabulate
import datetime
import argparse
### class pmon - interpret Parsl monitoring database
####################################################
##
## M A I N
##
####################################################
if __name__ == '__main__':
reportTypes = ['fullSummary','shortSummary','workflowHistory']
## Parse command line arguments
parser = argparse.ArgumentParser(description='A simple Parsl status reporter. Available reports include:'+str(reportTypes))
parser.add_argument('reportType',help='Type of report to display (default=%(default)s)',nargs='?',default='fullSummary')
parser.add_argument('-f','--file',default='monitoring.db',help='name of Parsl monitoring database file (default=%(default)s)')
parser.add_argument('-r','--runnum',type=int,help='Specific run number of interest (default = latest)')
parser.add_argument('-s','--schemas',action='store_true',default=False,help="only print out monitoring db schema for all tables")
parser.add_argument('-v','--version', action='version', version=__version__)
args = parser.parse_args()
print('\nwstat - Parsl workflow status (version ',__version__,', written for Parsl version '+pVersion+')\n')
## Create a Parsl Monitor object
m = pmon(dbfile=args.file)
## Print out table schemas only
if args.schemas:
## Fetch a list of all tables in this database
tableList = m.getTableList()
print('Tables: ',tableList)
## Print out schema for all tables
for table in tableList:
schema = m.getTableSchema(table)
print(schema[0][0])
pass
sys.exit()
## Check validity of run number
if not args.runnum == None and (int(args.runnum) > m.runmax or int(args.runnum) < m.runmin):
print('%ERROR: Requested run number, ',args.runnum,' is out of range (',m.runmin,'-',m.runmax,')')
sys.exit(1)
## Print out requested report
if args.reportType not in reportTypes: sys.exit(1)
if args.reportType == 'fullSummary':
m.fullSummary(runnum=args.runnum)
if args.reportType == 'shortSummary':
m.shortSummary(runnum=args.runnum)
if args.reportType == 'workflowHistory':
m.workflowHistory()
## Done
sys.exit()
| [
2235,
266,
14269,
13,
9078,
532,
30798,
3722,
10638,
422,
23042,
75,
9904,
6831,
198,
198,
2235,
383,
2126,
318,
407,
284,
6330,
262,
366,
25410,
578,
18,
1,
14333,
3141,
393,
262,
198,
2235,
23042,
75,
3992,
7071,
11,
475,
284,
168... | 2.815524 | 992 |
# -*- coding: utf-8 -*-
"""
Su command module.
"""
__author__ = 'Agnieszka Bylica, Marcin Usielski, Michal Ernst'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = 'agnieszka.bylica@nokia.com, marcin.usielski@nokia.com, michal.ernst@nokia.com'
import re
from moler.cmd.commandchangingprompt import CommandChangingPrompt
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
COMMAND_OUTPUT_su = """
xyz@debian:~$ su
Password:
root@debian:/home/xyz#"""
COMMAND_KWARGS_su = {
'login': None, 'options': None, 'password': '1234', 'expected_prompt': 'root@debian:/home/xyz#'
}
COMMAND_RESULT_su = {'RESULT': []}
COMMAND_OUTPUT_su_option = """
xyz@debian:~$ su -c 'ls' xyz
Password:
Dokumenty Pobrane Publiczny Pulpit Szablony Wideo
xyz@debian:~$ """
COMMAND_KWARGS_su_option = {
'login': 'xyz', 'options': "-c 'ls'", 'password': '1234'
}
COMMAND_RESULT_su_option = {'RESULT': ['Dokumenty Pobrane Publiczny Pulpit Szablony Wideo']}
COMMAND_OUTPUT_newline_after_prompt = """
xyz@debian:~$ su
Password:
root@debian:/home/xyz# """
COMMAND_KWARGS_newline_after_prompt = {
'login': None, 'options': None, 'password': '1234', 'expected_prompt': 'root@debian:/home/xyz#',
'allowed_newline_after_prompt': True
}
COMMAND_RESULT_newline_after_prompt = {'RESULT': []}
COMMAND_OUTPUT_newline_after_prompt_with_prompt_change = """
xyz@debian:~$ su
Password:
root@debian:/home/xyz
$ export PS1="${PS1::-4} #
root@debian:/home/xyz # """
COMMAND_KWARGS_newline_after_prompt_with_prompt_change = {
'login': None, 'options': None, 'password': '1234', 'expected_prompt': 'root@debian:/home/xyz',
'allowed_newline_after_prompt': True, 'set_prompt': r'export PS1="${PS1::-4} #"',
}
COMMAND_RESULT_newline_after_prompt_with_prompt_change = {'RESULT': [r'$ export PS1="${PS1::-4} #']}
COMMAND_OUTPUT_set_timeout = """
xyz@debian:~$ su
Password:
root@debian:/home/xyz# export TMOUT="2678400"
root@debian:/home/xyz# """
COMMAND_KWARGS_set_timeout = {
'login': None, 'options': None, 'password': '1234', 'expected_prompt': 'root@debian:/home/xyz#',
'set_timeout': r'export TMOUT="2678400"',
}
COMMAND_RESULT_set_timeout = {'RESULT': []}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
5606,
3141,
8265,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
32,
4593,
444,
89,
4914,
2750,
677,
64,
11,
13067,
259,
4021,
72,
1424,
4106,
11,
2... | 2.38913 | 920 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Splits the LD reference panel into multiple subfiles, to greatly speed up
# GCTA-cojo. Each subfile is a 3-Mb window, so that for any top_loci variant,
# we can use a subfile that has the top_loci variant +- 1 Mb.
import pandas as pd
import os
import subprocess as sp
import argparse
def parse_args():
""" Load command line args """
parser = argparse.ArgumentParser()
parser.add_argument('--path',
metavar="<string>",
help='Path to LD reference; {chrom} in place of each chromosome name',
type=str,
required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
13341,
896,
262,
27178,
4941,
6103,
656,
3294,
850,
16624,
11,
284,
9257,
2866,
510,
198,
2,
402,
4177,
... | 2.362229 | 323 |
from .device_simulator import DeviceSimulator
from .server_simulator import SingleDeviceServerSimulator
| [
6738,
764,
25202,
62,
14323,
8927,
1330,
16232,
8890,
8927,
198,
6738,
764,
15388,
62,
14323,
8927,
1330,
14206,
24728,
10697,
8890,
8927,
198
] | 4.333333 | 24 |
from django.conf import settings
from django.core.cache import cache
from django.db import models
from collections import OrderedDict
RUOLI = [('ND', 'ND - Personale non docente'),
('DC', 'DC - Dirigente a contratto'),
('NB', 'NB - ND Centro Residenziale'),
('D0', 'D0 - Dirigenti Superiori'),
('NM', 'NM - Non docenti a tempo det.-Tesoro'),
('NG', 'NG - Addetti ufficio stampa'),
('PO', 'PO - Professori Ordinari'),
('PA', 'PA - Professori Associati'),
('RU', 'RU - Ricercatori Universitari'),
('RM', 'RM - Ricercatori a tempo det-Tesoro'),
('RD', 'RD - Ricercatori Legge 240/10 - t.det.')]
CARRIERA_FIELDS_MAP = {'descr_aff_org': 'ds_aff_org',
'descr_sede': 'ds_sede',
'descr_inquadramento': 'ds_inquadr',
'descr_profilo': 'ds_profilo',
'attivita': 'attivita',
'data_inizio_rapporto': 'dt_rap_ini',
'data_inizio': 'dt_ini',
'data_fine': 'dt_fin',
'inquadramento': 'inquadr',
'ruolo': 'ruolo'}
# per i docenti invence rimuoviamo gli attributi inutili e aggiungiamo quelli specifici
CARRIERA_DOCENTE_FIELDS_MAP = CARRIERA_FIELDS_MAP.copy()
del CARRIERA_DOCENTE_FIELDS_MAP['descr_profilo']
CARRIERA_DOCENTE_FIELDS_MAP.update({"aff_org": "aff_org",
"ds_ruolo": "ds_ruolo",
"ds_attivita": "ds_attivita",
"dt_avanz" : "dt_avanz",
"dt_prox_avanz" : "dt_prox_avanz",
"cd_sett_concors" : "cd_sett_concors",
"ds_sett_concors" : "ds_sett_concors",
"cd_ssd" : "cd_ssd",
"ds_ssd" : "ds_ssd",
"area_ssd" : "area_ssd",
"ds_area_ssd" : "ds_area_ssd",
"scatti" : "scatti",
"inquadramento": "inquadr",
"descr_inquadramento": "ds_inquadr"})
INCARICHI_FIELDS_MAP = {'data_doc': 'data_doc',
'num_doc': 'num_doc',
'tipo_doc': 'tipo_doc',
'descr_tipo': 'des_tipo',
'data_inizio': 'dt_ini',
'data_fine': 'dt_fin',
'relaz_accomp': 'relaz_accomp',
'ruolo': 'ruolo'}
class V_ANAGRAFICA(models.Model):
"""
Configurazione Oracle view
gdm = V_ANAGRAFICA.objects.get(matricola='17403'.zfill(6))
"""
nome = models.CharField(max_length=100, blank=True, null=True)
cognome = models.CharField(max_length=100, blank=True, null=True)
matricola = models.CharField(max_length=6, primary_key=True)
email = models.EmailField(max_length=100, blank=True, null=True)
cod_fis = models.CharField('Codice Fiscale', max_length=16, blank=False, null=False)
class V_RUOLO(models.Model):
"""
Configurazione Oracle view
"""
ruolo = models.CharField(max_length=4, primary_key=True)
comparto = models.CharField(max_length=1, blank=True, null=True)
tipo_ruolo = models.CharField(max_length=2, blank=False, null=False)
descr = models.CharField(max_length=254, blank=True, null=True)
is_docente = models.NullBooleanField(default=False)
# non ho chiavi primarie dalle viste in 'native' mode.
# mentre in replica posso sfruttarle
if settings.CSA_MODE == settings.CSA_REPLICA:
class V_CARRIERA(models.Model):
"""
Configurazione Oracle view
"""
matricola = models.CharField(max_length=6)
ds_aff_org = models.CharField(max_length=254, blank=True, null=True)
ds_sede = models.CharField(max_length=254, blank=True, null=True)
ds_inquadr = models.CharField(max_length=254, blank=True, null=True)
ds_profilo = models.CharField(max_length=254, blank=True, null=True)
ruolo = models.CharField(max_length=254, blank=True, null=True)
inquadr = models.CharField(max_length=254, blank=True, null=True)
dt_ini = models.DateTimeField(blank=True, null=True)
dt_fin = models.DateTimeField(blank=True, null=True)
dt_rap_ini = models.DateTimeField(blank=True, null=True)
attivita = models.CharField(max_length=254, blank=True, null=True)
class V_CARRIERA_DOCENTI(models.Model):
"""
Mappatura su Oracle view
"""
matricola = models.CharField(max_length=6)
ds_aff_org = models.CharField(max_length=254, blank=True, null=True)
ds_sede = models.CharField(max_length=254, blank=True, null=True)
ds_inquadr = models.CharField(max_length=254, blank=True, null=True)
ruolo = models.CharField(max_length=254, blank=True, null=True)
inquadr = models.CharField(max_length=254, blank=True, null=True)
dt_ini = models.DateTimeField(blank=True, null=True)
dt_fin = models.DateTimeField(blank=True, null=True)
dt_rap_ini = models.DateTimeField(blank=True, null=True)
attivita = models.CharField(max_length=254, blank=True, null=True)
#tipici dei docenti
aff_org = models.CharField(max_length=254, blank=True, null=True)
ds_attivita = models.CharField(max_length=254, blank=True, null=True)
ds_ruolo = models.CharField(max_length=254, blank=True, null=True)
dt_avanz = models.DateField(blank=True, null=True)
dt_prox_avanz = models.DateField(blank=True, null=True)
cd_sett_concors = models.CharField(max_length=10, blank=True, null=True)
ds_sett_concors = models.CharField(max_length=254, blank=True, null=True)
cd_ssd = models.CharField(max_length=12, blank=True, null=True)
ds_ssd = models.CharField(max_length=100, blank=True, null=True)
area_ssd = models.CharField(max_length=2, blank=True, null=True)
ds_area_ssd = models.CharField(max_length=100, blank=True, null=True)
scatti = models.BooleanField(default=0)
class V_INCARICO_DIP(models.Model):
"""
Configurazione Oracle view per incarichi
"""
matricola = models.CharField(max_length=6)
ruolo = models.CharField(max_length=254, blank=True, null=True)
relaz_accomp = models.CharField(max_length=1024, blank=True, null=True)
des_tipo = models.CharField(max_length=254, blank=True, null=True)
tipo_doc = models.CharField(max_length=254, blank=True, null=True)
num_doc = models.CharField(max_length=254, blank=True, null=True)
data_doc = models.DateTimeField(blank=True, null=True)
dt_ini = models.DateTimeField(blank=True, null=True)
dt_fin = models.DateTimeField(blank=True, null=True)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
628,
198,
49,
52,
3535,
40,
796,
685,
1... | 1.975795 | 3,553 |
import argparse
import re
import json
from .analyzer import TuroyoAnalyzer
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analyze Turoyo words and sentences from command line.\n'
'Usage: python3 -m uniparser-turoyo (Turoyo sentence here.)')
parser.add_argument('text', default='', help='Text in Turoyo (Latin-based alphabet)')
args = parser.parse_args()
text = args.text
main(text)
| [
11748,
1822,
29572,
198,
11748,
302,
198,
11748,
33918,
198,
6738,
764,
38200,
9107,
1330,
309,
1434,
8226,
37702,
9107,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
2957... | 2.487047 | 193 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.backend.python.subsystems import setup
from pants.base.deprecated import warn_or_error
| [
2,
15069,
33448,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
... | 3.814286 | 70 |
import lib
from lib.common import calc_stat
from collections import OrderedDict
runs = lib.get_runs(["cogs_trafo"])
runs = lib.common.group(runs, ['transformer.variant'])
stats = calc_stat(runs, lambda k: k.endswith("/accuracy/total"))
runs = lib.get_runs(["pcfg_nosched_productivity"])
runs = lib.common.group(runs, ['transformer.variant'])
pcfg_gen_stats = calc_stat(runs, lambda k: k.endswith("/accuracy/total"))
runs = lib.get_runs(["pcfg_nosched_iid"])
runs = lib.common.group(runs, ['transformer.variant'])
pcfg_iid_stats = calc_stat(runs, lambda k: k.endswith("/accuracy/total"))
columns = OrderedDict()
columns["IID Validation"] = ["val"]
columns["Gen. Test"] = ["gen"]
d = OrderedDict()
d["Token Emb. Up."] = "transformer.variant_opennmt"
d["No scaling"] = "transformer.variant_noscale"
d["Pos. Emb. Down."] = "transformer.variant_scaledinit"
print(stats)
print(" & & " + " & ".join(columns) + "\\\\")
print("\\midrule")
print("\\parbox[t]{3mm}{\\multirow{3}{*}{\\rotatebox[origin=c]{90}{\\small COGS}}}")
print_table({vname: [stats[vcode][f"validation/{k[0]}/accuracy/total"].get() for k in columns.values()] for vname, vcode in d.items()})
print("\\midrule")
print("\\parbox[t]{3mm}{\\multirow{3}{*}{\\rotatebox[origin=c]{90}{\\small PCFG}}}")
print_table({vname: [pcfg_iid_stats[vcode]["validation/val/accuracy/total"].get(), pcfg_gen_stats[vcode]["validation/val/accuracy/total"].get()] for vname, vcode in d.items()})
| [
11748,
9195,
198,
6738,
9195,
13,
11321,
1330,
42302,
62,
14269,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
48381,
796,
9195,
13,
1136,
62,
48381,
7,
14692,
66,
18463,
62,
9535,
6513,
8973,
8,
198,
48381,
796,
9195,
13,
... | 2.531579 | 570 |
# -*- coding: utf-8 -*-
import numpy as np
import torch as t
import torch.nn as nn
from torch import FloatTensor as FT
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
355,
256,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
28034,
1330,
48436,
51,
22854,
355,
19446,
6... | 2.755556 | 45 |
import sys
import unittest
from functools import reduce
from http import HTTPStatus
from unittest.mock import call, patch
import cloudfoundry_client.main.main as main
from abstract_test_case import AbstractTestCase
from fake_requests import mock_response
| [
11748,
25064,
198,
11748,
555,
715,
395,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
2638,
1330,
14626,
19580,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
869,
11,
8529,
198,
198,
11748,
6279,
9275,
563,
62,
16366,
13,
12417,... | 3.779412 | 68 |
#!/usr/bin/env python
# Imports
import aiohttp
import logging
import json
logger = logging.getLogger('aprs-service')
class DarkSky:
"""Class to handle querying the DarkSky API."""
async def _wx(self, lat, lng):
"""Query the DarkSky API."""
logger.info("DarkSky request for {}, {}".format(lat, lng))
# Build URL
url = "https://api.forecast.io/forecast/{}/{},{}?units=ca".format(
self._apikey,
lat,
lng
)
# Query DarkSky API
async with aiohttp.ClientSession() as session:
async with session.get(
url
) as response:
return await response.text()
async def wx(self, lat, lng):
"""Get weather for a given lat/lng."""
response = await self._wx(lat, lng)
try:
weather_data = json.loads(response)
except Exception as e:
logger.error(e)
return False
# Build response
info = {}
info['summary'] = weather_data['currently']['summary']
info['temp_c'] = weather_data['currently']['temperature']
info['humidity'] = weather_data['currently']['humidity'] * 100
info['windspeed'] = weather_data['currently']['windSpeed']
info['windGust'] = weather_data['currently']['windGust']
wind_degrees = weather_data['currently']['windBearing']
wind_cardinal = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N')
wind_res = round(wind_degrees / 22.5)
info['wind_dir'] = wind_cardinal[int(wind_res)]
info['feelslike_c'] = weather_data['currently']['apparentTemperature']
return '{summary} {temp_c:.1f}C Hu:{humidity:.1f}% W:f/{wind_dir}@{windspeed:.1f}kmh(g:{windGust:.1f}kmh) FL:{feelslike_c:.1f}C'.format(**info)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
1846,
3742,
198,
11748,
257,
952,
4023,
198,
11748,
18931,
198,
11748,
33918,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
499,
3808,
12,
15271,
11537,
628,
... | 2.12709 | 897 |
from html.parser import HTMLParser
from pathlib import Path
from typing import Union, TypeVar
import jinja2
from onemsdk.config import get_static_dir
from onemsdk.exceptions import MalformedHTMLException, ONEmSDKException
from onemsdk.parser.node import Node
from onemsdk.parser.tag import get_tag_cls, Tag
__all__ = ['load_html', 'load_template']
StackT = TypeVar('StackT', bound=Stack)
_jinja_env = None
| [
6738,
27711,
13,
48610,
1330,
11532,
46677,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4479,
11,
5994,
19852,
198,
198,
11748,
474,
259,
6592,
17,
198,
198,
6738,
319,
5232,
34388,
13,
11250,
1330,
651,
62,
12708,
62,
... | 3.036232 | 138 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-03 19:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
17,
319,
2177,
12,
2919,
12,
3070,
678,
25,
2919,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.73913 | 69 |
from django.shortcuts import render, redirect
# Create your views here.
from django.http import HttpResponse
from website.models import *
from django.contrib import messages
from passlib.hash import pbkdf2_sha256
from . import functions
from django.core.exceptions import ObjectDoesNotExist
# 0 Admin, 1 Customer, 2 Merchant, 3 Advertiser
## ----- POST product
# ------- End
# def payment_detail(request):
# if check_rule(request) == 0:
# return redirect('/merchant/login')
# return render(request,'merchant/manager_payment/manager_payment_detail.html')
# ---- Service
# ------ End
### Ly Thanh
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
3052,
13,
27530,
1330,
1635,
198,
198,
6738,
42625,
14208,
... | 3.182741 | 197 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlite3
import click
click.disable_unicode_literals_warning = True
sql = """create table if not exists users(
id integer primary key autoincrement,
name varchar(30)
)
"""
@click.group()
@cli.command(short_help="initialize database and tables")
@cli.command(short_help="fill records to database")
@click.option("--total", "-t", default=300, help="fill data for example")
if __name__ == "__main__":
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
44161,
578,
18,
198,
11748,
3904,
198,
198... | 2.873626 | 182 |
#!/usr/bin/python3
from PyQt5.QtWidgets import QApplication, QLabel
app = QApplication([])
label = QLabel(' Hello World !!!! ')
label.show()
app.exec_()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
33986,
198,
198,
1324,
796,
1195,
23416,
26933,
12962,
198,
18242,
796,
1195,
33986,
10786,
220,
18... | 2.57377 | 61 |
# Generated by Django 4.0.2 on 2022-03-14 10:09
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
17,
319,
33160,
12,
3070,
12,
1415,
838,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from core.logger import Logger
from db_spec import DBSPEC
from datetime import datetime
import pytz
from tzwhere import tzwhere
import csv
logger = Logger()
DB = None
GEO = tzwhere.tzwhere(forceTZ=True)
| [
6738,
4755,
13,
6404,
1362,
1330,
5972,
1362,
198,
6738,
20613,
62,
16684,
1330,
20137,
48451,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
12972,
22877,
198,
6738,
256,
89,
3003,
1330,
256,
89,
3003,
198,
11748,
269,
21370,
198... | 2.890411 | 73 |
# Generated by Django 3.0.8 on 2020-07-11 12:57
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
23,
319,
12131,
12,
2998,
12,
1157,
1105,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
""""Argo Workflow for building the notebook-server-jupyter-pytorch OCI images using Kaniko"""
from kubeflow.kubeflow.cd import config, kaniko_builder
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = kaniko_builder.Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build(dockerfile="components/example-notebook-servers/jupyter-pytorch/cpu.Dockerfile",
context="components/example-notebook-servers/jupyter-pytorch/",
destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH,
second_dockerfile="components/example-notebook-servers/jupyter-pytorch/cuda.Dockerfile",
second_destination=config.NOTEBOOK_SERVER_JUPYTER_PYTORCH_CUDA,
mem_override="8Gi",
deadline_override=6000)
| [
15931,
15931,
3163,
2188,
5521,
11125,
329,
2615,
262,
20922,
12,
15388,
12,
73,
929,
88,
353,
12,
9078,
13165,
354,
24775,
40,
4263,
1262,
14248,
12125,
37811,
198,
6738,
479,
549,
891,
9319,
13,
74,
549,
891,
9319,
13,
10210,
1330,
... | 2.21322 | 469 |
# 9.4 Write a program to read through the mbox-short.txt and figure out who has
# the sent the greatest number of mail messages. The program looks for 'From '
# lines and takes the second word of those lines as the person who sent the
# mail. The program creates a Python dictionary that maps the sender's mail
# address to a count of the number of times they appear in the file. After the
# dictionary is produced, the program reads through the dictionary using a
# maximum loop to find the most prolific committer.
name = input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
mailcount = dict()
for line in handle:
if not line.startswith('From '):
continue
line = line.split()
sender = line[1]
mailcount[sender] = mailcount.get(sender,0) + 1
#print(mailcount)
bigsender = "none"
bigcounter = 0
for sender in mailcount:
# print(mailcount[sender])
if mailcount[sender] > bigcounter:
bigcounter = mailcount[sender]
bigsender = sender
print(bigsender, bigcounter)
| [
2,
860,
13,
19,
19430,
257,
1430,
284,
1100,
832,
262,
285,
3524,
12,
19509,
13,
14116,
290,
3785,
503,
508,
468,
198,
2,
262,
1908,
262,
6000,
1271,
286,
6920,
6218,
13,
383,
1430,
3073,
329,
705,
4863,
705,
198,
2,
3951,
290,
... | 3.141566 | 332 |
from __future__ import annotations
import napari
from ...layers.utils.interactivity_utils import (
orient_plane_normal_around_cursor,
)
from ...layers.utils.layer_utils import register_layer_action
from ...utils.translations import trans
from ._image_constants import Mode
from .image import Image
@Image.bind_key('z')
@register_image_action(trans._('Orient plane normal along z-axis'))
@Image.bind_key('y')
@register_image_action(trans._('orient plane normal along y-axis'))
@Image.bind_key('x')
@register_image_action(trans._('orient plane normal along x-axis'))
@register_image_action(trans._('orient plane normal along view direction'))
@Image.bind_key('o')
@Image.bind_key('Space')
def hold_to_pan_zoom(layer):
"""Hold to pan and zoom in the viewer."""
if layer._mode != Mode.PAN_ZOOM:
# on key press
prev_mode = layer.mode
layer.mode = Mode.PAN_ZOOM
yield
# on key release
layer.mode = prev_mode
@register_image_action(trans._('Transform'))
@register_image_action(trans._('Pan/zoom'))
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
25422,
2743,
198,
198,
6738,
2644,
75,
6962,
13,
26791,
13,
3849,
21797,
62,
26791,
1330,
357,
198,
220,
220,
220,
11367,
62,
14382,
62,
11265,
62,
14145,
62,
66,
21471,
11,
198,
... | 2.791667 | 384 |