hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
51b2b4ac9db120f311d239b140062958cf771fe6
2,898
py
Python
app.py
Yaamboo/suomipelit-api
fd5d0058d4820667dd78669207ae7646055239f4
[ "Apache-2.0" ]
null
null
null
app.py
Yaamboo/suomipelit-api
fd5d0058d4820667dd78669207ae7646055239f4
[ "Apache-2.0" ]
null
null
null
app.py
Yaamboo/suomipelit-api
fd5d0058d4820667dd78669207ae7646055239f4
[ "Apache-2.0" ]
null
null
null
from flask import abort, Flask, jsonify import sqlite3 import re from Suomipelit.jsonencoder import OmaEncoder from Suomipelit.models import Peli, Peliarvostelu, Kappale, Kuva app = Flask(__name__) app.json_encoder = OmaEncoder @app.route("/api/pelit") def pelit(): return jsonify(lataa_pelit()) def lataa_pelit(): connection = sqlite3.connect("suomipelit.db") connection.row_factory = sqlite3.Row c = connection.cursor() pelit = [] for pelirivi in c.execute("SELECT * FROM pelit order by id asc LIMIT 0,5"): peli = muodostaPeli(pelirivi, c) pelit.append(peli) # print(kappaleet) return pelit @app.route("/api/pelit/<id>") def peli(id): #id voi olla vain numeroita clean_id = int(id) peli = lataa_peli(clean_id) if peli is not None: return jsonify(peli) abort(404) def lataa_peli(id): connection = sqlite3.connect("suomipelit.db") connection.row_factory = sqlite3.Row c = connection.cursor() c.execute("select * from pelit where id = ?", (id,)) peli = c.fetchone() if peli is not None: return muodostaPeli(peli, connection) return None def muodostaPeli(pelirivi, connection): peli = Peli(pelirivi["id"]) peli.nimi = pelirivi["nimi"] peli.tekija = pelirivi["tekija"] peli.url = pelirivi["url"] peli.kuvaus = pelirivi["kuvaus"] peli.vaatimukset = pelirivi["vaatimukset"] pelikuva = Kuva(pelirivi["id"]) pelikuva.asemointi = None pelikuva.kuvateksti = None if pelirivi["kuva_iso"] != None and len(pelirivi["kuva_iso"]) > 0: pelikuva.tiedosto = pelirivi["kuva_iso"] else: pelikuva.tiedosto = pelirivi["kuva"] peli.kuva = pelikuva if pelirivi["uusittu"] == 1: arvostelu = Peliarvostelu() arvostelu.julkaistu = pelirivi["paivays"] arvostelu.kirjoittaja = pelirivi["user"] kappaleet = [] for rivi in connection.cursor().execute("SELECT * FROM kappale where artikkeli_id = ? 
and kaytto='PELI' order by artikkeli_id asc, sivu asc, jarjestys", (pelirivi["id"],)): kappale = Kappale(rivi["id"], rivi["otsikko"], rivi["teksti"]) kappale.artikkeliId = rivi["artikkeli_id"] kappale.sivu = rivi["sivu"] if len(rivi["kuva"]) > 0: kuva = Kuva(rivi["id"]) if rivi["kuva_iso"] != None and len(rivi["kuva_iso"]) > 0: kuva.tiedosto = rivi["kuva_iso"] else: kuva.tiedosto = rivi["kuva"] kuva.asemointi = rivi["asemointi"] kuva.kuvateksti = rivi["kuvateksti"] else: kuva = None kappale.kuva = kuva kappaleet.append(kappale) arvostelu.kappaleet = kappaleet peli.arvostelu = arvostelu else: peli.arvostelu = None return peli
28.693069
180
0.613182
332
2,898
5.286145
0.268072
0.023932
0.02906
0.018234
0.164103
0.118519
0.094587
0.094587
0.094587
0.094587
0
0.006564
0.263975
2,898
100
181
28.98
0.816221
0.014493
0
0.157895
0
0.013158
0.141354
0
0
0
0
0
0
1
0.065789
false
0
0.065789
0.013158
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51b31553ef083d8d41b49feb74b1b78a77ce9832
1,563
py
Python
ices_erf32_generator_cli.py
sharkdata/ices
e529a2636f06b942d39b57897ca17023f76fb80d
[ "MIT" ]
null
null
null
ices_erf32_generator_cli.py
sharkdata/ices
e529a2636f06b942d39b57897ca17023f76fb80d
[ "MIT" ]
null
null
null
ices_erf32_generator_cli.py
sharkdata/ices
e529a2636f06b942d39b57897ca17023f76fb80d
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- coding:utf-8 -*- # # Copyright (c) 2021-present SMHI, Swedish Meteorological and Hydrological Institute # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). import pathlib import click import ices_erf32_generator_main global ices_config @click.command() @click.option( "--row", default=0, prompt="Execute row", help="Row number used to select which YAML-file to generate ICES-Erf32 from.", ) def run_erf32_generator_command(row): """ """ global ices_erf32_config if (row < 0) or (row > len(ices_erf32_config)): print("\n\nERROR: Wrong value. Please try again.\n\n") return generator = ices_erf32_generator_main.IcesErf32Generator() if row == 0: for config_file in ices_erf32_config: generator.generate_erf32(config_file) else: generator.generate_erf32(ices_erf32_config[row - 1]) if __name__ == "__main__": """ """ global ices_erf32_config ices_erf32_config = [] for file_path in pathlib.Path("erf32_config").glob("ices_erf32_*.yaml"): ices_erf32_config.append(str(file_path)) ices_erf32_config = sorted(ices_erf32_config) # Print before command. print("\n\nICES ERF 3.2 generator.") print("-----------------------------") print("Select row number. Press enter to run all.") print("Press Ctrl-C to terminate.\n") for index, row in enumerate(ices_erf32_config): print(index + 1, " ", row) print("") # Execute command. run_erf32_generator_command()
29.490566
84
0.666027
206
1,563
4.81068
0.42233
0.127144
0.151362
0.060545
0
0
0
0
0
0
0
0.042197
0.196417
1,563
52
85
30.057692
0.746815
0.152271
0
0.055556
0
0
0.226994
0.022239
0
0
0
0
0
1
0.027778
false
0
0.083333
0
0.138889
0.194444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51b33fd5e4e90e4cc52168abb05d8d58cd914c5b
752
py
Python
topics/Bitwise/Missing_Number_268/[1_Bitwise_XOR]_Missing_Number_268.py
DmitryNaimark/leetcode-solutions-python
16af5f3a9cb8469d82b14c8953847f0e93a92324
[ "MIT" ]
1
2019-10-31T11:06:23.000Z
2019-10-31T11:06:23.000Z
topics/Bitwise/Missing_Number_268/[1_Bitwise_XOR]_Missing_Number_268.py
DmitryNaimark/leetcode-solutions-python
16af5f3a9cb8469d82b14c8953847f0e93a92324
[ "MIT" ]
null
null
null
topics/Bitwise/Missing_Number_268/[1_Bitwise_XOR]_Missing_Number_268.py
DmitryNaimark/leetcode-solutions-python
16af5f3a9cb8469d82b14c8953847f0e93a92324
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/missing-number/ # --------------------------------------------------- from typing import List # Runtime Complexity: O(N) # Space Complexity: O(1) # Idea: XOR with all numbers in range [0, n] and XOR with all nums. class Solution: def missingNumber(self, nums: List[int]) -> int: res = len(nums) n = len(nums) for i in range(n): res ^= nums[i] ^ i return res # --------------------------------------------------- # Test Cases # --------------------------------------------------- solution = Solution() # 2 print(solution.missingNumber([3, 0, 1])) # 8 print(solution.missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1])) # 1 print(solution.missingNumber([0]))
25.931034
67
0.470745
87
752
4.068966
0.551724
0.110169
0.220339
0
0
0
0
0
0
0
0
0.030252
0.208777
752
28
68
26.857143
0.564706
0.466755
0
0
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0.083333
0
0.333333
0.25
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
51b3dd135b10c21259433ea1463301ed5c72163c
2,790
py
Python
test/module_train_test.py
nktankta/PytorchCNNModules
bc1469ceb37477d3f60062f14a750f272e7ceeb0
[ "MIT" ]
null
null
null
test/module_train_test.py
nktankta/PytorchCNNModules
bc1469ceb37477d3f60062f14a750f272e7ceeb0
[ "MIT" ]
null
null
null
test/module_train_test.py
nktankta/PytorchCNNModules
bc1469ceb37477d3f60062f14a750f272e7ceeb0
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torch.optim as optim import torchvision import torchvision.transforms as transforms import numpy as np from module_easyModel import EasyModel from module_list import get_test_module import pytest test_modules = get_test_module() transform = transforms.Compose( [transforms.RandomRotation(15), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))]) trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True, num_workers=2) testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2) classes = tuple(np.linspace(0, 9, 10, dtype=np.uint8)) criterion = nn.CrossEntropyLoss() @pytest.mark.parametrize("mode", ["normal","residual","dense"]) @pytest.mark.parametrize("test_module", test_modules) def test_train_model(test_module,mode): print("start testing") net = EasyModel(1,10,test_module,mode=mode).to("cuda") optimizer = optim.Adam(net.parameters(), lr=0.01) for epoch in range(2): running_loss = 0.0 for i, (inputs, labels) in enumerate(trainloader, 0): inputs = inputs.to("cuda") labels = labels.to("cuda") optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if i % 100 == 99: print('[{:d}, {:5d}] loss: {:.3f}' .format(epoch + 1, i + 1, running_loss / 100)) running_loss = 0.0 print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for (images, labels) in testloader: images = images.cuda() labels = labels.cuda() outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy: {:.2f} %%'.format(100 * float(correct / total))) assert float(correct / total)>0.25
36.710526
69
0.524373
281
2,790
5.120996
0.398577
0.034746
0.018068
0.038916
0.051425
0.051425
0
0
0
0
0
0.030474
0.364875
2,790
75
70
37.2
0.781603
0
0
0.151515
0
0
0.047704
0
0
0
0
0
0.015152
1
0.015152
false
0
0.136364
0
0.151515
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51b52f4360dde9f8fcf753a559f4341aae212c20
1,592
py
Python
Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py
gen-li/modularizationandtesting
103be0c80bd70ffcf4c700861497745733b72640
[ "MIT" ]
null
null
null
Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py
gen-li/modularizationandtesting
103be0c80bd70ffcf4c700861497745733b72640
[ "MIT" ]
null
null
null
Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py
gen-li/modularizationandtesting
103be0c80bd70ffcf4c700861497745733b72640
[ "MIT" ]
null
null
null
import numpy as np import scipy.stats as st import statsmodels as sm from scipy import optimize y = np.random.randint(2, size=(100,1)) x = np.random.normal(0,1,(100,2)) res_correct = sm.discrete.discrete_model.Logit(y,x).fit() res_correct.params def Logit(b,y,x): # y = np.random.randint(2, size=(100,1)) # x = np.random.normal(0,1,(100,2)) n = x.shape[0] # b = np.zeros((s,1)) # log_likelihood = (y.T @ x @ b)[0] - np.log(1 + np.exp(x.T @ b)) log_likelihood = -y.T @ np.log(1 + np.exp(-x @ b)) + (np.ones((n,1)) - y).T @ np.log(1 - 1 / (1 + np.exp(- x @ b))) return -log_likelihood[0] Logit(y,x,np.array((2,1))) s = x.shape[1] b_0 = np.array((0,0)) optimize.minimize(Logit,x0=b_0,args=(y,x)) optimize.fmin_bfgs(Logit, b_0,args=(y,x,)) y.shape # def OLS(y,x,cf=0.95): # """ # OLS estimation. # # Parameters # −−−−−−−−−− # y : Dependent variable # x : Explanatory variable # cf: Confidence level # # Returns # −−−−−−− # beta : Beta # se: Standard Error # confidence: Confidence Interval # # See Also # −−−−−−−− # other_function : This is a related function # """ # # beta = np.linalg.inv(x.T @ x) @ (x.T @ y) # # se_term1 = ((y - x @ beta).T @ (y - x @ beta)) / (x.shape[0] - 1) # se_term2 = x.T @ x # cov_matrix = se_term1 * se_term2 # se = np.sqrt(np.diag(cov_matrix)) # # confidence = [beta - st.norm.ppf(1 - (1-0.95)/2) * se, beta \ # + st.norm.ppf(1 - (1-0.95)/2) * se] # # return {"Beta":beta, "Standard Error":se, "Confidence Interval":confidence}
23.411765
119
0.557161
269
1,592
3.327138
0.30855
0.017877
0.020112
0.023464
0.215642
0.176536
0.149721
0.149721
0.149721
0.149721
0
0.050654
0.231156
1,592
67
120
23.761194
0.660131
0.564698
0
0
0
0
0
0
0
0
0
0
0
1
0.055556
false
0
0.222222
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51ba33a22ebf12fec0e07b6d40becaca5f11662d
116
py
Python
number/numberSystem.py
Dariece/number_system_calc
5d5934c59557af9bdf0accdfc68dd8719e7fd331
[ "Apache-2.0" ]
null
null
null
number/numberSystem.py
Dariece/number_system_calc
5d5934c59557af9bdf0accdfc68dd8719e7fd331
[ "Apache-2.0" ]
null
null
null
number/numberSystem.py
Dariece/number_system_calc
5d5934c59557af9bdf0accdfc68dd8719e7fd331
[ "Apache-2.0" ]
null
null
null
from enum import Enum class NumberSystem(Enum): BINARY = 2 OCTAL = 8 DECIMAL = 10 HEXADECIMAL = 16
14.5
25
0.637931
15
116
4.933333
0.866667
0
0
0
0
0
0
0
0
0
0
0.074074
0.301724
116
8
26
14.5
0.839506
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.166667
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
51bc4a4baca841ac8b5c86065dd040c9313df97d
5,847
py
Python
fractals/pytorch/model_processor.py
NeilBostian/ML
df487db8755ad074cdd42f1094747815ae555896
[ "Unlicense" ]
1
2019-10-11T21:36:06.000Z
2019-10-11T21:36:06.000Z
fractals/pytorch/model_processor.py
NeilBostian/ML
df487db8755ad074cdd42f1094747815ae555896
[ "Unlicense" ]
null
null
null
fractals/pytorch/model_processor.py
NeilBostian/ML
df487db8755ad074cdd42f1094747815ae555896
[ "Unlicense" ]
null
null
null
import os import random import pickle import datetime import torch import numpy as np from PIL import Image from model import Model from train_data import TrainData from loss_train_data import get_loss_train_data class ModelProcessor(): def __init__(self, path): self.path = path self.device = torch.device('cuda') self.model = Model(self.device) self._load_model() def train_frames(self): if not self._loss_trained: raise os.error('Loss has not been trained yet (call ModelProcessor.train_loss())') for x, y, _ in ModelProcessor._train_frames_iter(300000, 1): epoch = self._epoch loss = self.model.train_frame(x, y) print(f'{datetime.datetime.now()} train_frame epoch {epoch} loss={loss}') self._epoch = self._epoch + 1 if (epoch % 500) == 0: self._checkpoints[epoch] = { 'epoch': epoch, 'loss': loss } self.model.save(self._path(f'ckpt-{epoch}.pt')) self._save_model() self._process_sample_images() def train_loss(self): if self._loss_trained: raise os.error('Loss has already been trained on this model') for x, y, epoch in ModelProcessor._train_loss_iter(400, 4): loss = self.model.train_loss(x, y) print(f'{datetime.datetime.now()} train_loss epoch {epoch} loss={loss}') self._loss_trained = True self._checkpoints[1] = { 'epoch': 1, 'loss': None } self.model.save(self._path(f'ckpt-1.pt')) self._save_model() def _load_model(self): if not os.path.exists(self.path): os.mkdir(self.path) if not os.path.exists(self._path('index')): self._loss_trained = False self._epoch = 1 self._checkpoints = { } self._save_model() else: with open(self._path('index'), 'rb') as f: mdata = pickle.load(f) self._loss_trained = mdata['loss_trained'] self._epoch = mdata['epoch'] self._checkpoints = mdata['checkpoints'] if len(self._checkpoints) > 0: latest_checkpoint = max(self._checkpoints) ckpt_path = self._path(f'ckpt-{latest_checkpoint}.pt') if os.path.exists(ckpt_path): self.model.load(ckpt_path) else: self.model.load(self._path('ckpt-1.pt')) self._epoch = 1 self._checkpoints = { 1: {'epoch': 1, 
'loss': None} } def _save_model(self): with open(self._path('index'), 'wb') as f: mdata = { 'loss_trained': self._loss_trained, 'epoch': self._epoch, 'checkpoints': self._checkpoints } pickle.dump(mdata, f) def _path(self, *paths): return os.path.join(self.path, *paths) def _train_frames_iter(num_batches, batch_size): def _train_frames_iter_singles(): for i in range(0, batch_size * num_batches): td = TrainData.get_random() x = td.get_train_image() y = td.get_next_train_image() yield (x, y, i) xs = [] ys = [] for x, y, i in _train_frames_iter_singles(): xs.append(x[0]) ys.append(y[0]) if len(xs) >= batch_size: epoch = int((i + 1) / batch_size) yield (np.array(xs), np.array(ys), epoch) xs = [] ys = [] def _train_loss_iter(num_batches, batch_size): def _train_loss_iter_singles(): for i in range(0, batch_size * num_batches): g = random.randint(0, 1) if g == 0: x = TrainData.get_random().get_train_image() y = 0 else: x = get_loss_train_data() y = 1 yield (x, y, i) xs = [] ys = [] for x, y, i in _train_loss_iter_singles(): xs.append(x[0]) ys.append(y) if len(xs) >= batch_size: epoch = int((i + 1) / batch_size) yield (np.array(xs), np.array(ys), epoch) xs = [] ys = [] def _process_sample_images(self): """ Processes images in the '.data/model_sample_inputs' directory through the model, each with 5 samples """ model = self.model epoch = self._epoch for img in os.listdir('.data/model_sample_inputs'): sample_outputs = self._path('sample_outputs') if not os.path.exists(sample_outputs): os.mkdir(sample_outputs) out_dir = self._path(f'sample_outputs', img) if not os.path.exists(out_dir): os.mkdir(out_dir) print(f'process sample {img}') try: x = Image.open(f'.data/model_sample_inputs/{img}') x.load() x.save(f'{out_dir}/{epoch}-0.png') x = TrainData.preprocess_pil_image(x) max_iters = 4 for i in range(1, max_iters + 1): x = model.get_frame(x) y = TrainData.postprocess_pil_image(x) y.save(f'{out_dir}/{epoch}-{i}.png') y.close() print(f'process sample {img} completed 
{i}/{max_iters}') except Exception as e: print(f'exception processing sample {img} {e}') pass
31.605405
116
0.507611
691
5,847
4.068017
0.180897
0.039843
0.032017
0.015653
0.334045
0.245464
0.245464
0.165777
0.118819
0.097474
0
0.011599
0.380708
5,847
185
117
31.605405
0.764706
0.017103
0
0.208333
0
0
0.110279
0.036411
0
0
0
0
0
1
0.076389
false
0.006944
0.069444
0.006944
0.159722
0.034722
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51be12ad14475c1998ae105630807a45e802e7d8
549
py
Python
pedal/assertions/__init__.py
acbart/python-analysis
3cd2cc22d50a414ae6b62c74d2643be4742238d4
[ "MIT" ]
14
2019-08-22T03:40:23.000Z
2022-03-13T00:30:53.000Z
pedal/assertions/__init__.py
pedal-edu/pedal
3cd2cc22d50a414ae6b62c74d2643be4742238d4
[ "MIT" ]
74
2019-09-12T04:35:56.000Z
2022-01-26T19:21:32.000Z
pedal/assertions/__init__.py
acbart/python-analysis
3cd2cc22d50a414ae6b62c74d2643be4742238d4
[ "MIT" ]
2
2018-09-16T22:39:15.000Z
2018-09-17T12:53:28.000Z
""" The assertions module contains classic unittest-style assert statements. """ from pedal.assertions.setup import _setup_assertions, resolve_all from pedal.assertions.constants import TOOL_NAME from pedal.core.report import Report, MAIN_REPORT from pedal.assertions.commands import * def reset(report=MAIN_REPORT): """ Resets (or initializes) the information about assertions. Args: report: """ report[TOOL_NAME] = { 'failures': 0, 'exceptions': False } Report.register_tool(TOOL_NAME, reset)
21.96
72
0.717668
65
549
5.923077
0.538462
0.093506
0.148052
0
0
0
0
0
0
0
0
0.002252
0.191257
549
24
73
22.875
0.864865
0.271403
0
0
0
0
0.04878
0
0
0
0
0
0.3
1
0.1
false
0
0.4
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
51be5f0b52819e31e6e93a3882ca71e420ce2121
573
py
Python
python/Python-Quick-Start/generator_func.py
pepincho/playground
9202a3dab880ff789e5fb96b259c3e0c2503cb49
[ "MIT" ]
null
null
null
python/Python-Quick-Start/generator_func.py
pepincho/playground
9202a3dab880ff789e5fb96b259c3e0c2503cb49
[ "MIT" ]
null
null
null
python/Python-Quick-Start/generator_func.py
pepincho/playground
9202a3dab880ff789e5fb96b259c3e0c2503cb49
[ "MIT" ]
null
null
null
# print all prime numbers in a range with a generator function in python #that is an utility function def isprime(n): if n == 1: return False for x in range(2, n): if n % x == 0: return False else: return True #generator function is used in the for loop as an iterator #this function return an iterator object def primes(n = 1): while (True): if isprime(n): yield n #yield makes tihs a generator n += 1 #for loop use primes function as an iterator for n in primes(): if n > 100: break print(n)
23.875
72
0.624782
94
573
3.808511
0.457447
0.02514
0.022346
0
0
0
0
0
0
0
0
0.020101
0.30541
573
23
73
24.913043
0.879397
0.460733
0
0.133333
0
0
0
0
0
0
0
0
0
1
0.133333
false
0
0
0
0.333333
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51bf6b3ed4c7f402aaa77eba2ced4b65f3cd67ca
5,373
py
Python
safe_notification_service/firebase/tests/utils.py
vaporyorg/safe-notification-service
7a1bf51b4bb529aab8d6f943616c88f969c98644
[ "MIT" ]
8
2018-05-17T00:49:57.000Z
2021-08-23T12:52:49.000Z
safe_notification_service/firebase/tests/utils.py
vaporyorg/safe-notification-service
7a1bf51b4bb529aab8d6f943616c88f969c98644
[ "MIT" ]
64
2018-11-08T17:47:18.000Z
2022-01-31T17:00:22.000Z
safe_notification_service/firebase/tests/utils.py
vaporyorg/safe-notification-service
7a1bf51b4bb529aab8d6f943616c88f969c98644
[ "MIT" ]
4
2020-12-28T20:28:07.000Z
2022-03-19T16:46:44.000Z
import json from firebase_admin import _http_client, messaging from firebase_admin.credentials import Base from google.auth.credentials import Credentials from requests import adapters, models FIREBASE_AUTH_CREDENTIALS = { "type": "service_account", "project_id": "mock-project-id", "private_key_id": "mock-key-id-1", "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAwJENcRev+eXZKvhhWLiV3Lz2MvO+naQRHo59g3vaNQnbgyduN/L4krlr\nJ5c6FiikXdtJNb/QrsAHSyJWCu8j3T9CruiwbidGAk2W0RuViTVspjHUTsIHExx9euWM0Uom\nGvYkoqXahdhPL/zViVSJt+Rt8bHLsMvpb8RquTIb9iKY3SMV2tCofNmyCSgVbghq/y7lKORt\nV/IRguWs6R22fbkb0r2MCYoNAbZ9dqnbRIFNZBC7itYtUoTEresRWcyFMh0zfAIJycWOJlVL\nDLqkY2SmIx8u7fuysCg1wcoSZoStuDq02nZEMw1dx8HGzE0hynpHlloRLByuIuOAfMCCYwID\nAQABAoIBADFtihu7TspAO0wSUTpqttzgC/nsIsNn95T2UjVLtyjiDNxPZLUrwq42tdCFur0x\nVW9Z+CK5x6DzXWvltlw8IeKKeF1ZEOBVaFzy+YFXKTz835SROcO1fgdjyrme7lRSShGlmKW/\nGKY+baUNquoDLw5qreXaE0SgMp0jt5ktyYuVxvhLDeV4omw2u6waoGkifsGm8lYivg5l3VR7\nw2IVOvYZTt4BuSYVwOM+qjwaS1vtL7gv0SUjrj85Ja6zERRdFiITDhZw6nsvacr9/+/aut9E\naL/koSSb62g5fntQMEwoT4hRnjPnAedmorM9Rhddh2TB3ZKTBbMN1tUk3fJxOuECgYEA+z6l\neSaAcZ3qvwpntcXSpwwJ0SSmzLTH2RJNf+Ld3eBHiSvLTG53dWB7lJtF4R1KcIwf+KGcOFJv\nsnepzcZBylRvT8RrAAkV0s9OiVm1lXZyaepbLg4GGFJBPi8A6VIAj7zYknToRApdW0s1x/XX\nChewfJDckqsevTMovdbg8YkCgYEAxDYX+3mfvv/opo6HNNY3SfVunM+4vVJL+n8gWZ2w9kz3\nQ9Ub9YbRmI7iQaiVkO5xNuoG1n9bM+3Mnm84aQ1YeNT01YqeyQsipP5Wi+um0PzYTaBw9RO+\n8Gh6992OwlJiRtFk5WjalNWOxY4MU0ImnJwIfKQlUODvLmcixm68NYsCgYEAuAqI3jkk55Vd\nKvotREsX5wP7gPePM+7NYiZ1HNQL4Ab1f/bTojZdTV8Sx6YCR0fUiqMqnE+OBvfkGGBtw22S\nLesx6sWf99Ov58+x4Q0U5dpxL0Lb7d2Z+2Dtp+Z4jXFjNeeI4ae/qG/LOR/b0pE0J5F415ap\n7Mpq5v89vepUtrkCgYAjMXytu4v+q1Ikhc4UmRPDrUUQ1WVSd+9u19yKlnFGTFnRjej86hiw\nH3jPxBhHra0a53EgiilmsBGSnWpl1WH4EmJz5vBCKUAmjgQiBrueIqv9iHiaTNdjsanUyaWw\njyxXfXl2eI80QPXh02+8g1H/pzESgjK7Rg1AqnkfVH9nrwKBgQDJVxKBPTw9pigYMVt9iHrR\niCl9zQVjRMbWiPOc0J56+/5FZYm/AOGl9rfhQ9vGxXZYZiOP5FsNkwt05Y1UoAAH4B4VQwbL\nqod71qOcI0ywgZiIR87CYw40gzRfjWnN+YEEW1qfyoNLilEwJB8iB/T+ZePHGmJ4MmQ/cTn9\nxpdLX
A==\n-----END RSA PRIVATE KEY-----", "client_email": "mock-email@mock-project.iam.gserviceaccount.com", "client_id": "1234567890", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/mock-project-id.iam.gserviceaccount.com" } FIREBASE_TOKEN = 'mock-token' class MockHttpClient(_http_client.HttpClient): def request(self, method, url, **kwargs): return kwargs['json'] def parse_body(self, resp): resp.update({'name': 'test-name'}) return resp class MockGoogleCredential(Credentials): """A mock Google authentication credential.""" def refresh(self, request): self.token = 'mock-token' class MockCredential(Base): """A mock Firebase credential implementation.""" def __init__(self): self._g_credential = MockGoogleCredential() def get_credential(self): return self._g_credential class MockAdapter(adapters.HTTPAdapter): """A mock HTTP adapter for the Python requests module.""" def __init__(self, data, status, recorder): adapters.HTTPAdapter.__init__(self) self._data = data self._status = status self._recorder = recorder def send(self, request, **kwargs): request._extra_kwargs = kwargs self._recorder.append(request) resp = models.Response() resp.url = request.url resp.status_code = self._status resp.raw = self._data.encode() return resp def send_message(*args, message_instance, data, token): message = messaging.Message( data=data, token=token ) response = message_instance.send(message) return response class MessagingService: _DEFAULT_RESPONSE = json.dumps({'name': 'message-id'}) def __init__(self, app, *args, **kwargs): # self.fcm_service = messaging._get_messaging_service(app) # self.fcm_service._client.session.mount( # 'https://fcm.googleapis.com', # MockAdapter(json.dumps({'name': 'message-id'}), 200, self.recorder) # ) # super(MessagingService, self).__init__(app, 
*args, **kwargs) self.fcm_service, self.recorder = self._instrument_messaging_service(app) self.session = self.fcm_service._client.session self._client = MockHttpClient(session=self.session) self._fcm_url = 'https://fcm.googleapis.com/v1/projects/{0}/messages:send'.format(app.project_id) self._timeout = app.options.get('httpTimeout') def _instrument_messaging_service(self, app, status=200, payload=_DEFAULT_RESPONSE): fcm_service = messaging._get_messaging_service(app) recorder = [] fcm_service._client.session.mount( 'https://fcm.googleapis.com', MockAdapter(payload, status, recorder) ) return fcm_service, recorder def send(self, message, dry_run=False): data = {'message': messaging._MessagingService.encode_message(message)} if dry_run: data['validate_only'] = True resp = self._client.body('post', url=self._fcm_url, json=data, timeout=self._timeout) return resp['name']
49.293578
1,722
0.758608
505
5,373
7.867327
0.370297
0.017619
0.014095
0.017367
0.113013
0.094135
0.083061
0.030204
0.030204
0.030204
0
0.057223
0.138098
5,373
108
1,723
49.75
0.800691
0.074819
0
0.026667
0
0.026667
0.466667
0.349495
0
0
0
0
0
1
0.146667
false
0
0.066667
0.026667
0.386667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
51c02944976e6d03f939af067e9a4a01386ea663
8,577
py
Python
pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py
Kulivox/PyGalGen
816004bce50703737384e2fbdcfe43b61ce2f4dd
[ "MIT" ]
null
null
null
pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py
Kulivox/PyGalGen
816004bce50703737384e2fbdcfe43b61ce2f4dd
[ "MIT" ]
null
null
null
pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py
Kulivox/PyGalGen
816004bce50703737384e2fbdcfe43b61ce2f4dd
[ "MIT" ]
null
null
null
""" Module responsible for discovery of import statements importing Argument parser and discovery of the statements initializing the parser itself """ import ast import sys from typing import Tuple, Optional, Any, Set, List from .parsing_exceptions import ArgParseImportNotFound, ArgParserNotUsed from .parsing_commons import Discovery ARGPARSE_MODULE_NAME = "argparse" ARGUMENT_PARSER_CLASS_NAME = "ArgumentParser" class ImportDiscovery(Discovery): """ Class responsible for discovery and extraction of import statements """ def __init__(self, actions: List[ast.AST]): super(ImportDiscovery, self).__init__(actions) self.argparse_module_alias: Optional[str] = None self.argument_parser_alias: Optional[str] = None def visit_Import(self, node: ast.Import) -> Any: for item in node.names: if item.name == ARGPARSE_MODULE_NAME: alias = item.asname if item.asname is not None \ else ARGPARSE_MODULE_NAME self.argparse_module_alias = alias self.actions.append(node) return # stdlib modules should be also imported during this step if item.name in sys.stdlib_module_names: self.actions.append(node) def visit_ImportFrom(self, node: ast.ImportFrom) -> Any: if node.module is None: return for name in node.module.split("."): if name in sys.stdlib_module_names and name != \ ARGPARSE_MODULE_NAME: self.actions.append(node) return if ARGPARSE_MODULE_NAME not in node.module: return for item in node.names: if item.name == ARGUMENT_PARSER_CLASS_NAME: alias = item.asname if item.asname is not None \ else ARGUMENT_PARSER_CLASS_NAME self.argument_parser_alias = alias self.actions.append(node) return # stdlib modules should be also imported during this step def report_findings(self) -> Tuple: if self.argparse_module_alias is None and \ self.argument_parser_alias is None: raise ArgParseImportNotFound return (self.actions, self.argparse_module_alias, self.argument_parser_alias) class ParserDiscovery(Discovery): """ Class responsible for discovery of ArgumentParser creation and assignment """ class 
ParserRenameFinder(ast.NodeVisitor): def __init__(self, func_name: str): self.func_name = func_name self.arg_pos: Optional[int] = None self.keyword = Optional[str] = None def find_by_argument_pos(self, tree: ast.AST, n: int): self.arg_pos = n self.keyword = None self.visit(tree) def __init__(self, actions: List[ast.AST], argparse_alias: Optional[str], argument_parser_alias: Optional[str]): self.argument_parser_alias = argument_parser_alias self.argparse_module_alias = argparse_alias self.main_parser_name: Optional[str] = None super(ParserDiscovery, self).__init__(actions) # checks whether this assignment creates argument parser, # and removes any arguments from the constructor, # because they should not be needed def is_this_argparse(self, node: ast.Assign) -> \ Tuple[bool, Optional[str]]: if not (len(node.targets) == 1 and isinstance(node.targets[0], ast.Name)): return False, None name = node.targets[0].id # ArgumentParser was imported using from ... import if (isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name) and node.value.func.id == self.argument_parser_alias): node.value.keywords = [] node.value.args = [] return True, name # ArgumentParser is created using attribute call on imported module if (isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Attribute) and node.value.func.attr == ARGUMENT_PARSER_CLASS_NAME and node.value.func.value.id == self.argparse_module_alias): node.value.args = [] node.value.keywords = [] return True, name return False, None def visit_Assign(self, node: ast.Assign): # visit into children of this node is not necessary is_argparse, name = self.is_this_argparse(node) if is_argparse: self.main_parser_name = name self.actions.append(node) def report_findings(self) -> Tuple: if self.main_parser_name is None: raise ArgParserNotUsed return self.actions, self.main_parser_name # this visitor class goes through the tree and tries to find creation of # all argument groups # it works only if the group is 
assigned a name # (is created as a normal variable) class GroupDiscovery(Discovery): """ Class responsible for discovery of statements that initialize argument groups """ def __init__(self, actions: List[ast.AST], main_name: str): self.main_name = main_name self.groups = set() super(GroupDiscovery, self).__init__(actions) @staticmethod def is_this_group_creation(node: ast.Assign): if not (len(node.targets) == 1 and isinstance(node.targets[0], ast.Name)): return False, None name = node.targets[0].id if not (isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Attribute) and node.value.func.attr == "add_argument_group"): return False, None return True, name def visit_Assign(self, node: ast.Assign): is_group_creation, name = self.is_this_group_creation(node) if is_group_creation: self.groups.add(name) self.actions.append(node) def report_findings(self) -> Tuple: return self.actions, self.main_name, self.groups # # this visitor goes through all calls and extracts those to argument # parser and groups. IMPORTANT! it also renames parsers on which those calls # are called to ensure everything can be interpreted correctly class ArgumentCreationDiscovery(Discovery): """ Class responsible for extraction of statements which initialize the input arguments. 
It is able to extract function calls on the original parser, and on the argument groups extracted by GroupDiscovery """ def __init__(self, actions: List[ast.AST], main_name: str, groups: Set[str]): self.main_name = main_name self.sections = groups super(ArgumentCreationDiscovery, self).__init__(actions) def is_call_on_parser_or_group(self, node: ast.Call): return isinstance(node.func, ast.Attribute) and \ node.func.attr == "add_argument" and \ (node.func.value.id in self.sections or node.func.value.id ==self.main_name) def visit_Call(self, node: ast.Call) -> Any: if self.is_call_on_parser_or_group(node): assert isinstance(node.func, ast.Attribute) # name of the variable needs to be rewritten, # because we want to use only one parser if node.func.value.id != self.main_name and \ node.func.value.id not in self.sections: node.func.value.id = self.main_name self.actions.append(ast.Expr(node)) self.generic_visit(node) def report_findings(self) -> Tuple: return self.actions, self.main_name, self.sections def get_parser_init_and_actions(source: ast.Module) -> \ Tuple[List[ast.AST], str, Set[str]]: """ Function used to extract necessary imports, parser and argument creation function calls Parameters ---------- source : ast.Module source file parsed into ATT Returns ------- List of extracted AST nodes, the main name of the parser and a set of section names """ discovery_classes = [ImportDiscovery, ParserDiscovery, GroupDiscovery, ArgumentCreationDiscovery] findings = [], for cls in discovery_classes: discovery = cls(*findings) discovery.visit(source) findings = discovery.report_findings() actions, main_name, sections = findings return actions, main_name, sections
35.589212
79
0.641483
1,046
8,577
5.09847
0.17304
0.039377
0.028502
0.025877
0.350834
0.270954
0.238702
0.184699
0.174198
0.174198
0
0.00097
0.279002
8,577
240
80
35.7375
0.861417
0.198205
0
0.303448
0
0
0.007876
0
0
0
0
0
0.006897
1
0.131034
false
0
0.075862
0.02069
0.365517
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c0e5193ac5162e3a0fb7638cb34cf6d76fc644
358
py
Python
cpovc_pfs/pmtct/urls.py
uonafya/cpims-ovc-3.0
ec2768c00fc0855eb4983a94204cfcdee0824e19
[ "Apache-2.0" ]
2
2022-02-26T14:04:40.000Z
2022-03-23T17:33:32.000Z
cpovc_pfs/pmtct/urls.py
uonafya/cpims-ovc-3.0
ec2768c00fc0855eb4983a94204cfcdee0824e19
[ "Apache-2.0" ]
null
null
null
cpovc_pfs/pmtct/urls.py
uonafya/cpims-ovc-3.0
ec2768c00fc0855eb4983a94204cfcdee0824e19
[ "Apache-2.0" ]
19
2022-02-26T13:44:58.000Z
2022-03-26T17:20:22.000Z
from django.urls import path from . import views # This should contain urls related to OVC ONLY urlpatterns = [ path('', views.pmtct_home, name='pmtct_home'), path('new/<int:id>/', views.new_pmtct, name='new_pmtct'), path('view/<int:id>/', views.view_pmtct, name='view_pmtct'), path('edit/<int:id>/', views.edit_pmtct, name='edit_pmtct'), ]
32.545455
64
0.678771
54
358
4.351852
0.407407
0.06383
0.12766
0
0
0
0
0
0
0
0
0
0.139665
358
10
65
35.8
0.762987
0.122905
0
0
0
0
0.25641
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c1564b9fb274dd54467c39928249894cdba7a4
954
py
Python
config_editor/__init__.py
penguinolog/config-editor
d0b8500eaf8e4cf7c7f93c03c4c4e78072bb51ce
[ "Apache-2.0" ]
null
null
null
config_editor/__init__.py
penguinolog/config-editor
d0b8500eaf8e4cf7c7f93c03c4c4e78072bb51ce
[ "Apache-2.0" ]
null
null
null
config_editor/__init__.py
penguinolog/config-editor
d0b8500eaf8e4cf7c7f93c03c4c4e78072bb51ce
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Alexey Stepanov aka penguinolog # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Config file editors.""" from __future__ import absolute_import from .config_editor import ConfigEditor from .json_editor import JsonEditor from .yaml_editor import YamlEditor __all__ = ( "ConfigEditor", "JsonEditor", "YamlEditor" ) __version__ = '0.9.2' __author__ = "Alexey Stepanov <penguinolog@gmail.com>"
30.774194
78
0.735849
129
954
5.286822
0.635659
0.087977
0.038123
0.046921
0
0
0
0
0
0
0
0.014267
0.191824
954
30
79
31.8
0.870298
0.650943
0
0
0
0
0.240506
0.072785
0
0
0
0
0
1
0
false
0
0.363636
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
51c49423dfa19179ff9a8960299203c4be600c6e
311
py
Python
delay.py
fleidloff/effect-pedal
20680294e70979ec230ec2798c836a6447c49853
[ "MIT" ]
null
null
null
delay.py
fleidloff/effect-pedal
20680294e70979ec230ec2798c836a6447c49853
[ "MIT" ]
null
null
null
delay.py
fleidloff/effect-pedal
20680294e70979ec230ec2798c836a6447c49853
[ "MIT" ]
null
null
null
import pyo from settings import audioSource s = pyo.Server(audio=audioSource, nchnls=1).boot() s.start() a = pyo.Input(chnl=0).out() delay = pyo.Delay(a, delay=.5, feedback=.5) delay.out() while True: s = raw_input('Delay'); if s == "q": quit() delay.setDelay(float(s)) #s.gui(locals())
17.277778
50
0.630225
49
311
3.979592
0.612245
0
0
0
0
0
0
0
0
0
0
0.015686
0.180064
311
18
51
17.277778
0.74902
0.048232
0
0
0
0
0.02027
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c64365386b4968c448213772fdf24be5c8b7b8
297
py
Python
djangoPharma/drugs/validators.py
thodoris/djangoPharma
76089e67bc9940651a876d078879469127f5ac66
[ "Apache-2.0" ]
null
null
null
djangoPharma/drugs/validators.py
thodoris/djangoPharma
76089e67bc9940651a876d078879469127f5ac66
[ "Apache-2.0" ]
null
null
null
djangoPharma/drugs/validators.py
thodoris/djangoPharma
76089e67bc9940651a876d078879469127f5ac66
[ "Apache-2.0" ]
null
null
null
from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ def validate_integer(value): if type(value) is not int: raise ValidationError( _('%(value)s is not an even number'), params={'value': value}, )
27
55
0.659933
36
297
5.333333
0.722222
0.104167
0
0
0
0
0
0
0
0
0
0
0.249158
297
10
56
29.7
0.860987
0
0
0
0
0
0.121212
0
0
0
0
0
0
1
0.125
false
0
0.25
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c73412f6b848915731c179d918c5aa37f2bde0
261
py
Python
python/destryseuler/tests/test_p1.py
destrys/euler
7afd8fba023f29c42d11cc4725cb99e49b62b014
[ "MIT" ]
null
null
null
python/destryseuler/tests/test_p1.py
destrys/euler
7afd8fba023f29c42d11cc4725cb99e49b62b014
[ "MIT" ]
5
2020-03-24T15:30:22.000Z
2021-06-01T21:51:31.000Z
python/destryseuler/tests/test_p1.py
destrys/euler
7afd8fba023f29c42d11cc4725cb99e49b62b014
[ "MIT" ]
null
null
null
from destryseuler import p1 def test_p1_answer(): assert p1.answer(10) == 23 def test_brute(): assert p1.natural_3and5_brute(10) == 23 def test_lambda(): assert p1.natural_3and5_lambda(10) == 23 assert p1.natural_3and5_lambda(1000) == 233168
21.75
50
0.716475
40
261
4.425
0.4
0.180791
0.254237
0.338983
0.293785
0
0
0
0
0
0
0.157407
0.172414
261
11
51
23.727273
0.662037
0
0
0
0
0
0
0
0
0
0
0
0.5
1
0.375
true
0
0.125
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
5
51c80e982e0d954d47d04ce8e0ca20615f304653
6,568
py
Python
Reto1/unet_training.py
Hackaton-JusticIA-2021/pista-latente-ML-sol
3aeeae5970539c0b17358e4ac8585b13c9cea07b
[ "MIT" ]
null
null
null
Reto1/unet_training.py
Hackaton-JusticIA-2021/pista-latente-ML-sol
3aeeae5970539c0b17358e4ac8585b13c9cea07b
[ "MIT" ]
null
null
null
Reto1/unet_training.py
Hackaton-JusticIA-2021/pista-latente-ML-sol
3aeeae5970539c0b17358e4ac8585b13c9cea07b
[ "MIT" ]
1
2021-08-22T02:38:38.000Z
2021-08-22T02:38:38.000Z
import numpy as np import cv2 import os import keras import tensorflow as tf import random import matplotlib.pyplot as plt from glob import glob from keras import layers from keras.backend.tensorflow_backend import set_session from tensorflow.python.client import device_lib input_dir_1 = "unet/images/" target_dir_1 = "unet/target/" input_dir_2= "data/images/" target_dir_2 = "data/target/" img_size = (32, 32) num_classes = 2 batch_size = 32 input_img_paths_1 = sorted(glob(os.path.join(input_dir_1, '*' + '.png'))) target_img_paths_1 = sorted(glob(os.path.join(target_dir_1, '*' + '.png'))) input_img_paths_2 = sorted(glob(os.path.join(input_dir_2, '*' + '.png'))) target_img_paths_2 = sorted(glob(os.path.join(target_dir_2, '*' + '.png'))) input_img_paths = input_img_paths_1 + input_img_paths_2 target_img_paths = target_img_paths_1 + target_img_paths_2 print("Number of samples:", len(input_img_paths)) for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]): print(input_path, "|", target_path) class Patches(keras.utils.Sequence): """Helper to iterate over the data (as Numpy arrays).""" def __init__(self, batch_size, img_size, input_img_paths, target_img_paths): self.batch_size = batch_size self.img_size = img_size self.input_img_paths = input_img_paths self.target_img_paths = target_img_paths self.current_batch = 0 def __len__(self): return len(self.target_img_paths) // self.batch_size def __getitem__(self, idx): """Returns tuple (input, target) correspond to batch #idx.""" #print(idx) i = idx * self.batch_size if i == 0: data_zip_list = list(zip(self.input_img_paths, self.target_img_paths)) random.shuffle(data_zip_list) self.input_img_paths, self.target_img_paths = zip(*data_zip_list) batch_input_img_paths = self.input_img_paths[i : i + self.batch_size] batch_target_img_paths = self.target_img_paths[i : i + self.batch_size] x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32") for j, path in enumerate(batch_input_img_paths): img 
= cv2.imread(path, cv2.IMREAD_COLOR) n = np.random.randint(0, 3) if n == 0: img = cv2.blur(img, (3, 3)) / 255. elif n == 1: img = cv2.blur(img, (5, 5)) / 255. else: img = img / 255. x[j] = img y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="float32") for j, path in enumerate(batch_target_img_paths): img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) * 1. y[j] = np.expand_dims(img, 2) return x, y def get_model(img_size, num_classes): inputs = keras.Input(shape=img_size) ### [First half of the network: downsampling inputs] ### # Entry block x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) previous_block_activation = x # Set aside residual # Blocks 1, 2, 3 are identical apart from the feature depth. for filters in [64, 128, 256]: x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.MaxPooling2D(3, strides=2, padding="same")(x) # Project residual residual = layers.Conv2D(filters, 1, strides=2, padding="same")(previous_block_activation) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual ### [Second half of the network: upsampling inputs] ### for filters in [256, 128, 64, 32]: x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.UpSampling2D(2)(x) # Project residual residual = layers.UpSampling2D(2)(previous_block_activation) residual = layers.Conv2D(filters, 1, padding="same")(residual) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual # Add a per-pixel 
classification layer outputs = layers.Conv2D(num_classes, 3, activation="sigmoid", padding="same")(x) # Define the model model = keras.Model(inputs, outputs) return model tf_config = tf.ConfigProto(device_count = {'GPU': 0}) tf_config.gpu_options.per_process_gpu_memory_fraction = 0.7 tf_config.gpu_options.visible_device_list = "0" set_session(tf.Session(config=tf_config)) # Free up RAM in case the model definition cells were run multiple times #keras.backend.clear_session() # Build model model = get_model((32, 32, 3), 1) #model.load_weights('oxford_segmentation.h5') model.summary() # Split our img paths into a training and a validation set val_samples = int(0.2*len(input_img_paths)) data_zip_list = list(zip(input_img_paths, target_img_paths)) random.shuffle(data_zip_list) input_img_paths, target_img_paths = zip(*data_zip_list) train_input_img_paths = input_img_paths[:-val_samples] train_target_img_paths = target_img_paths[:-val_samples] val_input_img_paths = input_img_paths[-val_samples:] val_target_img_paths = target_img_paths[-val_samples:] # Instantiate data Sequences for each split train_gen = Patches(batch_size, img_size, train_input_img_paths, train_target_img_paths) val_gen = Patches(batch_size, img_size, val_input_img_paths, val_target_img_paths) # Configure the model for training. # We use the "sparse" version of categorical_crossentropy # because our target data is integers. opt = keras.optimizers.SGD() model.compile(optimizer="SGD", loss="binary_crossentropy") callbacks = [keras.callbacks.ModelCheckpoint("oxford_segmentation.h5", save_best_only=True)] # Train the model, doing validation at the end of each epoch. 
epochs = 10 hist = model.fit_generator(train_gen, epochs=epochs, validation_data=val_gen, callbacks=callbacks) fig = plt.figure() plt.plot(hist.history['loss'], label = 'Training value', color = 'darkslategray') plt.plot(hist.history['val_loss'], label = 'Validation value', color = 'darkslategray', linestyle = '--') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.savefig('loss.pdf') plt.close(fig)
37.531429
105
0.701279
966
6,568
4.52381
0.238095
0.087872
0.071396
0.027231
0.414416
0.367735
0.303661
0.26865
0.132265
0.121281
0
0.023071
0.175091
6,568
175
106
37.531429
0.783499
0.140073
0
0.173554
0
0
0.053524
0.003925
0
0
0
0
0
1
0.033058
false
0
0.090909
0.008264
0.157025
0.016529
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c897010d686c63214412a25e6bca01df90e345
343
py
Python
wangdai/spiders/zj_sprider.py
jiaoshenmene/wangdai
82090948602bc756048b4655b41a8a342e58a03e
[ "MIT" ]
null
null
null
wangdai/spiders/zj_sprider.py
jiaoshenmene/wangdai
82090948602bc756048b4655b41a8a342e58a03e
[ "MIT" ]
null
null
null
wangdai/spiders/zj_sprider.py
jiaoshenmene/wangdai
82090948602bc756048b4655b41a8a342e58a03e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import scrapy class Sprider(scrapy.Spider): name = "zj" start_urls = [ 'https://www.wdzj.com/pingji.html' ] def parse(self , response): for quote in response.css('div.tb-platname'): yield { 'name': quote.css('a::text').extract_first(), }
19.055556
62
0.51312
39
343
4.461538
0.871795
0
0
0
0
0
0
0
0
0
0
0.00431
0.323615
343
18
63
19.055556
0.74569
0.061224
0
0
0
0
0.186916
0
0
0
0
0
0
1
0.090909
false
0
0.090909
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c8fa56a66ee4effef1038af84f282d8f3c34a3
2,792
py
Python
cwmud/core/help.py
whutch/cwmud
bee8b126a5e70edd0593dae9753a6be8d52357cf
[ "MIT" ]
11
2016-03-03T03:56:59.000Z
2021-11-19T15:38:51.000Z
cwmud/core/help.py
whutch/atria
bee8b126a5e70edd0593dae9753a6be8d52357cf
[ "MIT" ]
26
2016-08-31T23:19:45.000Z
2019-10-19T21:50:33.000Z
cwmud/core/help.py
whutch/atria
bee8b126a5e70edd0593dae9753a6be8d52357cf
[ "MIT" ]
2
2016-01-22T21:22:34.000Z
2016-02-09T06:03:57.000Z
# -*- coding: utf-8 -*- """Help information management.""" # Part of Clockwork MUD Server (https://github.com/whutch/cwmud) # :copyright: (c) 2008 - 2017 Will Hutcheson # :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt) import fnmatch import re from .utils.bases import Manager class HelpSourceManager(Manager): """A manager for help source registration.""" def find(self, pattern): """Find one or more help entries matching a pattern. :param str pattern: A pattern to match entries against :returns list: A list of HelpEntry instances that match the pattern """ found = {} for source in self._items.values(): for entry in source.find(pattern): if entry.key not in found: found[entry.key] = entry return list(found.values()) class HelpSource: """A searchable source of help data.""" def __init__(self): """Create a new help source.""" self._entries = {} def __contains__(self, key): return key in self._entries def __getitem__(self, key): return self._entries[key] def __setitem__(self, key, value): self._entries[key] = value def __delitem__(self, key): del self._entries[key] def __iter__(self): return iter(self._entries) def keys(self): """Return an iterator through this source's entry keys.""" return self._entries.keys() def entries(self): """Return an iterator through this source's entries.""" return self._entries.values() def find(self, pattern): """Find one or more help entries matching a pattern. 
:param str pattern: A pattern to match entries against :returns list: A list of HelpEntry instances that match the pattern """ pattern = re.compile(fnmatch.translate(pattern)) matches = [] for entry in self.entries(): for topic in entry.topics: if pattern.match(topic): matches.append(entry) return matches class HelpEntry: """A single entry of help information.""" def __init__(self, key, title, text): """Create a new help entry.""" self._key = key self._related = set() self._text = text self._title = title self._topics = set() @property def related(self): """Return this entry's related topics.""" return frozenset(self._related) @property def text(self): """Return this entry's text.""" return self._text @property def topics(self): """Return this entry's topic keywords.""" return frozenset(self._topics) HELP_SOURCES = HelpSourceManager()
25.851852
75
0.607808
337
2,792
4.89911
0.305638
0.059964
0.025439
0.034525
0.302847
0.23622
0.23622
0.23622
0.190188
0.190188
0
0.004505
0.284384
2,792
107
76
26.093458
0.821822
0.336318
0
0.09434
0
0
0
0
0
0
0
0
0
1
0.264151
false
0
0.056604
0.056604
0.566038
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
51c925099b64da573af34c5499717de76a3fec2e
706
py
Python
asterisk/forms.py
ahmednamoha/astroapp
10ff7d2fa92ce430ce39a036c501f64429ddcec7
[ "MIT" ]
null
null
null
asterisk/forms.py
ahmednamoha/astroapp
10ff7d2fa92ce430ce39a036c501f64429ddcec7
[ "MIT" ]
null
null
null
asterisk/forms.py
ahmednamoha/astroapp
10ff7d2fa92ce430ce39a036c501f64429ddcec7
[ "MIT" ]
null
null
null
from django.db import models from django import forms from django.forms import ModelForm, TextInput, FileField, NumberInput from .models import Extentions, Queue class ExtentionsForm(ModelForm): class Meta: model = Extentions fields = ['exten', 'file'] widgets = {'exten': NumberInput( attrs={'class': 'form-control', 'placeholder': 'Short code'})} class QueueForm(ModelForm): class Meta: model = Queue fields = ['name', 'optin', 'exten'] widgets = {'optin': NumberInput( attrs={'class': 'form-control', 'placeholder': '1'}), 'name': TextInput( attrs={'class': 'form-control', 'placeholder': 'queue name'})}
29.416667
84
0.620397
71
706
6.169014
0.422535
0.068493
0.09589
0.143836
0.269406
0.196347
0
0
0
0
0
0.001848
0.233711
706
23
85
30.695652
0.807763
0
0
0.117647
0
0
0.201133
0
0
0
0
0
0
1
0
false
0
0.235294
0
0.470588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c93dd8819a23b3597d92e0d07d3b2369c52da0
1,983
py
Python
admin-portal/therapy/models.py
oakbani/ksdp-portal
8f44b3cb0081a7f31b9c8121883dd51945a05520
[ "MIT" ]
null
null
null
admin-portal/therapy/models.py
oakbani/ksdp-portal
8f44b3cb0081a7f31b9c8121883dd51945a05520
[ "MIT" ]
null
null
null
admin-portal/therapy/models.py
oakbani/ksdp-portal
8f44b3cb0081a7f31b9c8121883dd51945a05520
[ "MIT" ]
1
2021-09-19T10:58:17.000Z
2021-09-19T10:58:17.000Z
from django.db import models from clients.models import Client # Create your models here. class TherapyCenter(models.Model): title = models.CharField(max_length=30) location = models.CharField(max_length=30) phone_no = models.CharField(max_length=15) def __str__(self): return self.title class Therapist(models.Model): name = models.CharField(max_length=30) contact = models.CharField(max_length=15) OT = models.IntegerField(choices=((1, "Yes"), (2, "No"))) PT = models.IntegerField(choices=((1, "Yes"), (2, "No"))) ST = models.IntegerField(choices=((1, "Yes"), (2, "No"))) def __str__(self): return self.name days = ( (1, "Monday"), (2, "Tuesday"), (3, "Wednesday"), (4, "Thursday"), (5, "Friday"), (6, "Saturday"), (7, "Sunday"), ) class TherapistSchedule(models.Model): therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE) day = models.IntegerField(choices=days) start_time = models.TimeField() end_time = models.TimeField() therapy_center = models.ForeignKey(TherapyCenter, on_delete=models.CASCADE) def __str__(self): return f"{self.therapist}: {days[self.day-1][1]} ({self.start_time}-{self.end_time}) at {self.therapy_center}" class TherapySlot(models.Model): title = models.CharField(null=True, blank=True, max_length=30) date = models.DateField() start_time = models.TimeField() end_time = models.TimeField() therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE) therapy_type = models.IntegerField( choices=((1, "OT"), (2, "PT"), (3, "ST")), null=True, blank=True ) client = models.ForeignKey(Client, on_delete=models.CASCADE, null=True, blank=True) status = models.IntegerField(choices=((1, "Available"), (2, "Booked")), default=1) def __str__(self): return f"Therapist: {self.therapist}, Client: {self.client}, {self.date} ({self.start_time}-{self.end_time})"
32.508197
118
0.665154
249
1,983
5.144578
0.301205
0.070258
0.117096
0.093677
0.466042
0.270101
0.232631
0.157689
0
0
0
0.02011
0.172466
1,983
60
119
33.05
0.760512
0.012103
0
0.217391
0
0.043478
0.145631
0.05723
0
0
0
0
0
1
0.086957
false
0
0.043478
0.086957
0.76087
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51c9d30479808975f17a8356968ba3ffdf2e3a45
278
py
Python
src/onegov/form/filters.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/form/filters.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/form/filters.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
from onegov.core.utils import yubikey_public_id def as_float(value): return value and float(value) or 0.0 def strip_whitespace(value): return value and value.strip(' \r\n') or None def yubikey_identifier(value): return value and yubikey_public_id(value) or ''
19.857143
51
0.741007
45
278
4.422222
0.488889
0.165829
0.241206
0.286432
0
0
0
0
0
0
0
0.008658
0.169065
278
13
52
21.384615
0.852814
0
0
0
0
0
0.017986
0
0
0
0
0
0
1
0.428571
false
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
51caa180627fad5f8de627d1958808284fb5c7a9
435
py
Python
oa_server/time_series/urls.py
Open-Acidification/Open_Acidification_Server
3ae668dc033e1a1478a6fc59b8b4dcec4de87564
[ "MIT" ]
null
null
null
oa_server/time_series/urls.py
Open-Acidification/Open_Acidification_Server
3ae668dc033e1a1478a6fc59b8b4dcec4de87564
[ "MIT" ]
37
2020-04-23T16:55:16.000Z
2020-11-23T23:25:48.000Z
oa_server/time_series/urls.py
Open-Acidification/TankControllerManager
3ae668dc033e1a1478a6fc59b8b4dcec4de87564
[ "MIT" ]
4
2021-09-30T21:43:52.000Z
2022-02-23T03:26:25.000Z
from django.urls import path from . import views urlpatterns = [ path('', views.time_series_save), path('<int:ts_id>/', views.time_series_detail), path('<str:ts_type>/', views.time_series_list), path('<str:ts_type>/generate/hold/', views.time_series_generate_hold), path('<str:ts_type>/generate/ramp/', views.time_series_generate_ramp), path('<str:ts_type>/generate/sine/', views.time_series_generate_sine), ]
36.25
74
0.71954
63
435
4.650794
0.333333
0.1843
0.307167
0.177474
0.215017
0
0
0
0
0
0
0
0.112644
435
11
75
39.545455
0.759067
0
0
0
0
0
0.252874
0.193103
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
51cc3d274750348efa67895ac9e88ba7ad90e3f1
912
py
Python
dae_utils.py
FlorentPajot/sample-projects
dea586307c301a5aedf6ac94f7a6b3b15bde9552
[ "MIT" ]
null
null
null
dae_utils.py
FlorentPajot/sample-projects
dea586307c301a5aedf6ac94f7a6b3b15bde9552
[ "MIT" ]
null
null
null
dae_utils.py
FlorentPajot/sample-projects
dea586307c301a5aedf6ac94f7a6b3b15bde9552
[ "MIT" ]
null
null
null
import numpy as np from sklearn.base import BaseEstimator, TransformerMixin class SwapNoise(BaseEstimator, TransformerMixin): def __init__(self, ratio=.15, random_seed=123): self.seed = random_seed self.ratio = ratio def fit(self): return self def transform(self, input_data): x = np.zeros(np.shape(input_data)) np.random.seed(self.seed) for c in range(np.shape(input_data)[1]): c_ = np.array(input_data)[:, c] x[:, c] = self.partial_transform(c_) return x def partial_transform(self, x): x_ = np.copy(x) swap_idx = np.where(np.random.rand(len(x)) < self.ratio)[0] np.put(x_, swap_idx, np.random.choice(x, len(swap_idx))) return x_ def fit_transform(self, input_data): self.fit() return self.transform(input_data)
30.4
67
0.591009
122
912
4.229508
0.352459
0.104651
0.054264
0.085271
0
0
0
0
0
0
0
0.01087
0.29386
912
30
68
30.4
0.790373
0
0
0
0
0
0
0
0
0
0
0
0
1
0.217391
false
0
0.086957
0.043478
0.521739
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
51cd281f3447f7a93f24738d9ec41e8cdfb758c9
18,705
py
Python
tests/test_lime_structure.py
N2-Survey/SurveyFramework
8295fd9808c95db85befb1f60391460908a7d2b0
[ "MIT" ]
1
2022-01-26T19:04:38.000Z
2022-01-26T19:04:38.000Z
tests/test_lime_structure.py
N2-Survey/SurveyFramework
8295fd9808c95db85befb1f60391460908a7d2b0
[ "MIT" ]
77
2021-09-14T09:33:03.000Z
2022-03-31T18:02:47.000Z
tests/test_lime_structure.py
N2-Survey/SurveyFramework
8295fd9808c95db85befb1f60391460908a7d2b0
[ "MIT" ]
3
2021-09-14T20:11:28.000Z
2021-12-26T11:56:25.000Z
"""Test functions related to parsing of LimeSurvey files""" import unittest from bs4 import BeautifulSoup from n2survey.lime.structure import ( # TODO: test _get_clean_string,; TODO: test _get_question_group_name, _parse_question, _parse_question_description, _parse_question_responses, _parse_question_subquestions, _parse_question_title, _parse_section, read_lime_questionnaire_structure, ) class TestXMLSectionParsing(unittest.TestCase): """Test parsing <section> tags in an XML structure file""" def test_simple_section(self): """Test simple section parsing""" section = BeautifulSoup( """ <section id="16"> <sectionInfo> <position>title</position> <text>Group 1</text> <administration>self</administration> </sectionInfo> <sectionInfo> <position>before</position> <text>This is Question Group 1</text> <administration>self</administration> </sectionInfo> <question></question> </section>""", "xml", ) self.assertDictEqual( _parse_section(section.section), {"id": 16, "title": "Group 1", "info": "This is Question Group 1"}, ) def test_multiply_info_sections(self): """Test simple section parsing""" section = BeautifulSoup( """ <section id="16"> <sectionInfo> <position>title</position> <text>Group 1</text> <administration>self</administration> </sectionInfo> <sectionInfo> <position>before</position> <text>This is Question Group 1</text> <administration>self</administration> </sectionInfo> <sectionInfo> <position>after</position> <text>This is Question Group 1</text> <administration>self</administration> </sectionInfo> <question></question> </section>""", "xml", ) self.assertDictEqual( _parse_section(section.section), { "id": 16, "title": "Group 1", "info": "This is Question Group 1 This is Question Group 1", }, ) def test_long_description(self): """Test a section with a long description""" section = BeautifulSoup( """ <section id="13913"> <sectionInfo> <position>title</position> <text>Supervision</text> <administration>self</administration> </sectionInfo> <sectionInfo> 
<position>before</position> <text>&lt;p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;"&gt;&lt;b id="docs-internal-guid-90bd833e-7fff-2c78-398d-1ee9bdc67ae4"&gt;For the following questions, we would like to make the distinction between “formal” and “direct” supervisor clear: &lt;/b&gt;&lt;/p&gt; “Formal” supervisor refers to the main advisor of your thesis as present in your committee.&lt;/b&gt;&lt;/p&gt; “Direct” supervisor refers to the person you actually consult and discuss your project with on a more regular basis.&lt;/b&gt;&lt;/p&gt; &lt;p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;"&gt;Section 4/8&lt;/p&gt;</text> <administration>self</administration> </sectionInfo> <question></question> </section>""", "xml", ) self.assertDictEqual( _parse_section(section.section), { "id": 13913, "title": "Supervision", "info": ( "For the following questions, " "we would like to make the distinction between “formal” and “direct” " "supervisor clear: “Formal” supervisor refers to the main advisor of " "your thesis as present in your committee. “Direct” supervisor refers " "to the person you actually consult and discuss your project with on a " "more regular basis. 
Section 4/8" ), }, ) class TestXMLQuestionParsing(unittest.TestCase): """Test parsing <question> tags in an XML structure file""" maxDiff = None def test_question_title_parsing(self): question = BeautifulSoup( """<question> <text>&lt;p&gt;Do you have one of the following (multiple answers possible)?&lt;/p&gt; &lt;p style="border:medium none;border-bottom:0cm none #000000;padding-bottom:0cm;margin:0cm 0cm .0001pt;padding:0cm;"&gt; &lt;/p&gt;</text> </question>""", "xml", ) question = question.question self.assertEqual( _parse_question_title(question), "Do you have one of the following (multiple answers possible)?", ) def test_question_description_parsing(self): question = BeautifulSoup( """ <question> <text>Is your formal supervisor your direct supervisor?</text> <directive> <position>during</position> <text>&lt;p&gt;“Formal” supervisor refers to the main advisor of your thesis as present in your committee. “Direct” supervisor refers to the person you actually consult and discuss your project with on a more regular basis.&lt;/p&gt;</text> <administration>self</administration> </directive> <response varName="E4"></response> </question>""", "xml", ) question = question.question self.assertEqual( _parse_question_description(question), ( "“Formal” supervisor refers to the main advisor of your thesis as present " "in your committee. “Direct” supervisor refers to the person you actually " "consult and discuss your project with on a more regular basis." 
), ) def test_choice_question_without_contingent(self): question = BeautifulSoup( """ <question> <text>This is Group 1 Question 1 of type "5 point choice".</text> <directive> <position>during</position> <text>Help text for G1Q1  </text> <administration>self</administration> </directive> <response varName="G1Q1"> <fixed> <category> <label>1</label> <value>1</value> </category> <category> <label>2</label> <value>2</value> </category> <category> <label>3</label> <value>3</value> </category> <category> <label>4</label> <value>4</value> </category> <category> <label>5</label> <value>5</value> </category> </fixed> </response> </question>""", "xml", ) question = question.question choices = {"1": "1", "2": "2", "3": "3", "4": "4", "5": "5"} self.assertEqual(_parse_question_subquestions(question), []) self.assertEqual( _parse_question_responses(question), [ ( { "name": "G1Q1", "format": None, "length": None, "label": None, "choices": choices, }, None, ) ], ) self.assertEqual( _parse_question(question), [ { "name": "G1Q1", "label": 'This is Group 1 Question 1 of type "5 point choice".', "format": None, "choices": choices, "question_group": "G1Q1", "question_label": 'This is Group 1 Question 1 of type "5 point choice".', "question_description": "Help text for G1Q1", "type": "single-choice", } ], ) def test_choice_question_with_contingent(self): question = BeautifulSoup( """ <question> <text>My overall work is predominantly</text> <response varName="A3"> <fixed> <category> <label>Option 1</label> <value>B1</value> </category> <category> <label>Option 2</label> <value>B2</value> </category> <category> <label>Other</label> <value>-oth-</value> <contingentQuestion varName="A3other"> <text>Other</text> <length>24</length> <format>longtext</format> </contingentQuestion> </category> </fixed> </response> </question>""", "xml", ) question = question.question choices = {"B1": "Option 1", "B2": "Option 2", "-oth-": "Other"} self.assertEqual(_parse_question_subquestions(question), []) 
self.assertEqual( _parse_question_responses(question), [ ( { "name": "A3", "format": None, "length": None, "label": None, "choices": choices, }, { "name": "A3other", "format": "longtext", "length": "24", "text": "Other", "contingent_of_name": "A3", "contingent_of_choice": "-oth-", }, ) ], ) self.assertEqual( _parse_question(question), [ { "name": "A3", "label": "My overall work is predominantly", "format": None, "choices": choices, "question_group": "A3", "question_label": "My overall work is predominantly", "question_description": "", "type": "single-choice", }, { "name": "A3other", "label": "My overall work is predominantly / Other", "format": "longtext", "contingent_of_name": "A3", "contingent_of_choice": "-oth-", "question_group": "A3", "question_label": "My overall work is predominantly", "question_description": "", "type": "single-choice", }, ], ) def test_question_without_choices(self): question = BeautifulSoup( """ <question> <text>Some cool question</text> <response varName="Q1"> <free> <format>text</format> <length>10</length> <label>What is good about it?</label> </free> </response> </question> """, "xml", ) question = question.question self.assertEqual(_parse_question_subquestions(question), []) self.assertEqual( _parse_question_responses(question), [ ( { "name": "Q1", "format": "text", "length": "10", "label": "What is good about it?", "choices": None, }, None, ) ], ) self.assertEqual( _parse_question(question), [ { "name": "Q1", "label": "What is good about it?", "format": "text", "choices": None, "question_group": "Q1", "question_label": "Some cool question", "question_description": "", "type": "free", }, ], ) def test_multi_response_question(self): question = BeautifulSoup( """ <question> <text>Some cool question</text> <response varName="Q1_R1"> <free> <format>text</format> <length>10</length> <label>What is good about it?</label> </free> </response> <response varName="Q1_R2"> <free> <format>text</format> <length>10</length> <label>What is bad 
about it?</label> </free> </response> </question> """, "xml", ) question = question.question self.assertEqual(_parse_question_subquestions(question), []) self.assertEqual( _parse_question(question), [ { "name": "Q1_R1", "label": "What is good about it?", "format": "text", "choices": None, "question_group": "Q1", "question_label": "Some cool question", "question_description": "", "type": "free", }, { "name": "Q1_R2", "label": "What is bad about it?", "format": "text", "choices": None, "question_group": "Q1", "question_label": "Some cool question", "question_description": "", "type": "free", }, ], ) def test_question_with_subquestions(self): question = BeautifulSoup( """ <question> <text>This is Group 2 Question 8 of type "array by column".</text> <directive> <position>during</position> <text>Help text for G2Q8</text> <administration>self</administration> </directive> <subQuestion varName="G2Q8_SQ001"> <text>How do you rate this?</text> </subQuestion> <subQuestion varName="G2Q8_SQ002"> <text>How do you rate that?</text> </subQuestion> <response varName="G2Q8"> <fixed rotate="true"> <category> <label>Option 1</label> <value>A1</value> </category> <category> <label>Option 2</label> <value>A2</value> </category> <category> <label>Option 3</label> <value>A3</value> </category> </fixed> </response> </question> """, "xml", ) question = question.question choices = {"A1": "Option 1", "A2": "Option 2", "A3": "Option 3"} self.assertEqual( _parse_question_subquestions(question), [ ("G2Q8_SQ001", "How do you rate this?"), ("G2Q8_SQ002", "How do you rate that?"), ], ) self.assertEqual( _parse_question(question), [ { "name": "G2Q8_SQ001", "label": "How do you rate this?", "format": None, "choices": choices, "question_group": "G2Q8", "question_label": 'This is Group 2 Question 8 of type "array by column".', "question_description": "Help text for G2Q8", "type": "array", }, { "name": "G2Q8_SQ002", "label": "How do you rate that?", "format": None, "choices": choices, "question_group": 
"G2Q8", "question_label": 'This is Group 2 Question 8 of type "array by column".', "question_description": "Help text for G2Q8", "type": "array", }, ], ) class TestXMLQuestionnarieParsing(unittest.TestCase): def test_test_survery_structure_file(self): structure = read_lime_questionnaire_structure( # "tests/data/test_survey_structure.xml" "data/survey_structure.xml" ) self.assertEqual(len(structure["sections"]), 10) self.assertEqual(len(structure["questions"]), 453) structure = read_lime_questionnaire_structure( # "tests/data/test_survey_structure.xml" "data/survey_structure_2021.xml" ) self.assertEqual(len(structure["sections"]), 13) self.assertEqual(len(structure["questions"]), 553) if __name__ == "__main__": unittest.main()
35.696565
353
0.444534
1,492
18,705
5.459786
0.136059
0.049104
0.036828
0.051559
0.777928
0.729438
0.645961
0.613307
0.573533
0.543457
0
0.024737
0.446725
18,705
523
354
35.764818
0.762393
0.021492
0
0.476351
0
0
0.240758
0.005213
0
0
0
0.001912
0.074324
1
0.037162
false
0
0.010135
0
0.060811
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
51ceefe54c4a73ce82c7712450c6f6534c6876d4
3,501
py
Python
service/handlers/my_handler.py
ran-isenberg/aws-lambda-handler-cookbook
adfe58dacd87315151265818869bb842c7eb4971
[ "MIT" ]
61
2022-02-07T05:21:14.000Z
2022-03-27T14:11:30.000Z
service/handlers/my_handler.py
ran-isenberg/aws-lambda-handler-cookbook
adfe58dacd87315151265818869bb842c7eb4971
[ "MIT" ]
17
2022-02-26T05:25:31.000Z
2022-03-16T20:02:46.000Z
service/handlers/my_handler.py
ran-isenberg/aws-lambda-handler-cookbook
adfe58dacd87315151265818869bb842c7eb4971
[ "MIT" ]
4
2022-02-17T16:35:27.000Z
2022-03-07T03:13:07.000Z
from http import HTTPStatus from typing import Any, Dict from aws_lambda_powertools.metrics.metrics import MetricUnit from aws_lambda_powertools.utilities.feature_flags.exceptions import ConfigurationStoreError, SchemaValidationError from aws_lambda_powertools.utilities.parser import ValidationError, parse from aws_lambda_powertools.utilities.parser.envelopes import ApiGatewayEnvelope from aws_lambda_powertools.utilities.typing import LambdaContext from service.handlers.schemas.dynamic_configuration import FeatureFlagsNames, MyConfiguration from service.handlers.schemas.env_vars import MyHandlerEnvVars from service.handlers.schemas.input import Input from service.handlers.schemas.output import Output from service.handlers.utils.dynamic_configuration import get_dynamic_configuration_store, parse_configuration from service.handlers.utils.env_vars_parser import get_environment_variables, init_environment_variables from service.handlers.utils.http_responses import build_response from service.handlers.utils.observability import logger, metrics, tracer @tracer.capture_method(capture_response=False) def inner_function_example(my_name: str, order_item_count: int) -> Output: # process input, etc. 
return output config_store = get_dynamic_configuration_store() campaign: bool = config_store.evaluate( name=FeatureFlagsNames.TEN_PERCENT_CAMPAIGN.value, context={}, default=False, ) logger.debug('campaign feature flag value', extra={'campaign': campaign}) premium: bool = config_store.evaluate( name=FeatureFlagsNames.PREMIUM.value, context={'customer_name': my_name}, default=False, ) logger.debug('premium feature flag value', extra={'premium': premium}) return Output(success=True, order_item_count=order_item_count) @init_environment_variables(model=MyHandlerEnvVars) @metrics.log_metrics @tracer.capture_lambda_handler(capture_response=False) def my_handler(event: Dict[str, Any], context: LambdaContext) -> Dict[str, Any]: logger.set_correlation_id(context.aws_request_id) logger.info('my_handler is called, calling inner_function_example') env_vars: MyHandlerEnvVars = get_environment_variables(model=MyHandlerEnvVars) logger.debug('environment variables', extra=env_vars.dict()) try: my_configuration: MyConfiguration = parse_configuration(model=MyConfiguration) logger.debug('fetched dynamic configuration', extra={'configuration': my_configuration.dict()}) except (SchemaValidationError, ConfigurationStoreError) as exc: logger.exception(f'dynamic configuration error, error={str(exc)}') return build_response(http_status=HTTPStatus.INTERNAL_SERVER_ERROR, body={}) try: # we want to extract and parse the HTTP body from the api gw envelope input: Input = parse(event=event, model=Input, envelope=ApiGatewayEnvelope) logger.info('got create request', extra={'order_item_count': input.order_item_count}) except (ValidationError, TypeError) as exc: logger.error('event failed input validation', extra={'error': str(exc)}) return build_response(http_status=HTTPStatus.BAD_REQUEST, body={}) response: Output = inner_function_example(input.my_name, input.order_item_count) logger.info('inner_function_example finished successfully') metrics.add_metric(name='ValidEvents', unit=MetricUnit.Count, 
value=1) return build_response(http_status=HTTPStatus.OK, body=response.dict())
51.485294
115
0.790346
420
3,501
6.37619
0.304762
0.03286
0.056759
0.042942
0.137043
0.113144
0.037341
0.037341
0.037341
0
0
0.000326
0.123679
3,501
67
116
52.253731
0.872555
0.028849
0
0.072727
0
0
0.107153
0.012953
0
0
0
0
0
1
0.036364
false
0
0.272727
0
0.381818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51cf936521268672d39eaa8e60b4fea15504c4c2
327
py
Python
faktura/breadcrumbs.py
Tethik/faktura
a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969
[ "MIT" ]
null
null
null
faktura/breadcrumbs.py
Tethik/faktura
a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969
[ "MIT" ]
1
2016-02-16T10:06:34.000Z
2016-02-16T10:06:34.000Z
faktura/breadcrumbs.py
Tethik/faktura
a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969
[ "MIT" ]
null
null
null
class Breadcrumb: def __init__(self, url, text): self.url = url self.text = text url_dict = { 'Main Menu': '/', 'Invoices': '/invoices', 'Customers': '/customers', 'Settings': '/settings' } def breadcrumbs(*shortwords): return [Breadcrumb(url_dict[word], word) for word in shortwords]
21.8
68
0.611621
36
327
5.388889
0.527778
0.072165
0
0
0
0
0
0
0
0
0
0
0.232416
327
14
69
23.357143
0.772908
0
0
0
0
0
0.192661
0
0
0
0
0
0
1
0.166667
false
0
0
0.083333
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d00e635b44bc4b309c05a3baf9619e16652004
441
py
Python
Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_2.py
nhutnamhcmus/datacamp-playground
25457e813b1145e1d335562286715eeddd1c1a7b
[ "MIT" ]
1
2021-05-08T11:09:27.000Z
2021-05-08T11:09:27.000Z
Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_2.py
nhutnamhcmus/datacamp-playground
25457e813b1145e1d335562286715eeddd1c1a7b
[ "MIT" ]
1
2022-03-12T15:42:14.000Z
2022-03-12T15:42:14.000Z
Introduction-to-data-visualization-with-matplotlib/3. Quantitative comparisons and statistical visualizations/script_2.py
nhutnamhcmus/datacamp-playground
25457e813b1145e1d335562286715eeddd1c1a7b
[ "MIT" ]
1
2021-04-30T18:24:19.000Z
2021-04-30T18:24:19.000Z
# Add bars for "Gold" with the label "Gold" ax.bar(medals.index, medals["Gold"], label="Gold") # Stack bars for "Silver" on top with label "Silver" ax.bar(medals.index, medals["Silver"], bottom=medals["Gold"], label="Silver") # Stack bars for "Bronze" on top of that with label "Bronze" ax.bar(medals.index, medals["Bronze"], bottom=medals["Silver"]+medals["Gold"], label="Bronze") # Display the legend ax.legend() plt.show()
33.923077
95
0.680272
67
441
4.477612
0.343284
0.07
0.11
0.16
0.22
0
0
0
0
0
0
0
0.142857
441
13
96
33.923077
0.793651
0.385488
0
0
0
0
0.180392
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
51d20805035ef6654add7f645a56e481da2c4877
96
py
Python
venv/lib/python3.8/site-packages/clikit/io/output_stream/__init__.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/clikit/io/output_stream/__init__.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/clikit/io/output_stream/__init__.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/64/da/97/267e8a2c0079f193f0db8c07cf48ce560bdfa25b876ba5b0c0a062bc16
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.395833
0
96
1
96
96
0.5
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
51d27d6990f67193e13df04e2f51e6f106d038fe
2,192
py
Python
checkmate/contrib/plugins/php/progpilot/issues_data.py
marcinguy/checkmate-ce
fc33c7c27bc640ab4db5dbda274a0edd3b3db218
[ "MIT" ]
null
null
null
checkmate/contrib/plugins/php/progpilot/issues_data.py
marcinguy/checkmate-ce
fc33c7c27bc640ab4db5dbda274a0edd3b3db218
[ "MIT" ]
null
null
null
checkmate/contrib/plugins/php/progpilot/issues_data.py
marcinguy/checkmate-ce
fc33c7c27bc640ab4db5dbda274a0edd3b3db218
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals issues_data = { "xss": { "severity": "1", "description": "Cross-Site Scripting detected", "categories": ["security"], "title": "XSS" }, "session_fixation": { "severity": "2", "description": "Session fixation detected", "categories": ["security"], "title": "Session fixation" }, "file_inclusion": { "severity": "2", "description": "File inclusion detected", "categories": ["security"], "title": "File Inclusion" }, "file_disclosure": { "severity": "1", "description": "File disclosure detected", "categories": ["security"], "title": "File disclosure" }, "ldap_injection": { "severity": "1", "description": "LDAP Injection detected", "categories": ["security"], "title": "LDAP Injection" }, "xml_injection": { "severity": "1", "description": "XML Injection detected", "categories": ["security"], "title": "XML Injection" }, "sql_injection": { "severity": "1", "description": "SQL Injection detected", "categories": ["security"], "title": "SQL Injection" }, "code_injection": { "severity": "1", "description": "Code Injection detected", "categories": ["security"], "title": "Code Injection" }, "header_injection": { "severity": "1", "description": "Header Injection detected", "categories": ["security"], "title": "Header Injection" }, "idor": { "severity": "1", "description": "Insecure Direct Object Reference detected", "categories": ["security"], "title": "Insecure Direct Object Reference" }, "command_injection": { "severity": "1", "description": "Command Injection detected", "categories": ["security"], "title": "Command Injection" }, "mail command_injection": { "severity": "1", "description": "Mail Command Injection detected", "categories": ["security"], "title": "Mail Command Injection" }, "w32api command_injection": { "severity": "1", "description": "w32api Command Injection detected", "categories": ["security"], "title": "w32api Command Injection" } }
25.788235
63
0.59854
187
2,192
6.925134
0.219251
0.180695
0.261004
0.311197
0.400772
0.10888
0
0
0
0
0
0.011581
0.212135
2,192
84
64
26.095238
0.738274
0.00958
0
0.320988
0
0
0.601199
0
0
0
0
0
0
1
0
false
0
0.012346
0
0.012346
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
51d3468a11499ea6c16dce5d6cf20348e89cbaf6
3,170
py
Python
workon/contrib/tracking/models.py
dalou/django-workon
ef63c0a81c00ef560ed693e435cf3825f5170126
[ "BSD-3-Clause" ]
null
null
null
workon/contrib/tracking/models.py
dalou/django-workon
ef63c0a81c00ef560ed693e435cf3825f5170126
[ "BSD-3-Clause" ]
null
null
null
workon/contrib/tracking/models.py
dalou/django-workon
ef63c0a81c00ef560ed693e435cf3825f5170126
[ "BSD-3-Clause" ]
null
null
null
from __future__ import unicode_literals import uuid try: from django.contrib.contenttypes.fields import GenericForeignKey except ImportError: from django.contrib.contenttypes.generic import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.utils.translation import ugettext_lazy as _, pgettext_lazy from django.db import models # Used for object modifications CREATE = 'CREATE' UPDATE = 'UPDATE' DELETE = 'DELETE' # Used for m2m modifications ADD = 'ADD' REMOVE = 'REMOVE' CLEAR = 'CLEAR' class TrackingEvent(models.Model): ACTIONS = ( (CREATE, _('Create')), (UPDATE, _('Update')), (DELETE, _('Delete')), (ADD, _('Add')), (REMOVE, pgettext_lazy('Remove from something', 'Remove')), (CLEAR, _('Clear')), ) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) date = models.DateTimeField( _("Date"), auto_now_add=True, editable=False ) action = models.CharField( _('Action'), max_length=6, choices=ACTIONS, editable=False ) object_content_type = models.ForeignKey( ContentType, related_name='workon_tracking_object_content_type', editable=False ) object_id = models.PositiveIntegerField(editable=False, null=True) object = GenericForeignKey('object_content_type', 'object_id') object_repr = models.CharField( _("Object representation"), help_text=_( "Object representation, useful if the object is deleted later." ), max_length=250, editable=False ) user_content_type = models.ForeignKey( ContentType, related_name='workon_tracking_user_content_type', editable=False, null=True, ) user_id = models.PositiveIntegerField(editable=False, null=True) user = GenericForeignKey('user_content_type', 'user_id') user_repr = models.CharField( _("User representation"), help_text=_( "User representation, useful if the user is deleted later." 
), max_length=250, editable=False ) class Meta: db_table = "workon_tracking_tracking_event" verbose_name = _('Tracking event') verbose_name_plural = _('Tracking events') ordering = ['-date'] class TrackedFieldModification(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) event = models.ForeignKey( TrackingEvent, verbose_name=_("Event"), related_name='fields', editable=False ) field = models.CharField(_("Field"), max_length=40, editable=False) old_value = models.TextField( _("Old value"), help_text=_("JSON serialized"), null=True, editable=False, ) new_value = models.TextField( _("New value"), help_text=_("JSON serialized"), null=True, editable=False, ) class Meta: db_table = "workon_tracking_tracked_field_modification" verbose_name = _('Tracking field modification') verbose_name_plural = _('Tracking field modifications')
28.053097
79
0.658991
329
3,170
6.091185
0.297872
0.090818
0.025449
0.043413
0.338822
0.324351
0.288423
0.239521
0.170659
0.05988
0
0.004948
0.235016
3,170
113
80
28.053097
0.821443
0.017666
0
0.266667
0
0
0.192159
0.044987
0
0
0
0
0
1
0
false
0
0.088889
0
0.322222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d35292854e309612a05a0a9928f4f1a1103650
12,377
py
Python
mister_ed/utils/checkpoints.py
jonasnm/geometric-certificates
8730abaf2ab0c8972a2d40168d5fe64c8670fc62
[ "MIT" ]
40
2019-01-17T22:17:42.000Z
2022-03-23T06:24:00.000Z
mister_ed/utils/checkpoints.py
Mortal12138/geometric-certificates
8730abaf2ab0c8972a2d40168d5fe64c8670fc62
[ "MIT" ]
6
2019-08-03T08:49:21.000Z
2022-03-11T23:43:56.000Z
mister_ed/utils/checkpoints.py
Mortal12138/geometric-certificates
8730abaf2ab0c8972a2d40168d5fe64c8670fc62
[ "MIT" ]
4
2020-10-22T05:55:30.000Z
2022-03-15T06:26:55.000Z
""" Code for saving/loading pytorch models and batches of adversarial images CHECKPOINT NAMING CONVENTIONS: <unique_experiment_name>.<architecture_abbreviation>.<6 digits of epoch number>.path e.g. fgsm_def.resnet32.20180301.120000.path All checkpoints are stored in CHECKPOINT_DIR Checkpoints are state dicts only!!! """ import torch import math import os import re import glob import config import numpy as np import utils.pytorch_utils as utils import random CHECKPOINT_DIR = config.MODEL_PATH OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH ############################################################################## # # # CHECKPOINTING MODELS # # # ############################################################################## def clear_experiment(experiment_name, architecture): """ Deletes all saved state dicts for an experiment/architecture pair """ for filename in params_to_filename(experiment_name, architecture): full_path = os.path.join(*[CHECKPOINT_DIR, filename]) os.remove(full_path) if os.path.exists(full_path) else None def list_saved_epochs(experiment_name, architecture): """ Returns a list of int epochs we've checkpointed for this experiment name and architecture """ safe_int_cast = lambda s: int(s) if s.isdigit() else s extract_epoch = lambda f: safe_int_cast(f.split('.')[-2]) filename_list = params_to_filename(experiment_name, architecture) return [extract_epoch(f) for f in filename_list] def params_to_filename(experiment_name, architecture, epoch_val=None): """ Outputs string name of file. 
def params_to_filename(experiment_name, architecture, epoch_val=None):
    # NOTE(review): the original `def` line lies before this chunk; the
    # signature is reconstructed from the two call sites below — confirm
    # against the full file.
    """ Builds checkpoint base filename(s) for an (experiment, architecture).
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int/'best'/(intLo, intHi)/None
                    - if int we return this int exactly
                    - if 'best' we return the 'best' checkpoint name
                    - if (intLo, intHi) we return all existing filenames with
                      epoch in range (intLo, intHi), in sorted order
                    - if None, we return all existing filenames with params
                      in ascending epoch-sorted order
    RETURNS:
        string (int/'best' case) or (possibly empty) string[] of just the
        base names of saved models
    """
    if isinstance(epoch_val, int):
        return '.'.join([experiment_name, architecture,
                         '%06d' % epoch_val, 'path'])
    elif epoch_val == 'best':
        return '.'.join([experiment_name, architecture, epoch_val, 'path'])

    glob_prefix = os.path.join(CHECKPOINT_DIR,
                               '%s.%s.*' % (experiment_name, architecture))
    # fixed: pattern was a non-raw string with invalid escapes and the names
    # were interpolated unescaped; re.escape guards against metacharacters
    re_prefix = r'%s\.%s\.' % (re.escape(experiment_name),
                               re.escape(architecture))
    re_suffix = r'\.path'

    valid_name = lambda f: bool(re.match(re_prefix + r'(\d{6}|best)' +
                                         re_suffix, f))
    safe_int_cast = lambda s: int(s) if s.isdigit() else s
    select_epoch = lambda f: safe_int_cast(re.sub(re_prefix, '',
                                                  re.sub(re_suffix, '', f)))
    # epoch_val=None means "accept everything": defaults to the range
    # (0, inf). 'best' checkpoints always pass the filter.
    valid_epoch = lambda e: ((e == 'best') or
                             (e >= (epoch_val or (0, 0))[0] and
                              e <= (epoch_val or (0, float('inf')))[1]))

    filename_epoch_pairs = []
    best_filename = []
    for full_path in glob.glob(glob_prefix):
        filename = os.path.basename(full_path)
        if not valid_name(filename):
            continue
        epoch = select_epoch(filename)
        if valid_epoch(epoch):
            if epoch != 'best':
                filename_epoch_pairs.append((filename, epoch))
            else:
                best_filename.append(filename)

    return (best_filename +
            [_[0] for _ in sorted(filename_epoch_pairs,
                                  key=lambda el: el[1])])


def save_state_dict(experiment_name, architecture, epoch_val, model,
                    k_highest=10):
    """ Saves the state dict of a model with the given parameters.
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int - which epoch we're saving
        model : model - object we're saving the state dict of
        k_highest : int - if not None, we make sure to not include more than
                    k state_dicts for the (experiment_name, architecture)
                    pair, keeping the k-most recent if we overflow
    RETURNS:
        the model we saved
    """
    # First resolve THIS filename
    this_filename = params_to_filename(experiment_name, architecture,
                                       epoch_val)

    # Next clear up disk if too many state dicts ('best' is never pruned)
    current_filenames = [_ for _ in params_to_filename(experiment_name,
                                                       architecture)
                         if not _.endswith('.best.path')]
    delete_els = []
    if k_highest is not None:
        num_to_delete = len(current_filenames) - k_highest + 1
        if num_to_delete > 0:
            # sorted() orders by the zero-padded epoch, so the oldest go first
            delete_els = sorted(current_filenames)[:num_to_delete]

    for delete_el in delete_els:
        full_path = os.path.join(CHECKPOINT_DIR, delete_el)
        # fixed: was a side-effecting conditional expression
        if os.path.exists(full_path):
            os.remove(full_path)

    # Finally save the state dict
    torch.save(model.state_dict(),
               os.path.join(CHECKPOINT_DIR, this_filename))
    return model


def load_state_dict_from_filename(filename, model):
    """ Skips the whole parameter argument thing and just loads the whole
        state dict from a filename.
    ARGS:
        filename : string - filename without directories
        model : nn.Module - has 'load_state_dict' method
    RETURNS:
        the model loaded with the weights contained in the file
    """
    assert len(glob.glob(os.path.join(CHECKPOINT_DIR, filename))) == 1

    # If 'state_dict' is a key in the loaded dict, use that as the loader
    right_dict = lambda d: d.get('state_dict', d)
    model.load_state_dict(right_dict(torch.load(
        os.path.join(CHECKPOINT_DIR, filename))))
    return model


def load_state_dict(experiment_name, architecture, epoch, model):
    """ Loads a checkpoint that was previously saved.
    ARGS:
        experiment_name : string - name of experiment we're loading
        architecture : string - abbreviation for model architecture
        epoch : int - which epoch we're loading
    RETURNS:
        the model loaded with the checkpoint's weights
    """
    filename = params_to_filename(experiment_name, architecture, epoch)
    return load_state_dict_from_filename(filename, model)


###############################################################################
#                                                                             #
#                             CHECKPOINTING DATA                              #
#                                                                             #
###############################################################################
"""
This is a hacky fix to save batches of adversarial images along with their
labels.
"""


class CustomDataSaver(object):
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory):
        self.image_subdirectory = image_subdirectory
        # TODO(original): make this folder if it doesn't exist yet

    def save_minibatch(self, examples, labels):
        """ Assigns a random name to this minibatch and saves the examples
            and labels in two separate files:
            <random_name>.examples.npy and <random_name>.labels.npy
        ARGS:
            examples : Variable or Tensor (NxCxHxW) - examples to be saved
            labels : Variable or Tensor (N) - labels matching the examples
        """
        # First make both examples and labels into numpy arrays
        examples = examples.cpu().numpy()
        labels = labels.cpu().numpy()

        # Make a name for the files
        random_string = str(random.random())[2:]  # TODO: use uuid instead

        # Save both files
        example_file = '%s.examples.npy' % random_string
        example_path = os.path.join(OUTPUT_IMAGE_DIR,
                                    self.image_subdirectory, example_file)
        np.save(example_path, examples)

        label_file = '%s.labels.npy' % random_string
        label_path = os.path.join(OUTPUT_IMAGE_DIR,
                                  self.image_subdirectory, label_file)
        np.save(label_path, labels)


class CustomDataLoader(object):
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
                 use_gpu=False):
        super(CustomDataLoader, self).__init__()
        self.image_subdirectory = image_subdirectory
        self.batch_size = batch_size

        # use_gpu implies to_tensor (bool comparison: True >= use_gpu)
        assert to_tensor >= use_gpu
        self.to_tensor = to_tensor
        self.use_gpu = use_gpu

    def _prepare_data(self, examples, labels):
        """ Takes in numpy examples and labels and tensor-ifies and cuda's
            them if necessary """
        if self.to_tensor:
            examples = torch.Tensor(examples)
            labels = torch.Tensor(labels)
        return utils.cudafy(self.use_gpu, (examples, labels))

    def _base_loader(self, prefix, which):
        # which selects between the paired .examples/.labels files
        assert which in ['examples', 'labels']
        filename = '%s.%s.npy' % (prefix, which)
        full_path = os.path.join(OUTPUT_IMAGE_DIR,
                                 self.image_subdirectory, filename)
        return np.load(full_path)

    def _example_loader(self, prefix):
        """ Loads the numpy array of examples given the random 'prefix' """
        return self._base_loader(prefix, 'examples')

    def _label_loader(self, prefix):
        """ Loads the numpy array of labels given the random 'prefix' """
        return self._base_loader(prefix, 'labels')

    def __iter__(self):
        # First collect all the filenames:
        glob_prefix = os.path.join(OUTPUT_IMAGE_DIR,
                                   self.image_subdirectory, '*')
        files = glob.glob(glob_prefix)
        valid_random_names = set(os.path.basename(_).split('.')[0]
                                 for _ in files)

        # Now loop through filenames and yield out minibatches of correct
        # size
        running_examples, running_labels = [], []
        running_size = 0
        for random_name in valid_random_names:
            # Load data from files and append to 'running' lists
            loaded_examples = self._example_loader(random_name)
            loaded_labels = self._label_loader(random_name)
            running_examples.append(loaded_examples)
            running_labels.append(loaded_labels)
            running_size += loaded_examples.shape[0]

            if running_size < self.batch_size:
                # Load enough data to populate one minibatch, which might
                # take multiple files
                continue

            # Concatenate all images together
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)

            # Make minibatches out of concatenated things
            for batch_no in range(running_size // self.batch_size):
                index_lo = batch_no * self.batch_size
                index_hi = index_lo + self.batch_size
                example_batch = merged_examples[index_lo:index_hi]
                label_batch = merged_labels[index_lo:index_hi]
                yield self._prepare_data(example_batch, label_batch)

            # Carry any remainder over to the next file(s)
            remainder_idx = ((running_size // self.batch_size) *
                             self.batch_size)
            running_examples = [merged_examples[remainder_idx:]]
            running_labels = [merged_labels[remainder_idx:]]
            running_size = running_size - remainder_idx

        # If we're out of files, yield this last sub-minibatch of data
        if running_size > 0:
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)
            yield self._prepare_data(merged_examples, merged_labels)
38.437888
88
0.594328
1,439
12,377
4.90132
0.203614
0.039699
0.058982
0.022118
0.327095
0.281015
0.2321
0.191975
0.158656
0.139232
0
0.005213
0.302577
12,377
321
89
38.557632
0.811863
0.316474
0
0.106667
0
0
0.019475
0
0
0
0
0.006231
0.02
1
0.093333
false
0
0.06
0
0.24
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d4ea4507d80e27f773f363a22c466284face7c
2,219
py
Python
extra/carbontools.py
carbon-org/carbon
454d087f85f7fb9408eb0bc10ae702b8de844648
[ "MIT" ]
9
2021-03-20T13:09:52.000Z
2022-03-18T07:33:40.000Z
extra/carbontools.py
ThakeeNathees/Carbon
454d087f85f7fb9408eb0bc10ae702b8de844648
[ "MIT" ]
4
2020-08-11T07:57:00.000Z
2020-11-30T21:05:51.000Z
extra/carbontools.py
carbon-org/carbon
454d087f85f7fb9408eb0bc10ae702b8de844648
[ "MIT" ]
null
null
null
import os
import sys
import shutil

CARBON_DIR = os.path.dirname(__file__)

USAGE = '''\
'''
## USAGE:
##   sys.path.append('path/to/carbon/')
##   import carbontools.py as cbtools
##   lib = cbtools.GET_CARBON_LIB(env)


def GET_CARBON_LIB(env):
    """Build and return the carbon static library for the given SCons env."""
    ## TODO: generate "*.gen.h" files
    SOURCES = []
    cbenv = env.Clone()
    cbenv.Append(CPPPATH=[os.path.join(CARBON_DIR, 'include/')])
    ALL_SOURCES = [
        'src/var/*.cpp',
        'src/core/*.cpp',
        'src/native/*.cpp',
        'src/compiler/*.cpp',
        'src/thirdparty/dlfcn-win32/*.c',
    ]
    ## fixed: ALL_SOURCES is a list and was being *called* as
    ## ALL_SOURCES(cbenv), which raised TypeError
    for src in ALL_SOURCES:
        SOURCES.append(cbenv.Glob(os.path.join(CARBON_DIR, src)))
    lib = cbenv.Library(
        target=os.path.join(CARBON_DIR, 'bin/carbon'),
        source=SOURCES)
    return lib


def main():
    """Dispatch the command given on the command line."""
    argcount = len(sys.argv)
    if argcount < 2:
        ## fixed: USAGE_STRING was undefined (NameError); the constant is
        ## named USAGE
        print(USAGE)
        exit()
    ## switch commands
    if sys.argv[1] == 'clean':
        cleanall = False
        for i in range(2, argcount):
            if sys.argv[i] in ('--all', '-a'):
                cleanall = True
            else:
                error_command(sys.argv[i])
        clean(cleanall)
    else:
        error_command(sys.argv[1])


## Internal methods ####

def error_command(cmd):
    """Print an unknown-command error plus usage, then exit non-zero."""
    print('[*]: ERROR: unknown command "' + cmd + '"\n' + USAGE)
    exit(-1)


def error_exit(msg):
    """Print an error message plus usage, then exit non-zero."""
    print('[*]: ERROR: ' + msg + '"\n' + USAGE)
    exit(-1)


def get_platform():
    """Map sys.platform onto carbon's platform name (windows/x11/osx)."""
    if sys.platform == 'win32':
        return 'windows'
    elif sys.platform in ('linux', 'linux2'):
        return 'x11'
    elif sys.platform == 'darwin':
        return 'osx'
    else:
        error_exit("platform(%s) not supported." % sys.platform)


def clean(cleanall=False):
    """Remove build artifacts under the current directory.

    fixed: clean() was invoked as clean(cleanall) but declared with no
    parameter (TypeError). The flag is accepted for compatibility.
    TODO(review): '--all' currently has no extra effect — confirm intent.
    """
    CLEAN_DIRS = [
        'x64/',
        ## fixed: a missing comma silently concatenated 'debug/' 'release/'
        ## into the single (nonexistent) entry 'debug/release/', and
        ## 'debug/' was listed twice
        'debug/',
        'release/',
        'bin/',
        '.vs',
        '.vscode',
    ]
    CLEAN_FILES = [
        '.pdb', '.idb', '.ilk', '.obj',
        '.sln', '.vcxproj', '.vcxproj.filters', '.vcxproj.user',
        '.sconsign.dblite',
    ]
    os.system('scons -c')
    print('\n[*]: cleaning all files ...')
    for _dir in CLEAN_DIRS:
        try:
            shutil.rmtree(_dir)
            print('[*]: Removed - %s' % _dir)
        except OSError:  ## fixed: bare except also swallowed KeyboardInterrupt
            pass
    for path, dirs, files in os.walk('.'):
        for file in files:
            for suffix in CLEAN_FILES:
                if file.endswith(suffix):
                    os.remove(os.path.join(path, file))
                    print('[*]: Removed - %s' % os.path.join(path, file))
    print('[*]: done cleaning targets.')


if __name__ == '__main__':
    main()
19.8125
63
0.611537
304
2,219
4.345395
0.394737
0.027252
0.03785
0.036336
0.133989
0.034822
0
0
0
0
0
0.008287
0.184317
2,219
112
64
19.8125
0.721547
0.080216
0
0.046512
0
0
0.222662
0.014844
0
0
0
0.008929
0
1
0.069767
false
0.011628
0.023256
0
0.104651
0.081395
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d59299ccdf71aaf83a73a14cec2a4bba7c6231
1,394
py
Python
tests/profiler/test_smtfprofiler_events.py
aaronmarkham/sagemaker-debugger
d271fbefb0cbe9686100850249c96a40fdc45b06
[ "Apache-2.0" ]
null
null
null
tests/profiler/test_smtfprofiler_events.py
aaronmarkham/sagemaker-debugger
d271fbefb0cbe9686100850249c96a40fdc45b06
[ "Apache-2.0" ]
null
null
null
tests/profiler/test_smtfprofiler_events.py
aaronmarkham/sagemaker-debugger
d271fbefb0cbe9686100850249c96a40fdc45b06
[ "Apache-2.0" ]
null
null
null
# First Party
from smdebug.profiler import SMTFProfilerEvents


def test_smtfprofiler_events(trace_file="./tests/profiler/smtf_profiler_trace.json"):
    """Exercise SMTFProfilerEvents queries against a recorded trace file."""
    trace_json_file = trace_file
    print(f"Reading the trace file {trace_json_file}")
    t_events = SMTFProfilerEvents(trace_json_file)

    all_trace_events = t_events.get_all_events()
    num_trace_events = len(all_trace_events)
    print(f"Number of events read = {num_trace_events}")
    assert num_trace_events == 49

    event_list = t_events.get_events_at(1589314018458800000)  # nanoseconds
    print(f"Number of events at 15013686 are {len(event_list)}")
    assert len(event_list) == 1

    completed_event_list = t_events.get_events_within_range(0, 1589314018470000000)  # nanoseconds
    print(f"Number of events occurred between 0 and 15013686 are {len(completed_event_list)}")
    assert len(completed_event_list) == 34

    start_time_sorted = t_events.get_events_start_time_sorted()
    start_time_for_first_event = start_time_sorted[0].start_time
    print(f"The first event started at {start_time_for_first_event}")
    assert start_time_for_first_event == 1589314018458743000

    end_time_sorted = t_events.get_events_end_time_sorted()
    end_time_for_last_event = end_time_sorted[-1].end_time
    # fixed: copy-paste bug — message wrongly said "The first event started at"
    # while printing the last event's end time
    print(f"The last event ended at {end_time_for_last_event}")
    assert end_time_for_last_event == 1589314018481947000
42.242424
98
0.781205
206
1,394
4.868932
0.271845
0.062812
0.04985
0.063809
0.36989
0.227318
0.063809
0.063809
0
0
0
0.084448
0.142037
1,394
32
99
43.5625
0.754181
0.025108
0
0
0
0
0.265683
0.089299
0
0
0
0
0.217391
1
0.043478
false
0
0.043478
0
0.086957
0.26087
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d63a3e6da1e5e89a19fcaa83ea91fa806990e0
2,551
py
Python
package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py
neuromusic/PartSeg
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
[ "BSD-3-Clause" ]
15
2020-03-21T03:27:56.000Z
2022-03-21T07:46:39.000Z
package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py
neuromusic/PartSeg
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
[ "BSD-3-Clause" ]
479
2019-10-27T22:57:22.000Z
2022-03-30T12:48:14.000Z
package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py
neuromusic/PartSeg
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
[ "BSD-3-Clause" ]
5
2020-02-05T14:25:02.000Z
2021-12-21T03:44:52.000Z
from typing import Type

import numpy as np
import pytest

from PartSegCore.segmentation import ROIExtractionAlgorithm
from PartSegCore.segmentation.algorithm_base import ROIExtractionResult, SegmentationLimitException
from PartSegCore.segmentation.restartable_segmentation_algorithms import final_algorithm_list as restartable_list
from PartSegCore.segmentation.segmentation_algorithm import (
    CellFromNucleusFlow,
    ThresholdFlowAlgorithm,
    close_small_holes,
)
from PartSegCore.segmentation.segmentation_algorithm import final_algorithm_list as algorithm_list


def empty(*args):
    """No-op progress callback handed to calculation_run."""
    pass


@pytest.fixture(autouse=True)
def fix_threshold_flow(monkeypatch):
    """Patch both flow algorithms so their default thresholds are small."""
    flow_defaults = ThresholdFlowAlgorithm.get_default_values()
    flow_defaults["threshold"]["values"]["core_threshold"]["values"]["threshold"] = 10
    flow_defaults["threshold"]["values"]["base_threshold"]["values"]["threshold"] = 5

    def _patched_flow(self):
        return flow_defaults

    monkeypatch.setattr(ThresholdFlowAlgorithm, "get_default_values", _patched_flow)

    nucleus_defaults = CellFromNucleusFlow.get_default_values()
    nucleus_defaults["nucleus_threshold"]["values"]["threshold"] = 10
    nucleus_defaults["cell_threshold"]["values"]["threshold"] = 5

    def _patched_nucleus(self):
        return nucleus_defaults

    monkeypatch.setattr(CellFromNucleusFlow, "get_default_values", _patched_nucleus)


@pytest.mark.parametrize("algorithm", restartable_list + algorithm_list)
@pytest.mark.parametrize("masking", [True, False])
def test_segmentation_algorithm(image, algorithm: Type[ROIExtractionAlgorithm], masking):
    """Run every registered algorithm both with and without a mask."""
    assert algorithm.support_z() is True
    assert algorithm.support_time() is False
    assert isinstance(algorithm.get_steps_num(), int)

    instance = algorithm()
    instance.set_image(image)
    if masking:
        instance.set_mask(image.get_channel(0) > 0)
    instance.set_parameters(**instance.get_default_values())

    # Algorithms that declare "Need mask" must refuse to run unmasked.
    if not masking and "Need mask" in algorithm.get_fields():
        with pytest.raises(SegmentationLimitException):
            instance.calculation_run(empty)
    else:
        res = instance.calculation_run(empty)
        assert isinstance(instance.get_info_text(), str)
        assert isinstance(res, ROIExtractionResult)
    instance.clean()


@pytest.mark.parametrize("ndim", (2, 3))
@pytest.mark.parametrize("dtype", (np.uint8, bool))
def test_close_small_holes(ndim, dtype):
    """close_small_holes should refill a hollowed-out hypercube."""
    data = np.zeros((10,) * ndim, dtype=dtype)
    data[(slice(1, -1),) * ndim] = 1
    copy = data.copy()
    data[(slice(3, -3),) * ndim] = 0
    res = close_small_holes(data, 5 ** 2)
    assert np.all(res == copy)
35.430556
113
0.73971
290
2,551
6.317241
0.337931
0.049127
0.07369
0.026201
0.114629
0.058952
0
0
0
0
0
0.012408
0.147001
2,551
71
114
35.929577
0.829504
0
0
0
0
0
0.085849
0
0
0
0
0
0.107143
1
0.107143
false
0.017857
0.142857
0.035714
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d655292e154bea97fce427b26a455cc09bac09
958
py
Python
bootstrap.py
ayang818/pyweb-template
d4b8c97b9e99166a6b6d856929ef670771b90fd3
[ "MIT" ]
null
null
null
bootstrap.py
ayang818/pyweb-template
d4b8c97b9e99166a6b6d856929ef670771b90fd3
[ "MIT" ]
null
null
null
bootstrap.py
ayang818/pyweb-template
d4b8c97b9e99166a6b6d856929ef670771b90fd3
[ "MIT" ]
null
null
null
# coding=utf-8
import logging
import os

from flask import Flask

from cloudware_server.route.base import register_routes


def config_logger():
    """Set the root logger's level to INFO (original: 设置日志等级)."""
    logging.getLogger().setLevel(logging.INFO)


config_logger()


def create_app(config=None):
    """Create and configure the bootstrap Flask app (original: 创建 bootstrap app)."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    if config:
        app.config.from_mapping(config)
    else:
        app.config.from_pyfile('config.py', silent=True)
    try:
        if not os.path.exists(app.instance_path):
            os.makedirs(app.instance_path)
    except OSError as err:
        logging.error('启动失败 %s', err)
    # Register the HTTP routes (original: 注册路由)
    register_routes(app)
    return app


app = create_app()
logging.info("%s", os.path.join(app.instance_path, 'flaskr.sqlite'))
app.run(host='localhost', port=5000)
20.382979
68
0.656576
127
958
4.787402
0.488189
0.059211
0.098684
0.065789
0.121711
0.121711
0.121711
0.121711
0
0
0
0.006667
0.217119
958
46
69
20.826087
0.804
0.043841
0
0
0
0
0.063348
0
0
0
0
0
0
1
0.074074
false
0
0.148148
0
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d7b2bb9f36934a7abf44cab165f9dd936da6f5
2,990
py
Python
src/online/batch_job.py
jack139/fair
fe0ff64f8edbd794c3fb951ab6af420054e9e585
[ "BSD-3-Clause" ]
1
2019-07-16T09:46:39.000Z
2019-07-16T09:46:39.000Z
src/online/batch_job.py
jack139/fair
fe0ff64f8edbd794c3fb951ab6af420054e9e585
[ "BSD-3-Clause" ]
null
null
null
src/online/batch_job.py
jack139/fair
fe0ff64f8edbd794c3fb951ab6af420054e9e585
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper

db = setting.db_web

url = ('/online/batch_job')

# - batch-process orders (original comment: 批量处理订单)
class handler:
    def GET(self):
        """Render per-province counts of paid group-buy orders per SKU."""
        if helper.logged(helper.PRIV_USER, 'BATCH_JOB'):
            render = helper.create_render()
            #user_data=web.input(start_date='', shop='__ALL__')

            # look up the current user's shop
            db_shop = helper.get_shop_by_uid()
            shop_name = helper.get_shop(db_shop['shop'])

            # tally online orders
            condition = {
                'shop': db_shop['shop'],
                'status': {'$in': ['PAID', 'DISPATCH', 'ONROAD']},
                'type': {'$in': ['TUAN', 'SINGLE']},  # group-buy only
            }
            db_sale2 = db.order_app.find(condition, {
                'order_id': 1,
                'paid_time': 1,
                'cart': 1,
                'type': 1,
                'status': 1,
                'address': 1,
            })

            skus = {}
            for i in db_sale2:
                # split out the province from the address record
                sheng = i['address'][8].split(',')[0] if len(i['address']) >= 9 else u'未知'
                # hoisted: the tuan_id was re-extracted on every access
                tuan_id = i['cart'][0]['tuan_id']
                # fixed: dict.has_key() is Python-2-only (removed in py3);
                # the `in` operator works on both
                if tuan_id in skus:
                    if sheng in skus[tuan_id]:
                        skus[tuan_id][sheng]['num'] += 1
                        skus[tuan_id][sheng]['paid'] += (1 if i['status'] == 'PAID' else 0)
                        skus[tuan_id][sheng]['dispatch'] += (1 if i['status'] == 'DISPATCH' else 0)
                        skus[tuan_id][sheng]['onroad'] += (1 if i['status'] == 'ONROAD' else 0)
                    else:
                        skus[tuan_id][sheng] = {
                            'num': 1,
                            'paid': 1 if i['status'] == 'PAID' else 0,
                            'dispatch': 1 if i['status'] == 'DISPATCH' else 0,
                            'onroad': 1 if i['status'] == 'ONROAD' else 0,
                        }
                else:
                    r = db.pt_store.find_one({'tuan_id': tuan_id}, {'title': 1})
                    title = r['title'] if r else 'n/a'
                    skus[tuan_id] = {
                        'name': title,
                        'tuan_id': tuan_id,
                    }
                    skus[tuan_id][sheng] = {
                        'num': 1,  # includes giveaways (original: 要包含送的)
                        'paid': 1 if i['status'] == 'PAID' else 0,  # paid, awaiting picking (group-buy)
                        'dispatch': 1 if i['status'] == 'DISPATCH' else 0,  # paid, awaiting delivery (group-buy)
                        'onroad': 1 if i['status'] == 'ONROAD' else 0,  # paid, in delivery (group-buy)
                    }

            # roll the per-SKU province counts up into grand totals
            total_sum = {}
            for i in skus.keys():
                for j in skus[i].keys():
                    if j in ['name', 'tuan_id']:
                        continue
                    if j in total_sum:  # fixed: has_key() here too
                        total_sum[j]['paid'] += skus[i][j]['paid']
                        total_sum[j]['dispatch'] += skus[i][j]['dispatch']
                        total_sum[j]['onroad'] += skus[i][j]['onroad']
                    else:
                        total_sum[j] = {
                            'paid': skus[i][j]['paid'],
                            'dispatch': skus[i][j]['dispatch'],
                            'onroad': skus[i][j]['onroad'],
                        }

            return render.batch_job(helper.get_session_uname(),
                                    helper.get_privilege_name(),
                                    skus, shop_name['name'], total_sum)
        else:
            raise web.seeother('/')
31.473684
95
0.538462
446
2,990
3.46861
0.226457
0.061409
0.058177
0.096962
0.46671
0.458953
0.438268
0.404654
0.352295
0.336134
0
0.020949
0.217726
2,990
94
96
31.808511
0.640445
0.057191
0
0.067568
0
0
0.211052
0
0
0
0
0
0
1
0.013514
false
0
0.054054
0
0.094595
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d82a4a78317f4eced5c0075b91640e53d92d65
64
py
Python
create_browser.py
bannedcoder/selenium-easy-debug
657c7dea3df4c661198e5461abc95a5abaa9ab30
[ "MIT" ]
null
null
null
create_browser.py
bannedcoder/selenium-easy-debug
657c7dea3df4c661198e5461abc95a5abaa9ab30
[ "MIT" ]
null
null
null
create_browser.py
bannedcoder/selenium-easy-debug
657c7dea3df4c661198e5461abc95a5abaa9ab30
[ "MIT" ]
null
null
null
# Entry script: spin up (or reuse) a Selenium browser session.
# NOTE(review): wildcard import — create_driver_session presumably comes from
# helpers.reusable_browser; verify and prefer an explicit import.
from helpers.reusable_browser import *

create_driver_session()
16
38
0.84375
8
64
6.375
1
0
0
0
0
0
0
0
0
0
0
0
0.09375
64
3
39
21.333333
0.87931
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
51d8ae23bd3580d3a4606d438e3b8518ad7289ac
8,124
py
Python
attributes/architecture/main.py
Lufedi/reaper
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
[ "Apache-2.0" ]
null
null
null
attributes/architecture/main.py
Lufedi/reaper
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
[ "Apache-2.0" ]
1
2021-03-16T20:28:19.000Z
2021-03-16T20:28:19.000Z
attributes/architecture/main.py
Lufedi/reaper
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
[ "Apache-2.0" ]
1
2022-03-04T01:21:09.000Z
2022-03-04T01:21:09.000Z
import os
import re
import subprocess
import json

import networkx
from pygments import lexers, token, util

# Token types that count as symbol *references* when building the graph.
TOKENTYPE_WHITELIST = [
    token.Name,
    token.Name.Attribute,
    token.Name.Builtin,
    token.Name.Builtin.Pseudo,
    token.Name.Constant,
    token.Name.Decorator,
    token.Name.Entity,
    token.Name.Exception,
    token.Name.Label,
    token.Name.Namespace,
    token.Name.Other,
    token.Name.Tag,
    token.Name.Variable,
    token.Name.Variable.Class,
    token.Name.Variable.Global,
    token.Name.Variable.Instance
]

SUPPORTED_LANGUAGES = []

# Regular expression to parse the list of languages supported by ack as
# listed by ack --help-types
# Pattern: "    --[no]python"
# fixed: non-raw string contained invalid escape sequences (\s, \[, \w)
RE_ACK_LANGUAGES = re.compile(r'(?:^\s{4}--\[no\])(\w*)')

# Map GHTorrent's projects.language to ACK compatible language (if necessary).
ACK_LANGUAGE_MAP = {
    'c': 'cc',
    'c++': 'cpp',
    'c#': 'csharp',
    'objective-c': 'objc',
    'ojective-c++': 'objcpp',
    'javascript': 'js'
}


def init(cursor):
    """Populate SUPPORTED_LANGUAGES from `ack --help-types` output."""
    global SUPPORTED_LANGUAGES
    ack_process2 = subprocess.Popen(
        ['ack', '--help-types'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    lines, _ = [x.decode('utf-8') for x in ack_process2.communicate()]
    for line in lines.split('\n'):
        match = RE_ACK_LANGUAGES.match(line)
        if match:
            SUPPORTED_LANGUAGES.append(match.group(1))


def run(project_id, repo_path, cursor, **options):
    """Score a repository's architecture by call-graph connectedness.

    Returns (passed, monolithicity) where monolithicity is the fraction of
    files in the largest connected component of the symbol graph.
    """
    result = 0

    # NOTE(review): project_id is interpolated into SQL via str.format — fine
    # only because it comes from an internal source; parameterize if exposed.
    cursor.execute('''
        SELECT language FROM projects WHERE id = {0}
    '''.format(project_id))
    record = cursor.fetchone()
    language = record[0]
    language = language.lower() if language else language

    ack_language = language
    if ack_language in ACK_LANGUAGE_MAP:
        ack_language = ACK_LANGUAGE_MAP[ack_language]

    # Edge case if the repository language is not supported by us.
    if (ack_language not in SUPPORTED_LANGUAGES) and (language.lower() != 'javascript'):
        return False, result

    file_paths = []
    if language.lower() == 'javascript':
        for root, dirs, files in os.walk(repo_path):
            for _file in files:
                if _file.endswith(".js"):
                    file_paths.append(os.path.join(root, _file))
    else:
        ack_process = subprocess.Popen(
            ['ack', '-f', "--{0}".format(ack_language), repo_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        lines, _ = [
            x.decode(errors='replace') for x in ack_process.communicate()
        ]
        file_paths = [line for line in lines.split('\n') if line.strip()]

    # Immediately fail the attribute if `minimumFiles` is not met.
    if len(file_paths) < options.get('minimumFiles', 2):
        return False, result

    graph = networkx.Graph()
    if language.lower() == 'javascript':
        # JavaScript: Use external utility
        success = build_js_graph(repo_path, file_paths, graph)
    else:
        lexer = lexers.get_lexer_by_name(language)
        # fixed: was build_graph(repo_path, graph, lexer) — build_graph
        # expects the *list* of file paths, so it iterated the characters of
        # repo_path and silently skipped every "file" via FileNotFoundError.
        success = build_graph(file_paths, graph, lexer)

    if success:
        monolithicity = get_connectedness(graph)
    else:
        monolithicity = 0

    return monolithicity >= options['threshold'], monolithicity


def build_js_graph(repo_path, file_paths, graph):
    """Build a file-level call graph for JavaScript using js-callgraph."""
    # add nodes
    for file_path in file_paths:
        graph.add_node(Node(file_path))

    name = repo_path.split('/')[-1]  # get name of the repository
    # compute and store call graph as json using js-callgraph
    graph_process = f"gtimeout 1000 js-callgraph --cg {repo_path} --output {name}_graph.json >/dev/null 2>&1"
    os.system(graph_process)

    try:
        with open('{}_graph.json'.format(name), 'r') as json_file:
            # load the json representation of the call graph
            calls = json.load(json_file)
            for call in calls:
                source_file = call['source']['file']  # source of the call
                target_file = call['target']['file']  # target of the call
                # both ends should be nodes in the call graph, i.e. .js files
                if source_file.endswith(".js") and target_file.endswith(".js"):
                    graph.add_edge(Node(source_file), Node(target_file))
        # NOTE(review): to_undirected() returns a copy and the result is
        # discarded, so this line is a no-op (graph is already undirected).
        graph.to_undirected()
        # delete the json representation of the call graph
        os.remove('{}_graph.json'.format(name))
        return True
    except IOError as err:
        print(err)
        return False


def build_graph(file_paths, graph, lexer):
    """Build a file-level symbol graph with pygments.

    Pass 1: for each file, lex its contents and record defined symbols
    (function/class names) and referenced names on that file's node.
    Pass 2: add an edge from every file that references a symbol to the
    file that defines it.
    """
    for file_path in file_paths:
        node = Node(file_path)
        graph.add_node(node)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                contents = file.read()
                tokens = lexer.get_tokens(contents)
                for item in tokens:
                    token_type = item[0]
                    symbol = item[1]
                    if token_type in [token.Name.Function, token.Name.Class]:
                        node.defines.add(symbol)
                    elif token_type in TOKENTYPE_WHITELIST:
                        node.references.add(symbol)
            if 'DEBUG' in os.environ:
                print(node)
        except FileNotFoundError:  # fixed: unused `as e` binding removed
            continue
        except UnicodeDecodeError:
            continue

    # NOTE(review): nodes_iter() is the networkx 1.x API — this module
    # targets that version (see connected_component_subgraphs below).
    for caller in graph.nodes_iter():
        for reference in caller.references:
            for callee in graph.nodes_iter():
                if callee is not caller and reference in callee.defines:
                    graph.add_edge(caller, callee)
    return True


def get_connectedness(graph):
    """Return the fraction of nodes in the largest connected component."""
    # NOTE(review): connected_component_subgraphs was removed in networkx
    # 2.4; kept as-is since the file uses the 1.x API throughout.
    components = list(networkx.connected_component_subgraphs(graph))
    # N = networkx.nx_agraph.to_agraph(graph)
    # N.layout(prog='dot')
    # N.draw("file.png")
    components.sort(key=lambda i: len(i.nodes()), reverse=True)
    largest_component = components[0]
    connectedness = 0
    if graph.nodes() and len(graph.nodes()) > 0:
        connectedness = len(largest_component.nodes()) / len(graph.nodes())
    return connectedness


class Node():
    """A source file plus the symbols it defines and references."""

    def __init__(self, path):
        self.path = path
        self.defines = set()
        self.references = set()

    def __hash__(self):
        # equality/hash are by path only, so re-creating Node(path) finds
        # the same graph node
        return hash(self.path)

    def __eq__(self, other):
        return self.path == other.path

    def __str__(self):
        symbol_str = '\r' + '\n'.join(self.defines)
        return "{0}\n{1}\n{2}".format(
            self.path, '=' * len(self.path), symbol_str
        )


if __name__ == '__main__':
    import importlib
    import json
    import mysql.connector
    import sys

    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
    from lib.utilities import get_loc

    os.environ['DEBUG'] = '1'

    with open('../../config.json', 'r') as file:
        config = json.load(file)
    mysql_config = config['options']['datasource']
    connection = mysql.connector.connect(**mysql_config)
    connection.connect()
    cursor = connection.cursor()

    init(None)
    result = run(sys.argv[1], sys.argv[2], cursor, threshold=0.75)

    cursor.close()
    connection.close()

    print(result)
else:
    from lib.utilities import get_loc
31.858824
117
0.616322
1,035
8,124
4.712077
0.252174
0.033217
0.013943
0.010662
0.143121
0.132869
0.104162
0.089809
0.065204
0.043879
0
0.005619
0.27708
8,124
254
118
31.984252
0.824791
0.19387
0
0.111111
0
0.005556
0.08005
0.003568
0.005556
0
0
0
0
1
0.05
false
0
0.066667
0.011111
0.177778
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51d8b62bf25917e74c95918a73ec119b3673d41b
1,065
py
Python
opencv/colordetect/sendimage.py
ronhandler/gitroot
beb81c4b826939f16e57a98ac5845d8acecf151d
[ "Unlicense" ]
null
null
null
opencv/colordetect/sendimage.py
ronhandler/gitroot
beb81c4b826939f16e57a98ac5845d8acecf151d
[ "Unlicense" ]
null
null
null
opencv/colordetect/sendimage.py
ronhandler/gitroot
beb81c4b826939f16e57a98ac5845d8acecf151d
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python
# Capture one webcam frame, upload the staged copy to S3, then ping the
# admin endpoint. NOTE(review): uses urllib2, so this script targets
# Python 2 only.
import sys
import os.path
import cv2
import numpy as np
import boto
from boto.s3.key import Key

cap = cv2.VideoCapture(0)
ret, new_image = cap.read()
if not ret:  # fixed: `== False` comparison
    exit(1)
filename = 'new.jpg'
cv2.imwrite(filename, new_image)

bucket_name = 'ronhandler'
# SECURITY: credentials were hard-coded in source control. Read from the
# environment first; the literals remain only as a backward-compatible
# fallback and should be rotated and removed.
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID',
                                   'AKIAIYLDR3LU2XDICTSQ')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY',
                                       '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')

print('Connecting to AWS S3...')
conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
                       # Hardcoding the host parameter is a workaround for bug:
                       # https://github.com/boto/boto/issues/621
                       host="s3-eu-west-1.amazonaws.com")
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
k.key = filename
# NOTE: uploads the copy staged under /share/, not the local new.jpg
testfile = "/share/" + filename
print('Uploading "%s" to "%s/%s"...' % (testfile, bucket_name, k.key))
k.set_contents_from_filename(testfile)

print('Notifying the server that we have uploaded a file...')
import urllib2
url = """http://ec2-52-16-188-96.eu-west-1.compute.amazonaws.com/admin/run.php"""
urllib2.urlopen(url).read()
25.97561
81
0.723005
163
1,065
4.588957
0.552147
0.048128
0.032086
0.037433
0
0
0
0
0
0
0
0.043621
0.138967
1,065
40
82
26.625
0.772083
0.107981
0
0
0
0.034483
0.297782
0.069694
0
0
0
0
0
1
0
false
0
0.241379
0
0.241379
0.103448
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51dc8e18a95d43bdff9738868787ec57f30ae57c
2,569
py
Python
tests/unit/dataactvalidator/test_fabs39_detached_award_financial_assistance_2.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
1
2018-10-29T12:54:44.000Z
2018-10-29T12:54:44.000Z
tests/unit/dataactvalidator/test_fabs39_detached_award_financial_assistance_2.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
null
null
null
tests/unit/dataactvalidator/test_fabs39_detached_award_financial_assistance_2.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
null
null
null
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns

_FILE = 'fabs39_detached_award_financial_assistance_2'


def test_column_headers(database):
    """The rule reports at least these columns."""
    expected_subset = {"row_number", "place_of_performance_code", "place_of_perform_country_c"}
    actual = set(query_columns(_FILE, database))
    assert expected_subset == actual


def test_success(database):
    """ PrimaryPlaceOfPerformanceCode must be 00FORGN when
        PrimaryPlaceofPerformanceCountryCode is not USA, not 00FORGN
        otherwise. """
    rows = [
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FORGN",
                                                place_of_perform_country_c="UKR"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FoRGN",
                                                place_of_perform_country_c="uKr"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="ny**987",
                                                place_of_perform_country_c="USA"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY**987",
                                                place_of_perform_country_c="UsA"),
    ]
    assert number_of_errors(_FILE, database, models=rows) == 0


def test_failure(database):
    """ Test failure for PrimaryPlaceOfPerformanceCode must be 00FORGN when
        PrimaryPlaceofPerformanceCountryCode is not USA, not 00FORGN
        otherwise. """
    rows = [
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FORGN",
                                                place_of_perform_country_c="USA"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FoRGN",
                                                place_of_perform_country_c="usA"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="ny**987",
                                                place_of_perform_country_c="UKR"),
        DetachedAwardFinancialAssistanceFactory(place_of_performance_code="NY**987",
                                                place_of_perform_country_c="ukR"),
    ]
    assert number_of_errors(_FILE, database, models=rows) == 4
59.744186
112
0.671467
257
2,569
6.249027
0.237354
0.078456
0.100872
0.123288
0.759651
0.745953
0.745953
0.745953
0.745953
0.745953
0
0.025871
0.262748
2,569
42
113
61.166667
0.82207
0.102374
0
0.482759
0
0
0.081283
0.04174
0
0
0
0
0.103448
1
0.103448
false
0
0.068966
0
0.172414
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
51dded95316bf909a713965fa3b3d3c363309051
2,782
py
Python
hipp/utils/utils.py
cmcneil-usgs/hipp
be6f9f8cccdc32b7b96be92172977a5c4006500c
[ "MIT" ]
12
2020-10-07T22:12:11.000Z
2022-02-15T23:10:53.000Z
hipp/utils/utils.py
cmcneil-usgs/hipp
be6f9f8cccdc32b7b96be92172977a5c4006500c
[ "MIT" ]
7
2020-10-11T23:42:55.000Z
2021-12-15T23:16:43.000Z
hipp/utils/utils.py
cmcneil-usgs/hipp
be6f9f8cccdc32b7b96be92172977a5c4006500c
[ "MIT" ]
4
2020-10-11T19:48:58.000Z
2022-03-08T21:32:13.000Z
import glob
import os

import cv2

import hipp.io
import hipp.utils

"""
Library for command line tools.
"""


def optimize_geotif(geotif_file_name,
                    output_file_name=None,
                    verbose=False,
                    print_call=False):
    """Rewrite a GeoTIFF as a tiled, LZW-compressed (BIGTIFF=IF_SAFER) file.

    Parameters
    ----------
    geotif_file_name : str
        Path to the input GeoTIFF.
    output_file_name : str, optional
        Destination path. Defaults to ``<stem>_optimized<ext>`` next to the input.
    verbose : bool, optional
        Forwarded to ``hipp.io.run_command``.
    print_call : bool, optional
        If True, only print the gdal_translate command instead of running it.

    Returns
    -------
    str
        Path of the optimized file (also when only printing the call).
    """
    if output_file_name is None:
        file_path, file_name, file_extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(file_path, file_name + '_optimized' + file_extension)

    call = ['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER',
            geotif_file_name,
            output_file_name]

    if print_call:  # idiomatic truthiness test instead of "== True"
        print(*call)
    else:
        hipp.io.run_command(call, verbose=verbose)

    return output_file_name


def optimize_geotifs(input_directory, keep=False, verbose=False):
    """Optimize every ``*.tif`` in *input_directory* with :func:`optimize_geotif`.

    Parameters
    ----------
    input_directory : str
        Directory scanned (non-recursively) for ``*.tif`` files.
    keep : bool, optional
        If False (default), each original file is replaced in place by its
        optimized version. If True, the ``*_optimized`` copies are kept
        alongside the originals.
    verbose : bool, optional
        Forwarded to :func:`optimize_geotif`.

    Returns
    -------
    list of str
        Paths of the resulting tifs, in sorted input order.
    """
    print('Optimizing tifs in', input_directory, 'with:')
    print(*['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER'])

    tifs = sorted(glob.glob(os.path.join(input_directory, '*.tif')))

    output_tifs = []
    for tif in tifs:
        tif_optimized = hipp.utils.optimize_geotif(tif, verbose=verbose)
        if not keep:
            # Replace the original atomically-ish: remove, then rename the copy.
            os.remove(tif)
            os.rename(tif_optimized, tif)
            output_tifs.append(tif)
        else:
            output_tifs.append(tif_optimized)

    return output_tifs


def enhance_geotif_resolution(geotif_file_name,
                              output_file_name=None,
                              factor=None,
                              verbose=False,
                              print_call=False):
    """Upsample a GeoTIFF by an integer *factor* using cubic resampling.

    Parameters
    ----------
    geotif_file_name : str
        Path to the input GeoTIFF.
    output_file_name : str, optional
        Destination path. Defaults to ``<stem>_high_res<ext>`` next to the input.
    factor : int
        Multiplier applied to both image dimensions. Required.
    verbose : bool, optional
        Forwarded to ``hipp.io.run_command``.
    print_call : bool, optional
        If True, only print the gdal_translate command instead of running it.

    Returns
    -------
    str
        Path of the resampled file.

    Raises
    ------
    ValueError
        If *factor* is not provided (previously this surfaced as an opaque
        ``TypeError`` from ``w * None``).
    """
    if factor is None:
        raise ValueError("enhance_geotif_resolution requires an explicit 'factor'.")

    if output_file_name is None:
        file_path, file_name, file_extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(file_path, file_name + '_high_res' + file_extension)

    # Read only to determine pixel dimensions; gdal does the actual resampling.
    img = cv2.imread(geotif_file_name, cv2.IMREAD_GRAYSCALE)
    w, h = img.shape[::-1]
    w, h = w * factor, h * factor

    call = ['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER',
            '-outsize', str(w), str(h),
            '-r', 'cubic',
            geotif_file_name,
            output_file_name]

    if print_call:
        print(*call)
    else:
        hipp.io.run_command(call, verbose=verbose)

    return output_file_name
29.284211
83
0.515097
300
2,782
4.513333
0.25
0.124077
0.103397
0.088626
0.564993
0.564993
0.564993
0.520679
0.520679
0.520679
0
0.002311
0.377786
2,782
94
84
29.595745
0.779896
0
0
0.56338
0
0
0.097187
0
0
0
0
0
0
1
0.042254
false
0
0.070423
0
0.15493
0.112676
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51de4c03101e87908bf9ddadb35262f495914dc2
1,070
py
Python
tests/test_explainers/test_core.py
openitnovem/readml
ce5323ff18a1796aef3a81590bb480cdcdcef0b9
[ "Apache-2.0" ]
null
null
null
tests/test_explainers/test_core.py
openitnovem/readml
ce5323ff18a1796aef3a81590bb480cdcdcef0b9
[ "Apache-2.0" ]
12
2022-01-19T13:02:58.000Z
2022-03-31T15:17:23.000Z
tests/test_explainers/test_core.py
openitnovem/readml
ce5323ff18a1796aef3a81590bb480cdcdcef0b9
[ "Apache-2.0" ]
4
2022-01-11T16:37:50.000Z
2022-03-18T19:48:58.000Z
import os

import pandas as pd

from fbd_interpreter.explainers.ml.explain_ml import ExplainML
from fbd_interpreter.logger import ROOT_DIR

# Fixed fixture data: two features, balanced binary target.
FEATURES = ["a", "b"]
PREDICTIONS = [0, 0, 0, 0, 1, 1, 1, 1]
TARGETS = pd.Series([0, 0, 0, 0, 1, 1, 1, 1])
DATA = pd.DataFrame({
    "a": list(range(8)),
    "b": [1] * 4 + [2] * 4,
    "target": TARGETS,
})


class DummyModel(object):
    """
    Dummy class that acts like a scikit-learn supervised learning model.
    Always makes the same predictions.
    """

    def __init__(
        self,
    ) -> None:
        # Callables are bound as instance attributes so the object mimics
        # a fitted estimator without any training step.
        self.predict = lambda features: PREDICTIONS
        self.classes_ = [0, 1]
        self.predict_proba = lambda features: [[0.9, 0.1]]


# TODO
"""
def test_global_pdp_ice() -> None:
    interpreter = ExplainML(
        model=DummyModel(),
        task_name="classification",
        tree_based_model=False,
        features_name=FEATURES,
        features_to_interpret=FEATURES,
        target_col="target",
        out_path=os.path.join(ROOT_DIR, "../outputs/tests"),
    )
    interpreter.global_pdp_ice(DATA)
"""
24.318182
85
0.613084
151
1,070
4.18543
0.509934
0.028481
0.028481
0.018987
0.025316
0.025316
0.025316
0.025316
0
0
0
0.046856
0.242056
1,070
43
86
24.883721
0.732429
0.101869
0
0
0
0
0.017575
0
0
0
0
0.023256
0
1
0.058824
false
0
0.235294
0
0.352941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
51e08e485ba5b37c52a2c10b26fc9af31001557a
5,836
py
Python
tests/test_preprocessing/test_encoding.py
ig248/timeserio
afc2a953a83e763418d417059493ef13a17d349c
[ "MIT" ]
63
2019-07-12T17:16:27.000Z
2022-02-22T11:06:50.000Z
tests/test_preprocessing/test_encoding.py
ig248/timeserio
afc2a953a83e763418d417059493ef13a17d349c
[ "MIT" ]
34
2019-07-30T11:52:09.000Z
2022-03-28T12:42:02.000Z
tests/test_preprocessing/test_encoding.py
ig248/timeserio
afc2a953a83e763418d417059493ef13a17d349c
[ "MIT" ]
12
2019-08-14T05:51:22.000Z
2021-03-15T09:34:15.000Z
import numpy as np
import numpy.testing as npt
import pytest
from sklearn.preprocessing import OneHotEncoder

from timeserio.preprocessing import (
    FeatureIndexEncoder, StatelessOneHotEncoder, StatelessTemporalOneHotEncoder,
    StatelessPeriodicEncoder
)
from timeserio.preprocessing.encoding import PeriodicEncoder


class TestFeatureIndexEncoder:
    """FeatureIndexEncoder maps string labels to a column of integer ids."""

    @pytest.mark.parametrize(
        'n_labels, expected_encoding', [
            (1, np.arange(1)),
            (2, np.arange(2)),
            (3, np.arange(3)),
        ]
    )
    def test_feature_encoder(self, n_labels, expected_encoding):
        encoder = FeatureIndexEncoder()
        labels = np.array(
            [f'label{idx}' for idx in range(n_labels)]
        ).reshape(-1, 1)
        new_ids = encoder.fit_transform(labels)
        assert isinstance(new_ids, np.ndarray)
        assert len(new_ids.shape) == 2
        assert new_ids.shape[1] == 1
        # BUG FIX: was `assert set(new_ids.ravel() == set(...))`, which
        # compared an ndarray to a set (never the intended set equality).
        assert set(new_ids.ravel()) == set(expected_encoding.ravel())


class TestStatelessOneHotEncoder:
    """StatelessOneHotEncoder must match a fitted sklearn OneHotEncoder."""

    n_rows = 10

    def test_invalid_n_values(self):
        # 'auto' is only meaningful for the stateful encoder.
        with pytest.raises(ValueError):
            StatelessOneHotEncoder(n_features=1, n_values='auto')

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, 3, [[0, 1, 2]]),
            (2, 3, [[0, 1, 2], [0, 1, 2]])
        ]
    )
    def test_same_as_stateful(
        self, n_features, n_values, categories, random
    ):
        x = np.random.randint(
            0, np.min(n_values), size=(self.n_rows, n_features)
        )
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, [3], [[0, 1, 2]]),
            (2, [3, 2], [[0, 1, 2], [0, 1]])
        ]
    )
    def test_same_as_stateful_for_multiple_n_values(
        self, n_features, n_values, categories, random
    ):
        # Each column gets its own cardinality from n_values.
        x = np.hstack([
            np.random.randint(0, np.min(_n_values), size=(self.n_rows, 1))
            for _n_values in n_values
        ])
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)


class TestStatelessTemporalOneHotEncoder:
    """Temporal variant one-hot encodes each time step of each row."""

    n_rows = 3

    @pytest.mark.parametrize('n_values', ['all', [True], [0]])
    def test_invalid_n_values(self, n_values):
        with pytest.raises(ValueError):
            StatelessTemporalOneHotEncoder(n_features=1, n_values=n_values)

    def test_temporal_onehot(self):
        x = np.array([
            [0, 0, 1, 1],
            [0, 1, 0, 1],
        ])
        y_expected = np.array(
            [
                [1, 1, 0, 0, 0, 0, 1, 1],
                [1, 0, 1, 0, 0, 1, 0, 1],
            ]
        )
        n_values = 2
        enc = StatelessTemporalOneHotEncoder(
            n_features=x.shape[1],
            n_values=n_values,
            sparse=False
        )
        y = enc.fit_transform(x)
        npt.assert_allclose(y, y_expected)


class TestPeriodicEncoder:
    """PeriodicEncoder replaces periodic columns by (sin, cos) pairs."""

    n_rows = 10
    column = np.linspace(0, 1, num=n_rows)
    column_sin = np.sin(2 * np.pi * column)
    column_cos = np.cos(2 * np.pi * column)
    column_stacked = np.vstack([column_sin, column_cos]).T

    def array(self, n_features):
        # n_features identical columns, each equal to self.column.
        x = np.arange(n_features)
        y = self.column
        _, X = np.meshgrid(x, y)
        return X

    @pytest.mark.parametrize('periodic_features', [[], [False]])
    def test_single_column_no_transform(self, periodic_features):
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_array_equal(X, Xt)

    @pytest.mark.parametrize('periodic_features', ['all', [0], [True]])
    def test_single_column(self, periodic_features):
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_allclose(Xt, self.column_stacked)

    @pytest.mark.parametrize('n_features', [2])
    @pytest.mark.parametrize(
        'periodic_features', ['all', [0, 1], [True, True]]
    )
    def test_multi_column(self, n_features, periodic_features):
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=2)
        Xt = enc.fit_transform(X)
        # Output interleaves sin/cos per input column.
        npt.assert_allclose(Xt[:, ::2], self.column_stacked)
        npt.assert_allclose(Xt[:, 1::2], self.column_stacked)


class TestStatelessPeriodicEncoder:
    """Stateless variant must match a fitted PeriodicEncoder."""

    n_rows = 10

    @pytest.mark.parametrize(
        'n_features, periodic_features, period', [
            (1, 'all', 1.),
            (2, 'all', 1.),
            (2, 'all', [1., 2.]),
            (2, [True, False], 3),
            (2, [1], 3)
        ]
    )
    def test_same_as_stateful(self, n_features, periodic_features, period):
        x = np.random.randint(0, 10, size=(self.n_rows, n_features))
        stateful_enc = PeriodicEncoder(
            periodic_features=periodic_features, period=period
        )
        stateless_enc = StatelessPeriodicEncoder(
            n_features=n_features,
            periodic_features=periodic_features,
            period=period
        )
        x0 = stateful_enc.fit_transform(x)
        x1 = stateless_enc.transform(x)
        npt.assert_array_equal(x1, x0)
32.422222
76
0.600583
699
5,836
4.792561
0.153076
0.064478
0.056418
0.033433
0.546866
0.460597
0.412836
0.362687
0.333731
0.297015
0
0.028578
0.2805
5,836
179
77
32.603352
0.769231
0
0
0.310127
0
0
0.039239
0
0
0
0
0
0.075949
1
0.06962
false
0
0.037975
0
0.196203
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51e1a411ad9558e7543b3930124feeca7cd75ff5
11,654
py
Python
paper_examples/ex51_validation2D/main.py
jhabriel/mixdim-estimates
aa7041fe3fc7a13b820ef41dacefb759f4b592ff
[ "MIT" ]
3
2022-02-15T14:56:16.000Z
2022-03-24T10:20:00.000Z
paper_examples/ex51_validation2D/main.py
jhabriel/mixdim-estimates
aa7041fe3fc7a13b820ef41dacefb759f4b592ff
[ "MIT" ]
3
2021-06-15T16:23:46.000Z
2021-12-05T10:25:41.000Z
paper_examples/ex51_validation2D/main.py
jhabriel/mixdim-estimates
aa7041fe3fc7a13b820ef41dacefb759f4b592ff
[ "MIT" ]
null
null
null
# Importing modules
import numpy as np
import porepy as pp
import itertools

from time import time
from model import model

#%% Functions
def make_constrained_mesh(h=0.1):
    """
    Creates unstructured mesh for a given target mesh size for the case of a
    single vertical fracture embedded in the domain

    Parameters
    ----------
    h : float, optional
        Target mesh size. The default is 0.1.

    Returns
    -------
    gb : PorePy Object
        Porepy grid bucket object.

    """

    domain = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1}
    network_2d = pp.fracture_importer.network_2d_from_csv("network.csv", domain=domain)

    # Target lengths
    target_h_bound = h
    target_h_fract = h
    mesh_args = {"mesh_size_bound": target_h_bound, "mesh_size_frac": target_h_fract}

    # Construct grid bucket
    gb = network_2d.mesh(mesh_args, constraints=[1, 2])

    return gb


def create_non_matching_gridbucket(h_2d, h_1d, h_mortar):
    """
    Generates a gridbucket containing non-matching grids

    Parameters
    ----------
    h_2d : Float
        Mesh size of the higher-dimensional grid
    h_1d : Float
        Mesh size of the lower-dimensional grid
    h_mortar : Float
        Mesh size of the mortar grid

    Raises
    ------
    Warning
        If the subdomain cells are larger than the mortar cells

    Returns
    -------
    gb : PorePy object
        Grid bucket

    """

    # Sanity check: a mesh size larger than the mortar's means the subdomain
    # cells are COARSER (larger) than the mortar cells.
    # BUG FIX: the message (and docstring) previously said "smaller", which
    # inverted the meaning of the h_2d > h_mortar / h_1d > h_mortar condition.
    if (h_2d > h_mortar) or (h_1d > h_mortar):
        warning_msg = "Subdomain cells are larger than mortar cells "
        warning_msg += "and this may lead to inconsistent results."
        raise Warning(warning_msg)

    # NOTE: The easiest way to construct the non-matching gridbucket is to
    # replace the lower-dimensional grid and the mortar grids into the
    # higher-dimensional grid

    # Create a grid bucket using h_2d as target mesh size
    gb_h = make_constrained_mesh(h_2d)
    gl_old = gb_h.grids_of_dimension(1)[0]  # extract 1d-grid
    mg_old = gb_h.get_mortar_grids()[0]  # extract mortar-grid

    # Obtain fracture and mortar grids to be replaced into
    gl_new = make_constrained_mesh(h_1d).grids_of_dimension(1)[0]
    mg_new = make_constrained_mesh(h_mortar).get_mortar_grids()[0]

    # Create the mapping dictionaries
    g_map = {gl_old: gl_new}
    mg_map = {mg_old: mg_new.side_grids}

    # Replace grids
    gb = gb_h.copy()
    gb.replace_grids(g_map=g_map)
    gb.replace_grids(mg_map=mg_map)

    return gb


#%% Defining numerical methods, and obtaining grid buckets
num_methods = ["TPFA", "MPFA", "RT0", "MVEM"]

levels = 5  # coarsening levels
coarsening_factor = 2
h_2d_ref = 0.003125  # reference 2D mesh size
h_1d_ref = h_2d_ref * 1.5  # reference 1D mesh size
h_mortar_ref = h_2d_ref * 2.0  # reference mortar mesh size
h_2d = coarsening_factor ** np.arange(levels) * h_2d_ref
h_1d = coarsening_factor ** np.arange(levels) * h_1d_ref
h_mortar = coarsening_factor ** np.arange(levels) * h_mortar_ref
grid_buckets = []

tic = time()
print("Assembling non-matching grid buckets...", end="")
for counter in range(levels):
    grid_buckets.append(
        create_non_matching_gridbucket(h_2d[counter], h_1d[counter], h_mortar[counter])
    )
# Reverse so the finest level is solved last
grid_buckets = grid_buckets[::-1]
print(f"\u2713 Time {time() - tic}\n")

#%% Create dictionary and initialize fields
d = {k: {} for k in num_methods}
for method in num_methods:
    d[method] = {
        "mesh_size": [],
        "error_estimate_2d": [],
        "true_error_pressure_2d": [],
        "true_error_velocity_2d": [],
        "mesh_size_2d": [],
        "error_estimate_1d": [],
        "true_error_pressure_1d": [],
        "true_error_velocity_1d": [],
        "mesh_size_1d": [],
        "error_estimate_mortar": [],
        "true_error_pressure_mortar": [],
        "true_error_velocity_mortar": [],
        "mesh_size_mortar": [],
        "majorant": [],
        "true_error_pressure": [],
        "true_error_velocity": [],
        "I_eff_pressure": [],
        "I_eff_velocity": [],
        "I_eff_combined": [],
    }

#%% Populate fields (NOTE: This loop may take considerable time)
for i in itertools.product(num_methods, grid_buckets):

    # Print info in the console
    print("Solving with", i[0], "for refinement level", grid_buckets.index(i[1]) + 1)

    # Get hold of errors
    tic = time()
    (
        h_max,
        error_estimate_2d,
        true_error_pressure_2d,
        true_error_velocity_2d,
        mesh_size_2d,
        error_estimate_1d,
        true_error_pressure_1d,
        true_error_velocity_1d,
        mesh_size_1d,
        error_estimates_mortar,
        true_error_pressure_mortar,
        true_error_velocity_mortar,
        mesh_size_mortar,
        majorant,
        true_error_pressure,
        true_error_velocity,
        I_eff_pressure,
        I_eff_velocity,
        I_eff_combined,
    ) = model(i[1], i[0])
    print(f"Done. Time {time() - tic}\n")

    # Store errors in the dictionary
    d[i[0]]["mesh_size"].append(h_max)
    d[i[0]]["error_estimate_2d"].append(error_estimate_2d)
    d[i[0]]["true_error_pressure_2d"].append(true_error_pressure_2d)
    d[i[0]]["true_error_velocity_2d"].append(true_error_velocity_2d)
    d[i[0]]["mesh_size_2d"].append(mesh_size_2d)
    d[i[0]]["error_estimate_1d"].append(error_estimate_1d)
    d[i[0]]["true_error_pressure_1d"].append(true_error_pressure_1d)
    d[i[0]]["true_error_velocity_1d"].append(true_error_velocity_1d)
    d[i[0]]["mesh_size_1d"].append(mesh_size_1d)
    d[i[0]]["error_estimate_mortar"].append(error_estimates_mortar)
    d[i[0]]["true_error_pressure_mortar"].append(true_error_pressure_mortar)
    d[i[0]]["true_error_velocity_mortar"].append(true_error_velocity_mortar)
    d[i[0]]["mesh_size_mortar"].append(mesh_size_mortar)
    d[i[0]]["majorant"].append(majorant)
    d[i[0]]["true_error_pressure"].append(true_error_pressure)
    d[i[0]]["true_error_velocity"].append(true_error_velocity)
    d[i[0]]["I_eff_pressure"].append(I_eff_pressure)
    d[i[0]]["I_eff_velocity"].append(I_eff_velocity)
    d[i[0]]["I_eff_combined"].append(I_eff_combined)

#%% Exporting
# Permutations
rows = len(num_methods) * len(grid_buckets)

# Initialize lists
num_method_name = []
diam_2d = []
diam_1d = []
diam_mortar = []
col_2d_estimate = []
col_1d_estimate = []
col_mortar_estimate = []
col_majorant = []
col_true_error_pressure = []
col_true_error_velocity = []
I_eff_pressure = []
I_eff_velocity = []
I_eff_combined = []

# Populate lists
for i in itertools.product(num_methods, range(levels)):
    num_method_name.append(i[0])
    diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]])
    diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]])
    diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]])
    col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]])
    col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]])
    col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]])
    col_majorant.append(d[i[0]]["majorant"][i[1]])
    col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]])
    col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]])
    I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]])
    I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]])
    I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]])

# Prepare for exporting
export = np.zeros(
    rows,
    dtype=[
        ('var2', 'U6'),
        ('var3', float),
        ('var4', float),
        ('var5', float),
        ('var6', float),
        ('var7', float),
        ('var8', float),
        ('var9', float),
        ('var10', float),
        ('var11', float),
        ('var12', float),
        ('var13', float),
        ('var14', float)
    ],
)

# Declaring column variables
export['var2'] = num_method_name
export['var3'] = diam_2d
export['var4'] = diam_1d
export['var5'] = diam_mortar
export['var6'] = col_2d_estimate
export['var7'] = col_1d_estimate
export['var8'] = col_mortar_estimate
export['var9'] = col_majorant
export['var10'] = col_true_error_pressure
export['var11'] = col_true_error_velocity
export['var12'] = I_eff_pressure
export['var13'] = I_eff_velocity
export['var14'] = I_eff_combined

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %2.2e "
fmt += "%2.2e %2.2e %2.2e %2.2e %2.2f %2.2f %2.2f"

# Headers
header = "num_method h_2d, h_1d, h_mortar, eta_2d eta_1d eta_mortar "
header += "majorant true_error_p true_error_u I_eff_p I_eff_u I_eff_pu"

# Writing into txt
np.savetxt('validation2d.txt', export, delimiter=',', fmt=fmt, header=header)

#%% Exporting to LaTeX

# Permutations
rows = len(num_methods) * len(grid_buckets)

# Initialize lists
ampersend = []
for i in range(rows):
    ampersend.append('&')
num_method_name = []
diam_2d = []
diam_1d = []
diam_mortar = []
col_2d_estimate = []
col_1d_estimate = []
col_mortar_estimate = []
col_majorant = []
col_true_error_pressure = []
col_true_error_velocity = []
I_eff_pressure = []
I_eff_velocity = []
I_eff_combined = []

# Populate lists
for i in itertools.product(num_methods, range(levels)):
    num_method_name.append(i[0])
    diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]])
    diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]])
    diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]])
    col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]])
    col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]])
    col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]])
    col_majorant.append(d[i[0]]["majorant"][i[1]])
    col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]])
    col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]])
    I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]])
    I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]])
    I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]])

# Prepare for exporting: '&' separator columns interleaved for LaTeX tables
export = np.zeros(
    rows,
    dtype=[
        ('var2', 'U6'),
        ('var3', float),
        ('var4', float),
        ('var5', float),
        ('var6', float),
        ('amp1', 'U6'),
        ('var7', float),
        ('amp2', 'U6'),
        ('var8', float),
        ('amp3', 'U6'),
        ('var9', float),
        ('amp4', 'U6'),
        ('var10', float),
        ('amp5', 'U6'),
        ('var11', float),
        ('amp6', 'U6'),
        ('var12', float),
        ('amp7', 'U6'),
        ('var13', float),
        ('amp8', 'U6'),
        ('var14', float)
    ],
)

# Prepare for exporting
export['var2'] = num_method_name
export['var3'] = diam_2d
export['var4'] = diam_1d
export['var5'] = diam_mortar
export['var6'] = col_2d_estimate
export['amp1'] = ampersend
export['var7'] = col_1d_estimate
export['amp2'] = ampersend
export['var8'] = col_mortar_estimate
export['amp3'] = ampersend
export['var9'] = col_majorant
export['amp4'] = ampersend
export['var10'] = col_true_error_pressure
export['amp5'] = ampersend
export['var11'] = col_true_error_velocity
export['amp6'] = ampersend
export['var12'] = I_eff_pressure
export['amp7'] = ampersend
export['var13'] = I_eff_velocity
export['amp8'] = ampersend
export['var14'] = I_eff_combined

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %1s %2.2e %1s %2.2e "
fmt += "%1s %2.2e %1s %2.2e %1s %2.2e %1s %2.2f %1s %2.2f %1s %2.2f"

# Headers
header = "num_method h_2d h_1d h_mortar eta_2d & eta_1d & eta_mortar & "
header += "majorant & true_error_p & true_error_u & I_eff_p & I_eff_u & I_eff_pu"

np.savetxt('validation2d_tex.txt',
           export,
           delimiter=',',
           fmt=fmt,
           header=header
           )
32.282548
87
0.643041
1,706
11,654
4.094373
0.139508
0.067001
0.018468
0.030923
0.565211
0.538153
0.434503
0.397423
0.393414
0.393414
0
0.039266
0.204565
11,654
360
88
32.372222
0.714239
0.144843
0
0.395062
0
0.016461
0.213826
0.037169
0
0
0
0
0
1
0.00823
false
0
0.024691
0
0.041152
0.016461
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51e203fe102c1df6470ac30a413c9d445b9bab48
1,850
py
Python
sa/profiles/Alcatel/AOS/get_portchannel.py
xUndero/noc
9fb34627721149fcf7064860bd63887e38849131
[ "BSD-3-Clause" ]
1
2019-09-20T09:36:48.000Z
2019-09-20T09:36:48.000Z
sa/profiles/Alcatel/AOS/get_portchannel.py
ewwwcha/noc
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
[ "BSD-3-Clause" ]
null
null
null
sa/profiles/Alcatel/AOS/get_portchannel.py
ewwwcha/noc
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Alcatel.AOS.get_portchannel
# ----------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------

from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetportchannel import IGetPortchannel
import re


class Script(BaseScript):
    """Collect link aggregation (port-channel) membership from Alcatel AOS."""

    name = "Alcatel.AOS.get_portchannel"
    interface = IGetPortchannel

    # One aggregate per line: "<port> Static|Dynamic"
    rx_line = re.compile(r"^\s+(?P<port>\d+)\s+(Static|Dynamic)", re.MULTILINE)
    # Member line of a per-aggregate listing (firmware >= 6.3.4)
    rx_line1 = re.compile(r"\s+(?P<interface>\d+\/\d+)\s+\S+\s+", re.MULTILINE)
    # Member line of the global listing, carries the aggregate id at the end
    rx_line2 = re.compile(
        r"^\s+(?P<interface>\d+\/\d+)\s+\S+\s+\d+\s+\S+\s+(?P<port>\d+)", re.MULTILINE
    )

    def execute(self):
        result = []
        linkagg_output = self.cli("show linkagg")
        port_output = ""
        for agg_match in self.rx_line.finditer(linkagg_output):
            port = int(agg_match.group("port"))
            if self.match_version(version__gte="6.3.4"):
                # Newer firmware supports a per-aggregate member listing.
                port_output = self.cli("show linkagg %i port" % port)
                members = [
                    m.group("interface")
                    for m in self.rx_line1.finditer(port_output)
                ]
            else:
                # Older firmware: fetch the global listing once and filter
                # members by aggregate id.
                if not port_output:
                    port_output = self.cli("show linkagg port")
                members = [
                    m.group("interface")
                    for m in self.rx_line2.finditer(port_output)
                    if int(m.group("port")) == port
                ]
            result.append(
                {
                    "interface": "%i" % port,
                    "members": members,
                    # <!> TODO: port-channel type detection
                    "type": "L",
                }
            )
        return result
37.755102
86
0.452973
191
1,850
4.329843
0.387435
0.01451
0.036276
0.039903
0.183797
0.113664
0.062878
0.062878
0.062878
0.062878
0
0.020769
0.297297
1,850
48
87
38.541667
0.615385
0.196757
0
0.055556
0
0.027778
0.177387
0.107651
0
0
0
0.020833
0
1
0.027778
false
0
0.083333
0
0.305556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
51e23600088ad593a6f5956c2914181ca7e20049
3,398
py
Python
mysite/finance/migrations/0001_initial.py
brzx/djfinance
958768e3e665a23dad7118741a9c9fcf769574df
[ "BSD-2-Clause" ]
null
null
null
mysite/finance/migrations/0001_initial.py
brzx/djfinance
958768e3e665a23dad7118741a9c9fcf769574df
[ "BSD-2-Clause" ]
null
null
null
mysite/finance/migrations/0001_initial.py
brzx/djfinance
958768e3e665a23dad7118741a9c9fcf769574df
[ "BSD-2-Clause" ]
null
null
null
# Generated by Django 2.1.3 on 2019-04-11 09:09
# NOTE: auto-generated migration — do not hand-edit field definitions;
# create a new migration for schema changes instead.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # First migration of the finance app; depends on the auth app's
    # initial migration because ForeignKeys below target djfauth.User.
    initial = True

    dependencies = [
        ('djfauth', '0001_initial'),
    ]

    operations = [
        # Ledger entries: outgoing / incoming / expense amounts per asset.
        migrations.CreateModel(
            name='AccountTurnOver',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ac_type', models.CharField(choices=[('out', '支出'), ('in', '收入'), ('exp', '消费')], max_length=50)),
                ('amount', models.DecimalField(decimal_places=2, default=0, max_digits=20)),
                ('comment', models.CharField(max_length=500)),
                ('create_dt', models.DateField(verbose_name='created date')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        # An account/card/loan held at an organization; billing fields optional.
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('as_iden', models.CharField(max_length=200)),
                ('as_organ', models.CharField(max_length=200)),
                ('amount', models.DecimalField(decimal_places=2, default=0, max_digits=20)),
                ('bill_date', models.CharField(blank=True, max_length=200, null=True)),
                ('repayment_date', models.CharField(blank=True, max_length=200, null=True)),
                ('credit_limit', models.CharField(blank=True, max_length=200, null=True)),
                ('year_rate', models.CharField(blank=True, max_length=200, null=True)),
                ('comment', models.CharField(blank=True, max_length=500, null=True)),
                ('create_dt', models.DateField(verbose_name='created date')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        # Classification of assets (name + detail type).
        migrations.CreateModel(
            name='AssetType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assetname', models.CharField(max_length=200)),
                ('detailtype', models.CharField(max_length=200)),
                ('create_dt', models.DateField(verbose_name='created date')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='updated date')),
            ],
        ),
        # ForeignKeys are added after all models exist to avoid ordering issues.
        migrations.AddField(
            model_name='asset',
            name='as_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.AssetType'),
        ),
        migrations.AddField(
            model_name='asset',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djfauth.User'),
        ),
        migrations.AddField(
            model_name='accountturnover',
            name='asset',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finance.Asset'),
        ),
        migrations.AddField(
            model_name='accountturnover',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djfauth.User'),
        ),
    ]
45.918919
115
0.587699
357
3,398
5.436975
0.263305
0.085008
0.049459
0.056672
0.779495
0.723854
0.631633
0.631633
0.631633
0.586296
0
0.023657
0.266039
3,398
73
116
46.547945
0.754611
0.013243
0
0.590909
1
0
0.126828
0
0
0
0
0
0
1
0
false
0
0.030303
0
0.090909
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
51e5e2f9e6590f0c3de91a80f943ddc82d5c24e6
226
py
Python
libpaas/camp/planparser.py
hvishwanath/libpaas
3df07adca59c003ee754c4e919cf506c13953be1
[ "MIT" ]
null
null
null
libpaas/camp/planparser.py
hvishwanath/libpaas
3df07adca59c003ee754c4e919cf506c13953be1
[ "MIT" ]
null
null
null
libpaas/camp/planparser.py
hvishwanath/libpaas
3df07adca59c003ee754c4e919cf506c13953be1
[ "MIT" ]
null
null
null
__author__ = 'hvishwanath'

import yaml

from models import *


class PlanParser(object):
    """Parses a CAMP plan file (YAML) into a CAMPPlan model object."""

    @classmethod
    def parse(cls, planfile):
        """Load *planfile* (path to a YAML CAMP plan) and build a CAMPPlan.

        BUG FIX: previously used ``yaml.safe_load(file(planfile))`` — the
        ``file()`` builtin is Python-2-only (NameError on Python 3) and the
        handle was never closed. ``open()`` in a ``with`` block fixes both.
        """
        # safe_load avoids arbitrary object construction from untrusted YAML.
        with open(planfile) as fh:
            plan_dict = yaml.safe_load(fh)
        return CAMPPlan.create_from_dict(plan_dict)
15.066667
43
0.685841
27
226
5.481481
0.814815
0
0
0
0
0
0
0
0
0
0
0
0.221239
226
14
44
16.142857
0.840909
0
0
0
0
0
0.049327
0
0
0
0
0
0
1
0.125
false
0
0.25
0
0.625
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
51e7204783742c4d06205cf3ac4d3a46079650b3
22,987
py
Python
slottools/PhotometryConfigWidget.py
apodemus/pysalt3
97bb790ad7bcf1137e3ffd2a7b32840ae7167358
[ "BSD-3-Clause" ]
null
null
null
slottools/PhotometryConfigWidget.py
apodemus/pysalt3
97bb790ad7bcf1137e3ffd2a7b32840ae7167358
[ "BSD-3-Clause" ]
null
null
null
slottools/PhotometryConfigWidget.py
apodemus/pysalt3
97bb790ad7bcf1137e3ffd2a7b32840ae7167358
[ "BSD-3-Clause" ]
1
2021-07-15T19:43:59.000Z
2021-07-15T19:43:59.000Z
################################# LICENSE ################################## # Copyright (c) 2009, South African Astronomical Observatory (SAAO) # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer # # in the documentation and/or other materials provided with the # # distribution. # # * Neither the name of the South African Astronomical Observatory # # (SAAO) nor the names of its contributors may be used to endorse # # or promote products derived from this software without specific # # prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR # # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # # DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY # # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # ############################################################################ """ Module containing generic graphical user interface widgets. 
""" # Ensure python 2.5 compatibility import matplotlib.cm # General imports import pyfits import numpy as np # Gui library imports try: from PyQt4.QtCore import QString except ImportError: QString = str from PyQt4 import QtGui, QtCore from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure from matplotlib.patches import CirclePolygon, Rectangle # Salt imports import saltsafeio from salterror import SaltError, SaltIOError from saltimagetools import find_object, zscale class PhotometryConfigWidget(QtGui.QWidget): """Configure dialog for photometry. Has settings for: * target position, size * target background * type (anulus/region) * parameters * comparison position, size * comparison background * type (anulus/region) * parameters """ def __init__(self, imdisplay, config, imlist=None, number=1, parent=None): """Setup widget. *imdisplay* a `FitsDisplay` derived fits display widget, *imlist* a list of fits image filenames, *config* filename used for output configuration file, *number* image number to load on startup, *parent* parent widget. 
""" # Set default parameters self.imlist=imlist self.number=number self.config=config self.amp={'target' : 1, 'comparison' : 1 } # Set default marker self.mark_with='circle' # Set default search distance for recentering self.distance=5 # Default line style parameters self.line={ 'target' : { 'color' : 'g', 'width' : 2 }, 'comparison' : { 'color' : 'g', 'width' : 2 }} # Import gui from ui_photometryconfigwidget import Ui_PhotometryConfigWidget # Setup widget QtGui.QWidget.__init__(self, parent) # Bind gui to widget self.ui = Ui_PhotometryConfigWidget() self.ui.setupUi(self) # Destroy widget on close self.setAttribute(QtCore.Qt.WA_DeleteOnClose) # Connect to display window self.imdisplay=imdisplay # Connect position selected signal from display to event handler self.connect(self.imdisplay, QtCore.SIGNAL('positionSelected(float, float)'), self.selectionHandler) # Set current display widget for positionSelected signal self.xdisplay=[] self.ydisplay=[] self.rdisplay=[] # Keep track of currently displayed objects self.display={'target' : {'position' : False, 'annulus' : False, 'region' : False }, 'comparison' : {'position' : False, 'annulus' : False, 'region' : False }} # Keep track of input widgets self.parameters=['x','y','r','r1','r2','x1','y1','x2','y2'] self.input={'target' : { 'x' : self.ui.tgtXLineEdit, 'y' : self.ui.tgtYLineEdit, 'r' : self.ui.tgtRLineEdit, 'r1' : self.ui.tgtR1LineEdit, 'r2' : self.ui.tgtR2LineEdit, 'x1' : self.ui.tgtX1LineEdit, 'y1' : self.ui.tgtY1LineEdit, 'x2' : self.ui.tgtX2LineEdit, 'y2' : self.ui.tgtY2LineEdit}, 'comparison' : { 'x' : self.ui.cmpXLineEdit, 'y' : self.ui.cmpYLineEdit, 'r' : self.ui.cmpRLineEdit, 'r1' : self.ui.cmpR1LineEdit, 'r2' : self.ui.cmpR2LineEdit, 'x1' : self.ui.cmpX1LineEdit, 'y1' : self.ui.cmpY1LineEdit, 'x2' : self.ui.cmpX2LineEdit, 'y2' : self.ui.cmpY2LineEdit}} # Keep track of capture buttons self.buttons=['position','radius','annulus','region'] self.capture={'target' \ : {'position' : self.ui.captureTgt, 
'radius' : self.ui.captureTgtRadius, 'annulus' : self.ui.captureTgtAnulusBackground, 'region' : self.ui.captureTgtRegionBackground}, 'comparison' \ : {'position' : self.ui.captureCmp, 'radius' : self.ui.captureCmpRadius, 'annulus' : self.ui.captureCmpAnulusBackground, 'region' : self.ui.captureCmpRegionBackground}} # Keep track of checkbox recenter widgets self.recenter={'target' : self.ui.tgtRecenterCheckBox, 'comparison' : self.ui.cmpRecenterCheckBox} self.centered={'target' : False, 'comparison' : False} # Enable blocking of redraws self.block={'target' : { 'x' : False, 'y' : False, 'r' : False, 'r1' : False, 'r2' : False, 'x1' : False, 'y1' : False, 'x2' : False, 'y2' : False}, 'comparison' : { 'x' : False, 'y' : False, 'r' : False, 'r1' : False, 'r2' : False, 'x1' : False, 'y1' : False, 'x2' : False, 'y2' : False}} # Set validator to ensure valid input on lineEdit input widgets self.validator = QtGui.QDoubleValidator(self) for object in ['target','comparison']: for key in self.parameters: self.input[object][key].setValidator(self.validator) # Set signal mapper for lineEdit updates self.drawMapper = QtCore.QSignalMapper(self) # Connect lineEdit updates to signal mapper for object in ['target','comparison']: for key in self.parameters: # Add signal map entry self.drawMapper.setMapping(self.input[object][key], QString(object+','+key)) # Connect to signal mapper self.connect(self.input[object][key], QtCore.SIGNAL('textChanged(QString)'), self.drawMapper, QtCore.SLOT('map()')) # Connect signal mapper to draw handler self.connect(self.drawMapper, QtCore.SIGNAL('mapped(QString)'), self.textUpdated) # Set signal mapper for capture buttons self.captureMapper = QtCore.QSignalMapper(self) # Connect capture button signals to signal mapper for object in ['target','comparison']: for key in self.buttons: # Add signal map entry self.captureMapper.setMapping(self.capture[object][key], QString(object+','+key)) # Connect to signal mapper self.connect(self.capture[object][key], 
QtCore.SIGNAL('clicked()'), self.captureMapper, QtCore.SLOT('map()')) # Connect signal mapper to capture handler self.connect(self.captureMapper, QtCore.SIGNAL('mapped(QString)'), self.captureHandler) # Connect save button self.connect(self.ui.saveButton, QtCore.SIGNAL('clicked()'), self.save) # If an image list is given if self.imlist is not None: # Connect image selection spinBox to event handlers self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.loadImage) self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.redraw) # Load first image self.setImageNumber(self.number) # Hide end selection widgets (not implemented here) self.ui.tgtEndPosLabel.hide() self.ui.tgtEndXLabel.hide() self.ui.tgtEndYLabel.hide() self.ui.cmpEndPosLabel.hide() self.ui.cmpEndXLabel.hide() self.ui.cmpEndYLabel.hide() self.ui.tgtXEndLineEdit.hide() self.ui.tgtYEndLineEdit.hide() self.ui.cmpXEndLineEdit.hide() self.ui.cmpYEndLineEdit.hide() self.ui.captureTgtEnd.hide() self.ui.captureCmpEnd.hide() def setImageNumber(self,number): """Set the image number.""" self.ui.imageSpinBox.setValue(number) def loadImage(self, number): """Loads a new image. *number* is the image number to be loaded. This function uses `saltsafeio.getexposure` to get the correct exposure from a list of fits files containing an arbitrary number of extensions. 
""" # Emit signal self.emit(QtCore.SIGNAL("imageNumberUpdated(int)"), number) # Load image from file self.img=saltsafeio.get_exposure(self.imlist,number) # Display image self.imdisplay.loadImage(self.img) # Redraw canvas self.imdisplay.redraw_canvas() def mark(self,*args,**kwargs): if self.mark_with=='square': self.imdisplay.addSquare(*args,**kwargs) elif self.mark_with=='circle': self.imdisplay.addCircle(*args,**kwargs) def textUpdated(self,key): # Get object and parameter from key obj,par=str(key).split(',') # Check block if self.block[obj][par]: return # Set block to prevent infinite repeat self.block[obj][par]=True # Recenter on object if requested if par=='x' and self.recenter[obj].isChecked() and not self.centered[obj]: x=float(self.input[obj]['x'].text()) y=float(self.input[obj]['y'].text()) r=float(self.input[obj]['r'].text()) x,y=find_object(self.img,x,y,self.distance) self.input[obj]['x'].setText(str(x)) self.input[obj]['y'].setText(str(y)) self.centered[obj]=not(self.centered[obj]) # Check if object region size locking is on if self.ui.lockObjectSizes.isChecked(): if par=='r': r=self.input[obj]['r'].text() if obj=='target': self.input['comparison']['r'].setText(r) elif obj=='comparison': self.input['target']['r'].setText(r) # Check if background size locking is on if self.ui.lockBackgroundSize.isChecked(): if par in ['r1','r2']: r=self.input[obj][par].text() if obj=='target': self.ui.cmpAnulusRadioButton.setChecked(True) self.input['comparison'][par].setText(r) elif obj=='comparison': self.ui.tgtAnulusRadioButton.setChecked(True) self.input['target'][par].setText(r) elif par in ['x1','y1','x2','y2']: c=self.input[obj][par].text() if obj=='target': self.ui.cmpRegionRadioButton.setChecked(True) self.input['comparison'][par].setText(c) elif obj=='comparison': self.ui.tgtRegionRadioButton.setChecked(True) self.input['target'][par].setText(c) # Check if background region centering if self.ui.allignTgtVerticalCenter.isChecked(): if par in ['y1','y2']: 
y=float(self.input[obj][par].text()) center=self.img.shape[0]/2.0 height=abs(y-center) self.input[obj]['y1'].setText(str(center+height)) self.input[obj]['y2'].setText(str(center-height)) # Draw markers self.draw(key) # Unset block self.block[obj][par]=False def draw(self,key): """Draws markers for object positions, and backgrounds. To be called when any input widget value changes. *key* is given by the signal mapper and consists of a string with the object and parameter separated by a comma. """ # Get object and parameter from key obj,par=str(key).split(',') try: # Set amplifier self.amp[obj]=self.getCurrentAmp() # Draw markers if par=='x' or par=='y' or par=='r': x=float(self.input[obj]['x'].text()) y=float(self.input[obj]['y'].text()) r=float(self.input[obj]['r'].text()) self.display[obj]['position']=True self.mark(obj,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width']) elif par=='r1' or par=='r2': # Annulus is selected so remove region marker self.imdisplay.removePatch(obj+'_region') x=float(self.input[obj]['x'].text()) y=float(self.input[obj]['y'].text()) r=float(self.input[obj][par].text()) # Keep track of the selected background mode self.display[obj]['annulus']=True self.display[obj]['region']=False self.mark(obj+'_'+par,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width']) elif par=='x1' or par=='y1' or par=='x2' or par=='y2': # Region is selected so remove annulus markers self.imdisplay.removePatch(obj+'_r1') self.imdisplay.removePatch(obj+'_r2') x1=float(self.input[obj]['x1'].text()) y1=float(self.input[obj]['y1'].text()) x2=float(self.input[obj]['x2'].text()) y2=float(self.input[obj]['y2'].text()) # Keep track of the selected background mode self.display[obj]['annulus']=False self.display[obj]['region']=True self.imdisplay.addRectangle(obj+'_region',x1,y1,x2,y2, color=self.line[obj]['color'],lw=self.line[obj]['width']) # Redraw canvas self.imdisplay.redraw_canvas(keepzoom=True) except ValueError: pass def redraw(self, number): 
"""Redraws object and background markers for all objects on the currently displayed amplifier *number*. """ self.imdisplay.reset() # Find wich amplifier is currently displayed amp=self.getCurrentAmp() # (Re)draw markers for obj in ['target','comparison']: if self.amp[obj]==amp: if self.display[obj]['position']: self.draw(obj+','+'r') if self.display[obj]['annulus']: self.draw(obj+','+'r1') self.draw(obj+','+'r2') if self.display[obj]['region']: self.draw(obj+','+'y2') def getCurrentAmp(self, namps=4): """Returns the currently displayed amplifier. *namps* is the number of amplifiers on the CCD. """ # Get exposure number n=int(self.ui.imageSpinBox.value()) # Convert exposure number to current amplifier number amp=n%namps if amp==0: amp=namps return amp def captureHandler(self, key): """Called when a capture button is clicked. *key* is given by the signal mapper and consists of a string with the object and parameter separated by a comma. Depending on the *key* input widgets are added to the current display lists. Subsequent calls to `self.selectionHandler` get displayed in the listed widgets. """ # Get object and parameter from key obj,par=str(key).split(',') # Add input widgets to lists if par=='position': self.xdisplay=[self.input[obj]['x']] self.ydisplay=[self.input[obj]['y']] self.rdisplay=[] elif par=='radius': self.xdisplay=[] self.ydisplay=[] self.x=float(self.input[obj]['x'].text()) self.y=float(self.input[obj]['y'].text()) self.rdisplay=[self.input[obj]['r']] elif par=='annulus': self.xdisplay=[] self.ydisplay=[] self.x=float(self.input[obj]['x'].text()) self.y=float(self.input[obj]['y'].text()) self.rdisplay=[self.input[obj]['r1'], self.input[obj]['r2']] elif par=='region': self.xdisplay=[self.input[obj]['x1'], self.input[obj]['x2']] self.ydisplay=[self.input[obj]['y1'], self.input[obj]['y2']] self.rdisplay=[] def selectionHandler(self, x, y): """Event handler for click in image display window. 
*x*, *y* is the position (in image pixel coordinates) of the click. These positions are inserted into the first input widgets in the display lists. If a radius is requested this is calculated from the position given in (self.x, self.y) which should be set to the current object. """ if len(self.xdisplay)>0: display=self.xdisplay.pop(0) display.setText(str(x)) if len(self.ydisplay)>0: display=self.ydisplay.pop(0) display.setText(str(y)) if len(self.rdisplay)>0: r=np.sqrt((x-self.x)**2+(y-self.y)**2) display=self.rdisplay.pop(0) display.setText(str(r)) def setSearchDistance(self, distance): """Set search distance used for recentering.""" self.distance=int(distance) def setMarkerType(self, marker): """Set marker type to 'circle' or 'square'.""" if marker in ['circle','square']: self.mark_with=marker else: raise SaltIOError('Unknown marker type '+str(marker)) def setLineColor(self, object, color): """Changes the default line color used for marking.""" self.line[object]['color']=color def setLineWidth(self, object, width): """Changes the default line width used for marking.""" self.line[object]['width']=width def save(self): """Save configuration. 
The format is:: For objects that use an anullus: object amp x y r r1 r2 For objects that use a region: object amp x y r x1 y1 x2 y2 """ if (self.ui.tgtAnulusRadioButton.isChecked() and self.ui.cmpRegionRadioButton.isChecked()) or \ (self.ui.tgtRegionRadioButton.isChecked() and self.ui.cmpAnulusRadioButton.isChecked()): msg='SLOTPREVIEW--SLOTPHOT can not handle different background types' raise SaltError(msg) # Write values to file with open(self.config,'w') as f: for i,obj in enumerate(['target','comparison']): b_type='region' if obj=='target': print(obj, self.ui.tgtAnulusRadioButton.isChecked()) if self.ui.tgtAnulusRadioButton.isChecked(): b_type='annulus' elif obj=='comparison': if self.ui.cmpAnulusRadioButton.isChecked(): b_type='annulus' # If r1 is not zero, assumes annulus line='%i\t%i\t' % (i+1, self.amp[obj]) if b_type=='annulus': line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'r1', 'r2']) else: line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'x1', 'y2', 'x2', 'y2']) # Write string to configfile f.write(line.rstrip()+'\n') # Exit program self.close()
39.428816
131
0.525253
2,404
22,987
5.009567
0.206323
0.030391
0.035871
0.028232
0.250851
0.19638
0.17313
0.150046
0.150046
0.150046
0
0.008578
0.355897
22,987
582
132
39.496564
0.804809
0.274938
0
0.196013
0
0
0.070532
0.004204
0
0
0
0
0
1
0.049834
false
0.003322
0.043189
0
0.10299
0.003322
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51e720cfeb8235927a5ad18a3477edc29e509a46
1,895
py
Python
Linked Lists/add_two_numbers.py
fredricksimi/leetcode
f6352c26914ca77f915f5994746ecf0b36efc89b
[ "MIT" ]
null
null
null
Linked Lists/add_two_numbers.py
fredricksimi/leetcode
f6352c26914ca77f915f5994746ecf0b36efc89b
[ "MIT" ]
null
null
null
Linked Lists/add_two_numbers.py
fredricksimi/leetcode
f6352c26914ca77f915f5994746ecf0b36efc89b
[ "MIT" ]
1
2021-12-05T12:27:46.000Z
2021-12-05T12:27:46.000Z
""" Add Two Numbers: Leetcode 2 You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list. You may assume the two numbers do not contain any leading zero, except the number 0 itself. """ # Definition for singly-linked list. # class ListNode(object): # def __init__(self, val=0, next=None): # self.val = val # self.next = next class Solution(object): """ This is how addition works (Elementary Math): 111 <- carried values ||| 7692 +3723 ----- 0426 ----- """ # O(max(m,n)) time | O(max(m,n)) space | m=len(l1), n=len(l2) def addTwoNumbers(self, l1, l2): # declare pointers p1 = l1 p2 = l2 # used to store the carry value carry = 0 # declare result linked list result = ListNode() res_curr = result # position on the result linked list # remember to add the 'carry' edge case to the while loop # example 119 + 119 while p1 != None or p2 != None or carry != 0: top = 0 bottom = 0 if p1 != None: top = p1.val p1 = p1.next if p2 != None: bottom = p2.val p2 = p2.next my_sum = carry + top + bottom # check if we'll carry # max of my_sum is 19 if my_sum > 9: # carry value res_curr.next = ListNode(val=my_sum-10) carry = 1 else: res_curr.next = ListNode(val=my_sum) carry = 0 res_curr = res_curr.next # skip the node we created during initialization of the linked list return result.next
25.958904
91
0.543008
256
1,895
3.964844
0.472656
0.049261
0.032512
0.011823
0.053202
0.053202
0.053202
0
0
0
0
0.044764
0.375198
1,895
72
92
26.319444
0.8125
0.510818
0
0.08
0
0
0
0
0
0
0
0
0
1
0.04
false
0
0
0
0.12
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51e80509e8407b12965b7a40a0711a4c2dc1fa8b
243
py
Python
module1-introduction-to-sql/buddymove_create.py
alexmjn/DS-Unit-3-Sprint-2-SQL-and-Databases
89d66757d25b7d07090bfb6280a78b897f87fc69
[ "MIT" ]
1
2020-02-17T17:02:43.000Z
2020-02-17T17:02:43.000Z
module1-introduction-to-sql/buddymove_create.py
alexmjn/DS-Unit-3-Sprint-2-SQL-and-Databases
89d66757d25b7d07090bfb6280a78b897f87fc69
[ "MIT" ]
null
null
null
module1-introduction-to-sql/buddymove_create.py
alexmjn/DS-Unit-3-Sprint-2-SQL-and-Databases
89d66757d25b7d07090bfb6280a78b897f87fc69
[ "MIT" ]
null
null
null
import pandas as pd import sqlite3 df = pd.read_csv("buddymove_holidayiq.csv") connection = sqlite3.connect("buddymove_holidayiq.sqlite3") df.to_sql("review", connection) print(connection.execute("SELECT * FROM review LIMIT 10").fetchall())
27
69
0.781893
33
243
5.636364
0.666667
0.096774
0
0
0
0
0
0
0
0
0
0.022624
0.090535
243
8
70
30.375
0.819005
0
0
0
0
0
0.349794
0.205761
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
51e82124f47a81e7a21c3ded6ba32d9e42bbdaa0
6,539
py
Python
darts_socket/darts_server/display.py
y-azuma/opencv-softdarts
644778be219fb96cdde32b884157899d39fc14e5
[ "MIT" ]
9
2019-05-01T18:42:47.000Z
2021-09-05T09:49:44.000Z
darts_socket/darts_server/display.py
y-azuma/opencv-softdarts
644778be219fb96cdde32b884157899d39fc14e5
[ "MIT" ]
null
null
null
darts_socket/darts_server/display.py
y-azuma/opencv-softdarts
644778be219fb96cdde32b884157899d39fc14e5
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import tkinter as tk import sound import socket import threading class ConnClient(): ''' ソケット通信によりラズベリーパイから画像情報を受け取る。 ''' def __init__(self,conn, addr): self.conn_socket = conn self.addr = addr self.recvdata = 0 self.recvdata1 = 0 self.recvdata2 = 0 self.data_list=0 def run(self): try: self.recvdata = self.conn_socket.recv(2359296) self.recvdata1 = self.recvdata.decode('utf-8') self.recvdata2 = self.recvdata1.split(",") self.data_list = [int(s) for s in self.recvdata2] except socket.error: print("connect error") def stop(self): self.conn_socket.close() def main(): global recvlist s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s_socket.bind((HOSTNAME, PORT)) s_socket.listen(CLIENTNUM) while (1): conn, addr = s_socket.accept() print("Conneted by" + str(addr)) connClientThread = ConnClient(conn, addr) connClientThread.run() recvlist = connClientThread.data_list print(recvlist) bullsystem(recvlist[0]) def bullsystem(flag): global throw_number, score, round_total,recvlist photoorder = recvlist[1] throw_number += 1 round_total = recvlist[2] first_throw = recvlist[3] second_throw = recvlist[4] third_throw = recvlist[5] canvas.itemconfig(on_canvas_text1, text=str(first_throw)) canvas.itemconfig(on_canvas_text2, text=str(second_throw)) canvas.itemconfig(on_canvas_text3, text=str(third_throw)) if flag == 1: play_sounds.sound1() score += 50 lb.insert(tk.END, str(throw_number)+ "BULL " + str(score)) canvas.itemconfig( on_canvas_text, text=str(score) ) else: lb.insert(tk.END, str(throw_number)+"NO BULL"+ str(score)) if photoorder == 3 and round_total > 0: changeimg() def memo(): value = entry.get() if not value: lb.insert(tk.END, "入力してね") else: lb.insert(tk.END, value) entry.delete(0, tk.END) def changeimg(): global canvas, on_canvas, score, round_total canvas.move( on_canvas_text, 1000, 1000 ) canvas.move( on_canvas_text1, 1000, 1000 ) canvas.move( on_canvas_text2, 1000, 1000 ) canvas.move( on_canvas_text3, 1000, 1000 ) if round_total == 
50: canvas.itemconfig( on_canvas, image=images[1] ) elif round_total == 100: canvas.itemconfig( on_canvas, image=images[2] ) elif round_total == 150: canvas.itemconfig( on_canvas, image=images[3] ) root.after(3900, play_sounds.sound2) root.after(7000, rechangeimg) def rechangeimg(): global root, canvas canvas.itemconfig( on_canvas, image=images[0] ) canvas.move( on_canvas_text, -1000, -1000 ) canvas.move( on_canvas_text1, -1000, -1000 ) canvas.move( on_canvas_text2, -1000, -1000 ) canvas.move( on_canvas_text3, -1000, -1000 ) def buffer(): #ソケット通信を並列処理 th_body = threading.Thread(target=main, name='main') th_body.setDaemon(True) th_body.start() def rungui(): global root, canvas, on_canvas, images, lb, entry, on_canvas_text, score global on_canvas_text1, on_canvas_text2, on_canvas_text3 #メインウィンドウ root = tk.Tk() root.geometry("1140x675") root.title("DARTS BULL GAME") font = ("Helevetica", 14) font_log = ("Helevetica", 11) # menubar menubar = tk.Menu(root) root.config(menu=menubar) # startmenu startmenu = tk.Menu(menubar) menubar.add_cascade(label="BULL GAME", menu=startmenu) startmenu.add_command(label="開始する", command=lambda: buffer()) # canvas make canvas = tk.Canvas( root, width=960, height=600, relief=tk.RIDGE, bd=2 ) canvas.place(x=175, y=0) # image images.append(tk.PhotoImage(file="501.png")) images.append(tk.PhotoImage(file="onebull.png")) images.append(tk.PhotoImage(file="lowton.png")) images.append(tk.PhotoImage(file="hattrick.png")) on_canvas = canvas.create_image( 0, 0, image=images[0], anchor=tk.NW ) on_canvas_text = canvas.create_text( 480, 300, text=str(score), font=("Helvetica", 250, "bold") ) on_canvas_text1 = canvas.create_text( 850, 145, text=0, font=("Helvetica", 40, "bold"), fill='white') on_canvas_text2 = canvas.create_text( 850, 195, text=0, font=("Helvetica", 40, "bold"), fill='white') on_canvas_text3 = canvas.create_text( 850, 245, text=0, font=("Helvetica", 40, "bold"), fill='white') # response_area response_area = tk.Label( root, width=106, 
height=4, bg="gray", font=font, relief=tk.RIDGE, bd=2 ) response_area.place(x=176, y=600) # entrybox entry = tk.Entry( root, width=75, font=font ) entry.place(x=230, y=630) entry.focus_set() # listbox lb = tk.Listbox( root, width=20, height=43, font=font_log ) # scroolbar1 sb1 = tk.Scrollbar( root, orient=tk.VERTICAL, command=lb.yview ) # スクロールバーと連動 lb.configure(yscrollcommand=sb1.set) lb.grid(row=0, column=0) sb1.grid(row=0, column=1, sticky=tk.NS) # button button = tk.Button( root, bg='black', command=lambda: buffer(), text="START", width=19, ) button.place(x=0, y=655) # button2 button2 = tk.Button( root, width=15, text="MEMO", command=lambda: memo()) button2.place(x=950, y=630) # mainloop root.mainloop() if __name__ == "__main__": lb = None on_canvas = None on_canvas_text = None on_canvas_text1 = None on_canvas_text2 = None on_canvas_text3 = None images = [] entry = None response_area = None score = 0 throw_number = 0 play_sounds = sound.Sounds() HOSTNAME = "192.168.0.3" PORT = 12345 CLIENTNUM = 1 rungui()
21.093548
76
0.570577
782
6,539
4.620205
0.283887
0.070855
0.039856
0.053141
0.217825
0.179352
0.114863
0.099917
0.090783
0.090783
0
0.058837
0.31121
6,539
309
77
21.161812
0.743339
0.027986
0
0.240506
0
0
0.040468
0
0
0
0
0
0
1
0.042194
false
0
0.016878
0
0.063291
0.012658
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51e93219bcf0932e974ca77a974c31a6703b7426
159
py
Python
api/app/api/mutations/types/TokenData.py
VidroX/recommdo
fe518158b1a63225816054fb129f680e1d0c7d9c
[ "MIT" ]
null
null
null
api/app/api/mutations/types/TokenData.py
VidroX/recommdo
fe518158b1a63225816054fb129f680e1d0c7d9c
[ "MIT" ]
null
null
null
api/app/api/mutations/types/TokenData.py
VidroX/recommdo
fe518158b1a63225816054fb129f680e1d0c7d9c
[ "MIT" ]
null
null
null
import graphene class TokenData(graphene.ObjectType): access_token = graphene.String(required=False) refresh_token = graphene.String(required=False)
22.714286
51
0.786164
18
159
6.833333
0.611111
0.211382
0.308943
0.439024
0.520325
0
0
0
0
0
0
0
0.125786
159
6
52
26.5
0.884892
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
51eaff77f11bb6623d586c575371ad21566bce38
857
py
Python
prosperpy/overlays/bollinger_bands.py
CaptainBriot/prosperpy
831abb9c9e3b730c81895647e33a59854c4e4648
[ "MIT" ]
2
2018-01-28T06:11:37.000Z
2018-02-04T16:01:30.000Z
prosperpy/overlays/bollinger_bands.py
CaptainBriot/prosperpy
831abb9c9e3b730c81895647e33a59854c4e4648
[ "MIT" ]
1
2018-03-20T12:10:40.000Z
2018-03-21T00:08:04.000Z
prosperpy/overlays/bollinger_bands.py
CaptainBriot/prosperpy
831abb9c9e3b730c81895647e33a59854c4e4648
[ "MIT" ]
2
2019-04-06T14:33:26.000Z
2020-06-25T23:34:32.000Z
from . import moving_average from .. import indicators class BollingerBands: def __init__(self, values, multiplier=2, moving_average_class=moving_average.SimpleMovingAverage): self.moving_average_class = moving_average_class self.multiplier = multiplier self.moving_average = self.moving_average_class(values) self.standard_deviation = indicators.StandardDeviation(values) def add(self, value): self.moving_average.add(value) self.standard_deviation.add(value) @property def upper(self): return self.moving_average.value + (self.standard_deviation.value * self.multiplier) @property def lower(self): return self.moving_average.value - (self.standard_deviation.value * self.multiplier) @property def bandwidth(self): return self.upper - self.lower
31.740741
102
0.72112
98
857
6.081633
0.244898
0.218121
0.171141
0.130872
0.38255
0.278523
0.278523
0.278523
0.278523
0.278523
0
0.001449
0.194866
857
26
103
32.961538
0.862319
0
0
0.15
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.1
0.15
0.55
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
51eb7396a14c0a9adcd7a3d4b7b068d93d1985e2
2,566
py
Python
neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py
p0i0/openstack-neutron
df2ee28ae9a43cc511482bd6ece5396eb1288814
[ "Apache-2.0" ]
null
null
null
neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py
p0i0/openstack-neutron
df2ee28ae9a43cc511482bd6ece5396eb1288814
[ "Apache-2.0" ]
null
null
null
neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py
p0i0/openstack-neutron
df2ee28ae9a43cc511482bd6ece5396eb1288814
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2016 Red Hat # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils import testtools from neutron.common import utils as common_utils from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.tests import base NATIVE_OVSDB_CONNECTION = ( 'neutron.agent.ovsdb.impl_idl.OvsdbIdl.ovsdb_connection') class TrunkParentPortTestCase(base.BaseTestCase): def setUp(self): super(TrunkParentPortTestCase, self).setUp() # Mock out connecting to ovsdb mock.patch(NATIVE_OVSDB_CONNECTION).start() trunk_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() trunk_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) self.trunk = trunk_manager.TrunkParentPort( trunk_id, port_id, trunk_mac) def test_multiple_transactions(self): def method_inner(trunk): with trunk.ovsdb_transaction() as txn: return id(txn) def method_outer(trunk): with trunk.ovsdb_transaction() as txn: return method_inner(trunk), id(txn) with self.trunk.ovsdb_transaction() as txn1: mock_commit = mock.patch.object(txn1, 'commit').start() txn_inner_id, txn_outer_id = method_outer(self.trunk) self.assertFalse(mock_commit.called) self.assertTrue(mock_commit.called) self.assertTrue(id(txn1) == txn_inner_id == txn_outer_id) def test_transaction_raises_error(self): class MyException(Exception): pass with testtools.ExpectedException(MyException): with self.trunk.ovsdb_transaction() as txn1: mock.patch.object(txn1, 
'commit').start() raise MyException() self.assertIsNone(self.trunk._transaction) with self.trunk.ovsdb_transaction() as txn2: mock.patch.object(txn2, 'commit').start() self.assertIsNot(txn1, txn2)
37.735294
79
0.684334
324
2,566
5.274691
0.429012
0.035108
0.061439
0.067291
0.203043
0.167934
0.093622
0.093622
0
0
0
0.013138
0.228761
2,566
67
80
38.298507
0.85043
0.243959
0
0.097561
0
0
0.046778
0.028067
0
0
0
0
0.121951
1
0.121951
false
0.02439
0.146341
0
0.365854
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51ec05cacfb6953f807cc60da921092f7aeb6965
873
py
Python
app/schema/queries/todo.py
rjNemo/graphql_python_template
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
[ "MIT" ]
1
2021-05-02T01:47:57.000Z
2021-05-02T01:47:57.000Z
app/schema/queries/todo.py
rjNemo/graphql_python_template
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
[ "MIT" ]
null
null
null
app/schema/queries/todo.py
rjNemo/graphql_python_template
14bc5fd657f6bdba8d7293f21cfcec821fa6374f
[ "MIT" ]
null
null
null
""" Defines the query and how to interact with """ from app.schema.types.todo import TodoListResponseField, TodoResponseField from app.usecases.todo import read_all_todos, read_todo_by_id def resolve_list_todos(self, info) -> TodoListResponseField: try: todos = read_all_todos() is_success = True error_message = None except Exception as e: error_message = str(e) is_success = False todos = None return TodoListResponseField( todos=todos, is_success=is_success, error_message=error_message ) def resolve_get_todo(self, info, todo_id: str) -> TodoResponseField: todo, is_success = read_todo_by_id(todo_id) error_message = "This element does not exist." if not is_success else None return TodoResponseField( todo=todo, is_success=is_success, error_message=error_message )
28.16129
78
0.717068
115
873
5.173913
0.426087
0.121008
0.040336
0.040336
0.141176
0.141176
0.141176
0.141176
0
0
0
0
0.213058
873
30
79
29.1
0.866084
0.04811
0
0
0
0
0.034022
0
0
0
0
0
0
1
0.1
false
0
0.1
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51edfd8aa7141c159d7380cdc94b6b92242facba
469
py
Python
models/EntryRating.py
felix19350/Nature-Trails
11b91643e38139ec8c15674289fa67dc0fe65ac4
[ "Unlicense" ]
null
null
null
models/EntryRating.py
felix19350/Nature-Trails
11b91643e38139ec8c15674289fa67dc0fe65ac4
[ "Unlicense" ]
null
null
null
models/EntryRating.py
felix19350/Nature-Trails
11b91643e38139ec8c15674289fa67dc0fe65ac4
[ "Unlicense" ]
null
null
null
from google.appengine.ext import db from models.Trail import Trail class EntryRating(db.Model): ''' Stores the ratings users give to locations Parameters: entry_key user_id rating ''' trail = db.ReferenceProperty(Trail) user = db.UserProperty(required = True) rating = db.RatingProperty(required = True) def __str__(self) : return "%s: %s - %f" % (self.user.nickname(), self.title, self.rating)
26.055556
78
0.635394
56
469
5.214286
0.660714
0.082192
0
0
0
0
0
0
0
0
0
0
0.260128
469
18
79
26.055556
0.841499
0.19403
0
0
0
0
0.0317
0
0
0
0
0
0
1
0.125
false
0
0.25
0.125
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
51ef4f3354164b3ce73be7d1b2f9128704be0733
8,884
py
Python
meiduo_mall/apps/orders/views.py
canarysama/meiduo_project
906cf667e27fa205b18aeb10b009d76dec19b211
[ "MIT" ]
null
null
null
meiduo_mall/apps/orders/views.py
canarysama/meiduo_project
906cf667e27fa205b18aeb10b009d76dec19b211
[ "MIT" ]
null
null
null
meiduo_mall/apps/orders/views.py
canarysama/meiduo_project
906cf667e27fa205b18aeb10b009d76dec19b211
[ "MIT" ]
null
null
null
import json
from datetime import datetime
from decimal import Decimal

from django import http
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection

from apps.goods.models import SKU
from apps.orders.models import OrderInfo, OrderGoods
from apps.users.models import Address, User
from meiduo_mall.settings.dev import logger
from utils.response_code import RETCODE


class OrderSettlementView(LoginRequiredMixin,View):
    """Render the order-settlement (checkout) page for the selected cart items."""

    def get(self, request):
        user = request.user
        try:
            addresses = Address.objects.filter(user=user,is_deleted=False)
        except Exception as e:
            # Best-effort: the template tolerates a missing address list.
            addresses = None
        # Cart lives in redis, one hash per user; values are JSON blobs.
        redis_client = get_redis_connection('carts')
        carts_data = redis_client.hgetall(user.id)
        carts_dict = {}
        for key,value in carts_data.items():
            sku_key = int(key.decode())
            sku_dict = json.loads(value.decode())
            # Only "selected" cart entries participate in settlement.
            if sku_dict["selected"]:
                carts_dict[sku_key] = sku_dict
        skus = SKU.objects.filter(id__in = carts_dict.keys())
        total_count = 0
        total_amount = Decimal('0.00')
        for sku in skus:
            # Attach per-line count/amount onto each SKU instance for the template.
            sku.count = carts_dict[sku.id]['count']
            sku.amount = sku.price * sku.count
            total_count += sku.count
            total_amount += sku.price * sku.count
        # Flat freight charge.
        freight = Decimal('10.00')
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': freight,
            'payment_amount': total_amount + freight,
            'default_address_id': user.default_address_id
        }
        return render(request, 'place_order.html', context)


class OrderCommitView(LoginRequiredMixin,View):
    """Create an order (and its order-goods rows) from the selected cart items."""

    def post(self,request):
        # Receive parameters.
        # NOTE(review): the body is parsed three times; json_dict already
        # holds the parsed payload.
        json_dict = json.loads(request.body.decode())
        address_id = json.loads(request.body.decode())['address_id']
        pay_method = json.loads(request.body.decode())['pay_method']
        user = request.user
        # Validate.
        try:
            address = Address.objects.get(id=address_id)
        except Address.DoesNotExist:
            return http.HttpResponseForbidden('WUXIAO')
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'],OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('不支持')
        # Order id = timestamp + zero-padded 9-digit user id.
        # user = request.user
        order_id = datetime.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        # Transaction: the order and all its goods rows commit atomically.
        from django.db import transaction
        with transaction.atomic():
            # -------- transaction savepoint --------
            save_id = transaction.savepoint()
            try:
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user = user,
                    address = address,
                    total_count = 0,
                    total_amount = Decimal('0.00'),
                    freight = Decimal("10.00"),
                    pay_method = pay_method,
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                redis_client = get_redis_connection('carts')
                carts_data = redis_client.hgetall(user.id)
                carts_dict = {}
                for key,value in carts_data.items():
                    sku_id = int(key.decode())
                    sku_dict = json.loads(value.decode())
                    if sku_dict['selected']:
                        carts_dict[sku_id] = sku_dict
                sku_ids = carts_dict.keys()
                for sku_id in sku_ids:
                    # Optimistic-lock retry loop: keep retrying while another
                    # writer changes the stock between our read and update.
                    while True:
                        sku = SKU.objects.get(id=sku_id)
                        # sku.stock -= cart_count
                        # sku.sales += cart_count
                        # sku.sava()
                        original_stock = sku.stock
                        original_sales = sku.sales
                        # Check stock.
                        cart_count = carts_dict[sku_id]['count']
                        if cart_count > sku.stock:
                            transaction.savepoint_rollback(save_id)
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
                        import time
                        # time.sleep(10)
                        new_stock = original_stock - cart_count
                        new_sales = original_sales + cart_count
                        # Conditional update succeeds only if stock is unchanged
                        # (compare-and-swap); result == 0 means we lost the race.
                        result = SKU.objects.filter(id=sku_id, stock=original_stock).update(stock=new_stock,sales=new_sales)
                        if result == 0:
                            continue
                        # NOTE(review): this re-save writes the same values the
                        # .update() above already wrote (redundant).
                        sku.stock -= cart_count
                        sku.sales += cart_count
                        sku.save()
                        sku.spu.sales += cart_count
                        sku.spu.save()
                        # Create the order-goods row.
                        OrderGoods.objects.create(
                            order_id = order_id,
                            sku = sku,
                            count = cart_count,
                            price = sku.price,
                        )
                        # Accumulate totals (freight not yet included).
                        order.total_count += cart_count
                        order.total_amount += sku.price * cart_count
                        # Exit the retry loop once this SKU is committed.
                        break
                # Add freight to the grand total.
                order.total_amount += order.freight
                order.save()
            except Exception as e :
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
            transaction.savepoint_commit(save_id)
        # Clear the cart.
        # redis_client.hdel(user.id, *carts_dict)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id})


class OrderSuccessView(View):
    """Render the order-success page from query-string echo parameters."""

    def get(self,request):
        order_id = request.GET.get("order_id")
        pay_method = request.GET.get("pay_method")
        payment_amount = request.GET.get("payment_amount")
        context={
            "order_id":order_id,
            "pay_method":pay_method,
            "payment_amount":payment_amount,
        }
        return render(request,'order_success.html',context)


class OrderShowView(LoginRequiredMixin,View):
    """Render the user's order list page (implementation appears unfinished:
    page_orders is never populated before rendering)."""

    def get(self,request,page_num):
        # NOTE(review): identifies the user from the plain 'username' cookie,
        # not from request.user — verify this is intended.
        username = request.COOKIES.get('username')
        user = User.objects.get(username=username)
        user_id = user.id
        order_data = OrderInfo.objects.all()
        goods_data = OrderGoods.objects.all()
        order_ids = order_data.filter(user_id=user_id).values('order_id')
        # order_ids = OrderInfo.objects.filter(user_id=user_id)
        page_orders = {}
        # List of all order ids.
        order_list = []
        order_id_count = goods_data.values('order_id', 'count')
        order_id_set = set()
        for order_data_co in order_id_count:
            a = order_data_co['order_id']
            order_list.append(a)
        order_list =list(set(order_list))
        print(order_list)
        for order_id in order_ids:
            order_id = order_id['order_id']
            # Order timestamp.
            time_old = order_data.filter(order_id=order_id).values('create_time')
            time = str(time_old[0]['create_time'])
            time_new = time[0:16]
            # Freight.
            freight = time_old.values('freight')[0]['freight']
            """<QuerySet [{'address_id': 1, 'user_id': 19, 'total_count': 1, 'order_id': '20190927003440000000019', 'status': 1, 'pay_method': 2, 'create_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 214624, tzinfo=<UTC>), 'update_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 235034, tzinfo=<UTC>), 'freight': Decimal('10.00'), 'total_amount': Decimal('6698.00')}]> """
            # if total_amount-freight == 0.00 or total_amount == 0.00:
            #     continue
            # # page_orders = {}
            # for Goods in goods_data:
            #     page_orders.setdefault(order_id,[time,freight,]).append(Goods)
        page_num = 1
        """ 下单时间 订单号 商品信息 数量 单价 总价 运费 支付方式 订单状态 """
        context = {
            "page_orders": page_orders,
            # # # 总页数
            # # 'total_page': total_page,
            # # # 当前页
            'page_num': page_num,
        }
        return render(request,'user_center_order.html',context)
31.842294
124
0.533431
945
8,884
4.798942
0.225397
0.038589
0.023815
0.018523
0.261963
0.213892
0.174201
0.158765
0.144653
0.114223
0
0.019319
0.364926
8,884
278
125
31.956835
0.784474
0.06281
0
0.173913
0
0
0.060637
0.002826
0
0
0
0
0
1
0.024845
false
0
0.093168
0
0.192547
0.006211
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f094af4d80238e40eb72e75e31a6ae810f3f62
11,159
py
Python
pie/tray_icon.py
sabaatworld/pie-indexing-service-py
f48ee18023f9c15e18fdb4296ba651fd343aef01
[ "MIT" ]
2
2020-03-30T18:00:40.000Z
2020-05-30T17:09:04.000Z
pie/tray_icon.py
sabaatworld/pie-indexing-service-py
f48ee18023f9c15e18fdb4296ba651fd343aef01
[ "MIT" ]
null
null
null
pie/tray_icon.py
sabaatworld/pie-indexing-service-py
f48ee18023f9c15e18fdb4296ba651fd343aef01
[ "MIT" ]
null
null
null
import json
import logging
import os
import ssl
import webbrowser
from multiprocessing import Event, Queue
from urllib.request import urlopen

import certifi
from PySide2 import QtCore, QtGui, QtWidgets
from packaging import version

from pie.core import IndexDB, IndexingHelper, MediaProcessor
from pie.domain import IndexingTask, Settings
from pie.log_window import LogWindow
from pie.preferences_window import PreferencesWindow
from pie.util import MiscUtils, QWorker


class TrayIcon(QtWidgets.QSystemTrayIcon):
    """System-tray entry point: builds the tray menu and dispatches indexing,
    deletion and update-check work onto a Qt thread pool."""

    __APP_VER = "1.0.2"
    __logger = logging.getLogger('TrayIcon')

    def __init__(self, log_queue: Queue):
        """Build the tray menu and optionally kick off a background update check.

        :param log_queue: queue handed to background workers for log records.
        """
        super().__init__(QtGui.QIcon(MiscUtils.get_app_icon_path()))
        self.log_queue = log_queue
        self.preferences_window: PreferencesWindow = None
        self.log_window: LogWindow = None
        self.indexing_stop_event: Event = None
        self.observer = None
        self.indexDB = IndexDB()
        self.threadpool: QtCore.QThreadPool = QtCore.QThreadPool()
        self.__logger.debug("QT multithreading with thread pool size: %s", self.threadpool.maxThreadCount())
        self.setToolTip("Batch Media Compressor")
        self.activated.connect(self.trayIcon_activated)
        # Tray context menu; actions are kept as attributes so they can be
        # enabled/disabled while background work runs.
        tray_menu = QtWidgets.QMenu('Main Menu')
        self.startIndexAction = tray_menu.addAction('Start Processing', self.startIndexAction_triggered)
        self.stopIndexAction = tray_menu.addAction('Stop Processing', self.stopIndexAction_triggered)
        self.stopIndexAction.setEnabled(False)
        tray_menu.addSeparator()
        self.clearIndexAction = tray_menu.addAction('Clear Indexed Files', self.clearIndexAction_triggered)
        self.clearOutputDirsAction = tray_menu.addAction('Clear Ouput Directories', self.clearOutputDirsAction_triggered)
        tray_menu.addSeparator()
        self.editPrefAction = tray_menu.addAction('Edit Preferences', self.editPreferencesAction_triggered)
        self.viewLogsAction = tray_menu.addAction('View Logs', self.viewLogsAction_triggered)
        tray_menu.addSeparator()
        self.updateCheckAction = tray_menu.addAction('Check for Updates', self.updateCheckAction_triggered)
        self.coffeeAction = tray_menu.addAction('Buy me a Coffee', self.coffeeAction_triggered)
        tray_menu.addSeparator()
        tray_menu.addAction('Quit', self.quitMenuAction_triggered)
        self.setContextMenu(tray_menu)
        self.apply_process_changed_setting()
        # Fire-and-forget update check when enabled in settings.
        if self.indexDB.get_settings().auto_update_check:
            self.update_check_worker = QWorker(self.auto_update_check)
            self.threadpool.start(self.update_check_worker)

    def trayIcon_activated(self, reason):
        # Intentionally a no-op; interaction happens via the context menu.
        pass

    def startIndexAction_triggered(self):
        """Start the indexing pipeline on a worker thread."""
        if self.indexDB.get_settings().auto_show_log_window:
            self.show_view_logs_window()
        self.background_processing_started()
        self.indexing_stop_event = Event()
        self.indexing_worker = QWorker(self.start_indexing)
        self.indexing_worker.signals.finished.connect(self.background_processing_finished)
        self.threadpool.start(self.indexing_worker)
        self.stopIndexAction.setEnabled(True)

    def stopIndexAction_triggered(self):
        """Ask for confirmation, then signal the running task to stop."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Are you sure you want to stop the current task?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.stopIndexAction.setEnabled(False)
            self.stop_async_tasks()

    def clearIndexAction_triggered(self):
        """Confirm, then clear the index AND delete all output files."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Forget indexed files and delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, True)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def clearOutputDirsAction_triggered(self):
        """Confirm, then delete output files only (index is kept)."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, False)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def start_deletion(self, clearIndex: bool):
        """Worker body: optionally clear the index, then empty both output dirs.

        :param clearIndex: when True, also forget all indexed files.
        """
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            if clearIndex:
                indexDB.clear_indexed_files()
                self.__logger.info("Index cleared")
            settings: Settings = indexDB.get_settings()
            MiscUtils.recursively_delete_children(settings.output_dir)
            MiscUtils.recursively_delete_children(settings.unknown_output_dir)
            self.__logger.info("Output directories cleared")

    def editPreferencesAction_triggered(self):
        # Lazily create the preferences window on first use.
        if self.preferences_window is None:
            self.preferences_window = PreferencesWindow(self.apply_process_changed_setting)
        self.preferences_window.show()

    def viewLogsAction_triggered(self):
        self.show_view_logs_window()

    def show_view_logs_window(self):
        # Lazily create the log window on first use.
        if self.log_window is None:
            self.log_window = LogWindow(self.threadpool)
        self.log_window.show()

    def updateCheckAction_triggered(self):
        # Manual check: report "no updates" to the user as well.
        self.check_for_updates(True)

    def auto_update_check(self):
        # Background check at startup: stay silent when up to date.
        MiscUtils.debug_this_thread()
        self.check_for_updates(False)

    def check_for_updates(self, display_not_found: bool):
        """Compare __APP_VER against the latest GitHub release tag.

        :param display_not_found: when True, show a dialog even if no
            newer release exists.
        """
        api_url = "https://api.github.com/repos/sabaatworld/batch-media-compressor/releases/latest"
        releases_url = "https://github.com/sabaatworld/batch-media-compressor/releases"
        update_found = False
        try:
            ssl_context = ssl.create_default_context(cafile=certifi.where())
            response = urlopen(api_url, context=ssl_context)
            response_string = response.read().decode('utf-8')
            response_json = json.loads(response_string)
            tag_name: str = response_json["tag_name"]
            if tag_name is not None:
                # Tags look like "v1.2.3"; strip the prefix before parsing.
                release_version = version.parse(tag_name.replace("v", ""))
                current_version = version.parse(self.__APP_VER)
                self.__logger.info("Updated Check successful: Current Version: %s, Latest Release: %s", str(current_version), str(release_version))
                if current_version < release_version:
                    update_found = True
        except:
            # NOTE(review): bare except — deliberate best-effort, but it also
            # swallows KeyboardInterrupt/SystemExit.
            self.__logger.exception("Failed to check for updates")
        if update_found:
            if QtWidgets.QMessageBox.information(
                None, "Update Check",
                "New version available. Do you wish to download the latest release now?\n\nCurrent Verion: {}\nNew Version: {}".format(str(current_version), str(release_version)),
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
            ) == QtWidgets.QMessageBox.Yes:
                webbrowser.open(releases_url)
        elif display_not_found:
            QtWidgets.QMessageBox.information(None, "Update Check", "No updates found.\n\nIf you think this is an error, please check your internet connection and try again.", QtWidgets.QMessageBox.Ok)

    def coffeeAction_triggered(self):
        webbrowser.open('https://paypal.me/sabaat')

    def quitMenuAction_triggered(self):
        QtWidgets.QApplication.quit()

    def start_indexing(self):
        """Worker body: scan, index and process media files, honoring
        indexing_stop_event between stages."""
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            indexing_task = IndexingTask()
            indexing_task.settings = indexDB.get_settings()
            if self.settings_valid(indexing_task.settings):
                misc_utils = MiscUtils(indexing_task)
                misc_utils.create_root_marker()
                indexing_helper = IndexingHelper(indexing_task, self.log_queue, self.indexing_stop_event)
                (scanned_files, _) = indexing_helper.scan_dirs()
                indexing_helper.remove_slate_files(indexDB, scanned_files)
                indexing_helper.lookup_already_indexed_files(indexDB, scanned_files)
                if not self.indexing_stop_event.is_set():
                    indexing_helper.create_media_files(scanned_files)
                if not self.indexing_stop_event.is_set():
                    media_processor = MediaProcessor(indexing_task, self.log_queue, self.indexing_stop_event)
                    media_processor.save_processed_files(indexDB)
                if not self.indexing_stop_event.is_set():
                    misc_utils.cleanEmptyOutputDirs()

    def settings_valid(self, settings: Settings) -> bool:
        """Validate the three configured directories; log and return False
        on the first problem found."""
        error_msg: str = None
        if settings.monitored_dir is None:
            error_msg = "Directory to scan not configured"
        elif not os.path.isdir(settings.monitored_dir):
            error_msg = "Directory to scan is invalid"
        elif settings.output_dir is None:
            error_msg = "Media with Capture Date directory not configured"
        elif not os.path.isdir(settings.output_dir):
            error_msg = "Media with Capture Date directory is invalid"
        elif settings.unknown_output_dir is None:
            error_msg = "Media without Capture Date directory not configured"
        elif not os.path.isdir(settings.unknown_output_dir):
            error_msg = "Media without Capture Date directory is invalid"
        if error_msg is not None:
            self.__logger.error("Cannot start processing: %s. Please update preferences and try again.", error_msg)
            return False
        else:
            return True

    def background_processing_started(self):
        # Disable every action that would conflict with a running task.
        self.startIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(False)
        self.clearOutputDirsAction.setEnabled(False)
        self.editPrefAction.setEnabled(False)
        if self.preferences_window is not None:
            self.preferences_window.hide()

    def background_processing_finished(self):
        # Re-enable menu actions once the worker signals completion.
        self.startIndexAction.setEnabled(True)
        self.stopIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(True)
        self.clearOutputDirsAction.setEnabled(True)
        self.editPrefAction.setEnabled(True)

    def stop_async_tasks(self):
        if self.indexing_stop_event:
            self.indexing_stop_event.set()

    def cleanup(self):
        """Release child windows and the DB connection before shutdown."""
        if self.preferences_window is not None:
            self.preferences_window.cleanup()
        if self.log_window is not None:
            self.log_window.cleanup()
        self.indexDB.disconnect_db()

    def apply_process_changed_setting(self):
        # Intentionally a no-op hook, also passed to PreferencesWindow.
        pass
46.690377
201
0.692087
1,232
11,159
6.039773
0.212662
0.056444
0.019352
0.0254
0.384491
0.27241
0.232899
0.213547
0.192313
0.180218
0
0.00058
0.226992
11,159
238
202
46.886555
0.862045
0
0
0.183575
0
0.014493
0.111928
0
0
0
0
0
0
1
0.10628
false
0.009662
0.072464
0
0.202899
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f26ee900478e90c670143eacc36e4d621147c8
392
py
Python
app/api/views/reports/brief.py
ArenaNetworks/dto-digitalmarketplace-api
d0d58924719d889503ed112b0d5801b528b0398c
[ "MIT" ]
6
2017-06-09T03:38:53.000Z
2021-12-22T02:42:15.000Z
app/api/views/reports/brief.py
ArenaNetworks/dto-digitalmarketplace-api
d0d58924719d889503ed112b0d5801b528b0398c
[ "MIT" ]
47
2016-08-02T05:21:31.000Z
2022-03-28T01:14:17.000Z
app/api/views/reports/brief.py
AusDTO/dto-digitalmarketplace-api
937843c9c01a71518cf4688b4daa55bbe7df1965
[ "MIT" ]
7
2016-09-13T13:07:18.000Z
2021-02-17T10:16:21.000Z
from flask import jsonify

from app.api import api
from app.api.helpers import require_api_key_auth
from app.api.services.reports import briefs_service


@api.route('/reports/brief/published', methods=['GET'])
@require_api_key_auth
def get_published_briefs():
    """Report endpoint: all published briefs plus their total count."""
    published = briefs_service.get_published_briefs()
    payload = {
        'items': published,
        'total': len(published)
    }
    return jsonify(payload)
26.133333
55
0.737245
54
392
5.12963
0.462963
0.075812
0.108303
0.122744
0
0
0
0
0
0
0
0
0.155612
392
14
56
28
0.836858
0
0
0
0
0
0.094388
0.061224
0
0
0
0
0
1
0.083333
false
0
0.333333
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
51f40f22276db646a119642572727553613d35f0
8,903
py
Python
vis_utils/graphics/geometry/procedural_primitives.py
eherr/vis_utils
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
[ "MIT" ]
4
2020-05-20T03:55:19.000Z
2020-12-24T06:33:40.000Z
vis_utils/graphics/geometry/procedural_primitives.py
eherr/vis_utils
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
[ "MIT" ]
1
2020-05-18T11:21:35.000Z
2020-07-07T21:25:57.000Z
vis_utils/graphics/geometry/procedural_primitives.py
eherr/vis_utils
b757b01f42e6da02ad62130c3b0e61e9eaa3886f
[ "MIT" ]
1
2020-07-20T06:57:13.000Z
2020-07-20T06:57:13.000Z
#!/usr/bin/env python # # Copyright 2019 DFKI GmbH. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the # following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. 
import math
from copy import copy

import numpy as np


def merge_vertices_and_normals(vertices, normals):
    """Concatenate each position with its normal into one 6-float record."""
    data = []
    for i in range(len(vertices)):
        data.append(vertices[i] + normals[i])
    return data


def construct_triangle_sphere(slices, stacks, diameter):
    """Build a UV sphere as (interleaved position+normal data, triangle index list).

    src: http://jacksondunstan.com/articles/1904
    """
    stepTheta = (2.0 * math.pi) / slices
    stepPhi = math.pi / stacks
    verticesPerStack = slices + 1
    positions = []
    normals = []
    triangles = []
    # Pre-compute half the sin/cos of thetas
    halfCosThetas = []
    halfSinThetas = []
    curTheta = 0
    for slice in range(verticesPerStack):
        halfCosThetas.append(math.cos(curTheta) * 0.5)
        halfSinThetas.append(math.sin(curTheta) * 0.5)
        curTheta += stepTheta
    # Generate positions
    curPhi = math.pi
    for stack in range(stacks + 1):
        curY = math.cos(curPhi) * 0.5 * diameter
        sinCurPhi = math.sin(curPhi)
        for slice in range(verticesPerStack):
            point = [halfCosThetas[slice] * sinCurPhi * diameter, curY, halfSinThetas[slice] * sinCurPhi * diameter]
            positions.append(point)
            # Normal equals the (unnormalized) position on the sphere surface.
            normals.append([point[0], point[1], point[2]])
        curPhi -= stepPhi
    # Generate triangles: two per quad between consecutive stacks.
    lastStackFirstVertexIndex = 0
    curStackFirstVertexIndex = verticesPerStack
    for stack in range(stacks):
        for slice in range(slices):
            # Bottom tri of the quad
            a = lastStackFirstVertexIndex + slice + 1
            b = curStackFirstVertexIndex + slice
            c = lastStackFirstVertexIndex + slice
            triangles.append([a, b, c])
            # Top tri of the quad
            a = lastStackFirstVertexIndex + slice + 1
            b = curStackFirstVertexIndex + slice + 1
            c = curStackFirstVertexIndex + slice
            triangles.append([a, b, c])
        lastStackFirstVertexIndex += verticesPerStack
        curStackFirstVertexIndex += verticesPerStack
    data = merge_vertices_and_normals(positions, normals)
    return data, triangles


def construct_quad_box(width, height, depth):
    """Return quad vertex data ([x, y, z, nx, ny, nz] per row) for a box
    centered on the origin."""
    print("create box", width, height, depth)
    data = np.array([
        # north
        [-width / 2, -height / 2, -depth / 2, 0, 0, -1],
        [-width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, -height / 2, -depth / 2, 0, 0, -1],
        # ,[ width/2, -height/2, -depth/2],[ -width/2, -height/2, -depth/2]
        ###west
        [-width / 2, -height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, depth / 2, -1, 0, 0],
        [-width / 2, -height / 2, depth / 2, -1, 0, 0],
        ###south
        [-width / 2, -height / 2, depth / 2, 0, 0, 1],
        [-width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, -height / 2, depth / 2, 0, 0, 1],
        ###east
        [width / 2, -height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, depth / 2, 1, 0, 0],
        [width / 2, -height / 2, depth / 2, 1, 0, 0],
        ##bottom
        [-width / 2, -height / 2, -depth / 2, 0, -1, 0],
        [-width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, -depth / 2, 0, -1, 0],
        ##top
        [-width / 2, height / 2, -depth / 2, 0, 1, 0],
        [-width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, -depth / 2, 0, 1, 0]
    ], 'f')
    return data


def construct_quad_box_based_on_height(width, height, depth):
    """Like construct_quad_box, but the box sits on y=0 and extends up to
    y=height instead of being vertically centered."""
    data = np.array([
        # north
        [-width / 2, 0.0, -depth / 2, 0, 0, -1],
        [-width / 2, height, -depth / 2, 0, 0, -1],
        [width / 2, height, -depth / 2, 0, 0, -1],
        [width / 2, 0.0, -depth / 2, 0, 0, -1],
        # ,[ width/2, -height/2, -depth/2],[ -width/2, -height/2, -depth/2]
        ###west
        [-width / 2, 0.0, -depth / 2, -1, 0, 0],
        [-width / 2, height, -depth / 2, -1, 0, 0],
        [-width / 2, height, depth / 2, -1, 0, 0],
        [-width / 2, 0.0, depth / 2, -1, 0, 0],
        ###south
        [-width / 2, 0.0, depth / 2, 0, 0, 1],
        [-width / 2, height, depth / 2, 0, 0, 1],
        [width / 2, height, depth / 2, 0, 0, 1],
        [width / 2, 0.0, depth / 2, 0, 0, 1],
        ###east
        [width / 2, 0.0, -depth / 2, 1, 0, 0],
        [width / 2, height, -depth / 2, 1, 0, 0],
        [width / 2, height, depth / 2, 1, 0, 0],
        [width / 2, 0.0, depth / 2, 1, 0, 0],
        ##bottom
        [-width / 2, 0.0, -depth / 2, 0, 1, 0],
        [-width / 2, 0.0, depth / 2, 0, 1, 0],
        [width / 2, 0.0, depth / 2, 0, 1, 0],
        [width / 2, 0.0, -depth / 2, 0, 1, 0],
        ##top
        [-width / 2, height, -depth / 2, 0, -1, 0],
        [-width / 2, height, depth / 2, 0, -1, 0],
        [width / 2, height, depth / 2, 0, -1, 0],
        [width / 2, height, -depth / 2, 0, -1, 0]
    ], 'f')
    return data


def construct_triangle_cylinder(slices, radius, length):
    """Build a z-aligned cylinder as (interleaved data, triangle index list).

    http://monsterden.net/software/ragdoll-pyode-tutorial
    http://wiki.unity3d.com/index.php/ProceduralPrimitives
    """
    half_length = length / 2.0
    vertices = []
    normals = []
    triangles = []
    v_idx = 0
    #bottom
    vertices.append([0, 0, half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        normals.append([0, 0, 1])
    for idx in range(0, slices):
        # Fan around the cap-center vertex (index 0).
        triangles.append([0, v_idx+1, v_idx+2])
        v_idx += 1
    #sides
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([ca, sa, 0])
        normals.append([ca, sa, 0])
    for idx in range(0, slices*2):
        triangles.append([v_idx, v_idx + 1, v_idx + 2])
        v_idx += 1
    #top
    start = len(vertices)
    vertices.append([0, 0, -half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([0, 0, -1])
    for idx in range(0, slices):
        triangles.append([start, v_idx+1, v_idx + 2])
        v_idx += 1
    return merge_vertices_and_normals(vertices, normals), triangles


def construct_triangle_capsule(slices, stacks, diameter, length, direction="z"):
    """Build a capsule by splitting a sphere into two hemispheres separated
    by *length*, optionally rotated to lie along the x or z axis."""
    data, triangles = construct_triangle_sphere(slices, stacks, diameter)
    data = np.array(data)
    half_idx = int(len(data)/2.0)
    half_len = length/2
    # Shift the two hemispheres apart along y.
    data[:half_idx, 1] -= half_len
    data[half_idx:, 1] += half_len
    if direction == "x":
        m = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])
        data = transform_vertex_data(data, m)
    elif direction == "z":
        m = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
        data = transform_vertex_data(data, m)
    return data, triangles


def transform_vertex_data(data, m):
    """Apply 3x3 matrix *m* to both the position and normal halves of each
    6-float vertex record."""
    transformed_data = []
    for v in data:
        t_v = np.zeros(6)
        t_v[:3] = np.dot(m, v[:3])[:3]
        t_v[3:] = np.dot(m, v[3:])[:3]
        transformed_data.append(t_v)
    return transformed_data
35.75502
116
0.550826
1,255
8,903
3.858964
0.165737
0.023126
0.099112
0.07516
0.516209
0.479455
0.410489
0.410489
0.404295
0.375594
0
0.064516
0.296642
8,903
248
117
35.899194
0.708879
0.173986
0
0.274854
0
0
0.002062
0
0
0
0
0
0
1
0.040936
false
0
0.017544
0
0.099415
0.005848
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f4f2785dcaeca906879c7e62e621e4a846114b
9,212
py
Python
src/tokentype.py
londav28/chasm
a5cd97ab2732af30d20aaf05842f3ddad7618660
[ "MIT" ]
null
null
null
src/tokentype.py
londav28/chasm
a5cd97ab2732af30d20aaf05842f3ddad7618660
[ "MIT" ]
null
null
null
src/tokentype.py
londav28/chasm
a5cd97ab2732af30d20aaf05842f3ddad7618660
[ "MIT" ]
null
null
null
# MIT LICENSE Copyright (c) 2018 David Longnecker
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

# Module-level enum registry; initialization code runs exactly once on import.
_enum_counter = 0
_enum_strs = []


def _intern(string):
    """Register *string* as the next token type and return its ordinal.

    Kept as a function (rather than hard-coded constants) so enum values
    can be reordered freely.
    """
    global _enum_counter
    ordinal = _enum_counter
    _enum_strs.append(string)
    _enum_counter = ordinal + 1
    return ordinal


def get_tokentype_str(toktype):
    """Map an enum value back to its token-type name, or None if out of range."""
    if 0 <= toktype < len(_enum_strs):
        return _enum_strs[toktype]
    return None

# These must appear in _strict_ order corresponding to opcode value.
# Opcode tokens. ORDER MATTERS: each token's ordinal must equal its opcode
# value, so do not insert/reorder entries here without updating the VM.
t_op_nop = _intern('op:nop')
t_op_ldl = _intern('op:ldl')
t_op_stl = _intern('op:stl')
t_op_ldg = _intern('op:ldg')
t_op_stg = _intern('op:stg')
t_op_lfd = _intern('op:lfd')
t_op_sfd = _intern('op:sfd')
t_op_ldsc = _intern('op:ldsc')
t_op_pop = _intern('op:pop')
t_op_swp = _intern('op:swp')
t_op_dup = _intern('op:dup')
t_op_psh_b = _intern('op:psh_b')
t_op_psh_s = _intern('op:psh_s')
t_op_psh_d = _intern('op:psh_d')
t_op_psh_q = _intern('op:psh_q')
t_op_psh_f = _intern('op:psh_f')
t_op_psh_a = _intern('op:psh_a')
t_op_psh_nil = _intern('op:psh_nil')
t_op_par_b = _intern('op:par_b')
t_op_par_s = _intern('op:par_s')
t_op_par_d = _intern('op:par_d')
t_op_par_q = _intern('op:par_q')
t_op_par_f = _intern('op:par_f')
t_op_par_a = _intern('op:par_a')
t_op_lai = _intern('op:lai')
t_op_sai = _intern('op:sai')
t_op_alen = _intern('op:alen')
t_op_and = _intern('op:and')
t_op_or = _intern('op:or')
t_op_xor = _intern('op:xor')
t_op_not = _intern('op:not')
t_op_shl = _intern('op:shl')
t_op_shr = _intern('op:shr')
t_op_add_q = _intern('op:add_q')
t_op_sub_q = _intern('op:sub_q')
t_op_mul_q = _intern('op:mul_q')
t_op_div_q = _intern('op:div_q')
t_op_mod_q = _intern('op:mod_q')
t_op_neg_q = _intern('op:neg_q')
t_op_add_f = _intern('op:add_f')
t_op_sub_f = _intern('op:sub_f')
t_op_mul_f = _intern('op:mul_f')
t_op_div_f = _intern('op:div_f')
t_op_mod_f = _intern('op:mod_f')
t_op_neg_f = _intern('op:neg_f')
t_op_cst_qf = _intern('op:cst_qf')
t_op_cst_fq = _intern('op:cst_fq')
t_op_cmp_q = _intern('op:cmp_q')
t_op_cmp_f = _intern('op:cmp_f')
t_op_refcmp = _intern('op:refcmp')
t_op_jmp_eqz = _intern('op:jmp_eqz')
t_op_jmp_nez = _intern('op:jmp_nez')
t_op_jmp_ltz = _intern('op:jmp_ltz')
t_op_jmp_lez = _intern('op:jmp_lez')
t_op_jmp_gtz = _intern('op:jmp_gtz')
t_op_jmp_gez = _intern('op:jmp_gez')
t_op_jmp = _intern('op:jmp')
t_op_typeof = _intern('op:typeof')
t_op_call = _intern('op:call')
t_op_ret = _intern('op:ret')
t_op_leave = _intern('op:leave')
t_op_break = _intern('op:break')
t_op_throw = _intern('op:throw')

# Additional values to be used in the lexer/parser!
t_eof = _intern('eof')
t_unknown = _intern('unknown')

# Recognized whitespace tokens.
t_comment = _intern('comment')
t_spaces = _intern('spaces')

# LL(1) formatting characters.
t_newline = _intern('newline')
t_tab = _intern('tab')

# LL(1) braces and brackets.
t_lparen = _intern('lparen')
t_rparen = _intern('rparen')
t_lbrace = _intern('lbrace')
t_rbrace = _intern('rbrace')
t_lbracket = _intern('lbracket')
t_rbracket = _intern('rbracket')

# LL(1) comparison operators.
t_less = _intern('less')
t_greater = _intern('greater')

# LL(1) punctuation characters.
t_semicolon = _intern('semicolon')
t_comma = _intern('comma')
t_period = _intern('period')
t_colon = _intern('colon')

# LL(1) operators and meta symbols.
t_assign = _intern('assign')
t_star = _intern('star')
t_fslash = _intern('fslash')
t_percent = _intern('percent')
t_amper = _intern('amper')
t_at = _intern('at')
t_dollar = _intern('dollar')

# Literal values.
t_int = _intern('int')
t_str = _intern('str')
t_flt = _intern('flt')
t_hex = _intern('hex')
t_bin = _intern('bin')

# There's gonna be a whole lotta these!
t_symbol = _intern('symbol')

# Additional assembler keywords.
t_method = _intern('kw:method')
t_object = _intern('kw:object')
t_try = _intern('kw:try')
t_except = _intern('kw:except')
t_void = _intern('kw:void')

# Relies on opcode tokens being interned first!
def get_opcode_str(op): if op < t_op_nop or op > t_op_eox: return 'unknown' return get_tokentype_str(op) _keywords = [ t_method, t_object, t_try, t_except, t_void ] _whitespace = [ t_comment, t_spaces, t_newline, t_tab ] _literals = [ t_int, t_str, t_flt, t_hex, t_bin ] _instruction = [ t_op_nop, t_op_ldl, t_op_stl, t_op_ldg, t_op_stg, t_op_lfd, t_op_sfd, t_op_ldsc, t_op_pop, t_op_swp, t_op_dup, t_op_psh_b, t_op_psh_s, t_op_psh_d, t_op_psh_q, t_op_psh_f, t_op_psh_a, t_op_psh_nil, t_op_par_b, t_op_par_s, t_op_par_d, t_op_par_q, t_op_par_f, t_op_par_a, t_op_lai, t_op_sai, t_op_alen, t_op_and, t_op_or, t_op_xor, t_op_not, t_op_shl, t_op_shr, t_op_add_q, t_op_sub_q, t_op_mul_q, t_op_div_q, t_op_mod_q, t_op_neg_q, t_op_add_f, t_op_sub_f, t_op_mul_f, t_op_div_f, t_op_mod_f, t_op_neg_f, t_op_cst_qf, t_op_cst_fq, t_op_cmp_q, t_op_cmp_f, t_op_refcmp, t_op_jmp_eqz, t_op_jmp_nez, t_op_jmp_ltz, t_op_jmp_lez, t_op_jmp_gtz, t_op_jmp_gez, t_op_jmp, t_op_typeof, t_op_call, t_op_ret, t_op_leave, t_op_break, t_op_throw ] _jump = [ t_op_jmp_eqz, t_op_jmp_nez, t_op_jmp_ltz, t_op_jmp_lez, t_op_jmp_gtz, t_op_jmp_gez, t_op_jmp ] _interned_arg = [ t_op_psh_a, t_op_par_a, t_op_call, t_op_ldsc, t_op_psh_q, t_op_psh_f ] _has_immediate_u8 = [ t_op_ldl, t_op_stl ] _has_immediate_u16 = [ t_op_ldg, t_op_stg, t_op_lfd, t_op_sfd ] _has_immediate_u32 = _jump + [ t_op_psh_a, t_op_par_a, t_op_call, t_op_ldsc, t_op_psh_q, t_op_psh_f ] _has_immediate_u64 = [] _has_immediate_i8 = [ t_op_psh_b ] _has_immediate_i16 = [ t_op_psh_s ] _has_immediate_i32 = [ t_op_psh_d ] _has_immediate_i64 = [] _has_immediate_f32 = [] _has_immediate_f64 = [] _has_immediate = ( _has_immediate_u8 + _has_immediate_u16 + _has_immediate_u32 + _has_immediate_u64 + _has_immediate_i8 + _has_immediate_i16 + _has_immediate_i32 + _has_immediate_i64 + _has_immediate_f32 + _has_immediate_f64 ) # Tokens that can have varying values. 
_non_static = _literals + [t_symbol] + [t_comment] + [t_spaces] def is_keyword(v): return v in _keywords def is_literal(v): return v in _literals def is_non_static(v): return v in _non_static def is_whitespace(v): return v in _whitespace def is_instruction(v): return v in _instruction def is_jump(v): return v in _jump def has_interned_arg(v): return v in _interned_arg def has_immediate_u8(v): return v in _has_immediate_u8 def has_immediate_u16(v): return v in _has_immediate_u16 def has_immediate_u32(v): return v in _has_immediate_u32 def has_immediate_u64(v): return v in _has_immediate_u64 def has_immediate_i8(v): return v in _has_immediate_i8 def has_immediate_i16(v): return v in _has_immediate_i16 def has_immediate_i32(v): return v in _has_immediate_i32 def has_immediate_i64(v): return v in _has_immediate_i64 def has_immediate_f32(v): return v in _has_immediate_f32 def has_immediate_f64(v): return v in _has_immediate_f64 def has_immediate(v): return v in _has_immediate
21.98568
78
0.65469
1,536
9,212
3.434896
0.18099
0.088704
0.026156
0.034117
0.347612
0.272176
0.192381
0.072024
0.069371
0.052312
0
0.012102
0.246526
9,212
418
79
22.038278
0.748019
0.180743
0
0.127148
0
0
0.091962
0
0
0
0
0
0
1
0.072165
false
0
0
0.061856
0.151203
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f6b55bf08cd3caaf58fe60b17883de8d75c7aa
1,803
py
Python
src/cr/sparse/_src/lop/reshape.py
carnot-shailesh/cr-sparse
989ebead8a8ac37ade643093e1caa31ae2a3eda1
[ "Apache-2.0" ]
42
2021-06-11T17:11:29.000Z
2022-03-29T11:51:44.000Z
src/cr/sparse/_src/lop/reshape.py
carnot-shailesh/cr-sparse
989ebead8a8ac37ade643093e1caa31ae2a3eda1
[ "Apache-2.0" ]
19
2021-06-04T11:36:11.000Z
2022-01-22T20:13:39.000Z
src/cr/sparse/_src/lop/reshape.py
carnot-shailesh/cr-sparse
989ebead8a8ac37ade643093e1caa31ae2a3eda1
[ "Apache-2.0" ]
5
2021-11-21T21:01:11.000Z
2022-02-28T07:20:03.000Z
# Copyright 2021 CR.Sparse Development Team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import reduce import jax.numpy as jnp from .lop import Operator def reshape(in_shape, out_shape): """Returns a linear operator which reshapes vectors from model space to data space Args: in_shape (int): Shape of vectors in the model space out_shape (int): Shape of vectors in the data space Returns: (Operator): A reshaping linear operator """ in_size = jnp.prod(jnp.array(in_shape)) out_size = jnp.prod(jnp.array(out_shape)) assert in_size == out_size, "Input and output size must be equal" times = lambda x: jnp.reshape(x, out_shape) trans = lambda x : jnp.reshape(x, in_shape) return Operator(times=times, trans=trans, shape=(out_shape,in_shape)) def arr2vec(shape): """Returns a linear operator which reshapes arrays to vectors Args: shape (int): Shape of arrays in the model space Returns: (Operator): An array to vec linear operator """ in_size = reduce((lambda x, y: x * y), shape) out_shape = (in_size,) times = lambda x: jnp.reshape(x, (in_size,)) trans = lambda x : jnp.reshape(x, shape) return Operator(times=times, trans=trans, shape=(out_shape,shape))
32.781818
86
0.702718
274
1,803
4.554745
0.390511
0.044872
0.041667
0.054487
0.298077
0.267628
0.190705
0.083333
0.083333
0.083333
0
0.00632
0.210205
1,803
54
87
33.388889
0.870084
0.552413
0
0
0
0
0.047425
0
0
0
0
0
0.0625
1
0.125
false
0
0.1875
0
0.4375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f6ed71939071c909f6f821aec31a10daf6299c
637
py
Python
innuy_lambda/settings/dev.py
innuy/innuy_lambda
739d2573919513f08925fe63cad6e301b69323f9
[ "MIT" ]
null
null
null
innuy_lambda/settings/dev.py
innuy/innuy_lambda
739d2573919513f08925fe63cad6e301b69323f9
[ "MIT" ]
1
2020-06-05T18:21:24.000Z
2020-06-05T18:21:24.000Z
innuy_lambda/settings/dev.py
innuy/innuy_lambda
739d2573919513f08925fe63cad6e301b69323f9
[ "MIT" ]
null
null
null
from .base import * INSTALLED_APPS += [ 'debug_toolbar', 'zappa_django_utils', 'storages', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] INTERNAL_IPS = [ '127.0.0.1', ] DATABASES = { 'default': { 'ENGINE': 'zappa_django_utils.db.backends.s3sqlite', 'NAME': 'sqlite.db', 'BUCKET': 'innuylambda' } } ALLOWED_HOSTS = ['*'] AWS_STORAGE_BUCKET_NAME = 'innuylambda-static' AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME STATIC_URL = "https://%s/" % AWS_S3_CUSTOM_DOMAIN STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
18.735294
70
0.6719
70
637
5.785714
0.628571
0.059259
0.079012
0.098765
0
0
0
0
0
0
0
0.022945
0.178964
637
34
71
18.735294
0.751434
0
0
0
0
0
0.413793
0.194357
0
0
0
0
0
1
0
false
0
0.041667
0
0.041667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f7369f83b845973e9526f6db9b7f120362742a
3,658
py
Python
UNET.py
ArtemBoyarintsev/cell_segmentation
9c0e70c1edbb20d661e392bab4c42002d13ebf06
[ "MIT" ]
null
null
null
UNET.py
ArtemBoyarintsev/cell_segmentation
9c0e70c1edbb20d661e392bab4c42002d13ebf06
[ "MIT" ]
null
null
null
UNET.py
ArtemBoyarintsev/cell_segmentation
9c0e70c1edbb20d661e392bab4c42002d13ebf06
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torchvision class UNET(nn.Module): THIRD_POOLING_INDEX = 16 FORTH_POOLING_INDEX = 23 def __init__(self, n_class = 1): super(UNET, self).__init__() # Contracting Path self.c1 = UNET.get_conv2d_block(3, 16, 3, 1) self.p1 = nn.MaxPool2d(2) self.d1 = nn.Dropout2d() self.c2 = UNET.get_conv2d_block(16, 32, 3, 1) self.p2 = nn.MaxPool2d(2) self.d2 = nn.Dropout2d() self.c3 = UNET.get_conv2d_block(32, 64, 3, 1) self.p3 = nn.MaxPool2d(2) self.d3 = nn.Dropout2d() self.c4 = UNET.get_conv2d_block(64, 128, 3, 1) self.p4 = nn.MaxPool2d(2) self.d4 = nn.Dropout2d() self.c5 = UNET.get_conv2d_block(128, 256, 3, 1) self.u6 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, padding=0) self.d6 = nn.Dropout2d() self.c6 = UNET.get_conv2d_block(256, 128, 3, 1) self.u7 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, padding=0) self.d7 = nn.Dropout2d() self.c7 = UNET.get_conv2d_block(128, 64, 3, 1) self.u8 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, padding=0) self.d8 = nn.Dropout2d() self.c8 = UNET.get_conv2d_block(64, 32, 3, 1) self.u9 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2, padding=0) self.d9 = nn.Dropout2d() self.c9 = UNET.get_conv2d_block(32, 16, 3, 1) self.c10 = nn.Conv2d(16, 1, 1) self.activation = nn.Sigmoid() #outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9) def forward(self, batch): c1_output = self.c1(batch) h = c1_output h = self.p1(h) h = self.d1(h) c2_output = self.c2(h) h = c2_output h = self.p2(h) h = self.d2(h) c3_output = self.c3(h) h = c3_output h = self.p3(h) h = self.d3(h) c4_output = self.c4(h) h = c4_output h = self.p4(h) h = self.d4(h) h = self.c5(h) u = self.u6(h) h = torch.cat((u, c4_output), dim=(1)) h = self.d6(h) h = self.c6(h) u = self.u7(h) h = torch.cat((u, c3_output), dim=(1)) h = self.d7(h) h = self.c7(h) u = self.u8(h) h = torch.cat((u, c2_output), dim=(1)) h = self.d8(h) h = self.c8(h) u = self.u9(h) h = torch.cat((u, c1_output), dim=(1)) h = self.d9(h) h = self.c9(h) h = 
self.c10(h) ret = self.activation(h) return ret @staticmethod def get_conv2d_block(input_size, output_size, kernel_size, padding): """Function to add 2 convolutional layers with the parameters passed to it""" # first layer # kernel_initializer = 'he_normal', padding = 'same' conv2d_block = nn.Sequential() conv2d = nn.Conv2d(input_size, output_size, kernel_size = kernel_size, padding=padding) conv2d_block.add_module('conv_0', conv2d) conv2d_block.add_module('batchnorm_0', nn.BatchNorm2d(output_size)) conv2d_block.add_module('relu0', nn.ReLU()) conv2d_2 = nn.Conv2d(output_size, output_size, kernel_size=kernel_size, padding=padding) conv2d_block.add_module('conv_1', conv2d_2) conv2d_block.add_module('batchnorm_1', nn.BatchNorm2d(output_size)) conv2d_block.add_module('relu0', nn.ReLU()) return conv2d_block
31.264957
96
0.5462
523
3,658
3.659656
0.193117
0.103448
0.073145
0.08464
0.360502
0.212121
0.194357
0.194357
0.131661
0.131661
0
0.092929
0.323401
3,658
117
97
31.264957
0.680404
0.056042
0
0.02381
0
0
0.012772
0
0
0
0
0
0
1
0.035714
false
0
0.035714
0
0.130952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f8bea04c27110192da44644da602e9cd13a9c7
4,538
py
Python
order_center/api.py
YuaShizuki/order_center
6f0a8831b7cef82cee5b2f6268822acbee9077c4
[ "MIT" ]
null
null
null
order_center/api.py
YuaShizuki/order_center
6f0a8831b7cef82cee5b2f6268822acbee9077c4
[ "MIT" ]
null
null
null
order_center/api.py
YuaShizuki/order_center
6f0a8831b7cef82cee5b2f6268822acbee9077c4
[ "MIT" ]
null
null
null
# Frappe whitelisted HTTP endpoints for the order-center app: trip creation,
# status transitions, and delivery notifications.
import frappe
import os
import json
import datetime  # NOTE(review): unused in this view -- confirm before removing
import uuid      # NOTE(review): unused in this view -- confirm before removing


@frappe.whitelist(allow_guest=True)
def clustering_and_scheduling():
    """Create one DRS document per trip in the posted inputData JSON list."""
    for trip in json.loads(frappe.local.request.values["inputData"]):
        build_trip(trip)


def build_trip(dat):
    """Insert a DRS doc for one trip dict and commit immediately."""
    d = frappe.get_doc({
        "doctype": "DRS",
        "trip_name": dat["tripName"],
        "status": "Clustering And Scheduling",
        "driver_name": dat["driverName"],
        "vehicle": dat["vehicle"],
        "shipment_details": parse_shipment_details(dat["shipmentDetails"])
    })
    d.insert()
    frappe.db.commit()


def parse_shipment_details(shdetails):
    """Map incoming shipment dicts onto the child-table field names."""
    result = []
    for shipment in shdetails:
        d = dict()
        d["latitude"] = shipment["latitude"]
        d["longitude"] = shipment["longitude"]
        d["awb"] = shipment["clientShipmentId"]
        d["delivery_order"] = shipment["deliveryOrder"]
        # NOTE(review): capital-S "Status" here, lowercase elsewhere -- this
        # mirrors the child doctype's fieldname, presumably; verify.
        d["Status"] = "Unknown"
        result.append(d)
    return result


@frappe.whitelist(allow_guest=True)
def dispatch_start_trip():
    start_trip(json.loads(frappe.local.request.values["inputData"]))


def start_trip(trip):
    """Move the named DRS document to 'Start Trip'."""
    t = frappe.get_list("DRS", fields=["*"], filters={"trip_name": trip["tripName"]})[0]
    tx = frappe.get_doc("DRS", t["name"])
    tx.status = "Start Trip"
    tx.save()


@frappe.whitelist(allow_guest=True)
def load_items():
    """Mark a shipment (looked up by awb) as Loaded."""
    val = frappe.local.request.values["inputData"]
    awb = json.loads(val)["clientShipmentId"]
    t = frappe.get_list("Shipment Details", fields=["*"], filters={"awb": awb})[0]
    tx = frappe.get_doc("Shipment Details", t["name"])
    tx.status = "Loaded"
    tx.save()


@frappe.whitelist(allow_guest=True)
def pickup():
    """Mark a shipment (looked up by awb) as Picked Up."""
    val = frappe.local.request.values["inputData"]
    awb = json.loads(val)["clientShipmentId"]
    t = frappe.get_list("Shipment Details", fields=["*"], filters={"awb": awb})[0]
    tx = frappe.get_doc("Shipment Details", t["name"])
    tx.status = "Picked Up"
    tx.save()


@frappe.whitelist(allow_guest=True)
def delivery_notification():
    parcel = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(parcel, "Delivered")


def set_deliverd(parcel, status):
    """Set a shipment's status and record the delivery coordinates."""
    awb = parcel["clientShipmentId"]
    t = frappe.get_list("Shipment Details", fields=["*"], filters={"awb": awb})[0]
    tx = frappe.get_doc("Shipment Details", t["name"])
    tx.status = status
    tx.latitude = parcel["latitude"]
    tx.longitude = parcel["longitude"]
    tx.save()


@frappe.whitelist(allow_guest=True)
def not_deliverd_notification():
    parcel = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(parcel, "Not Delivered")


@frappe.whitelist(allow_guest=True)
def partial_delivery_notification():
    parcel = json.loads(frappe.local.request.values["inputData"])
    set_deliverd(parcel, "Partial Delivery")


@frappe.whitelist(allow_guest=True)
def arrival_end_trip():
    """Move the named DRS document to 'End Trip'."""
    trip = json.loads(frappe.local.request.values["inputData"])
    t = frappe.get_list("DRS", fields=["*"], filters={"trip_name": trip["tripName"]})[0]
    tx = frappe.get_doc("DRS", t["name"])
    tx.status = "End Trip"
    tx.save()


#---------------------------------------------THROW-----------------------------
@frappe.whitelist(allow_guest=True)
def clear_all_cache():
    frappe.clear_cache()
    # NOTE(review): typo 'cleard' preserved -- runtime string, not a comment.
    return "cache cleard"


# Earlier file-logging variant of arrival_end_trip, kept commented out:
#@frappe.whitelist(allow_guest=True)
#def arrival_end_trip():
#    open(os.path.expanduser("~/erp_data/arrival_end_trip.json"),
#        "a").write(frappe.local.request.data + "\n")


@frappe.whitelist(allow_guest=True)
def accept():
    # Appends the raw request payload to a local debug log file.
    open(os.path.expanduser("~/erp_data/accept.json"),
        "a").write(frappe.local.request.data + "\n")


@frappe.whitelist(allow_guest=True)
def reject():
    # Appends the raw request payload to a local debug log file.
    open(os.path.expanduser("~/erp_data/reject.json"),
        "a").write(frappe.local.request.data + "\n")


@frappe.whitelist(allow_guest=True)
def clustering_updates():
    # Appends the raw request payload to a local debug log file.
    open(os.path.expanduser("~/erp_data/clustering_updates.json"),
        "a").write(frappe.local.request.data + "\n")


# Sample captured request payload, for reference:
#CombinedMultiDict([ImmutableMultiDict([]), ImmutableMultiDict([('inputData',
#u'[{"tripName":"TRIP-32","deliveryMediumName":"MEHUL","driverName":"","vehicle":"","shipmentDetails":[{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"222222201","deliveryOrder":4},{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"112000003","deliveryOrder":5},{"latitude":19.1076375,"longitude":72.8655789,"clientShipmentId":"test_order","deliveryOrder":6}]}]')])])
34.907692
473
0.653812
539
4,538
5.378479
0.211503
0.067265
0.089686
0.112108
0.618144
0.618144
0.547775
0.481545
0.385995
0.35426
0
0.020323
0.154253
4,538
129
474
35.178295
0.735018
0.160864
0
0.407767
0
0
0.184114
0.020516
0
0
0
0
0
1
0.15534
false
0
0.048544
0
0.223301
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51f93647e99d693a4f01deefb081bc1f04d2c371
1,365
py
Python
logic_bank/exec_trans_logic/row_sets.py
valhuber/LogicBank
0df8790395fe260778e910badcc15fba49c23e96
[ "BSD-3-Clause" ]
39
2020-10-14T02:23:59.000Z
2022-02-11T14:08:46.000Z
logic_bank/exec_trans_logic/row_sets.py
valhuber/LogicBank
0df8790395fe260778e910badcc15fba49c23e96
[ "BSD-3-Clause" ]
7
2020-10-14T23:59:49.000Z
2022-03-16T16:25:43.000Z
logic_bank/exec_trans_logic/row_sets.py
valhuber/Logic-Bank
3f31b47786ce3fae53fd96af8795cd693e20547b
[ "BSD-3-Clause" ]
7
2020-10-17T02:59:51.000Z
2022-03-26T08:56:46.000Z
from __future__ import annotations from typing import List, TypeVar, Dict from sqlalchemy.engine import base from sqlalchemy.ext.declarative import declarative_base from logic_bank.exec_row_logic.logic_row import LogicRow class RowSets(): """ Sets of rows used in transaction * processed_rows: Dict of all the logic_rows processed in this transaction, by row instance (no dups) Used to drive commit events/constraints * submitted_rows: set of rows submitted by client Used to avoid adjusting altered rows Presumes that sqlalchemy returns same instance for multiple queries. """ def __init__(self): self.processed_rows = {} # type: Dict[base, 'LogicRow'] self.submitted_row = set() def add_processed(self, logic_row: 'LogicRow'): """ Denote row processed, for later commit events/constraints """ if logic_row.row not in self.processed_rows: self.processed_rows[logic_row.row] = logic_row def add_submitted(self, row: base): self.submitted_row.add(row) def is_submitted(self, row: base) -> bool: result = row in self.submitted_row return result def remove_submitted(self, logic_row: LogicRow): if logic_row.row in self.submitted_row: self.submitted_row.remove(logic_row.row)
31.744186
109
0.688645
180
1,365
5.027778
0.366667
0.070718
0.088398
0.044199
0.046409
0
0
0
0
0
0
0
0.240293
1,365
42
110
32.5
0.87271
0.324542
0
0
0
0
0.009281
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
51f968ed80d1979d901e48aea3098c1013002f25
1,836
py
Python
monkeytools/array_methods.py
mr-devs/monkeytools
e56197befcc0a14d9d082eb4463ebe27fb967116
[ "MIT" ]
null
null
null
monkeytools/array_methods.py
mr-devs/monkeytools
e56197befcc0a14d9d082eb4463ebe27fb967116
[ "MIT" ]
null
null
null
monkeytools/array_methods.py
mr-devs/monkeytools
e56197befcc0a14d9d082eb4463ebe27fb967116
[ "MIT" ]
null
null
null
""" A collection of array-based algorithms 1. Find maximum sub-array - https://en.wikipedia.org/wiki/Maximum_subarray_problem Author: Matthew R. DeVerna """ from .utils import check_array def max_subarray_kadane(given_array): """ Find a contiguous subarray with the largest sum. Note: This algorithm is implemented with Kadane's algorithm with a slight change (we do not add 1 to the best_end) - https://en.wikipedia.org/wiki/Maximum_subarray_problem#Kadane's_algorithm Complexity: ---------- - O(n) Parameters: ---------- - given_array (list) : a numerical sequence Returns: ---------- - best_sum (int) : the total sum between `best_start` and `best_end` - best_start (int) : the first index in the largest sub-array (inclusive) - best_end (int) : the last index in the largest sub-array (inclusive) Exceptions: ---------- - TypeError Example: ---------- lst = [-45, -78, -2, -60, 27, 21, 71, 80, 22, 59] max_subarray(lst) # Output (280, 4, 10) Where 280 is the sum between lst[4] (27, inclusive) and lst[9] (59, inclusive) """ # Ensure array is a list and contains only numeric values check_array(given_array) best_sum = float('-inf') best_start = best_end = None current_sum = 0 for current_end, x in enumerate(given_array): if current_sum <= 0: # Start a new sequence at the current element current_start = current_end current_sum = x else: # Extend the existing sequence with the current element current_sum += x if current_sum > best_sum: best_sum = current_sum best_start = current_start best_end = current_end return best_sum, best_start, best_end
27.818182
83
0.622004
247
1,836
4.45749
0.437247
0.038147
0.032698
0.034514
0.143506
0.143506
0.143506
0.081744
0
0
0
0.028422
0.271786
1,836
66
84
27.818182
0.795064
0.609477
0
0
0
0
0.0067
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51fa66f7327624b0362e31f656c33d867492f9a3
2,922
py
Python
Library/ContractUtils.py
rccannizzaro/QC-StrategyBacktest
847dbd61680466bc60ce7893eced8a8f70d16b2e
[ "Apache-2.0" ]
11
2021-12-02T15:41:47.000Z
2022-03-14T03:49:22.000Z
Library/ContractUtils.py
ikamanu/QC-StrategyBacktest
847dbd61680466bc60ce7893eced8a8f70d16b2e
[ "Apache-2.0" ]
null
null
null
Library/ContractUtils.py
ikamanu/QC-StrategyBacktest
847dbd61680466bc60ce7893eced8a8f70d16b2e
[ "Apache-2.0" ]
5
2022-02-02T12:07:51.000Z
2022-02-13T02:24:19.000Z
########################################################################################
#                                                                                      #
# Licensed under the Apache License, Version 2.0 (the "License");                      #
# you may not use this file except in compliance with the License.                     #
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0   #
#                                                                                      #
# Unless required by applicable law or agreed to in writing, software                  #
# distributed under the License is distributed on an "AS IS" BASIS,                    #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.             #
# See the License for the specific language governing permissions and                  #
# limitations under the License.                                                       #
#                                                                                      #
########################################################################################

from Logger import *


class ContractUtils:
    """Helpers to read prices off option contracts via the QC Securities map."""

    def __init__(self, context):
        # Set the context (the QCAlgorithm instance)
        self.context = context
        # Set the logger
        self.logger = Logger(context, className = type(self).__name__, logLevel = context.logLevel)

    def getUnderlyingLastPrice(self, contract):
        """Return the latest underlying price for *contract*.

        Prefers the live security from context.Securities (freshest quote);
        falls back to the contract's own UnderlyingLastPrice attribute.
        Fix: the original could fall through without an explicit return
        (implicit None) when the Securities lookup path didn't pan out; every
        path now returns a price.  Also replaced `!= None` with the idiomatic
        `is not None`.
        """
        context = self.context
        if contract.UnderlyingSymbol in context.Securities:
            security = context.Securities[contract.UnderlyingSymbol]
            # Check if we have found the security
            if security is not None:
                # Get the last known price of the security
                return context.GetLastKnownPrice(security).Price
        # Fall back to the UnderlyingLastPrice attribute of the contract
        return contract.UnderlyingLastPrice

    def getSecurity(self, contract):
        """Return the live security for *contract* if registered, else the contract itself."""
        Securities = self.context.Securities
        # Use the Securities entry when the contract exposes a known Symbol
        if hasattr(contract, "Symbol") and contract.Symbol in Securities:
            return Securities[contract.Symbol]
        # Use the contract itself
        return contract

    def midPrice(self, contract):
        """Return the bid/ask mid-price of an option contract."""
        security = self.getSecurity(contract)
        return 0.5*(security.BidPrice + security.AskPrice)

    def bidAskSpread(self, contract):
        """Return the absolute bid/ask spread of an option contract."""
        security = self.getSecurity(contract)
        return abs(security.AskPrice - security.BidPrice)
48.7
132
0.558522
287
2,922
5.658537
0.393728
0.036946
0.018473
0.033251
0.166256
0.166256
0.166256
0.105911
0.105911
0.105911
0
0.003055
0.327858
2,922
59
133
49.525424
0.823829
0.485284
0
0.153846
0
0
0.004971
0
0
0
0
0
0
1
0.192308
false
0
0.038462
0
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51fad0b66c4e3fa317158550a35e046091d71c7e
24,945
py
Python
backtester/backtester.py
unbalancedparentheses/backtester_options
46efd30e405f360c560f8eae8b2ee7d26f4532db
[ "MIT" ]
91
2020-01-31T10:15:35.000Z
2022-03-27T19:15:12.000Z
backtester/backtester.py
unbalancedparentheses/backtester_options
46efd30e405f360c560f8eae8b2ee7d26f4532db
[ "MIT" ]
38
2019-05-12T02:00:46.000Z
2019-12-06T14:54:25.000Z
backtester/backtester.py
unbalancedparentheses/backtester_options
46efd30e405f360c560f8eae8b2ee7d26f4532db
[ "MIT" ]
20
2020-06-12T08:21:30.000Z
2022-03-28T05:52:59.000Z
from functools import reduce

import numpy as np
import pandas as pd
import pyprind

from .enums import *


class Backtest:
    """Backtest runner class."""
    def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
        # Normalize the given asset weights so they sum to 1.0; missing assets get 0.0
        assets = ('stocks', 'options', 'cash')
        total_allocation = sum(allocation.get(a, 0.0) for a in assets)
        self.allocation = {}
        for asset in assets:
            self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation

        self.initial_capital = initial_capital
        self.stop_if_broke = True
        # Contract multiplier (100 shares per option contract by convention)
        self.shares_per_contract = shares_per_contract
        self._stocks = []
        self._options_strategy = None
        self._stocks_data = None
        self._options_data = None

    @property
    def stocks(self):
        return self._stocks

    @stocks.setter
    def stocks(self, stocks):
        # Portfolio stock weights must form a proper distribution
        assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
                          atol=0.000001), 'Stock percentages must sum to 1.0'
        self._stocks = list(stocks)
        # NOTE(review): returning self from a property setter has no effect — the
        # return value of a setter is discarded by the descriptor protocol.
        return self

    @property
    def options_strategy(self):
        return self._options_strategy

    @options_strategy.setter
    def options_strategy(self, strat):
        self._options_strategy = strat

    @property
    def stocks_data(self):
        return self._stocks_data

    @stocks_data.setter
    def stocks_data(self, data):
        # Cache the schema alongside the data for column-name lookups
        self._stocks_schema = data.schema
        self._stocks_data = data

    @property
    def options_data(self):
        return self._options_data

    @options_data.setter
    def options_data(self, data):
        self._options_schema = data.schema
        self._options_data = data

    def run(self, rebalance_freq=0, monthly=False, sma_days=None):
        """Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)

        Args:
            rebalance_freq (int, optional):   Determines the frequency of portfolio rebalances. Defaults to 0.
            monthly (bool, optional):         Iterates through data monthly rather than daily. Defaults to False.

        Returns:
            pd.DataFrame:                     Log of the trades executed.
        """
        # Preconditions: all data/strategy attributes must be set and consistent
        assert self._stocks_data, 'Stock data not set'
        assert all(stock.symbol in self._stocks_data['symbol'].values
                   for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
        assert self._options_data, 'Options data not set'
        assert self._options_strategy, 'Options Strategy not set'
        assert self._options_data.schema == self._options_strategy.schema

        # Stock and option series must cover exactly the same trading days
        option_dates = self._options_data['date'].unique()
        stock_dates = self.stocks_data['date'].unique()
        assert np.array_equal(stock_dates, option_dates), 'Stock and options dates do not match (check that TZ are equal)'

        self._initialize_inventories()
        self.current_cash = self.initial_capital
        self.trade_log = pd.DataFrame()
        # Seed the balance one day before the first data point so pct_change works
        self.balance = pd.DataFrame({
            'total capital': self.current_cash,
            'cash': self.current_cash
        }, index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])

        if sma_days:
            # Precompute the SMA column used by the entry filter in _buy_stocks
            self.stocks_data.sma(sma_days)

        # Distinct quote dates, used to derive business-month-start rebalancing days
        dates = pd.DataFrame(self.options_data._data[['quotedate',
                                                      'volume']]).drop_duplicates('quotedate').set_index('quotedate')
        rebalancing_days = pd.to_datetime(
            dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
                                     'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []

        data_iterator = self._data_iterator(monthly)
        bar = pyprind.ProgBar(len(stock_dates), bar_char='█')

        for date, stocks, options in data_iterator:
            if (date in rebalancing_days):
                # Bring the balance up to date for the elapsed period, then rebalance
                previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
                                                    1] if rebalancing_days.get_loc(date) != 0 else date
                self._update_balance(previous_rb_date, date)
                self._rebalance_portfolio(date, stocks, options, sma_days)

            bar.update()

        # Update balance for the period between the last rebalancing day and the last day
        # NOTE(review): if rebalance_freq == 0, rebalancing_days is an empty list and
        # rebalancing_days[-1] raises IndexError here — confirm intended usage.
        self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)

        # Derive aggregate capital columns; first row is the synthetic seed row
        self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
        self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
        # NOTE(review): chained-assignment via .iloc[0] = 0 on a column may not
        # propagate in newer pandas — verify against the pandas version pinned.
        self.balance['stocks capital'].iloc[0] = 0
        self.balance['options capital'].iloc[0] = 0
        self.balance[
            'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
        self.balance['% change'] = self.balance['total capital'].pct_change()
        self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()

        return self.trade_log

    def _initialize_inventories(self):
        """Initialize empty stocks and options inventories."""
        # Options inventory: one (leg, field) MultiIndex column group per strategy leg,
        # plus a 'totals' group tracking aggregate cost/qty/date per position
        columns = pd.MultiIndex.from_product(
            [[l.name for l in self._options_strategy.legs],
             ['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
        totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
        self._options_inventory = pd.DataFrame(columns=columns.append(totals))
        self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])

    def _data_iterator(self, monthly):
        """Returns combined iterator for stock and options data.
        Each step, it produces a tuple like the following:
            (date, stocks, options)

        Returns:
            generator: Daily/monthly iterator over `self._stocks_data` and `self.options_data`.
        """
        if monthly:
            it = zip(self._stocks_data.iter_months(), self._options_data.iter_months())
        else:
            it = zip(self._stocks_data.iter_dates(), self._options_data.iter_dates())

        # Dates are aligned (asserted in run()), so take the date from the stocks side
        return ((date, stocks, options) for (date, stocks), (_, options) in it)

    def _rebalance_portfolio(self, date, stocks, options, sma_days):
        """Reabalances the portfolio according to `self.allocation` weights.

        Args:
            date (pd.Timestamp):    Current date.
            stocks (pd.DataFrame):  Stocks data for the current date.
            options (pd.DataFrame): Options data for the current date.
            sma_days (int):         SMA window size
        """
        # Close option positions flagged by exit filters/thresholds first
        self._execute_option_exits(date, options)

        stock_capital = self._current_stock_capital(stocks)
        options_capital = self._current_options_capital(options)
        total_capital = self.current_cash + stock_capital + options_capital

        # buy stocks
        stocks_allocation = self.allocation['stocks'] * total_capital
        self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])

        # We simulate a sell of the stock positions and then a rebuy.
        # This would **not** work if we added transaction fees.
        self.current_cash = stocks_allocation + total_capital * self.allocation['cash']
        self._buy_stocks(stocks, stocks_allocation, sma_days)

        # exit/enter contracts
        options_allocation = self.allocation['options'] * total_capital
        if options_allocation >= options_capital:
            # Room to add exposure: enter new contracts with the surplus
            self._execute_option_entries(date, options, options_allocation - options_capital)
        else:
            # Over-allocated: unwind enough option positions to get back to target
            to_sell = options_capital - options_allocation
            current_options = self._get_current_option_quotes(options)
            self._sell_some_options(date, to_sell, current_options)

    def _sell_some_options(self, date, to_sell, current_options):
        # Walk the inventory selling whole contracts until ~`to_sell` capital is freed.
        # `current_options` holds per-leg exit quotes aligned with the inventory index.
        sold = 0
        total_costs = sum([current_options[i]['cost'] for i in range(len(current_options))])
        for (exit_cost, (row_index, inventory_row)) in zip(total_costs, self._options_inventory.iterrows()):
            if (to_sell - sold > -exit_cost) and (to_sell - sold) > 0:
                # How many contracts of this position we would need to sell
                qty_to_sell = (to_sell - sold) // exit_cost
                if -qty_to_sell <= inventory_row['totals']['qty']:
                    qty_to_sell = (to_sell - sold) // exit_cost
                else:
                    # Can't sell more than we hold: cap at the full position
                    if qty_to_sell != 0:
                        qty_to_sell = -inventory_row['totals']['qty']
                if qty_to_sell != 0:
                    # Record the (partial) exit in the trade log with flipped order flags
                    trade_log_append = self._options_inventory.loc[row_index].copy()
                    trade_log_append['totals', 'qty'] = -qty_to_sell
                    trade_log_append['totals', 'date'] = date
                    trade_log_append['totals', 'cost'] = exit_cost
                    for i, leg in enumerate(self._options_strategy.legs):
                        trade_log_append[leg.name, 'order'] = ~trade_log_append[leg.name, 'order']
                        trade_log_append[leg.name, 'cost'] = current_options[i].loc[row_index]['cost']
                    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
                    # pin pandas < 2.0 or migrate to pd.concat.
                    self.trade_log = self.trade_log.append(trade_log_append, ignore_index=True)
                    # Reduce the open position in-place
                    self._options_inventory.at[row_index, ('totals', 'date')] = date
                    self._options_inventory.at[row_index, ('totals', 'qty')] += qty_to_sell
                    sold += (qty_to_sell * exit_cost)
        # Credit the freed capital (sold is negative-signed relative to to_sell)
        self.current_cash += sold - to_sell

    def _current_stock_capital(self, stocks):
        """Return the current value of the stocks inventory.

        Args:
            stocks (pd.DataFrame): Stocks data for the current time step.

        Returns:
            float: Total capital in stocks.
        """
        # Left join keeps every inventory row; price comes from today's data
        current_stocks = self._stocks_inventory.merge(stocks,
                                                      how='left',
                                                      left_on='symbol',
                                                      right_on=self._stocks_schema['symbol'])
        return (current_stocks[self._stocks_schema['adjClose']] * current_stocks['qty']).sum()

    def _current_options_capital(self, options):
        # Mark-to-market value of the options inventory using current exit quotes.
        options_value = self._get_current_option_quotes(options)
        values_by_row = [0] * len(options_value[0])
        if len(options_value[0]) != 0:
            # Sum the exit cost of every leg per inventory row
            for i in range(len(self._options_strategy.legs)):
                values_by_row += options_value[i]['cost'].values
            # Exit costs are signed opposite to holdings, hence the negation
            total = -sum(values_by_row * self._options_inventory['totals']['qty'].values)
        else:
            total = 0
        return total

    def _buy_stocks(self, stocks, allocation, sma_days):
        """Buys stocks according to their given weight, optionally using an SMA entry filter.
        Updates `self._stocks_inventory` and `self.current_cash`.

        Args:
            stocks (pd.DataFrame):  Stocks data for the current time step.
            allocation (float):     Total capital allocation for stocks.
            sma_days (int):         SMA window.
        """
        stock_symbols = [stock.symbol for stock in self.stocks]
        # Restrict today's data to the portfolio symbols
        query = '{} in {}'.format(self._stocks_schema['symbol'], stock_symbols)
        inventory_stocks = stocks.query(query)
        stock_percentages = np.array([stock.percentage for stock in self.stocks])
        stock_prices = inventory_stocks[self._stocks_schema['adjClose']]

        if sma_days:
            # SMA filter: only buy when price is above its moving average
            qty = np.where(inventory_stocks['sma'] < stock_prices, (allocation * stock_percentages) // stock_prices, 0)
        else:
            qty = (allocation * stock_percentages) // stock_prices

        self.current_cash -= np.sum(stock_prices * qty)
        self._stocks_inventory = pd.DataFrame({'symbol': stock_symbols, 'price': stock_prices, 'qty': qty})

    def _update_balance(self, start_date, end_date):
        """Updates self.balance in batch in a certain period between rebalancing days"""
        # Slice both datasets to [start_date, end_date) using string queries
        stocks_date_col = self._stocks_schema['date']
        stocks_data = self._stocks_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
            date_col=stocks_date_col, start_date=start_date, end_date=end_date))
        options_date_col = self._options_schema['date']
        options_data = self._options_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
            date_col=options_date_col, start_date=start_date, end_date=end_date))

        # Accumulate daily marked-to-market value of calls and puts separately
        calls_value = pd.Series(0, index=options_data[options_date_col].unique())
        puts_value = pd.Series(0, index=options_data[options_date_col].unique())
        for leg in self._options_strategy.legs:
            leg_inventory = self._options_inventory[leg.name]
            # Value positions at the closing side of the quote (opposite direction)
            cost_field = (~leg.direction).value
            for contract in leg_inventory['contract']:
                leg_inventory_contract = leg_inventory.query('contract == "{}"'.format(contract))
                qty = self._options_inventory.loc[leg_inventory_contract.index]['totals']['qty'].values[0]
                options_contract_col = self._options_schema['contract']
                current = leg_inventory_contract[['contract']].merge(options_data,
                                                                     how='left',
                                                                     left_on='contract',
                                                                     right_on=options_contract_col)
                current.set_index(options_date_col, inplace=True)
                if cost_field == Direction.BUY.value:
                    # Buying back a short position is a cash outflow, flip the sign
                    current[cost_field] = -current[cost_field]
                if (leg_inventory_contract['type'] == Type.CALL.value).any():
                    calls_value = calls_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
                else:
                    puts_value = puts_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)

        # Daily stock values per symbol for the period
        stocks_current = self._stocks_inventory[['symbol', 'qty']].merge(stocks_data[['date', 'symbol', 'adjClose']],
                                                                         on='symbol')
        stocks_current['cost'] = stocks_current['qty'] * stocks_current['adjClose']
        columns = [
            stocks_current[stocks_current['symbol'] == stock.symbol].set_index(stocks_date_col)[[
                'cost'
            ]].rename(columns={'cost': stock.symbol}) for stock in self._stocks
        ]
        add = pd.concat(columns, axis=1)
        add['cash'] = self.current_cash
        add['options qty'] = self._options_inventory['totals']['qty'].sum()
        add['calls capital'] = calls_value
        add['puts capital'] = puts_value
        add['stocks qty'] = self._stocks_inventory['qty'].sum()
        for _index, row in self._stocks_inventory.iterrows():
            symbol = row['symbol']
            add[symbol + ' qty'] = row['qty']

        # sort=False means we're assuming the updates are done in chronological order, i.e,
        # the dates in add are the immediate successors to the ones at the end of self.balance.
        # Pass sort=True to ensure self.balance is always sorted chronologically if needed.
        self.balance = self.balance.append(add, sort=False)

    def _execute_option_entries(self, date, options, options_allocation):
        """Enters option positions according to `self._options_strategy`.
        Calls `self._pick_entry_signals` to select from the entry signals given by the strategy.
        Updates `self._options_inventory` and `self.current_cash`.

        Args:
            date (pd.Timestamp):        Current date.
            options (pd.DataFrame):     Options data for the current time step.
            options_allocation (float): Capital amount allocated to options.
        """
        self.current_cash += options_allocation

        # Remove contracts already in inventory
        inventory_contracts = pd.concat(
            [self._options_inventory[leg.name]['contract'] for leg in self._options_strategy.legs])
        subset_options = options[~options[self._options_schema['contract']].isin(inventory_contracts)]

        entry_signals = []
        for leg in self._options_strategy.legs:
            flt = leg.entry_filter
            cost_field = leg.direction.value

            leg_entries = subset_options[flt(subset_options)]

            # Exit if no entry signals for the current leg
            if leg_entries.empty:
                return

            # Normalize column names to the internal inventory schema
            fields = self._signal_fields(cost_field)
            leg_entries = leg_entries.reindex(columns=fields.keys())
            leg_entries.rename(columns=fields, inplace=True)
            order = get_order(leg.direction, Signal.ENTRY)
            leg_entries['order'] = order

            # Change sign of cost for SELL orders
            if leg.direction == Direction.SELL:
                leg_entries['cost'] = -leg_entries['cost']
            leg_entries['cost'] *= self.shares_per_contract

            leg_entries.columns = pd.MultiIndex.from_product([[leg.name], leg_entries.columns])
            entry_signals.append(leg_entries.reset_index(drop=True))

        # Append the 'totals' column to entry_signals
        total_costs = sum([leg_entry.droplevel(0, axis=1)['cost'] for leg_entry in entry_signals])
        # Integer number of multi-leg units the allocation can afford
        qty = options_allocation // abs(total_costs)
        totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qty, 'date': date})
        totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
        entry_signals.append(totals)
        entry_signals = pd.concat(entry_signals, axis=1)

        # Remove signals where qty == 0
        entry_signals = entry_signals[entry_signals['totals']['qty'] > 0]

        entries = self._pick_entry_signals(entry_signals)

        # Update options inventory, trade log and current cash
        self._options_inventory = self._options_inventory.append(entries, ignore_index=True)
        self.trade_log = self.trade_log.append(entries, ignore_index=True)
        self.current_cash -= np.sum(entries['totals']['cost'] * entries['totals']['qty'])

    def _execute_option_exits(self, date, options):
        """Exits option positions according to `self._options_strategy`. Option positions are closed
        whenever the strategy signals an exit, when the profit/loss thresholds are exceeded or whenever
        the contracts in `self._options_inventory` are not found in `options`.
        Updates `self._options_inventory` and `self.current_cash`.

        Args:
            date (pd.Timestamp):    Current date.
            options (pd.DataFrame): Options data for the current time step.
        """
        strategy = self._options_strategy
        current_options_quotes = self._get_current_option_quotes(options)

        filter_masks = []
        for i, leg in enumerate(strategy.legs):
            flt = leg.exit_filter

            # This mask is to ensure that legs with missing contracts exit.
            missing_contracts_mask = current_options_quotes[i]['cost'].isna()
            filter_masks.append(flt(current_options_quotes[i]) | missing_contracts_mask)
            # Rename quote columns to the inventory schema for the candidate frame
            fields = self._signal_fields((~leg.direction).value)
            current_options_quotes[i] = current_options_quotes[i].reindex(columns=fields.values())
            current_options_quotes[i].rename(columns=fields, inplace=True)
            current_options_quotes[i].columns = pd.MultiIndex.from_product([[leg.name],
                                                                            current_options_quotes[i].columns])

        exit_candidates = pd.concat(current_options_quotes, axis=1)

        # If a contract is missing we replace the NaN values with those of the inventory
        # except for cost, which we imput as zero.
        exit_candidates = self._impute_missing_option_values(exit_candidates)

        # Append the 'totals' column to exit_candidates
        qtys = self._options_inventory['totals']['qty']
        total_costs = sum([exit_candidates[l.name]['cost'] for l in self._options_strategy.legs])
        totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qtys, 'date': date})
        totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
        exit_candidates = pd.concat([exit_candidates, totals], axis=1)

        # Compute which contracts need to exit, either because of price thresholds or user exit filters
        threshold_exits = strategy.filter_thresholds(self._options_inventory['totals']['cost'], total_costs)
        filter_mask = reduce(lambda x, y: x | y, filter_masks)
        exits_mask = threshold_exits | filter_mask
        exits = exit_candidates[exits_mask]
        total_costs = total_costs[exits_mask] * exits['totals']['qty']

        # Update options inventory, trade log and current cash
        self._options_inventory.drop(self._options_inventory[exits_mask].index, inplace=True)
        self.trade_log = self.trade_log.append(exits, ignore_index=True)
        self.current_cash -= sum(total_costs)

    def _pick_entry_signals(self, entry_signals):
        """Returns the entry signals to execute.

        Args:
            entry_signals (pd.DataFrame): DataFrame of option entry signals chosen by the strategy.

        Returns:
            pd.DataFrame: DataFrame of entries to execute.
        """
        if not entry_signals.empty:
            # FIXME: This is a naive signal selection criterion, it simply picks the first one in `entry_singals`
            return entry_signals.iloc[0]
        else:
            return entry_signals

    def _signal_fields(self, cost_field):
        # Map dataset schema column names -> internal inventory column names;
        # `cost_field` selects which quote side (bid/ask) becomes 'cost'.
        fields = {
            self._options_schema['contract']: 'contract',
            self._options_schema['underlying']: 'underlying',
            self._options_schema['expiration']: 'expiration',
            self._options_schema['type']: 'type',
            self._options_schema['strike']: 'strike',
            self._options_schema[cost_field]: 'cost',
            'order': 'order'
        }
        return fields

    def _get_current_option_quotes(self, options):
        """Returns the current quotes for all the options in `self._options_inventory`
        as a list of DataFrames. It also adds a `cost` column with the cost of closing
        the position in each contract and an `order` column with the corresponding exit order type.

        Args:
            options (pd.DataFrame): Options data in the current time step.

        Returns:
            [pd.DataFrame]: List of DataFrames, one for each leg in `self._options_inventory`,
            with the exit cost for the contracts.
        """
        current_options_quotes = []
        for leg in self._options_strategy.legs:
            inventory_leg = self._options_inventory[leg.name]

            # This is a left join to ensure that the result has the same length as the inventory. If the contract
            # isn't in the daily data the values will all be NaN and the filters should all yield False.
            leg_options = inventory_leg[['contract']].merge(options,
                                                            how='left',
                                                            left_on='contract',
                                                            right_on=leg.schema['contract'])
            # leg_options.index needs to be the same as the inventory's so that the exit masks that are constructed
            # from it can be correctly applied to the inventory.
            leg_options.index = self._options_inventory.index
            leg_options['order'] = get_order(leg.direction, Signal.EXIT)
            # Closing cost comes from the opposite quote side of the entry direction
            leg_options['cost'] = leg_options[self._options_schema[(~leg.direction).value]]

            # Change sign of cost for SELL orders
            if ~leg.direction == Direction.SELL:
                leg_options['cost'] = -leg_options['cost']
            leg_options['cost'] *= self.shares_per_contract

            current_options_quotes.append(leg_options)

        return current_options_quotes

    def _impute_missing_option_values(self, exit_candidates):
        """Returns a copy of the inventory with the cost of all its contracts set to zero.

        Args:
            exit_candidates (pd.DataFrame): DataFrame of exit candidates with possible missing values.

        Returns:
            pd.DataFrame: Exit candidates with imputed values.
        """
        df = self._options_inventory.copy()
        # Zero out costs so a missing contract exits with zero proceeds
        for leg in self._options_strategy.legs:
            df.at[:, (leg.name, 'cost')] = 0
        return exit_candidates.fillna(df)

    def __repr__(self):
        return "Backtest(capital={}, allocation={}, stocks={}, strategy={})".format(
            self.current_cash, self.allocation, self._stocks, self._options_strategy)
47.066038
119
0.621768
2,949
24,945
5.016616
0.126823
0.049074
0.032446
0.013992
0.296945
0.191429
0.154049
0.107544
0.097066
0.084494
0
0.00344
0.277571
24,945
529
120
47.155009
0.817435
0.197394
0
0.09375
0
0
0.072942
0
0
0
0
0.00189
0.021875
1
0.078125
false
0
0.015625
0.015625
0.146875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51fb7539b1f2f6afaf1c56b1fdd1c2bde6b3883c
7,948
py
Python
kicktipper/predictor.py
Kricki/kicktipper
fae146c8df0d9ba9bebe84c1f20cf8df6fc39678
[ "ISC" ]
1
2016-11-23T16:09:46.000Z
2016-11-23T16:09:46.000Z
kicktipper/predictor.py
Kricki/kicktipper
fae146c8df0d9ba9bebe84c1f20cf8df6fc39678
[ "ISC" ]
1
2017-04-21T08:38:39.000Z
2018-07-19T20:46:10.000Z
kicktipper/predictor.py
Kricki/kicktipper
fae146c8df0d9ba9bebe84c1f20cf8df6fc39678
[ "ISC" ]
null
null
null
import numpy as np
from scipy import stats

# NOTE: matplotlib is imported lazily inside the two plotting methods so the
# prediction logic can run in environments without a plotting backend installed.


class MatchPredictor:
    """
    Class to calculates the probabilities for different scores (outcomes) of two teams.

    Attributes
    ----------
    l1 : float
        Projected score for team 1 (expectation value for Poisson distribution)
    l2 : float
        Projected score for team 2 (expectation value for Poisson distribution)
    """

    def __init__(self, l1=0.0, l2=0.0):
        # Fixed: l2 default was the int 0, inconsistent with l1's 0.0 (same value).
        # Number of bins used when truncating the Poisson pmf (max goals considered).
        self._poisson_n_bins = 8
        self.l1 = l1
        self.l2 = l2

    def poisson_pmf(self, l, n_bins=None):
        """ Returns the probablity mass function of the Poissonian distribution with average number l

        See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html

        Parameters
        ----------
        l : float
            Average number of events per interval ("shape parameter")
        n_bins : int
            Number of bins. If None (default), the value from the class attribute _poisson_n_bins is used.

        Returns
        -------
        Probability mass function of Poisson distribution
        """
        if n_bins is None:
            n_bins = self._poisson_n_bins
        n = np.arange(0, n_bins)
        return stats.poisson.pmf(n, l)

    def calculate_score_probs(self, mode='all'):
        """ Calculates the probabilities for different scores (outcomes) of two teams.

        The required information is the expection value for their goal distributions l1 and l2 (class attributes).

        Parameters
        ----------
        mode : str, {'all' (default), 'draws', 'team1_wins', 'team2_wins'}
            If 'all', the complete probabiliy matrix is returned. If 'draws' only the diagonal elements
            (corresponding to all possible draws) are non-zero. If 'team1_wins', only the elements corresponding
            to outcomes where team 1 wins are non-zero. 'team2_wins' is analog to 'team1_wins'.

        Returns
        -------
        nd.array
            Quadratic matrix of score probabilities. First dimension corresponds to team 1, second dimension
            to team 2, e.g. score_probs[2, 1] gives the probability for the score being 2:1.

        Raises
        ------
        ValueError
            If `mode` is not one of the accepted values.
        """
        y1 = self.poisson_pmf(self.l1)
        y2 = self.poisson_pmf(self.l2)
        # Outer product: score_probs[i, j] = P(team1 scores i) * P(team2 scores j)
        score_probs = np.tensordot(y1, y2, axes=0)  # vector * vector => matrix

        if mode == 'all':
            pass
        elif mode == 'draws':
            # diagonal elements correspond to probabilites of the draws (0:0, 1:1, 2:2, ...)
            score_probs = np.diag(np.diag(score_probs))
        elif mode == 'team1_wins':
            # elements of lower left triangle (excluding diagonals => k=-1) correspond to probabilies for outcomes at
            # which team 1 wins (1:0, 2:0, 2:1, ...)
            score_probs = np.tril(score_probs, k=-1)
        elif mode == 'team2_wins':
            # elements of upper right triangle (excluding diagonals => k=1) correspond to probabilies for outcomes at
            # which team 2 wins (0:1, 0:2, 1:2, ...)
            score_probs = np.triu(score_probs, k=1)
        else:
            # Fixed: idiomatic raise statement instead of parenthesized call-style raise
            raise ValueError('Invalid value for "mode".')

        return score_probs

    @staticmethod
    def plot_score_probs(score_probs):
        """Plot the score-probability matrix as an annotated heat map (values in %)."""
        import matplotlib.pyplot as plt  # deferred: only needed for plotting

        fig, ax = plt.subplots()
        fig.set_size_inches(5, 5)
        ax.imshow(score_probs, cmap='jet')
        ax.set_ylabel('Goals Team 1')
        ax.set_xlabel('Goals Team 2')
        ax.set_title('Score probabilites (%)')

        # write probability (in %) in each element of the matrix
        for (j, i), label in np.ndenumerate(score_probs):
            ax.text(i, j, round(label*100, 1), ha='center', va='center')

        plt.show()

    def plot_poisson_pmf(self):
        """Plot the truncated Poisson pmf of both teams' projected scores."""
        import matplotlib.pyplot as plt  # deferred: only needed for plotting

        fig, ax = plt.subplots()
        fig.set_size_inches(5, 5)
        n_bins = np.arange(0, self._poisson_n_bins)
        y1 = self.poisson_pmf(self.l1)
        y2 = self.poisson_pmf(self.l2)
        ax.plot(n_bins, y1, 'o-', color='red', label='Team 1')
        ax.plot(n_bins, y2, 'o-', color='blue', label='Team 2')
        ax.set_xlabel('Scored goals')
        ax.set_ylabel('Probability')
        ax.set_title('Poisson distribution')
        ax.grid()
        ax.legend()
        plt.show()

    @property
    def probs_tendency(self):
        """ Calculate the probability for the "tendency" of the outcome for a match played by two teams.

        Returns
        -------
        list with 3 elements
            [probability team 1 wins, probability team 2 wins, probabilty for a draw]
        """
        p_team1 = np.sum(self.calculate_score_probs(mode='team1_wins'))
        p_team2 = np.sum(self.calculate_score_probs(mode='team2_wins'))
        p_draw = np.sum(self.calculate_score_probs(mode='draws'))

        return [p_team1, p_team2, p_draw]

    def prob_goal_difference(self, d, mode='all'):
        """ Calculate the probability for the goal difference of the match played by two teams to be d.

        Parameters
        ----------
        d : int
            Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
        mode : str
            Passed to call of calculate_score_probs. See definition there.

        Returns
        -------
        float
            Probability
        """
        score_probs = self.calculate_score_probs(mode=mode)
        # Parameter k: defines which diagonal axis offset to main diagonal is used.
        # The axis offset by -d corresponds to the outcomes with a goal difference of d.
        k = -1*d
        return np.sum(np.diag(score_probs, k=k))

    def most_likely_goal_difference(self, mode='all'):
        """Return (goal difference, probability) of the most likely goal difference."""
        # calculate probabilities for all possible goal differences (limited by the width of the Poisson distribution)
        d_ar = np.arange(-(self._poisson_n_bins-1), self._poisson_n_bins)
        prob = np.zeros(len(d_ar))
        for idx, d in enumerate(d_ar):
            prob[idx] = self.prob_goal_difference(d, mode)

        return d_ar[np.argmax(prob)], np.max(prob)

    def most_likely_score(self, d=None, mode='all'):
        """ Returns the most likely score.

        Parameters "mode" and "d" set furhter constrains on the subset of score probabilites to be considered.

        Parameters
        ----------
        d : int
            Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
        mode : str
            Passed to call of calculate_score_probs. See definition there.

        Returns
        -------
        tuple
            ([result], probability) e.g. ([2,1], 0.06)
        """
        score_probs = self.calculate_score_probs(mode=mode)
        if d is not None:
            # Set all elements except the diagonal offset by -d to zero.
            # Remaining non-zero elements correspond to results with a goal difference of d.
            score_probs = np.diag(np.diag(score_probs, k=-d), k=-d)

        # gets the indicies with the highest probability inside score_probs as list.
        # See: https://stackoverflow.com/questions/9482550/argmax-of-numpy-array-returning-non-flat-indices
        result = list(np.unravel_index(np.argmax(score_probs), score_probs.shape))
        prob = np.max(score_probs)

        return result, prob

    @property
    def predicted_score(self):
        """Return the predicted score as ([goals team 1, goals team 2], probability)."""
        # 1) Calculate most likely tendency
        tendency = np.argmax(self.probs_tendency)  # 0: team 1 wins, 1: team 2 wins, 2: draw

        # 2) What is the most likely goal difference within the tendency
        if tendency == 0:
            mode = 'team1_wins'
        elif tendency == 1:
            mode = 'team2_wins'
        elif tendency == 2:
            mode = 'draws'
        else:
            # Fixed: error-message typo ("tendendy") and call-style raise
            raise ValueError('Invalid value for tendency')
        d, _ = self.most_likely_goal_difference(mode=mode)

        # 3) What is the most likely result with the predicted goal difference?
        return self.most_likely_score(d=d, mode=mode)
37.847619
120
0.6116
1,074
7,948
4.408752
0.234637
0.063358
0.032101
0.016895
0.293981
0.226822
0.196832
0.176558
0.145723
0.145723
0
0.021854
0.28611
7,948
209
121
38.028708
0.812654
0.45848
0
0.179775
0
0
0.070601
0
0
0
0
0
0
1
0.11236
false
0.011236
0.033708
0
0.235955
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51fbfcfac2b93e98239de7ce36bcc1077cb951a1
7,012
py
Python
app.py
ai4r/SGToolkit
684df2cfc830eeb8ea23c95a8af1c9199991ec99
[ "MIT" ]
16
2021-08-11T08:55:41.000Z
2022-02-11T02:45:55.000Z
app.py
ai4r/SGToolkit
684df2cfc830eeb8ea23c95a8af1c9199991ec99
[ "MIT" ]
9
2021-09-07T14:52:59.000Z
2022-03-24T13:33:00.000Z
app.py
ai4r/SGToolkit
684df2cfc830eeb8ea23c95a8af1c9199991ec99
[ "MIT" ]
2
2021-08-25T06:00:43.000Z
2021-10-07T00:57:49.000Z
from flask import Flask, render_template, request, send_file from flask_pymongo import PyMongo import json import sg_core_api as sgapi import os import pathlib import numpy as np from bson.json_util import dumps from bson.objectid import ObjectId from datetime import datetime from scipy.interpolate import CubicSpline app = Flask(__name__) gesture_generator = sgapi.get_gesture_generator() root_path = pathlib.Path(__file__).parent app.config["MONGO_URI"] = "mongodb://localhost" # setup your own db to enable motion library and rule functions mongo = PyMongo(app) @app.route('/') def index(): return render_template('index.html') @app.route('/api/motion', methods=['GET', 'POST']) def motion_library(): if request.method == 'POST': json = request.get_json() json["motion"] = sgapi.convert_pose_coordinate_for_ui(np.array(json["motion"])).tolist() result = {} try: mongo.db.motion.insert_one(json) result['msg'] = "success" except Exception as e: result['msg'] = "fail" return result elif request.method == 'GET': try: cursor = mongo.db.motion.find().sort("name", 1) except AttributeError as e: return {} # empty library motions = sgapi.convert_pose_coordinate_for_ui_for_motion_library(list(cursor)) return dumps(motions) else: assert False @app.route('/api/delete_motion/<id>', methods=['GET']) def delete_motion_library(id): result = mongo.db.motion.delete_one({'_id': ObjectId(id)}) msg = {} if result.deleted_count > 0: msg['msg'] = "success" else: msg['msg'] = "fail" return msg @app.route('/api/rule', methods=['GET', 'POST']) def rule(): if request.method == 'POST': json = request.get_json() result = {} try: json['motion'] = ObjectId(json['motion']) mongo.db.rule.insert_one(json) result['msg'] = "success" except Exception as e: print(json) print(e) result['msg'] = "fail" return result elif request.method == 'GET': pipeline = [{'$lookup': {'from': 'motion', 'localField': 'motion', 'foreignField': '_id', 'as': 'motion_info'}}, ] try: cursor = mongo.db.rule.aggregate(pipeline) except 
AttributeError as e: return {} # empty rules rules = sgapi.convert_pose_coordinate_for_ui_for_rule_library(cursor) rules = dumps(rules) return rules else: assert False @app.route('/api/delete_rule/<id>', methods=['GET']) def delete_rule(id): result = mongo.db.rule.delete_one({'_id': ObjectId(id)}) msg = {} if result.deleted_count > 0: msg['msg'] = "success" else: msg['msg'] = "fail" return msg @app.route('/api/input', methods=['POST']) def input_text_post(): content = request.get_json() input_text = content.get('text-input') if input_text is None or len(input_text) == 0: return {'msg': 'empty'} print('--------------------------------------------') print('request time:', datetime.now()) print('request IP:', request.remote_addr) print(input_text) kp_constraints = content.get('keypoint-constraints') if kp_constraints: pose_constraints_input = np.array(kp_constraints) pose_constraints = sgapi.convert_pose_coordinate_for_model(np.copy(pose_constraints_input)) else: pose_constraints = None pose_constraints_input = None style_constraints = content.get('style-constraints') if style_constraints: style_constraints = np.array(style_constraints) else: style_constraints = None result = {} result['msg'] = "success" result['input-pose-constraints'] = pose_constraints_input.tolist() if pose_constraints_input is not None else None result['input-style-constraints'] = style_constraints.tolist() if style_constraints is not None else None result['input-voice'] = content.get('voice') result['is-manual-scenario'] = content.get('is-manual-scenario') if content.get('is-manual-scenario'): # interpolate key poses n_frames = pose_constraints_input.shape[0] n_joints = int((pose_constraints_input.shape[1] - 1) / 3) key_idxs = [i for i, e in enumerate(pose_constraints_input) if e[-1] == 1] if len(key_idxs) >= 2: out_gesture = np.zeros((n_frames, n_joints * 3)) xs = np.arange(0, n_frames, 1) for i in range(n_joints): pts = pose_constraints_input[key_idxs, i * 3:(i + 1) * 3] cs = 
CubicSpline(key_idxs, pts, bc_type='clamped') out_gesture[:, i * 3:(i + 1) * 3] = cs(xs) result['output-data'] = out_gesture.tolist() result['audio-filename'] = os.path.split(result['input-voice'])[ 1] # WARNING: assumed manual mode uses external audio file else: result['msg'] = "fail" else: # run gesture generation model output = gesture_generator.generate(input_text, pose_constraints=pose_constraints, style_values=style_constraints, voice=content.get('voice')) if output is None: # something wrong result['msg'] = "fail" else: gesture, audio, tts_filename, words_with_timestamps = output gesture = sgapi.convert_pose_coordinate_for_ui(gesture) result['audio-filename'] = os.path.split(tts_filename)[1] # filename without path result['words-with-timestamps'] = words_with_timestamps result['output-data'] = gesture.tolist() return result @app.route('/media/<path:filename>/<path:new_filename>') def download_audio_file(filename, new_filename): return send_file(os.path.join('./cached_wav', filename), as_attachment=True, attachment_filename=new_filename, cache_timeout=0) @app.route('/mesh/<path:filename>') def download_mesh_file(filename): mesh_path = root_path.joinpath("static", "mesh", filename) return send_file(str(mesh_path), as_attachment=True, cache_timeout=0) @app.route('/upload_audio', methods=['POST']) def upload(): upload_dir = './cached_wav' file_names = [] for key in request.files: file = request.files[key] _, ext = os.path.splitext(file.filename) print('uploaded: ', file.filename) try: upload_path = os.path.join(upload_dir, "uploaded_audio" + ext) file.save(upload_path) file_names.append(upload_path) except: print('save fail: ' + os.path.join(upload_dir, file.filename)) return json.dumps({'filename': [f for f in file_names]}) if __name__ == '__main__': app.run()
33.075472
118
0.61138
847
7,012
4.864227
0.227863
0.050971
0.043689
0.031553
0.251942
0.20267
0.153155
0.107524
0.089563
0.089563
0
0.004599
0.255705
7,012
211
119
33.232227
0.784825
0.032801
0
0.301775
0
0
0.123284
0.032039
0
0
0
0
0.011834
1
0.053254
false
0
0.065089
0.011834
0.201183
0.047337
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51fd76c197f06b4f0e3cd25b7c5c3a572cdfb52f
6,662
py
Python
azure-devops/azext_devops/dev/common/credential_store.py
keithlemon/azure-devops-cli-extension
4989e5f53650f186e638ccc186605986c76d59bf
[ "MIT" ]
326
2019-04-10T12:38:23.000Z
2022-03-31T23:07:49.000Z
azure-devops/azext_devops/dev/common/credential_store.py
keithlemon/azure-devops-cli-extension
4989e5f53650f186e638ccc186605986c76d59bf
[ "MIT" ]
562
2019-04-10T07:36:12.000Z
2022-03-28T07:37:54.000Z
azure-devops/azext_devops/dev/common/credential_store.py
keithlemon/azure-devops-cli-extension
4989e5f53650f186e638ccc186605986c76d59bf
[ "MIT" ]
166
2019-04-10T07:59:40.000Z
2022-03-16T14:17:13.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import sys from knack.util import CLIError, ensure_dir from knack.log import get_logger from six.moves import configparser from .config import AZ_DEVOPS_GLOBAL_CONFIG_DIR from .pip_helper import install_keyring logger = get_logger(__name__) class CredentialStore: def __init__(self): self._initialize_keyring() def set_password(self, key, token): try: import keyring except ImportError: install_keyring() self._initialize_keyring() import keyring try: # check for and delete existing credential old_token = keyring.get_password(key, self._USERNAME) if old_token is not None: keyring.delete_password(key, self._USERNAME) logger.debug('Setting credential: %s', key) keyring.set_password(key, self._USERNAME, token) except Exception as ex: # pylint: disable=broad-except # store credentials in azuredevops config directory if keyring is missing or malfunctioning if sys.platform.startswith(self._LINUX_PLATFORM): logger.warning('Failed to store PAT using keyring; falling back to file storage.') logger.warning('You can clear the stored credential by running az devops logout.') logger.warning('Refer https://aka.ms/azure-devops-cli-auth to know more on sign in with PAT.') logger.debug('Keyring failed. 
ERROR :%s', ex) logger.debug('Storing credentials in the file: %s', self._PAT_FILE) creds_list = self._get_credentials_list() if key not in creds_list.sections(): creds_list.add_section(key) logger.debug('Added new entry to PAT file : %s ', key) creds_list.set(key, self._USERNAME, token) self._commit_change(creds_list) else: raise CLIError(ex) def get_password(self, key): try: import keyring except ImportError: return None token = None try: token = keyring.get_password(key, self._USERNAME) except Exception as ex: # pylint: disable=broad-except # fetch credentials from file if keyring is missing or malfunctioning if sys.platform.startswith(self._LINUX_PLATFORM): token = None else: raise CLIError(ex) # look for credential in file too for linux if token is None if token is None and sys.platform.startswith(self._LINUX_PLATFORM): token = self.get_PAT_from_file(key) return token def clear_password(self, key): try: import keyring except ImportError: install_keyring() self._initialize_keyring() import keyring if sys.platform.startswith(self._LINUX_PLATFORM): keyring_token = None file_token = None try: keyring_token = keyring.get_password(key, self._USERNAME) if keyring_token: keyring.delete_password(key, self._USERNAME) except Exception as ex: # pylint: disable=broad-except logger.debug("%s", ex) finally: file_token = self.get_PAT_from_file(key) if file_token: self.delete_PAT_from_file(key) if(keyring_token is None and file_token is None): raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG) else: try: keyring.delete_password(key, self._USERNAME) except keyring.errors.PasswordDeleteError: raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG) except RuntimeError as ex: # pylint: disable=broad-except raise CLIError(ex) def get_PAT_from_file(self, key): ensure_dir(AZ_DEVOPS_GLOBAL_CONFIG_DIR) logger.debug('Keyring not configured properly or package not found.' 
'Looking for credentials with key:%s in the file: %s', key, self._PAT_FILE) creds_list = self._get_credentials_list() try: return creds_list.get(key, self._USERNAME) except (configparser.NoOptionError, configparser.NoSectionError): return None def delete_PAT_from_file(self, key): logger.debug('Keyring not configured properly or package not found.' 'Looking for credentials with key:%s in the file: %s', key, self._PAT_FILE) creds_list = self._get_credentials_list() if key not in creds_list.sections(): raise CLIError(self._CRDENTIAL_NOT_FOUND_MSG) creds_list.remove_section(key) self._commit_change(creds_list) @staticmethod def _get_config_parser(): if sys.version_info.major == 3: return configparser.ConfigParser(interpolation=None) return configparser.ConfigParser() @staticmethod def _get_credentials_list(): try: credential_list = CredentialStore._get_config_parser() credential_list.read(CredentialStore._PAT_FILE) return credential_list except BaseException: # pylint: disable=broad-except return CredentialStore._get_config_parser() @staticmethod def _commit_change(credential_list): with open(CredentialStore._PAT_FILE, 'w+') as creds_file: credential_list.write(creds_file) @staticmethod def _initialize_keyring(): try: import keyring except ImportError: return def _only_builtin(backend): return ( backend.__module__.startswith('keyring.backends.') and 'chain' not in backend.__module__ ) keyring.core.init_backend(_only_builtin) logger.debug('Keyring backend : %s', keyring.get_keyring()) # a value is required for the python config file that gets generated on some operating systems. _USERNAME = 'Personal Access Token' _LINUX_PLATFORM = 'linux' _PAT_FILE = os.path.join(AZ_DEVOPS_GLOBAL_CONFIG_DIR, 'personalAccessTokens') _CRDENTIAL_NOT_FOUND_MSG = 'The credential was not found'
41.123457
110
0.613029
745
6,662
5.238926
0.242953
0.021522
0.034589
0.04125
0.421214
0.366129
0.338201
0.257238
0.209582
0.209582
0
0.000212
0.290904
6,662
161
111
41.378882
0.825995
0.125038
0
0.459259
0
0.007407
0.111283
0
0
0
0
0
0
1
0.081481
false
0.081481
0.125926
0.007407
0.318519
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
51fd8b0ddb6e11c4e62343fed936c2c868f66e49
1,969
py
Python
fhir/resources/STU3/tests/test_adverseevent.py
cstoltze/fhir.resources
52f99738935b7313089d89daf94d73ce7d167c9d
[ "BSD-3-Clause" ]
144
2019-05-08T14:24:43.000Z
2022-03-30T02:37:11.000Z
fhir/resources/STU3/tests/test_adverseevent.py
cstoltze/fhir.resources
52f99738935b7313089d89daf94d73ce7d167c9d
[ "BSD-3-Clause" ]
82
2019-05-13T17:43:13.000Z
2022-03-30T16:45:17.000Z
fhir/resources/STU3/tests/test_adverseevent.py
cstoltze/fhir.resources
52f99738935b7313089d89daf94d73ce7d167c9d
[ "BSD-3-Clause" ]
48
2019-04-04T14:14:53.000Z
2022-03-30T06:07:31.000Z
# -*- coding: utf-8 -*- """ Profile: http://hl7.org/fhir/StructureDefinition/AdverseEvent Release: STU3 Version: 3.0.2 Revision: 11917 Last updated: 2019-10-24T11:53:00+11:00 """ from pydantic.validators import bytes_validator # noqa: F401 from .. import fhirtypes # noqa: F401 from .. import adverseevent def impl_adverseevent_1(inst): assert inst.category == "AE" assert inst.date == fhirtypes.DateTime.validate("2017-01-29T12:34:56+00:00") assert inst.description == "This was a mild rash on the left forearm" assert inst.id == "example" assert inst.identifier.system == "http://acme.com/ids/patients/risks" assert inst.identifier.value == "49476534" assert inst.recorder.reference == "Practitioner/example" assert inst.seriousness.coding[0].code == "Mild" assert inst.seriousness.coding[0].display == "Mild" assert ( inst.seriousness.coding[0].system == "http://hl7.org/fhir/adverse-event-seriousness" ) assert inst.subject.reference == "Patient/example" assert inst.suspectEntity[0].instance.reference == "Medication/example" assert inst.text.status == "generated" assert inst.type.coding[0].code == "304386008" assert inst.type.coding[0].display == "O/E - itchy rash" assert inst.type.coding[0].system == "http://snomed.info/sct" def test_adverseevent_1(base_settings): """No. 1 tests collection for AdverseEvent. Test File: adverseevent-example.json """ filename = base_settings["unittest_data_dir"] / "adverseevent-example.json" inst = adverseevent.AdverseEvent.parse_file( filename, content_type="application/json", encoding="utf-8" ) assert "AdverseEvent" == inst.resource_type impl_adverseevent_1(inst) # testing reverse by generating data from itself and create again. data = inst.dict() assert "AdverseEvent" == data["resourceType"] inst2 = adverseevent.AdverseEvent(**data) impl_adverseevent_1(inst2)
35.8
80
0.702895
248
1,969
5.516129
0.5
0.116959
0.049708
0.059211
0.113304
0.046784
0
0
0
0
0
0.052153
0.162519
1,969
54
81
36.462963
0.777441
0.169629
0
0
0
0
0.234161
0.031056
0
0
0
0
0.529412
1
0.058824
false
0
0.088235
0
0.147059
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
51fdc866742d67c3e351348526ab6d8be86c0161
473
py
Python
pins/pins.py
evarga/composite-decomposition
07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5
[ "MIT" ]
null
null
null
pins/pins.py
evarga/composite-decomposition
07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5
[ "MIT" ]
null
null
null
pins/pins.py
evarga/composite-decomposition
07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5
[ "MIT" ]
null
null
null
from math import sqrt def num_pins_full_row(n: int, k: int) -> int: return (n // k + 1) * k + n % k + (n % k > 0) if n > 0 else 0 def num_pins_square(n: int, k: int) -> int: m = int(sqrt(n)) used_pins = (m + 1)**2 n -= m * m if 0 < n <= m: used_pins += n + 1 elif n > m: used_pins += n + 2 return used_pins if m > 0 else 0 data = tuple(map(int, input().split())) print(min(num_pins_full_row(*data), num_pins_square(*data)))
23.65
65
0.541226
90
473
2.688889
0.322222
0.115702
0.082645
0.115702
0.181818
0
0
0
0
0
0
0.032836
0.291755
473
20
66
23.65
0.689552
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.071429
0.071429
0.357143
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
51febf22b0facd552d35748a11b39de2fc7adf8c
265
py
Python
poezio/ui/consts.py
hrnciar/poezio
12b8af11df35dda535412b0c02ba792890095a7d
[ "Zlib" ]
50
2015-02-11T12:00:25.000Z
2022-01-18T05:26:40.000Z
poezio/ui/consts.py
hrnciar/poezio
12b8af11df35dda535412b0c02ba792890095a7d
[ "Zlib" ]
3
2017-11-27T20:55:42.000Z
2020-03-20T18:05:53.000Z
poezio/ui/consts.py
hrnciar/poezio
12b8af11df35dda535412b0c02ba792890095a7d
[ "Zlib" ]
15
2015-04-22T14:33:36.000Z
2021-09-29T21:33:50.000Z
from datetime import datetime FORMAT_CHAR = '\x19' # These are non-printable chars, so they should never appear in the input, # I guess. But maybe we can find better chars that are even less risky. FORMAT_CHARS = '\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x1A'
37.857143
74
0.754717
48
265
4.125
0.895833
0
0
0
0
0
0
0
0
0
0
0.10177
0.14717
265
6
75
44.166667
0.774336
0.535849
0
0
0
0.333333
0.433333
0.4
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
51ffbfd3d872b709f2360f5964f2dd0857b13249
462
py
Python
src/glitchygames/movement/horizontal.py
terrysimons/ghettogames
23773119d1994251b43c42db39c1c99c08386c24
[ "BSD-3-Clause" ]
1
2019-07-06T02:01:27.000Z
2019-07-06T02:01:27.000Z
src/glitchygames/movement/horizontal.py
terrysimons/ghettogames
23773119d1994251b43c42db39c1c99c08386c24
[ "BSD-3-Clause" ]
4
2021-12-31T04:18:01.000Z
2022-03-29T13:40:32.000Z
src/glitchygames/movement/horizontal.py
terrysimons/glitchygames
23773119d1994251b43c42db39c1c99c08386c24
[ "BSD-3-Clause" ]
1
2019-07-12T19:41:09.000Z
2019-07-12T19:41:09.000Z
""" Horizontal: Adds movement functions along the horizontal (X) axis to a game object """ class Horizontal: def __init__(self, speed): self.speed = speed self.current_speed = self.speed.x def _change_speed(self, value): self.current_speed = value def left(self): self._change_speed(-self.speed.x) def right(self): self._change_speed(self.speed.x) def stop(self): self._change_speed(0)
18.48
70
0.640693
62
462
4.548387
0.387097
0.191489
0.198582
0.159574
0.29078
0.22695
0.22695
0.22695
0
0
0
0.002899
0.253247
462
24
71
19.25
0.814493
0.177489
0
0
0
0
0
0
0
0
0
0
0
1
0.416667
false
0
0
0
0.5
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
51ffc4f6b6d23170b73a6eacebc5e2ad88e94b17
403
py
Python
utilities/py/factories/factory.py
ItsSeaJay/phphp
c430c1d03fb88a6a94e07b84eba3905d8090cc8e
[ "MIT" ]
null
null
null
utilities/py/factories/factory.py
ItsSeaJay/phphp
c430c1d03fb88a6a94e07b84eba3905d8090cc8e
[ "MIT" ]
null
null
null
utilities/py/factories/factory.py
ItsSeaJay/phphp
c430c1d03fb88a6a94e07b84eba3905d8090cc8e
[ "MIT" ]
null
null
null
import json class Factory: def __init__(self): pass @staticmethod def create(self): pass @staticmethod def get_template(self): pass def get_config(self): with open('json/config.json', 'r') as file: # Convert the contents of the file to a Python dictionary config = json.loads(file.read()) return config
20.15
69
0.575682
48
403
4.708333
0.604167
0.106195
0.176991
0.20354
0
0
0
0
0
0
0
0
0.342432
403
20
70
20.15
0.85283
0.136476
0
0.357143
0
0
0.048991
0
0
0
0
0
0
1
0.285714
false
0.214286
0.071429
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
3
a4011bd0d4b895040b6b7aa5271e8dea8a377f95
1,248
py
Python
NREL.py
gregario/NRELDownloader
c098762be22979ff6901079561694ab90206afa3
[ "BSD-3-Clause" ]
null
null
null
NREL.py
gregario/NRELDownloader
c098762be22979ff6901079561694ab90206afa3
[ "BSD-3-Clause" ]
null
null
null
NREL.py
gregario/NRELDownloader
c098762be22979ff6901079561694ab90206afa3
[ "BSD-3-Clause" ]
1
2018-08-07T06:52:45.000Z
2018-08-07T06:52:45.000Z
#coding: utf-8 # Configuration file for NREL data call # This file sets up the calls for data from NREL. # 26/05/2016 Greg Jackson AESE Labs Imperial College from datetime import date, datetime, timedelta debug = True # Location variables, determines locations from which calls will be made # locations = [("New York",40.7127837, -74.0059413),("Haiti", 18.5790242, -72.3544683 ),("Seattle", 47.6147628,-122.4759903 ),("Toronto", 43.7181557,-79.5181432 ),("Brazilia",-15.7217175,-48.0783247)] locations = [("New York",40.7127837, -74.0059413)] #Set collection variables api_key= # API Key here attributes = 'ghi,dhi,dni,wind_speed_10m_nwp,surface_air_temperature_nwp,solar_zenith_angle,clearsky_dhi,clearsky_dni,clearsky_ghi,cloud_type,dew_point,fill_flag,surface_pressure_background,surface_relative_humidity_nwp,wind_direction_10m_nwp,total_precipitable_water_nwp' years = [1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014] interval = '30' utc = 'false' your_name = # Name here reason_for_use = 'beta+testing' your_affiliation = 'Fantastic' your_email = #Email Here mailing_list = 'false' # Function calls for determining length of data collection start = datetime(1998,1,1) end = datetime(2015,1,1)
43.034483
272
0.77484
189
1,248
4.94709
0.73545
0.017112
0.034225
0.038503
0.072727
0.072727
0.072727
0
0
0
0
0.182469
0.104167
1,248
28
273
44.571429
0.653846
0.431891
0
0
0
0.066667
0.426934
0.368195
0
0
0
0
0
0
null
null
0
0.066667
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
a40166e450ee628d6a50ace1f3007f22ec4f1689
6,239
py
Python
blocking_utils.py
colebryant/DeepBlocker
e90bbe2c4fa75f53fccea20cbdebf71b9167584d
[ "BSD-3-Clause" ]
null
null
null
blocking_utils.py
colebryant/DeepBlocker
e90bbe2c4fa75f53fccea20cbdebf71b9167584d
[ "BSD-3-Clause" ]
null
null
null
blocking_utils.py
colebryant/DeepBlocker
e90bbe2c4fa75f53fccea20cbdebf71b9167584d
[ "BSD-3-Clause" ]
null
null
null
import pandas as pd
import numpy as np


def topK_neighbors_to_candidate_set(topK_neighbors):
    """Convert a 2D top-K neighbor matrix into a candidate-set DataFrame.

    The input has the form row i -> [a1, a2, a3], i.e. the top-K right-table
    neighbors of left tuple i. The result is a two-column DataFrame of the
    form (i, a1), (i, a2), (i, a3) with columns ltable_id and rtable_id.
    """
    topK_df = pd.DataFrame(topK_neighbors)
    topK_df["ltable_id"] = topK_df.index
    # Melt turns the K neighbor columns into one (ltable_id, value) pair per row.
    melted_df = pd.melt(topK_df, id_vars=["ltable_id"])
    melted_df["rtable_id"] = melted_df["value"]
    candidate_set_df = melted_df[["ltable_id", "rtable_id"]]
    return candidate_set_df


def thresholded_pairs_to_candidate_set(thresholded_pairs):
    """Merge a (left_ids, right_ids) array pair into a candidate-set DataFrame."""
    merged_arr = np.vstack((thresholded_pairs[0], thresholded_pairs[1])).T
    candidate_set_df = pd.DataFrame(merged_arr, columns=["ltable_id", "rtable_id"])
    return candidate_set_df


def compute_blocking_statistics(candidate_set_df, golden_df, left_df, right_df):
    """Compute precision/recall/CSSR statistics for a blocking candidate set.

    :param candidate_set_df: DataFrame with columns ltable_id, rtable_id.
    :param golden_df: ground-truth matches, same two columns.
    :param left_df: left input table (only its length is used).
    :param right_df: right input table (only its length is used).
    :returns: dict of summary statistics.
    """
    # Equi-join of candidate pairs against the golden matches gives the
    # true positives found by blocking.
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # False positives: candidate pairs whose left OR right id never appears
    # among the true positives.
    false_pos = candidate_set_df[
        ~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])
        | (~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))
    ]
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    # BUG FIX: precision previously divided by (TP + FP) while only guarding
    # len(golden_df) > 0, which could raise ZeroDivisionError when the
    # candidate set was empty but the golden set was not. Guard the actual
    # denominator (same pattern as compute_column_statistics below).
    precision_denom = len(merged_df) + len(false_pos)
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "golden_set_length": len(golden_df),
        "merged_set_length": len(merged_df),
        "false_positives_length": len(false_pos),
        "precision": len(merged_df) / precision_denom if precision_denom > 0 else "N/A",
        "recall": len(merged_df) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict


def compute_join_percentage(candidate_set_df, left_df, right_df):
    """Report how much of each table participates in the candidate set.

    Predicts "JOIN" when either side's participation exceeds THRESHOLD
    percent. Percentages are rounded to 3 decimals before scaling by 100.
    """
    THRESHOLD = 20
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    left_unique = candidate_set_df['ltable_id'].unique().shape[0]
    right_unique = candidate_set_df['rtable_id'].unique().shape[0]
    left_percent_join = 100 * round(left_unique / left_num_tuples, 3)
    right_percent_join = 100 * round(right_unique / right_num_tuples, 3)
    total_percent_join = 100 * round(
        (left_unique + right_unique) / (left_num_tuples + right_num_tuples), 3)
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "left_percent_join": f"{left_percent_join}%",
        # BUG FIX: "right_percent_join" was listed twice in this literal.
        "right_percent_join": f"{right_percent_join}%",
        "total_percent_join": f"{total_percent_join}%",
        "prediction": "JOIN" if max(left_percent_join, right_percent_join) > THRESHOLD else "NO JOIN",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict


def compute_column_statistics(table_names, candidate_set_df, golden_df, left_df, right_df):
    """Blocking statistics at column (schema-matching) level.

    Candidate ids index *columns* of the two tables; they are first mapped
    to "table.column" labels before being compared against the golden
    matches (which are assumed to already use that label format).
    """
    candidate_set_df = candidate_set_df.astype('str')
    candidate_set_df['ltable_id_table'] = candidate_set_df['ltable_id'].apply(
        lambda x: left_df.columns[int(x)])
    candidate_set_df['ltable_id_table'] = (
        table_names[0] + '.' + candidate_set_df['ltable_id_table'])
    candidate_set_df['rtable_id_table'] = candidate_set_df['rtable_id'].apply(
        lambda x: right_df.columns[int(x)])
    candidate_set_df['rtable_id_table'] = (
        table_names[1] + '.' + candidate_set_df['rtable_id_table'])
    candidate_set_df = candidate_set_df[['ltable_id_table', 'rtable_id_table']].rename(
        columns={'ltable_id_table': 'ltable_id', 'rtable_id_table': 'rtable_id'})
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # False positives, same definition as in compute_blocking_statistics.
    false_pos = candidate_set_df[
        ~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])
        | (~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))
    ]
    if len(golden_df) > 0 and (len(merged_df) + len(false_pos)) > 0:
        fp = float(len(merged_df)) / (len(merged_df) + len(false_pos))
    else:
        fp = "N/A"
    left_num_columns = len(left_df.columns)
    right_num_columns = len(right_df.columns)
    statistics_dict = {
        "left_table": table_names[0],
        "right_table": table_names[1],
        "left_num_columns": left_num_columns,
        "right_num_columns": right_num_columns,
        "candidate_set_length": len(candidate_set_df),
        "candidate_set": candidate_set_df,
        "golden_set_length": len(golden_df),
        "golden_set": golden_df,
        "merged_set_length": len(merged_df),
        "merged_set": merged_df,
        "false_positives_length": len(false_pos),
        "false_positives": false_pos,
        "precision": fp,
        "recall": float(len(merged_df)) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_columns * right_num_columns)
    }
    return statistics_dict


def process_files(folder_root):
    """Build matches.csv from DeepMatcher-style train/valid/test splits.

    Loads the three CSVs under ``folder_root``, keeps only rows labelled as
    matches (label == 1), and writes their (ltable_id, rtable_id) pairs to
    ``folder_root``/matches.csv.
    """
    frames = []
    for split in ("train", "valid", "test"):
        df = pd.read_csv(folder_root + "/" + split + ".csv")
        frames.append(df[df["label"] == 1])
    df = pd.concat(frames, ignore_index=True)
    df[["ltable_id", "rtable_id"]].to_csv(
        folder_root + "/matches.csv", header=True, index=False)
46.559701
179
0.702036
950
6,239
4.256842
0.184211
0.133531
0.131553
0.04451
0.56454
0.526212
0.481454
0.44634
0.369189
0.263106
0
0.014117
0.171181
6,239
133
180
46.909774
0.767937
0.150665
0
0.362637
0
0
0.197236
0.020254
0
0
0
0
0
1
0.065934
false
0
0.021978
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a4016fb77c28a57f0279db0ce2ae101971c1c395
4,316
py
Python
tests/test_models/test_place.py
lowercaselife/AirBnB_clone
503c2889a0e1f96dd6ddd497ce7649ca00589a24
[ "MIT" ]
null
null
null
tests/test_models/test_place.py
lowercaselife/AirBnB_clone
503c2889a0e1f96dd6ddd497ce7649ca00589a24
[ "MIT" ]
null
null
null
tests/test_models/test_place.py
lowercaselife/AirBnB_clone
503c2889a0e1f96dd6ddd497ce7649ca00589a24
[ "MIT" ]
null
null
null
#!/usr/bin/python3
"""Unittests for the Place model."""
import unittest
from models.place import Place
import datetime
import time


class TestPlace(unittest.TestCase):
    """Test suite covering Place instantiation, serialization and saving."""

    def test_place_class_membership_and_attributes(self):
        """A new Place is a Place and exposes every expected attribute."""
        obj = Place()
        self.assertIsNotNone(obj.id)
        self.assertIsNotNone(obj.created_at)
        self.assertIsNotNone(obj.updated_at)
        self.assertIsInstance(obj, Place)
        for attr in ("city_id", "user_id", "name", "description",
                     "number_rooms", "number_bathrooms", "max_guest",
                     "price_by_night", "latitude", "longitude",
                     "amenity_ids"):
            self.assertIsNotNone(getattr(obj, attr))

    def test_place_attr_type(self):
        """Every Place attribute carries its documented type."""
        obj = Place()
        self.assertIsInstance(obj.id, str)
        self.assertEqual(len(obj.id), 36)
        expected = {
            "created_at": datetime.datetime,
            "updated_at": datetime.datetime,
            "city_id": str,
            "user_id": str,
            "name": str,
            "description": str,
            "number_rooms": int,
            "number_bathrooms": int,
            "max_guest": int,
            "price_by_night": int,
            "latitude": float,
            "longitude": float,
            "amenity_ids": list,
        }
        for attr, kind in expected.items():
            self.assertIsInstance(getattr(obj, attr), kind)

    def test_place_updated_at_matches_created_at_initialization(self):
        """On creation, updated_at equals created_at."""
        obj = Place()
        self.assertEqual(obj.updated_at, obj.created_at)

    def test_place_str_method(self):
        """__str__ yields '[Place] (<id>) <dict>'."""
        obj = Place()
        rendered = obj.__str__()
        self.assertIsInstance(rendered, str)
        self.assertEqual(rendered[:7], '[Place]')
        self.assertEqual(rendered[8:46], '({})'.format(obj.id))
        self.assertDictEqual(eval(rendered[47:]), obj.__dict__)

    def test_place_save_method(self):
        """save() refreshes the updated_at timestamp."""
        obj = Place()
        time.sleep(0.0001)
        obj.save()
        self.assertNotEqual(obj.updated_at, obj.created_at)

    def test_place_to_dict_method(self):
        """to_dict() serializes id, class name and ISO-format timestamps."""
        obj = Place()
        serialized = obj.to_dict()
        self.assertIsInstance(serialized, dict)
        self.assertEqual(serialized['id'], obj.id)
        self.assertEqual(serialized['__class__'], type(obj).__name__)
        self.assertEqual(
            serialized['created_at'], obj.created_at.isoformat())
        self.assertEqual(
            serialized['updated_at'], obj.updated_at.isoformat())
        self.assertIsInstance(obj.created_at, datetime.datetime)
        self.assertIsInstance(obj.updated_at, datetime.datetime)

    def test_place_dict_to_instance_with_kwargs(self):
        """Place(**place.to_dict()) clones the instance state."""
        obj = Place()
        obj.name = "Betty"
        obj.number = 972
        snapshot = obj.to_dict()
        clone = Place(**snapshot)
        clone_snapshot = clone.to_dict()
        self.assertFalse(clone is obj)
        self.assertDictEqual(clone_snapshot, snapshot)

    def test_place_dict_to_instance_with_empty_kwargs(self):
        """Place(**{}) builds a fresh, fully initialized instance."""
        kwargs = {}
        fresh = Place(**kwargs)
        fresh.to_dict()
        self.assertIsInstance(fresh, Place)
        self.assertIsNotNone(fresh.id)
        self.assertIsNotNone(fresh.created_at)
        self.assertIsNotNone(fresh.updated_at)


if __name__ == '__main__':
    unittest.main()
39.962963
71
0.686284
501
4,316
5.646707
0.193613
0.141393
0.167904
0.049487
0.302934
0.198303
0.189466
0.168257
0.138565
0.110286
0
0.00499
0.210612
4,316
107
72
40.336449
0.82536
0.091983
0
0.223529
0
0
0.014219
0
0
0
0
0
0.576471
1
0.094118
false
0
0.047059
0
0.152941
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
a401b7bfe3f33a4fcdd4603c34778103819c8259
1,989
py
Python
project/simulation/naive/iterated_prisoners.py
horken7/game-theory
c5484e6c338646e8143e90290efdc07acf397f22
[ "MIT" ]
null
null
null
project/simulation/naive/iterated_prisoners.py
horken7/game-theory
c5484e6c338646e8143e90290efdc07acf397f22
[ "MIT" ]
null
null
null
project/simulation/naive/iterated_prisoners.py
horken7/game-theory
c5484e6c338646e8143e90290efdc07acf397f22
[ "MIT" ]
null
null
null
# coding: utf-8 import numpy as np import pandas as pd import matplotlib.pyplot as plt both_coorporate_utility = 3 both_defect_utility = 1 looser_utility = 0 winner_utility = 3 a_resources = 2 b_resources = 2 a_actions = [] b_actions = [] a_utility = [] b_utility = [] rounds = 20 # Defect: action 0 # Cooperate: action 1 def evaluate_strategy(a, b): if(a == 1 and b == 1): # both coorporate return(both_coorporate_utility, both_coorporate_utility) elif(a == 1 and b == 0): # a coorporate, b defect return(looser_utility, winner_utility) elif(a == 0 and b == 1): # a defect, be coorporate return(winner_utility, looser_utility) elif(a == 0 and b == 0): # both defect return(both_defect_utility, both_defect_utility) def tit_for_tat(me, opponent, t): if(t == 0): return(1) return(opponent[t-1]) # play the game the defined amount of rounds for t in range(rounds): a_strategy = tit_for_tat(a_actions, b_actions, t) b_strategy = round(np.random.rand()) # random strategy a_actions.append(a_strategy) b_actions.append(b_strategy) [a_result, b_result] = evaluate_strategy(a_strategy, b_strategy) a_utility.append(a_result) b_utility.append(b_result) ax = plt.subplot(1,1,1) ax.plot(np.linspace(1,len(a_utility), len(a_utility)), a_utility, label='Tit for tat') ax.plot(np.linspace(1,len(b_utility), len(b_utility)), b_utility, label='Random') ax.set_title('Iteraded prisoners') ax.set_xlabel('Iterations') ax.set_ylabel('Utility') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, labels) plt.show() ax = plt.subplot(1,1,1) ax.plot(np.linspace(1,len(a_actions), len(a_actions)), a_actions, label='Tit for tat') ax.plot(np.linspace(1,len(b_actions), len(b_actions)), b_actions, label='Random') ax.set_title('Iteraded prisoners') ax.set_xlabel('Iterations') ax.set_ylabel('Action') handles, labels = ax.get_legend_handles_labels() ax.legend(handles, labels) plt.show()
26.52
86
0.711413
322
1,989
4.177019
0.232919
0.035688
0.026766
0.047584
0.331599
0.331599
0.30632
0.30632
0.30632
0.30632
0
0.019059
0.155857
1,989
74
87
26.878378
0.782013
0.092509
0
0.226415
0
0
0.057446
0
0
0
0
0
0
1
0.037736
false
0
0.056604
0
0.09434
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a403cb1eb639ba6d23d5ad19b32afabf17e2a3db
844
py
Python
higher_lower/__init__.py
simonw/higher-lower
436810573bfcb0175738b6636b6b4d790b81183b
[ "Apache-2.0" ]
2
2021-02-16T08:45:24.000Z
2021-02-22T01:30:29.000Z
higher_lower/__init__.py
simonw/higher-lower
436810573bfcb0175738b6636b6b4d790b81183b
[ "Apache-2.0" ]
1
2021-02-16T07:17:21.000Z
2021-02-16T19:39:12.000Z
higher_lower/__init__.py
simonw/higher-lower
436810573bfcb0175738b6636b6b4d790b81183b
[ "Apache-2.0" ]
null
null
null
from enum import Enum class ActualIs(Enum): HIGHER = 1 MATCH = 0 LOWER = -1 def higher_lower(min_value, max_value, callback): assert isinstance(max_value, int) assert isinstance(min_value, int) assert max_value > min_value candidate = midpoint(min_value, max_value) while True: result = callback(candidate) if result is ActualIs.MATCH: return candidate elif result is ActualIs.LOWER: # lower max_value = candidate candidate = midpoint(min_value, candidate) elif result is ActualIs.HIGHER: # higher min_value = candidate candidate = midpoint(candidate, max_value) else: assert False, "Should be a ActualIs enum constant" def midpoint(x, y): return x + ((y - x) // 2)
25.575758
62
0.61019
100
844
5.02
0.36
0.095618
0.101594
0.063745
0.115538
0
0
0
0
0
0
0.006944
0.317536
844
32
63
26.375
0.864583
0.014218
0
0
0
0
0.041013
0
0
0
0
0
0.166667
1
0.083333
false
0
0.041667
0.041667
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a405507dac4880c95cf65af8bea272bbc90ef96d
4,819
py
Python
satcfe/resposta/enviardadosvenda.py
danielgoncalves/satcfe
b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f
[ "Apache-2.0" ]
38
2015-05-25T02:57:16.000Z
2022-01-18T21:01:49.000Z
satcfe/resposta/enviardadosvenda.py
danielgoncalves/satcfe
b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f
[ "Apache-2.0" ]
15
2015-08-19T13:30:46.000Z
2022-01-19T22:34:17.000Z
satcfe/resposta/enviardadosvenda.py
danielgoncalves/satcfe
b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f
[ "Apache-2.0" ]
13
2015-05-07T01:10:12.000Z
2022-02-04T14:30:01.000Z
# -*- coding: utf-8 -*- # # satcfe/resposta/enviardadosvenda.py # # Copyright 2015 Base4 Sistemas Ltda ME # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import xml.etree.ElementTree as ET from decimal import Decimal from io import StringIO from builtins import str as text from satcomum.ersat import dados_qrcode from ..excecoes import ExcecaoRespostaSAT from ..util import as_datetime from ..util import base64_to_str from .padrao import RespostaSAT from .padrao import analisar_retorno EMITIDO_COM_SUCESSO = '06000' class RespostaEnviarDadosVenda(RespostaSAT): """Lida com as respostas da função ``EnviarDadosVenda`` (veja o método :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`). Os atributos esperados em caso de sucesso, são: .. sourcecode:: text numeroSessao (int) EEEEE (text) CCCC (text) mensagem (text) cod (text) mensagemSEFAZ (text) arquivoCFeSAT (text) timeStamp (datetime.datetime) chaveConsulta (text) valorTotalCFe (decimal.Decimal) CPFCNPJValue (text) assinaturaQRCODE (text) Em caso de falha, são esperados apenas os atributos: .. sourcecode:: text numeroSessao (int) EEEEE (text) CCCC (text) mensagem (text) cod (text) mensagemSEFAZ (text) Finalmente, como último recurso, a resposta poderá incluir apenas os atributos padrão, conforme descrito na constante :attr:`~satcfe.resposta.padrao.RespostaSAT.CAMPOS`. .. 
note:: Aqui, ``text`` diz respeito à um objeto ``unicode`` (Python 2) ou ``str`` (Python 3). Veja ``builtins.str`` da biblioteca ``future``. """ def xml(self): """Retorna o XML do CF-e-SAT decodificado de Base64. :rtype: str """ if self._sucesso(): return base64_to_str(self.arquivoCFeSAT) else: raise ExcecaoRespostaSAT(self) def qrcode(self): """Resulta nos dados que compõem o QRCode. :rtype: str """ if self._sucesso(): tree = ET.parse(StringIO(self.xml())) return dados_qrcode(tree) else: raise ExcecaoRespostaSAT(self) def _sucesso(self): return self.EEEEE == EMITIDO_COM_SUCESSO @staticmethod def analisar(retorno): """Constrói uma :class:`RespostaEnviarDadosVenda` a partir do retorno informado. :param str retorno: Retorno da função ``EnviarDadosVenda``. """ resposta = analisar_retorno( retorno, funcao='EnviarDadosVenda', classe_resposta=RespostaEnviarDadosVenda, campos=( ('numeroSessao', int), ('EEEEE', text), ('CCCC', text), ('mensagem', text), ('cod', text), ('mensagemSEFAZ', text), ('arquivoCFeSAT', text), ('timeStamp', as_datetime), ('chaveConsulta', text), ('valorTotalCFe', Decimal), ('CPFCNPJValue', text), ('assinaturaQRCODE', text), ), campos_alternativos=[ # se a venda falhar apenas os primeiros seis campos # especificados na ER deverão ser retornados... ( ('numeroSessao', int), ('EEEEE', text), ('CCCC', text), ('mensagem', text), ('cod', text), ('mensagemSEFAZ', text), ), # por via das dúvidas, considera o padrão de campos, # caso não haja nenhuma coincidência... RespostaSAT.CAMPOS, ] ) if resposta.EEEEE not in (EMITIDO_COM_SUCESSO,): raise ExcecaoRespostaSAT(resposta) return resposta
31.703947
76
0.564225
469
4,819
5.716418
0.45629
0.02238
0.02984
0.035808
0.23536
0.131294
0.131294
0.131294
0.131294
0.131294
0
0.007334
0.349243
4,819
151
77
31.913907
0.847577
0.426645
0
0.31746
0
0
0.073622
0
0
0
0
0.006623
0
1
0.063492
false
0
0.206349
0.015873
0.349206
0.015873
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a405cb29a4a6d5faed09ea2ed20051e186fb8ddd
100
py
Python
src/apps/songs_artists/apps.py
forever-Agriculture/lyrics_site
9acac576974578ac52b9e32a760089c8c95d3e6d
[ "BSD-3-Clause" ]
1
2017-05-05T12:07:31.000Z
2017-05-05T12:07:31.000Z
src/apps/songs_artists/apps.py
forever-Agriculture/lyrics_site
9acac576974578ac52b9e32a760089c8c95d3e6d
[ "BSD-3-Clause" ]
7
2020-02-11T23:57:39.000Z
2022-01-13T00:42:36.000Z
src/apps/songs_artists/apps.py
forever-Agriculture/lyrics_site
9acac576974578ac52b9e32a760089c8c95d3e6d
[ "BSD-3-Clause" ]
null
null
null
from django.apps import AppConfig class SongsArtistsConfig(AppConfig): name = 'songs_artists'
16.666667
36
0.78
11
100
7
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.15
100
5
37
20
0.905882
0
0
0
0
0
0.13
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
a406c16a052bf957c0176f5968826a2acaf62234
977
py
Python
setup.py
hacklabza/arnold
d51f6b751ce6530650555cd33bf707f00b60af59
[ "BSD-3-Clause" ]
2
2021-08-20T05:19:37.000Z
2022-01-11T09:39:39.000Z
setup.py
hacklabza/arnold
d51f6b751ce6530650555cd33bf707f00b60af59
[ "BSD-3-Clause" ]
null
null
null
setup.py
hacklabza/arnold
d51f6b751ce6530650555cd33bf707f00b60af59
[ "BSD-3-Clause" ]
null
null
null
from setuptools import setup, find_packages description_files = ['README.md', 'AUTHORS.md', 'CHANGELOG.md'] setup( name="arnold", description="RPi 4 Based Robotic Platform", long_description="".join([open(f, 'r').read() for f in description_files]), version="0.0.1", author='Hacklab', author_email="dev@hacklab.co.za", license="BSD", url="http://github.com/hacklabza/arnold", packages=find_packages(), dependency_links=[], install_requires=list(open('requirements.txt', 'r').read().splitlines()), classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent" ], zip_safe=False, include_package_data=True, entry_points={'console_scripts': ['arnold = arnold.cli:cli']} )
32.566667
79
0.637666
113
977
5.39823
0.681416
0.12459
0.163934
0.170492
0
0
0
0
0
0
0
0.014013
0.19652
977
29
80
33.689655
0.763057
0
0
0
0
0
0.414534
0
0
0
0
0
0
1
0
false
0
0.038462
0
0.038462
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a40784a5c0a876434055151e359d7b556dab11e3
769
py
Python
spaweb/migrations/0010_auto_20201120_2321.py
MokkoFm/formula-spa-django
c1149d61af833f4db2a6ccd483d357462c989765
[ "MIT" ]
null
null
null
spaweb/migrations/0010_auto_20201120_2321.py
MokkoFm/formula-spa-django
c1149d61af833f4db2a6ccd483d357462c989765
[ "MIT" ]
null
null
null
spaweb/migrations/0010_auto_20201120_2321.py
MokkoFm/formula-spa-django
c1149d61af833f4db2a6ccd483d357462c989765
[ "MIT" ]
1
2020-11-12T13:36:36.000Z
2020-11-12T13:36:36.000Z
# Generated by Django 3.1.3 on 2020-11-20 21:21 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('spaweb', '0009_product_number_users'), ] operations = [ migrations.RemoveField( model_name='product', name='number_users', ), migrations.RemoveField( model_name='product', name='category', ), migrations.AddField( model_name='product', name='category', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='category_products', to='spaweb.productcategory', verbose_name='категория'), ), ]
27.464286
182
0.616385
79
769
5.848101
0.531646
0.051948
0.103896
0.12987
0.255411
0.177489
0
0
0
0
0
0.033688
0.26658
769
27
183
28.481481
0.785461
0.058518
0
0.47619
1
0
0.177285
0.065097
0
0
0
0
0
1
0
false
0
0.095238
0
0.238095
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a4080e0aba5ecd64976ae2135ad806d510892f8d
93
py
Python
webapp/MemberApp/apps.py
AdaFactor/RamaProject
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
[ "MIT" ]
null
null
null
webapp/MemberApp/apps.py
AdaFactor/RamaProject
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
[ "MIT" ]
7
2018-02-16T11:18:24.000Z
2019-04-23T17:49:04.000Z
webapp/MemberApp/apps.py
AdaFactor/RamaProject
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
[ "MIT" ]
1
2018-01-29T05:15:13.000Z
2018-01-29T05:15:13.000Z
from django.apps import AppConfig class MemberappConfig(AppConfig): name = 'MemberApp'
15.5
33
0.763441
10
93
7.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.16129
93
5
34
18.6
0.910256
0
0
0
0
0
0.096774
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
a4097c2af9c121f0f61a10affe12d058da5aad64
2,769
py
Python
bocadillo_cli/helpers.py
bocadilloproject/bocadillo-cli
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
[ "MIT" ]
6
2019-04-17T17:07:46.000Z
2020-08-09T07:37:34.000Z
bocadillo_cli/helpers.py
bocadilloproject/bocadillo-cli
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
[ "MIT" ]
10
2019-04-17T21:27:46.000Z
2019-06-17T05:45:51.000Z
bocadillo_cli/helpers.py
bocadilloproject/bocadillo-cli
f11ec438504eb2edd3c4e8f5d2992e804b3da6b0
[ "MIT" ]
1
2019-05-12T17:32:45.000Z
2019-05-12T17:32:45.000Z
import pathlib
import pkgutil
import typing
from contextlib import contextmanager

import click
from jinja2 import Template

from . import formatutils as fmt


class Templates:
    """Renders the Jinja templates bundled with the package against a
    fixed context."""

    def __init__(self, context: dict):
        self.context = context

    @staticmethod
    def _get(name: str) -> Template:
        # Template bytes are shipped inside the bocadillo_cli package data.
        location = str(pathlib.Path("templates", name))
        raw: bytes = pkgutil.get_data("bocadillo_cli", location)
        if raw is None:
            raise ValueError(f"Template not found: {name}")
        return Template(raw.decode("utf-8"))

    def render(self, name: str) -> str:
        """Render template ``<name>.jinja`` with the stored context."""
        return self._get(f"{name}.jinja").render(self.context)


class Writer:
    """Writes rendered files and directories, honouring dry-run and
    no-input modes."""

    CREATE = fmt.success("CREATE")
    SKIP = fmt.muted("SKIP")

    def __init__(self, dry: bool, no_input: bool, templates: Templates):
        self.dry = dry
        self.no_input = no_input
        self.templates = templates
        self.root = None

    def mkdir(self, path: pathlib.Path, **kwargs):
        """Create ``path`` (unless dry-run) and echo what happened."""
        action = self.SKIP if path.exists() else self.CREATE
        if not self.dry:
            path.mkdir(**kwargs)
        click.echo(f"{action} {path} {fmt.muted('directory')}")

    def writefile(self, path: pathlib.Path, content: str):
        """Write ``content`` to ``path``, asking before overwriting an
        existing file (skipped silently in no-input mode)."""
        skip = path.exists() and (
            self.no_input
            or not click.confirm(
                fmt.pre_warn(
                    f"File {fmt.code(path)} already exists. Overwrite?"
                )
            )
        )
        if skip:
            nbytes = None
            action = self.SKIP
        else:
            if not self.dry:
                with open(str(path), "w", encoding="utf-8") as f:
                    f.write(content)
                    f.write("\n")
            nbytes = len(content.encode())
            action = self.CREATE
        suffix = fmt.muted(f" ({nbytes} bytes)") if nbytes else ""
        click.echo(f"{action} {path}{suffix}")

    def writetemplate(self, *names: str, root: pathlib.Path = None) -> None:
        """Render each named template and write it under ``root`` (falls
        back to ``self.root``, which must then be set)."""
        if root is None:
            assert self.root is not None
            root = self.root
        for name in names:
            rendered = self.templates.render(name)
            self.writefile(pathlib.Path(root, name), rendered)

    @contextmanager
    def cd(self, directory: pathlib.Path):
        """Temporarily make ``directory`` (created if needed) the write
        root."""
        self.mkdir(directory, exist_ok=True)
        self.root = directory
        try:
            yield self
        finally:
            self.root = None

    def generate(self, config: typing.Dict[str, typing.List[str]]):
        """Walk a ``{directory: [filenames]}`` mapping and write every
        rendered template."""
        for directory, filenames in config.items():
            with self.cd(directory):
                for filename in filenames:
                    self.writetemplate(filename)
29.774194
76
0.562658
320
2,769
4.80625
0.309375
0.042913
0.029259
0.019506
0.026008
0
0
0
0
0
0
0.001613
0.328277
2,769
92
77
30.097826
0.825269
0
0
0.131579
0
0
0.079812
0.017335
0
0
0
0
0.013158
1
0.118421
false
0
0.092105
0.013158
0.289474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a409daf46a3fa04de8f0e145bfe66c2e1af54b0c
1,637
py
Python
extras/cleanup-meshes.py
RQWorldblender/io_scene_numdlb
8a67c092f1aef9536e6de7cc7665dec77c0d52c6
[ "MIT" ]
2
2020-12-04T14:55:09.000Z
2021-03-04T02:11:27.000Z
extras/cleanup-meshes.py
RQWorldblender/io_scene_numdlb
8a67c092f1aef9536e6de7cc7665dec77c0d52c6
[ "MIT" ]
null
null
null
extras/cleanup-meshes.py
RQWorldblender/io_scene_numdlb
8a67c092f1aef9536e6de7cc7665dec77c0d52c6
[ "MIT" ]
1
2021-03-04T02:11:42.000Z
2021-03-04T02:11:42.000Z
import bpy, os # Select Expressions mesh_expr = ["*Blink*", "*Attack*", "*Ouch*", "*Talk*", "*Capture*", "*Ottotto*", "*Escape*", "*Half*", "*Pattern*", "*Result*", "*Harf*","*Hot*", "*Heavy*", "*Voice*", "*Fura*", "*Throw*", "*Catch*", "*Cliff*", "*FLIP*", "*Bound*", "*Down*", "*Bodybig*", "*Final*", "*Result*", "*StepPose*", "*Sorori*", "*Fall*", "*Appeal*", "*DamageFlyFront*", "*CameraHit*"] # Make collections for each expressions bpy.ops.object.select_all(action='DESELECT') for exp in mesh_expr: bpy.ops.object.select_pattern(pattern=exp) selectNum = 0 for obj in bpy.data.objects: if obj.select_get(): selectNum += 1 print(exp + " -> " + obj.name) co = bpy.data.collections if selectNum > 0: if exp in co: collect = co[exp] else: collect = co.new(name=exp) bpy.context.view_layer.active_layer_collection.collection.children.link(collect) for obj in bpy.data.objects: if obj.select_get(): bpy.ops.collection.objects_remove_active() collect.objects.link(obj) collect.hide_viewport = True collect.hide_render = True bpy.ops.object.select_all(action='DESELECT') #bpy.ops.object.select_all(action='TOGGLE') #bpy.ops.object.select_pattern(pattern="*Openblink*") #bpy.ops.object.select_pattern(pattern="*FaceN*") # Change image filepaths to be relative to the Blender file for image in bpy.data.images: filename = os.path.basename(image.filepath) image.filepath = os.path.join("//", filename)
38.97619
362
0.596823
194
1,637
4.948454
0.469072
0.04375
0.075
0.1125
0.276042
0.276042
0.147917
0.075
0.075
0.075
0
0.002364
0.224801
1,637
41
363
39.926829
0.754137
0.156384
0
0.222222
0
0
0.189047
0
0
0
0
0
0
1
0
false
0
0.037037
0
0.037037
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
a40bad40cfcd7ca531100b5e277511691257c17c
2,836
py
Python
inst/python/ee_manage.py
MartinHoldrege/rgee
8534e547884198f9375428dfbb7f507e3c406bf0
[ "Apache-2.0" ]
null
null
null
inst/python/ee_manage.py
MartinHoldrege/rgee
8534e547884198f9375428dfbb7f507e3c406bf0
[ "Apache-2.0" ]
null
null
null
inst/python/ee_manage.py
MartinHoldrege/rgee
8534e547884198f9375428dfbb7f507e3c406bf0
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import datetime import os import ee """ Generate a table with all the tasks that are running (or has finished) in GEE. >>> genreport() >>> quota(ID) This module, which is used for checking earth engine quota, were obtained from geeup: a Python CLI for Earth Engine Uploads with Selenium Support <https://github.com/samapriya/geeup>, the acknowledgement for this module should be always given to Samapriya Roy. This module is used in R/ee_manage.R """ def genreport(): """Generated report includes taskId, data time, task status and type Args: Examples: >>> genreport() """ taks_list = [] status = ee.data.getTaskList() for items in status: ttype = items["task_type"] tdesc = items["description"] tstate = items["state"] tid = items["id"] tcreate = datetime.datetime.fromtimestamp( items["creation_timestamp_ms"] / 1000 ).strftime("%Y-%m-%d %H:%M:%S") tstart = datetime.datetime.fromtimestamp( items["start_timestamp_ms"] / 1000 ).strftime("%Y-%m-%d %H:%M:%S") tupdate = datetime.datetime.fromtimestamp( items["update_timestamp_ms"] / 1000 ).strftime("%Y-%m-%d %H:%M:%S") tdiffstart = ( items["start_timestamp_ms"] / 1000 - items["creation_timestamp_ms"] / 1000 ) tdiffend = ( items["update_timestamp_ms"] / 1000 - items["start_timestamp_ms"] / 1000 ) try: error_message = items["error_message"] except: error_message = "NULL" dict_summary = { "tid": tid, "tstate": tstate, "tdesc": tdesc, "ttype": ttype, "tcreate": tcreate, "tdiffstart": tdiffstart, "tdiffend": tdiffend, "error_message": error_message, } taks_list.append(dict_summary) return taks_list """Function to return quota usage details for the asset root with the given ID. >>> humansize(nbytes) >>> quota(ID) This function, which is used for checking earth engine quota, were obtained from geeup: a Python CLI for Earth Engine Uploads with Selenium Support <https://github.com/samapriya/geeup>, the acknowledgement for these functions should be always given to Samapriya Roy. 
This function is used in R/ee_quota.R """ def quota(ID): """Print your earth engine quota quickly. Args: ID (str): The ID of the asset to check Examples: >>> quota('/users/csaybar') """ quota = ee.data.getAssetRootQuota(ID) total_msg = str(quota["asset_size"]["limit"]) used_msg = str(quota["asset_size"]["usage"]) # return 'Total Quota: %s \n Used Quota: %s' % (total_msg, used_msg) return [total_msg, used_msg]
28.36
86
0.616714
353
2,836
4.855524
0.371105
0.044924
0.06126
0.05951
0.394983
0.278296
0.278296
0.278296
0.235123
0.235123
0
0.013856
0.261989
2,836
99
87
28.646465
0.805065
0.119887
0
0.06383
1
0
0.197377
0.026234
0
0
0
0
0
1
0.042553
false
0
0.06383
0
0.148936
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a40db821cdd40995b507a0185d3e87cf108ec33d
3,104
py
Python
old/strip_3 - move to file server from Rawful.py
broxeph/ameryn
e1289c280ca865ec84625b712adc52c536b4b174
[ "MIT" ]
null
null
null
old/strip_3 - move to file server from Rawful.py
broxeph/ameryn
e1289c280ca865ec84625b712adc52c536b4b174
[ "MIT" ]
null
null
null
old/strip_3 - move to file server from Rawful.py
broxeph/ameryn
e1289c280ca865ec84625b712adc52c536b4b174
[ "MIT" ]
null
null
null
""" Strip silence from recorded audio, move to file server(s). (c) Ameryn Media LLC, 2015. All rights reserved. """ import os import datetime from ConfigParser import ConfigParser import shutil import pydub CONFIG_LOCATION = 'ameryn.ini' config = ConfigParser() config.read(CONFIG_LOCATION) # Input file parameters (name & location) overwrite_source = config.getboolean('strip', 'overwrite_source') output_suffix = config.getboolean('strip', 'output_suffix') input_path = config.get('strip', 'input_path') output_path = config.get('strip', 'output_path') silence_thresh = config.getint('strip', 'silence_thresh') # dBFS silence_chunk = config.getint('strip', 'silence_chunk') # Seconds recorded_archive = config.get('general', 'recorded_archive') def strip_file(input_filename): input_filename_fullpath = os.path.join(input_path, input_filename) if overwrite_source: output_filename = input_filename elif output_suffix: output_filename = input_filename.rsplit('.wav')[0]+'_stripped.wav' else: output_filename = input_filename output_filename_fullpath = os.path.join(output_path, output_filename) print input_filename_fullpath,'->', output_filename_fullpath audio = pydub.AudioSegment.from_wav(input_filename_fullpath) i = 0 new_audio = None done = False # Silence removal while not done: if i+silence_chunk > len(audio): # Last chunk current_chunk = audio[i:] done = True else: current_chunk = audio[i:i+silence_chunk] if current_chunk.dBFS > silence_thresh: if not new_audio: print 'NEWWWWWWWWW' new_audio = current_chunk else: new_audio += current_chunk print str(datetime.timedelta(milliseconds=i))+' - '+str(datetime.timedelta(milliseconds=i+silence_chunk))+': '+ \ str(round(current_chunk.dBFS, 2))+' dBFS' else: print str(datetime.timedelta(milliseconds=i))+' - '+str(datetime.timedelta(milliseconds=i+silence_chunk))+': '+ \ str(round(current_chunk.dBFS, 2))+' dBFS (Silence, below '+str(silence_thresh)+' dBFS)' i += silence_chunk # Export audio if new_audio: out_f = 
open(output_filename_fullpath, 'wb') new_audio.export(out_f, format='wav') else: silent_files.append(input_filename) # Move original .wav & .pkf files to Julius archive archive_path = os.path.join(recorded_archive, str(datetime.datetime.now().year) + '-' + str(datetime.datetime.now().month).zfill(2)) if not os.path.isdir(archive_path): os.makedirs(archive_path) shutil.move(input_filename_fullpath, os.path.join(archive_path, input_filename)) shutil.move(os.path.splitext(input_filename_fullpath)[0] + '.pkf', os.path.join(archive_path, os.path.splitext(input_filename)[0] + '.pkf')) silent_files = [] # Make input filename list if overwrite_source: output_path = input_path input_filename_list = [f for f in os.listdir(input_path) if f.endswith('.wav') and '_stripped' not in f] print 'Input filenames (folder):', input_filename_list # RUN THE TRAP for each in input_filename_list: print each strip_file(each) if silent_files: print print 'Silent files (re-record?):' for each in silent_files: print ' - ' + each
32
141
0.746778
432
3,104
5.145833
0.256944
0.099415
0.047233
0.05758
0.179037
0.128655
0.100765
0.100765
0.100765
0.100765
0
0.004065
0.128222
3,104
97
142
32
0.817443
0.058312
0
0.157143
0
0
0.105038
0
0
0
0
0
0
0
null
null
0
0.071429
null
null
0.128571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
cf8bc62feb8e0471e65e54c2a009471153f8ea88
636
py
Python
_sadm/listen/handlers/exec.py
jrmsdev/pysadm
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
[ "BSD-3-Clause" ]
1
2019-10-15T08:37:56.000Z
2019-10-15T08:37:56.000Z
_sadm/listen/handlers/exec.py
jrmsdev/pysadm
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
[ "BSD-3-Clause" ]
null
null
null
_sadm/listen/handlers/exec.py
jrmsdev/pysadm
0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com> # See LICENSE file. import json from bottle import request from _sadm import log from _sadm.listen.errors import error from _sadm.listen.webhook.repo.vcs.git import GitRepo __all__ = ['exech'] _taskman = { 'webhook.repo.git': GitRepo(), } def handle(task, action): log.debug("exec handle: %s %s" % (task, action)) taskman = _taskman.get(task, None) if taskman is None: raise error(500, "listen.exec task %s: no manager" % task) try: args = json.load(request.body) taskman.hook(action, args) except Exception as err: raise error(500, "%s" % err) return 'OK\n'
22.714286
60
0.709119
95
636
4.652632
0.578947
0.054299
0.063348
0
0
0
0
0
0
0
0
0.011215
0.158805
636
27
61
23.555556
0.814953
0.113208
0
0
0
0
0.135472
0
0
0
0
0
0
1
0.05
false
0
0.25
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf8d37dd7cc02823d08160694115cee5f64df311
255
py
Python
essentials/old_on_352.py
sonicrules1234/sonicbot
07a22d08bf86ed33dc715a800957aee3b45f3dde
[ "BSD-3-Clause" ]
1
2019-06-27T08:45:23.000Z
2019-06-27T08:45:23.000Z
essentials/old_on_352.py
sonicrules1234/sonicbot
07a22d08bf86ed33dc715a800957aee3b45f3dde
[ "BSD-3-Clause" ]
null
null
null
essentials/old_on_352.py
sonicrules1234/sonicbot
07a22d08bf86ed33dc715a800957aee3b45f3dde
[ "BSD-3-Clause" ]
null
null
null
import time minlevel = 1 arguments = ["self", "info"] keyword = "352" def main(self, info) : self.hostnames[info["words"][7]] = info["words"][5] self.whoislist[info["words"][7]] = info["words"][7] + "!" + info["words"][4] + "@" + info["words"][5]
31.875
105
0.576471
35
255
4.2
0.485714
0.367347
0.204082
0.285714
0.326531
0
0
0
0
0
0
0.046083
0.14902
255
7
106
36.428571
0.631336
0
0
0
0
0
0.168627
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.285714
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
cf8d4235cb878e12f5c58b025ed042e67c4a28fa
1,266
py
Python
app.py
Kaoushikkumarr/collab-service
87388d140e74fe18704e1e3bee4e53d64c63f1d2
[ "MIT" ]
null
null
null
app.py
Kaoushikkumarr/collab-service
87388d140e74fe18704e1e3bee4e53d64c63f1d2
[ "MIT" ]
null
null
null
app.py
Kaoushikkumarr/collab-service
87388d140e74fe18704e1e3bee4e53d64c63f1d2
[ "MIT" ]
null
null
null
import os from flask_migrate import Migrate from flask_restful import Api from flask import Flask from flask_cors import CORS from flask_sqlalchemy import SQLAlchemy from config_manager.config_manager import FileConfigManager from utils.request_controller import RequestsController app = Flask(__name__) CORS(app) api = Api(app) app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:p@192.168.43.141:5432/collaboration_db' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True if 'L2_CONFIG_PATH' in os.environ and os.environ['L2_CONFIG_PATH'] != 'None': configs = FileConfigManager(os.environ.get('L2_CONFIG_PATH')) else: print('No Configuration Manager Found') db = SQLAlchemy(app) db.init_app(app) migrate = Migrate(app, db) from routes import collaboration from models.model_collaboration import Collaboration_Model # Collaboration API Signatures api.add_resource(collaboration.CollaborationAPI, '/this_is_test_api_for_collaborate') # API Signature for Comment api.add_resource(collaboration.CommentReadUpdateDelete, '/comments') api.add_resource(collaboration.CommentWrite, '/comments') # API Signature for Feedback api.add_resource(collaboration.FeedbackWrite, '/feedback') api.add_resource(collaboration.FeedbackRead, '/feedback')
28.772727
102
0.814376
164
1,266
6.073171
0.414634
0.045181
0.070281
0.135542
0.070281
0
0
0
0
0
0
0.015707
0.094787
1,266
43
103
29.44186
0.853403
0.063981
0
0
0
0
0.218459
0.123624
0
0
0
0
0
1
0
false
0
0.37037
0
0.37037
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
cf8e095b2dfd6ed697713c2bc61fb4d1890cfd30
579
py
Python
EstruturaDeRepeticao/exercicio19.py
Nicolas-Wursthorn/exercicios-python-brasil
b2b564d48b519be04643636033ec0815e6d99ea1
[ "MIT" ]
null
null
null
EstruturaDeRepeticao/exercicio19.py
Nicolas-Wursthorn/exercicios-python-brasil
b2b564d48b519be04643636033ec0815e6d99ea1
[ "MIT" ]
null
null
null
EstruturaDeRepeticao/exercicio19.py
Nicolas-Wursthorn/exercicios-python-brasil
b2b564d48b519be04643636033ec0815e6d99ea1
[ "MIT" ]
null
null
null
# Altere o programa anterior para que ele aceite apenas números entre 0 e 1000 condition = True conjunto = [] while condition: numero = int(input("Digite os números do conjunto (Digite 0 para parar): ")) if numero == 0: break elif numero > 1000 or numero < 0: print("Digite somente números entre 0 e 1000.") else: conjunto.append(numero) print("Soma dos valores do conjunto: {}!".format(sum(conjunto))) print("O maior valor do conjunto: {}!".format(max(conjunto))) print("O menor valor do conjunto: {}!".format(min(conjunto)))
30.473684
80
0.659758
80
579
4.775
0.5375
0.104712
0.125654
0.073298
0.094241
0
0
0
0
0
0
0.037611
0.219344
579
19
81
30.473684
0.807522
0.131261
0
0
0
0
0.366534
0
0
0
0
0
0
1
0
false
0
0
0
0
0.307692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf8e7a839447ed7d56910db5a87efb42a19cb761
207
py
Python
study/aula04.py
RutyRibeiro/facial-recognition-python
c86d13fc7f9e17d86675f7337b70be8f805fcdda
[ "MIT" ]
2
2020-10-16T00:54:23.000Z
2021-11-11T02:05:32.000Z
study/aula04.py
RutyRibeiro/facial-recognition-python
c86d13fc7f9e17d86675f7337b70be8f805fcdda
[ "MIT" ]
null
null
null
study/aula04.py
RutyRibeiro/facial-recognition-python
c86d13fc7f9e17d86675f7337b70be8f805fcdda
[ "MIT" ]
null
null
null
import cv2 cam = cv2.VideoCapture(0) while True: camera,frame = cam.read() cv2.imshow('imagem camera', frame) if cv2.waitKey(1) == ord('f'): break cam.release() cv2.destroyAllWindows()
17.25
38
0.642512
28
207
4.75
0.714286
0.165414
0
0
0
0
0
0
0
0
0
0.042424
0.202899
207
12
39
17.25
0.763636
0
0
0
0
0
0.067308
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
cf8ef2b45d7d458a65d6c785888a76f8daf2c51b
750
py
Python
cogs/help.py
moisesjsanchez/anime-movie-discord-bot
2979dc28cc6250e56f713c2d6483aaaff7688176
[ "MIT" ]
null
null
null
cogs/help.py
moisesjsanchez/anime-movie-discord-bot
2979dc28cc6250e56f713c2d6483aaaff7688176
[ "MIT" ]
2
2021-05-03T04:48:46.000Z
2021-05-06T08:29:23.000Z
cogs/help.py
moisesjsanchez/anime-movie-discord-bot
2979dc28cc6250e56f713c2d6483aaaff7688176
[ "MIT" ]
null
null
null
import discord from discord.ext import commands class Help(commands.Cog): def __init__(self, client): self.client = client # settings up the custom help functions @commands.command() async def help(self, ctx): embed = discord.Embed( title='Fathom Chan', description="A bot for your Fathom anime film related needs. Below are a list of commands:", color=0xE69138) embed.add_field( name='.help', value='Calls up list of commands that user can perform', inline=False) embed.add_field( name='.movies', value='Fetchs current Fathom event anime movies playing', inline=False) await ctx.send(embed=embed) def setup(client): client.add_cog(Help(client))
28.846154
141
0.665333
100
750
4.92
0.58
0.04065
0.056911
0.069106
0
0
0
0
0
0
0
0.01049
0.237333
750
25
142
30
0.84965
0.049333
0
0.125
0
0
0.274262
0
0
0
0.011252
0
0
1
0.125
false
0
0.125
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf8fb65655bb2de90066c1c7dab90f53f736211b
2,255
py
Python
summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py
danieldeutsch/summarize
f36a86d58f381ff1f607f356dad3d6ef7b0e0224
[ "Apache-2.0" ]
15
2019-11-01T11:49:44.000Z
2021-01-19T06:59:32.000Z
summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py
CogComp/summary-cloze
b38e3e8c7755903477fd92a4cff27125cbf5553d
[ "Apache-2.0" ]
2
2020-03-30T07:54:01.000Z
2021-11-15T16:27:42.000Z
summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py
CogComp/summary-cloze
b38e3e8c7755903477fd92a4cff27125cbf5553d
[ "Apache-2.0" ]
3
2019-12-06T05:57:51.000Z
2019-12-11T11:34:21.000Z
import torch from allennlp.common.registrable import Registrable from typing import Tuple class CoverageMatrixAttention(torch.nn.Module, Registrable): """ The ``CoverageMatrixAttention`` computes a matrix of attention probabilities between the encoder and decoder outputs. The attention function has access to the cumulative probabilities that the attention has assigned to each input token previously. In addition to the attention probabilities, the function should return the coverage vectors which were used to compute the distribution at each time step as well as the new coverage vector which takes into account the function's computation. The module must compute the probabilities instead of the raw scores (like the ``MatrixAttention`` module does) because the coverage vector contains the accumulated probabilities. """ def forward(self, decoder_outputs: torch.Tensor, encoder_outputs: torch.Tensor, encoder_mask: torch.Tensor, coverage_vector: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Computes a matrix of attention scores and updates the coverage vector. Parameters ---------- decoder_outputs: (batch_size, num_decoder_tokens, hidden_dim) The decoder's outputs. encoder_outputs: (batch_size, num_encoder_tokens, hidden_dim) The encoder's outputs. encoder_mask: (batch_size, num_encoder_tokens) The encoder token mask. coverage_vector: (batch_size, num_encoder_tokens) The cumulative attention probability assigned to each input token thus far. Returns ------- torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens) The attention probabilities between each decoder and encoder hidden representations. torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens) The coverage vectors used to compute the corresponding attention probabilities. torch.Tensor: (batch_size, num_encoder_tokens) The latest coverage vector after computing """ raise NotImplementedError
45.1
98
0.701552
263
2,255
5.882129
0.34981
0.071105
0.054299
0.061409
0.229476
0.125404
0.071105
0.071105
0.071105
0.071105
0
0
0.244789
2,255
49
99
46.020408
0.908397
0.695344
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0.3
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cf8fc6f83e41245e2996c4f4fbbce410b6cabeff
861
py
Python
accelerator/tests/factories/program_partner_factory.py
masschallenge/django-accelerator
8af898b574be3b8335edc8961924d1c6fa8b5fd5
[ "MIT" ]
6
2017-06-14T19:34:01.000Z
2020-03-08T07:16:59.000Z
accelerator/tests/factories/program_partner_factory.py
masschallenge/django-accelerator
8af898b574be3b8335edc8961924d1c6fa8b5fd5
[ "MIT" ]
160
2017-06-20T17:12:13.000Z
2022-03-30T13:53:12.000Z
accelerator/tests/factories/program_partner_factory.py
masschallenge/django-accelerator
8af898b574be3b8335edc8961924d1c6fa8b5fd5
[ "MIT" ]
null
null
null
# MIT License # Copyright (c) 2017 MassChallenge, Inc. from __future__ import unicode_literals import swapper from factory import ( Sequence, SubFactory, ) from factory.django import DjangoModelFactory from accelerator.tests.factories.partner_factory import PartnerFactory from accelerator.tests.factories.program_factory import ProgramFactory from accelerator.tests.factories.program_partner_type_factory import ( ProgramPartnerTypeFactory ) ProgramPartner = swapper.load_model('accelerator', 'ProgramPartner') class ProgramPartnerFactory(DjangoModelFactory): class Meta: model = ProgramPartner program = SubFactory(ProgramFactory) partner = SubFactory(PartnerFactory) partner_type = SubFactory(ProgramPartnerTypeFactory) description = Sequence( lambda n: "Description of Program Partner #{0}".format(n))
27.774194
70
0.788618
84
861
7.940476
0.464286
0.077961
0.089955
0.130435
0.107946
0
0
0
0
0
0
0.006793
0.14518
861
30
71
28.7
0.899457
0.058072
0
0
0
0
0.074257
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.619048
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1