content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
# Copyright 2017 Regents of the University of California # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with # the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os, sys, time, copy, collections, math, json import numpy as np import scipy as sp import matplotlib from matplotlib import pyplot as plt import llops as yp # Custom scale bar object from matplotlib_scalebar.scalebar import ScaleBar # Libwallerlab imports from llops import display from llops import Roi
[ 2, 15069, 2177, 3310, 658, 286, 262, 2059, 286, 3442, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 17613, 11, 389, 10431, 2810, 326, 262, 1708, 3403, 389, 1138, 25, 198, 2, 198, 2, ...
3.819328
476
''' https://leetcode.com/contest/weekly-contest-150/problems/last-substring-in-lexicographical-order/ SA algorithm mostly copied from https://cp-algorithms.com/string/suffix-array.html Status: tle. probably py3 lists '''
[ 7061, 6, 198, 5450, 1378, 293, 316, 8189, 13, 785, 14, 3642, 395, 14, 45291, 12, 3642, 395, 12, 8628, 14, 1676, 22143, 14, 12957, 12, 7266, 8841, 12, 259, 12, 2588, 291, 17046, 12, 2875, 14, 198, 198, 4090, 11862, 4632, 18984, 422...
2.921053
76
from antlr4 import * from .antlr import GraphQLLexer, GraphQLListener, GraphQLParser from .codegen import CodegenTool, Class, String, ClassInstance, IfElse, If, Method, Expr, Variable import re from math import floor from datetime import datetime from .utils import strip_string_quotes, camel_case_to_snake_case, process_input_value_definition from .errors import ParsingError GraphQLParser = GraphQLParser.GraphQLParser graphene = 'graphene' built_in_scalars = [ 'Int', 'Float', 'String', 'Boolean', 'ID', 'Date', 'Datetime', 'Time' 'Decimal', 'JSONString', 'Base64', ]
[ 6738, 1885, 14050, 19, 1330, 1635, 198, 6738, 764, 415, 14050, 1330, 29681, 48, 3069, 1069, 263, 11, 29681, 48, 3069, 396, 877, 11, 29681, 9711, 46677, 198, 6738, 764, 8189, 5235, 1330, 6127, 5235, 25391, 11, 5016, 11, 10903, 11, 5016...
2.722467
227
from flask import Flask, request, jsonify,Blueprint from flask_marshmallow import Marshmallow from app.models import User, Group, Role from app import ma api = Blueprint('api', __name__) user_schema = UserSchema() users_schema = UserSchema(many=True) group_schema = GroupSchema() groups_schema = GroupSchema(many=True) role_schema = RoleSchema() roles_schema = RoleSchema(many=True) # endpoint to get user detail by id # endpoint to get group detail by id # endpoint to get group detail by id
[ 6738, 42903, 1330, 46947, 11, 2581, 11, 33918, 1958, 11, 14573, 4798, 198, 6738, 42903, 62, 76, 5406, 42725, 1330, 9786, 42725, 198, 6738, 598, 13, 27530, 1330, 11787, 11, 4912, 11, 20934, 198, 6738, 598, 1330, 17266, 628, 198, 15042, ...
3.210191
157
""" Setup script for samply """ from setuptools import setup import re extra_args = {} def get_property(prop, project): """ Helper function for retrieving properties from a project's __init__.py file @In, prop, string representing the property to be retrieved @In, project, string representing the project from which we will retrieve the property @Out, string, the value of the found property """ result = re.search( r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop), open(project + "/__init__.py").read(), ) return result.group(1) VERSION = get_property("__version__", "samply") def long_description(): """ Reads the README.rst file and extracts the portion tagged between specific LONG_DESCRIPTION comment lines. """ description = "" recording = False with open("README.rst") as f: for line in f: if "END_LONG_DESCRIPTION" in line: return description elif "LONG_DESCRIPTION" in line: recording = True continue if recording: description += line # Consult here: https://packaging.python.org/tutorials/distributing-packages/ setup( name="samply", packages=["samply"], version=VERSION, description="A library for computing samplings in arbitrary dimensions", long_description=long_description(), author="Dan Maljovec", author_email="maljovec002@gmail.com", license="BSD", test_suite="samply.tests", url="https://github.com/maljovec/samply", download_url="https://github.com/maljovec/samply/archive/" + VERSION + ".tar.gz", keywords=[""], # Consult here: https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Programming Language :: C++", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Mathematics", ], setup_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"], install_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"], python_requires=">=2.7, <4", )
[ 37811, 198, 220, 220, 220, 220, 220, 31122, 4226, 329, 264, 696, 306, 198, 37811, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 11748, 302, 628, 198, 26086, 62, 22046, 796, 23884, 628, 198, 4299, 651, 62, 26745, 7, 22930, 11, 1628, ...
2.489339
938
# -*- coding: utf-8 -*- # @Author: TD21forever # @Date: 2018-11-14 15:41:57 # @Last Modified by: TD21forever # @Last Modified time: 2018-11-15 16:50:48 file = open('input.txt','r')# # if __name__ == '__main__': while True: print("") print("\n1.\n2.\n3.\n4.\n5.\n6.\n") ans = "no" ans = input("5,55,2,3,3,1?,yesno:") if ans == 'yes': operate() else: print("\n") a = int(input("1.")) b = int(input("2.")) if b>80: b = int(input("80")) c = int(input("3.")) d = int(input("4.")) e = int(input("5.")) ff = int(input("6.")) operate(a,b,c,d,e,ff) f.close()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 2, 2488, 13838, 25, 13320, 2481, 754, 332, 201, 198, 2, 2488, 10430, 25, 220, 220, 2864, 12, 1157, 12, 1415, 1315, 25, 3901, 25, 3553, 201, 198, 2, 2488, 5956, ...
1.586275
510
from django.test import TestCase from authors.apps.authentication.models import ( User )
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 198, 6738, 7035, 13, 18211, 13, 41299, 3299, 13, 27530, 1330, 357, 198, 220, 220, 220, 11787, 198, 8, 628 ]
3.275862
29
#!/usr/bin/env python #-*- coding:utf-8 -*- """ Created on Nov 23, 2020 @author: Chengning Zhang """ ## simulation for Scenario A: generate X0 and X1. def MonteCarlo_1(T, n0, n1, u0, u1, sigma0, sigma1, log_bool = False): """simulation for first scenario: multivarite normal with equal variance T: number of simulation n0: sample size of class 0 n1: sample size of class 1 """ AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr'] for i in range(T): ### one monto carlo simulation of size n0 + n1 #i = 10 np.random.seed(seed= 100*i+ 4*i) X0 = multivariate_normal(u0, sigma0, size = n0) X1 = multivariate_normal(u1, sigma1, size = n1) if log_bool: X0 = np.exp(X0) X1 = np.exp(X1) # X = np.concatenate([X0,X1]) y = [0] * n0 y.extend([1]*n1); y = np.array(y) ## X,y is one simulation X = pd.DataFrame(data = X); y = pd.Series(y) ## within that particular MC simulation, do 10 folds CV cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42) AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders # for folder, (train_index, val_index) in enumerate(cv.split(X, y)): X_train,X_val = X.iloc[train_index],X.iloc[val_index] y_train,y_val = y.iloc[train_index],y.iloc[val_index] # X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val) for method in methods: model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train) _,_, auc = model.predict(X0_val,X1_val) AUC_folds[method].append(auc) #print(AUC_folds) for key, val in AUC_folds.items(): AUC[key].append( np.mean(np.array(val) )) print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()}) return AUC ## Simulation scenario B: generate X first, then generate bernulli Y via logit(P(Y=1|X)) = ... 
def MonteCarlo_2(T, n, u, sigma): """simulation for last scenario: generate X first from normal, then generate y via logit(Y|X) = 10* ((sinpi*x1) + ... ) T: number of simulation n: sample size u: mean for X sigma: variance for X """ AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr'] for i in range(T): ### one monto carlo simulation of size n0 + n1 #i = 10 print(i) np.random.seed(seed= 100*i+ 4*i) X = multivariate_normal(u, sigma, size = n) X_trans = [ 10*sum(list(map(lambda x: np.sin(np.pi*x) , ele))) for ele in X] p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans)) y = bernoulli.rvs(p, size= n) X = pd.DataFrame(data = X); y = pd.Series(y) ## within that particular MC simulation, do 10 folds CV cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42) AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders # for folder, (train_index, val_index) in enumerate(cv.split(X, y)): X_train,X_val = X.iloc[train_index],X.iloc[val_index] y_train,y_val = y.iloc[train_index],y.iloc[val_index] # X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val) for method in methods: model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train) _,_, auc = model.predict(X0_val,X1_val) AUC_folds[method].append(auc) #print(AUC_folds) for key, val in AUC_folds.items(): AUC[key].append( np.mean(np.array(val) )) print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()}) return AUC ## Simulation scenario B: generate X first, then generate bernulli Y via logit(P(Y=1|X)) = ... def MonteCarlo_3(T, n, u, sigma): """simulation for last scenario: generate X first from normal, then generate y via logit(Y|X) = 10* ((sinpi*x1) + ... 
) T: number of simulation n: sample size """ AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr'] for i in range(T): ### one monto carlo simulation of size n0 + n1 np.random.seed(seed= 100*i+ 4*i) X = multivariate_normal(u, sigma, size = n); #X = np.exp(X) X_trans = [ele[0] - ele[1] - ele[2]+ (ele[0] - ele[1])**2 - ele[3]**4 for ele in X] ## x1 - x2 - x3 + (x1-x2)^2 - x4^4 p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans)) y = bernoulli.rvs(p, size= n) X = pd.DataFrame(data = X); y = pd.Series(y) ## within that particular MC simulation, do 10 folds CV cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42) AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders # for folder, (train_index, val_index) in enumerate(cv.split(X, y)): X_train,X_val = X.iloc[train_index],X.iloc[val_index] y_train,y_val = y.iloc[train_index],y.iloc[val_index] # X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val) for method in methods: model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train) _,_, auc = model.predict(X0_val,X1_val) AUC_folds[method].append(auc) #print(AUC_folds) for key, val in AUC_folds.items(): AUC[key].append( np.mean(np.array(val) )) print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()}) return AUC
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 12, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 5267, 2242, 11, 12131, 198, 31, 9800, 25, 27692, 768, 19439, 198, 37811, 198, 198, 2235, 18640, 329...
2.196283
2,690
import requests if __name__ == '__main__': pages_crawler()
[ 11748, 7007, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 5468, 62, 66, 39464, 3419, 198 ]
2.64
25
import sys import logging import pytz logging.basicConfig(format='%(levelname)s: %(message)s') if (sys.version_info < (3, 0)):#NO MORE PYTHON 2!!! https://pythonclock.org/ logging.error(" ########################### ERROR ###########################") logging.error(" =============================================================") logging.error(" Invalid python version detected: "+str(sys.version_info[0])+"."+str(sys.version_info[1])) logging.error(" =============================================================") logging.error(" It seems your are still using python 2 even if you should") logging.error(" now it will be retire next 2020.") logging.error(" For more info please read https://pythonclock.org/") logging.error(" =============================================================") logging.error(" Try again typing: python3 /path/to/"+sys.argv[0]) logging.error(" =============================================================") logging.error(" ########################### ERROR ###########################") exit(0) import tempfile import argparse import os import requests import re import json italyTZ = pytz.timezone("Europe/Rome") from apiosintDS.modules import listutils, dosearch try: from urllib.parse import urlparse except ImportError as ierror: logging.error(ierror) logging.error("To run this script you need to install the \"urllib\" module") logging.error("Try typing: \"pip3 install urllib3\"") exit(0) try: import validators except ImportError as e: logging.error(e) logging.error("To run this script you need to install the \"validators\" module") logging.error("Try typing: \"pip3 install validators\"") exit(0) import platform if platform.system() not in ['Linux']: logging.warning("Script not testes on "+platform.system()+" systems. 
Use at your own risks.") scriptinfo = {"scriptname": "DigitalSide-API", "majorversion": "1", "minorversion": "8.3", "license": "MIT", "licenseurl": "https://raw.githubusercontent.com/davidonzo/Threat-Intel/master/LICENSE", "author": "Davide Baglieri", "mail": "info[at]digitalside.it", "pgp": "30B31BDA", "fingerprint": "0B4C F801 E8FF E9A3 A602 D2C7 9C36 93B2 30B3 1BDA", "git": "https://github.com/davidonzo/Threat-Intel/blob/master/tools/DigitalSide-API/v1", "DSProjectHP": "https://osint.digitalside.it", "DSGitHubHP": "https://github.com/davidonzo/Threat-Intel"} if __name__ == '__main__': main()
[ 11748, 25064, 198, 11748, 18931, 198, 11748, 12972, 22877, 198, 6404, 2667, 13, 35487, 16934, 7, 18982, 11639, 4, 7, 5715, 3672, 8, 82, 25, 4064, 7, 20500, 8, 82, 11537, 198, 361, 357, 17597, 13, 9641, 62, 10951, 1279, 357, 18, 11, ...
2.77416
952
#Stock inventory control system. #======================================= 1 =========================== #======================================= 2 =========================== #======================================= 3 ============================ #main prog below choice = 0 myStock = {} #empty dictionary for myStock try: infile = open("myStock.txt","r") read1LineStock = infile.readline() #read first line while read1LineStock !=" ": #while the file has not ended, myStock[read1LineStock.split(",")[0]] = int(read1LineStock.split(",")[1]) read1LineStock = infile.readline() print(myStock) #place item 0 in the split up sentence as the name for the item for myStock, #and whatever number you can find in item 1 of the split up sentence (ignore '\n') #as the 'quantity' for myStock. #eg myStock['apple'] = '1' #then, read the next line. infile.close() except: print("Welcome to the stock management system!") while choice != 9: choice = menu() #rmb to return choice to the global choice. #the choice inside menu() is a LOCAL choice. if choice ==1: newStock() elif choice ==2: addVolume() elif choice ==3: sell() #======================================= 8 =========================== elif choice ==8: print(myStock) #======================================= 9 =========================== print("Have a noice day")
[ 2, 26207, 13184, 1630, 1080, 13, 201, 198, 201, 198, 2, 10052, 1421, 18604, 220, 220, 220, 352, 220, 220, 36658, 2559, 855, 201, 198, 201, 198, 2, 10052, 1421, 18604, 220, 220, 220, 362, 220, 220, 36658, 2559, 855, 201, 198, 220, ...
2.607201
611
bind = "0.0.0.0:5000" threads = 10 worker_class = "gthread" accesslog = '-' errorlog = '-'
[ 21653, 796, 366, 15, 13, 15, 13, 15, 13, 15, 25, 27641, 1, 198, 16663, 82, 796, 838, 198, 28816, 62, 4871, 796, 366, 70, 16663, 1, 198, 15526, 6404, 796, 705, 19355, 198, 18224, 6404, 796, 705, 19355, 198 ]
2.275
40
""" 2017 Day 23 https://adventofcode.com/2017/day/23 """ from typing import Dict import aocd # type: ignore def main() -> None: """ Calculate and output the solutions based on the real puzzle input. """ data = aocd.get_data(year=2017, day=23) program = Program(data) program.run() print(f"Part 1: {program.mul_count}") print(f"Part 2: {run_program()}") if __name__ == "__main__": main()
[ 37811, 198, 5539, 3596, 2242, 198, 5450, 1378, 324, 1151, 1659, 8189, 13, 785, 14, 5539, 14, 820, 14, 1954, 198, 37811, 198, 198, 6738, 19720, 1330, 360, 713, 198, 11748, 257, 420, 67, 220, 1303, 2099, 25, 8856, 628, 628, 198, 198, ...
2.508671
173
import sys from sklearn.datasets import make_blobs from src.simulator.wsn.network import Network from src.simulator.wsn.utils import * from src.simulator.wsn.fcm import * from src.simulator.wsn.direct_communication import * from src.utils import complete, star seed = 1 np.random.seed(seed ) logging.basicConfig(stream=sys.stderr, level=logging.INFO) traces = {} topo = complete(cf.NB_CLUSTERS) # topo = independent(cf.NB_CLUSTERS) # topo = star(cf.NB_CLUSTERS) # topo = ring(cf.NB_CLUSTERS) centers = [[50, 225], [25, 110], [125, 20], [220, 80], [200, 225]] X, y = make_blobs(n_samples=100, centers=centers, n_features=2, random_state=seed, cluster_std=15) traces = {} network = Network(init_nodes=X, topo=topo) # network = Network(topo=topo) for routing_topology in ['FCM']:#, 'DC']: network.reset() routing_protocol_class = eval(routing_topology) network.init_routing_protocol(routing_protocol_class()) # traces[routing_topology] = network.simulate() for i in range(1000): print("--------Round %d--------"% i) network.activate_mix() traces[routing_topology] = network.simulate_one_round() network.deactivate_mix() if len(network.get_alive_nodes()) == 0 : break # plot_clusters(network) # plot_time_of_death(network) # print(network.energy_dis) # print(network.energy_dis['inter-comm']/ network.energy_dis['intra-comm']) print("All death round: ", i) print("First death round: ", network.first_depletion) print("Energy:", network.energy_dis) plot_traces(traces)
[ 11748, 25064, 201, 198, 6738, 1341, 35720, 13, 19608, 292, 1039, 1330, 787, 62, 2436, 8158, 201, 198, 201, 198, 6738, 12351, 13, 14323, 8927, 13, 86, 16184, 13, 27349, 1330, 7311, 201, 198, 6738, 12351, 13, 14323, 8927, 13, 86, 16184,...
2.364553
694
# -*- coding: utf-8 -*- try: from collections.abc import Iterable except ImportError: from collections import Iterable import numpy as np from numpy import ndarray from dewloosh.math.array import atleast1d from dewloosh.math.utils import to_range from .celldata import CellData from .utils import jacobian_matrix_bulk, points_of_cells, pcoords_to_coords_1d
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 28311, 25, 198, 220, 220, 220, 422, 17268, 13, 39305, 1330, 40806, 540, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 422, 17268, 1330, 40806, 540, 198, 198, 11748, ...
2.414201
169
from .rmq_item import RMQItem
[ 6738, 764, 26224, 80, 62, 9186, 1330, 29820, 48, 7449, 198 ]
2.727273
11
import os import numpy as np from gym import utils from mujoco_safety_gym.envs import fetch_env # Ensure we get the path separator correct on windows MODEL_XML_PATH = os.path.join('fetch', 'slide.xml')
[ 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 11550, 1330, 3384, 4487, 198, 6738, 285, 23577, 25634, 62, 44708, 62, 1360, 76, 13, 268, 14259, 1330, 21207, 62, 24330, 628, 198, 2, 48987, 356, 651, 262, 3108, 2880, 1...
2.942857
70
from setuptools import setup, find_packages setup( name="raspi_ip", version="1.0.0", author="atoy322", description="", long_description="" )
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 81, 5126, 72, 62, 541, 1600, 198, 220, 220, 220, 2196, 2625, 16, 13, 15, 13, 15, 1600, 198, 220, 220, 220, 1772, 2625, ...
2.469697
66
__title__ = "open_kite_connect" __description__ = "Fork of the official Kite Connect python client, allowing free access to the api." __url__ = "https://kite.trade" __download_url__ = "https://github.com/AnjayGoel/pykiteconnect" __version__ = "4.0.0" __author__ = "Anjay Goel" __author_email__ = "anjay.goel@gmail.com" __license__ = "MIT"
[ 834, 7839, 834, 796, 366, 9654, 62, 74, 578, 62, 8443, 1, 198, 834, 11213, 834, 796, 366, 37, 967, 286, 262, 1743, 509, 578, 8113, 21015, 5456, 11, 5086, 1479, 1895, 284, 262, 40391, 526, 198, 834, 6371, 834, 796, 366, 5450, 1378,...
2.756098
123
# @Author: DivineEnder # @Date: 2018-03-08 22:24:45 # @Email: danuta@u.rochester.edu # @Last modified by: DivineEnder # @Last modified time: 2018-03-11 01:25:41 # -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html from dotenv import load_dotenv, find_dotenv from Utils import connection_utils as glc from psycopg2.extensions import AsIs import os
[ 2, 2488, 13838, 25, 13009, 36, 681, 198, 2, 2488, 10430, 25, 220, 220, 2864, 12, 3070, 12, 2919, 2534, 25, 1731, 25, 2231, 198, 2, 2488, 15333, 25, 220, 288, 20651, 64, 31, 84, 13, 305, 35983, 13, 15532, 198, 2, 2488, 5956, 9518...
2.652406
187
import os import json from enum import Enum from datetime import datetime,date import logging import pathlib from tqdm import tqdm from datastructures import Volume, IndexedFile,load_index_if_exists, save_index from os import listdir from os.path import isfile, join import itertools import csv logger = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) ############################################################################### index_dir = os.path.join(os.getcwd(), 'index') logger.info('finding index files') indexfiles = list([f for f in listdir(index_dir) if isfile(join(index_dir, f)) and f[-4:]=='json']) columns = ['VolumeName', 'VolumeSerialNumber', 'Directory', 'Name', 'InodeNumber', 'Modified On', 'Created On', 'SHA256'] exif_columns=set() logger.info('parsing index files') #Pass 1 = collect keys for index_file in indexfiles: index = load_index_if_exists(os.path.join(index_dir, index_file)) for vol in index: for ixf in vol.files: if ixf.EXIF is not None: for i in ixf.EXIF.keys(): exif_columns.add(i) logger.info('writing csv') #Pass 2 = write header with open(os.path.join(os.getcwd(), 'index.csv'), mode='w', encoding='utf-8', newline='') as f: writer = csv.writer(f) writer.writerow(columns+list(exif_columns)) #and now rows for index_file in indexfiles: index = load_index_if_exists(os.path.join(index_dir, index_file)) for vol in index: for ixf in vol.files: row = [ vol.VolumeName, vol.VolumeSerialNumber, ixf.Directory, ixf.Name, ixf.st_ino, ixf.st_mtime.strftime("%c"), ixf.st_ctime.strftime("%c"), ixf.SHA256 ] for ec in exif_columns: row.append(ixf.EXIF.get(ec, None)) writer.writerow(row)
[ 11748, 28686, 201, 198, 11748, 33918, 201, 198, 6738, 33829, 1330, 2039, 388, 201, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 4475, 201, 198, 11748, 18931, 201, 198, 11748, 3108, 8019, 201, 198, 6738, 256, 80, 36020, 1330, 256, 80, ...
2.108902
1,056
import unittest from collections import namedtuple from jike.objects.wrapper import * if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 6738, 474, 522, 13, 48205, 13, 48553, 1330, 1635, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12...
3.022222
45
#!/usr/bin/env python """ The various ChannelUI classes. Hazen 04/17 """ import os from PyQt5 import QtCore, QtWidgets # # The MIT License # # Copyright (c) 2017 Zhuang Lab, Harvard University # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 198, 464, 2972, 11102, 10080, 6097, 13, 198, 198, 39, 1031, 268, 8702, 14, 1558, 198, 37811, 198, 198, 11748, 28686, 198, 198, 6738, 9485, 48, 83, 20, 1330, 33734, 14055, 11, ...
3.641618
346
import sys import struct from io import FileIO, BufferedWriter import S9Compressor as S9 BLOCKSIZE = (64*1024) / 4 # number of Int LexiPos = 0 # record the current position for new lexicon writing lexiconBuffer = [] IIBuffer = [] WriteThreshold = 0 del IIBuffer[:] newII.close() return main()
[ 11748, 25064, 198, 11748, 2878, 198, 6738, 33245, 1330, 9220, 9399, 11, 8792, 1068, 34379, 198, 11748, 311, 24, 7293, 44292, 355, 311, 24, 198, 198, 9148, 11290, 33489, 796, 357, 2414, 9, 35500, 8, 1220, 604, 197, 197, 197, 2, 1271, ...
2.87037
108
#CASA script to create cutouts of fits cubes directoryA = '/Volumes/TARDIS/Work/askap/' directoryB = '/Volumes/NARNIA/pilot_cutouts/' import numpy as np sources=np.loadtxt('/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt',dtype='str') for i in range(0,sources.shape[0]): objectname=sources[i,0] POSSUMSB=sources[i,3] EMUSB=sources[i,4] ra=sources[i,1] dec=sources[i,2] sourcecentre=ra+','+dec fov=sources[i,6]#arcsec print(objectname) region='centerbox[['+sourcecentre+'], ['+fov+'arcsec, '+fov+'arcsec]]' possum_outfile=directoryB+objectname+'/'+objectname+'_POSSUM.fits' emu_outfile=directoryB+objectname+'/'+objectname+'_EMU.fits' #POSSUM if POSSUMSB == '5038': #this is the Early Science data possum_cont_filename = '/Volumes/NARNIA/PawseySync/DRAGN_1_0p8_A/DRAGN_1_0p8_A/image.i.SB5038.cont.restored.fits' else: possum_cont_filename = directoryA +'fullfields/image.i.SB'+POSSUMSB+'.cont.taylor.0.restored.fits' if POSSUMSB == '10035': print('Skipping POSSUM: bad SB10035') else: imsubimage(imagename=possum_cont_filename,outfile='possum_cont_temp',region=region,overwrite=True,dropdeg=True) exportfits(imagename='possum_cont_temp',fitsimage=possum_outfile,overwrite=True) #cubes i_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.i.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits' q_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.q.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits' u_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.u.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits' imsubimage(imagename=i_filename,outfile='i_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=q_filename,outfile='q_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=u_filename,outfile='u_im_temp',region=region,overwrite=True,dropdeg=True) exportfits(imagename='i_im_temp',fitsimage=objectname+'_POSSUM_i.fits',overwrite=True) 
exportfits(imagename='q_im_temp',fitsimage=objectname+'_POSSUM_q.fits',overwrite=True) exportfits(imagename='u_im_temp',fitsimage=objectname+'_POSSUM_u.fits',overwrite=True) #EMU if EMUSB != 'NaN': if EMUSB=='10083': i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10083.contcube.conv.fits' q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10083.contcube.conv.fits' u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10083.contcube.conv.fits' cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10083.cont.taylor.0.restored.conv.fits' imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True) exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True) exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True) exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True) exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True) elif EMUSB=='10635': i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10635.contcube.v2.conv.fits' q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10635.contcube.v2.conv.fits' u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10635.contcube.v2.conv.fits' cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10635.cont.taylor.0.restored.fits' imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True) 
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True) imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True) exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True) exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True) exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True) exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True) else: #no cubes emu_filename= directoryA +'fullfields/image.i.SB'+EMUSB+'.cont.taylor.0.restored.fits' imsubimage(imagename=emu_filename,outfile='emu_cont_temp',region=region,overwrite=True,dropdeg=True) exportfits(imagename='emu_cont_temp',fitsimage=emu_outfile,overwrite=True) os.system("rm -r emu_cont_temp") #tidy up os.system("rm -r *_temp") os.system("mv *{}* {}/".format(objectname,objectname))
[ 2, 34, 1921, 32, 4226, 284, 2251, 2005, 5269, 286, 11414, 34896, 198, 198, 34945, 32, 796, 31051, 16598, 8139, 14, 51, 49608, 14, 12468, 14, 2093, 499, 14, 6, 198, 34945, 33, 796, 31051, 16598, 8139, 14, 45, 1503, 45, 3539, 14, 79...
2.281307
2,204
from cv_comparison_slider_window.cv_comparison_slider_window import CvComparisonSliderWindow
[ 6738, 269, 85, 62, 785, 1845, 1653, 62, 6649, 1304, 62, 17497, 13, 33967, 62, 785, 1845, 1653, 62, 6649, 1304, 62, 17497, 1330, 327, 85, 50249, 1653, 11122, 1304, 27703 ]
2.967742
31
from ..language import get_text from ..database.query import ( user_exist, is_admin) END = -1
[ 6738, 11485, 16129, 1330, 651, 62, 5239, 198, 6738, 11485, 48806, 13, 22766, 1330, 357, 198, 220, 220, 220, 2836, 62, 38476, 11, 198, 220, 220, 220, 318, 62, 28482, 8, 628, 198, 10619, 796, 532, 16, 628, 628, 628, 198 ]
2.682927
41
"""" This package contains the OpenGL demonstration classes """
[ 15931, 15931, 198, 1212, 5301, 4909, 262, 30672, 13646, 6097, 198, 37811 ]
5.25
12
#!/usr/bin/env python3 # -*- coding: ISO-8859-1 -*- # https://github.com/starze/openhab2 # https://github.com/roggmaeh/nilan-openhab import minimalmodbus import serial import os, sys import csv import httplib2 minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 30, mode='rtu') # port name, slave address (in decimal) instrument.serial.port instrument.serial.baudrate = 19200 # Baud instrument.serial.bytesize = 8 instrument.serial.parity = serial.PARITY_EVEN instrument.serial.stopbits = 1 instrument.serial.timeout = 2 # seconds #instrument.debug = True h = httplib2.Http() with open('nilan_modbus.csv') as csvfile: reader = csv.DictReader(csvfile, delimiter=',') for row in reader: if row['Register Type'] == "Input": fc = 4 elif row['Register Type'] == "Holding": fc = 3 if row['Unit'] == "text" or row['Unit'] == "ascii": strRet = instrument.read_string(int(row['Address']), numberOfRegisters=1, functioncode=fc) lst = list(strRet) strRet = lst[1] + lst[0] elif row['Scale'] == "100": strRet = instrument.read_register(int(row['Address']), numberOfDecimals=2, functioncode=fc) else: strRet = instrument.read_register(int(row['Address']), numberOfDecimals=0, functioncode=fc) if row['Unit'] == "%" or row['Unit'] == "C": print("%s: %s %s" % (row['Name'], strRet, row['Unit'])) h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet)) else: print("%s: %s" % (row['Name'], strRet)) h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 19694, 12, 3459, 3270, 12, 16, 532, 9, 12, 198, 2, 3740, 1378, 12567, 13, 785, 14, 7364, 2736, 14, 9654, 5976, 17, 220, 198, 2, 3740, 1378, 12567, ...
2.53323
647
import cv2 import numpy as np import os
[ 11748, 269, 85, 17, 201, 198, 11748, 299, 32152, 355, 45941, 220, 201, 198, 11748, 28686, 201, 198, 201, 198 ]
2.3
20
"randompicker.py" import random "A very short practice program designed to spit out a random, user-determined sample of input names"
[ 1, 25192, 3361, 15799, 13, 9078, 1, 198, 198, 11748, 4738, 198, 198, 1, 32, 845, 1790, 3357, 1430, 3562, 198, 1462, 27591, 503, 257, 4738, 11, 2836, 12, 67, 23444, 220, 198, 39873, 286, 5128, 3891, 1, 628, 198 ]
3.45
40
from __future__ import absolute_import, division import numpy as np import matplotlib.pyplot as plt from matplotlib import gridspec from fisspy.analysis.filter import FourierFilter from interpolation.splines import LinearSpline from matplotlib.animation import FuncAnimation import astropy.units as u from astropy.time import Time __author__= "Juhyung Kang" __email__ = "jhkang@astro.snu.ac.kr"
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 2603, 29487, 8019, 1330, 50000, 43106, 198, 6738, 277, 747, 9078, 13,...
3.264463
121
""" Author: Andr Bento Date last modified: 26-02-2019 """ import subprocess import sys from os.path import dirname, abspath, join from setuptools import find_packages, Command, setup from setuptools.command.test import test as TestCommand this_dir = abspath(dirname(__file__)) NAME = 'graphy' VERSION = '0.0.1' # Readme with open(join(this_dir, 'README.md'), encoding='utf-8') as file: readme = file.read() # License with open(join(this_dir, 'LICENSE'), encoding='utf-8') as file: license_file = file.read() # Requirements with open(join(this_dir, 'requirements.txt')) as file: requirements = file.read().splitlines() setup( name=NAME, version=VERSION, description='A micro-services system monitor command line program in Python.', long_description=readme, # long_description_content_type='text/markdown', url='https://github.com/andrepbento/MScThesis/tree/master/Graphy', author='Andr Bento', author_email='apbento@student.dei.uc.pt', license=license_file, classifiers=[ # How mature is this project? Common values are # 1 - Project setup # 2 - Prototype # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 2 - Prototype', 'Intended Audience :: Developers', 'Topic :: Observing and Controlling Performance in Micro-services', 'License :: MIT License', 'Programming Language :: Python :: 3.6', ], keywords='cli', packages=find_packages(exclude=('tests*', 'docs')), install_requires=requirements, tests_require=['pytest'], extras_require={ 'test': ['coverage', 'pytest', 'pytest-cov'], }, cmdclass={ 'install': Install, 'run': Run, 'test': Test }, )
[ 37811, 198, 220, 220, 220, 6434, 25, 843, 81, 20421, 78, 198, 220, 220, 220, 7536, 938, 9518, 25, 2608, 12, 2999, 12, 23344, 198, 37811, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 6738, 28686, 13, 6978, 1330, 26672, 3672, 11, 2...
2.560284
705
import pytz from datetime import datetime, timedelta, timezone from rest_framework.decorators import api_view, parser_classes, renderer_classes from rest_framework.parsers import JSONParser from rest_framework.renderers import JSONRenderer from rest_framework.request import Request from rest_framework.response import Response from django.contrib.sessions.backends.db import SessionStore from polaris.utils import render_error_response, getLogger logger = getLogger(__name__)
[ 11748, 12972, 22877, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 11, 640, 11340, 198, 198, 6738, 1334, 62, 30604, 13, 12501, 273, 2024, 1330, 40391, 62, 1177, 11, 30751, 62, 37724, 11, 9851, 11882, 62, 37724, 198, 6738, ...
3.679389
131
import pandas as pd import datetime as dt def str2date( sDate ): """ Convert a string date to datetime.date """ try: dateTime = dt.datetime.strptime( sDate, "%Y%m%d" ) except ValueError: dateTime = dt.datetime.strptime( sDate, "%Y-%m-%d" ) return dateTime.date() def getHolidays( startDate, endDate ): """ Return China exchange holidays ( non-trading days ) from `startDate` to `endDate` """ with open( 'refData/holidays.txt', 'r' ) as f: holidays = f.read().strip().split('\n') holidays = [ date for date in map( str2date, holidays ) if date >= startDate and date <= endDate ] return holidays
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 4818, 8079, 355, 288, 83, 198, 198, 4299, 965, 17, 4475, 7, 264, 10430, 15179, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 38240, 257, 4731, 3128, 284, 4818, 8079, 13, 4475, 198, 220, ...
2.526316
266
import json
[ 11748, 33918, 628 ]
4.333333
3
import uvio
[ 11748, 334, 85, 952, 628 ]
2.6
5
# Copyright 2016 United States Government as represented by the Administrator # of the National Aeronautics and Space Administration. All Rights Reserved. # # Portion of this code is Copyright Geoscience Australia, Licensed under the # Apache License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of the License # at # # http://www.apache.org/licenses/LICENSE-2.0 # # The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gdal, osr import collections import gc import numpy as np import xarray as xr from datetime import datetime import collections from collections import OrderedDict import datacube from . import dc_utilities as utilities # Author: KMF # Creation date: 2016-06-14 # Modified by: AHDS # Last modified date: def create_mosaic_iterative(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None): """ Description: Creates a most recent - oldest mosaic of the input dataset. 
If no clean mask is given, the 'cf_mask' variable must be included in the input dataset, as it will be used to create a clean mask ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain coordinates: time, latitude, longitude variables: variables to be mosaicked If user does not provide a clean_mask, dataset_in must also include the cf_mask variable Optional Inputs: clean_mask (nd numpy array with dtype boolean) - true for values user considers clean; if user does not provide a clean mask, one will be created using cfmask no_data (int/float) - no data pixel value; default: -9999 Output: dataset_out (xarray.Dataset) - mosaicked data with coordinates: latitude, longitude variables: same as dataset_in """ # Create clean_mask from cfmask if none given if clean_mask is None: cfmask = dataset_in.cf_mask clean_mask = utilities.create_cfmask_clean_mask(cfmask) dataset_in = dataset_in.drop('cf_mask') #masks data with clean_mask. all values that are clean_mask==False are set to nodata. for key in list(dataset_in.data_vars): dataset_in[key].values[np.invert(clean_mask)] = no_data if intermediate_product is not None: dataset_out = intermediate_product.copy(deep=True) else: dataset_out = None for index in reversed(range(len(clean_mask))): dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time') if dataset_out is None: dataset_out = dataset_slice.copy(deep=True) #clear out the params as they can't be written to nc. dataset_out.attrs = OrderedDict() else: for key in list(dataset_in.data_vars): dataset_out[key].values[dataset_out[key].values==-9999] = dataset_slice[key].values[dataset_out[key].values==-9999] return dataset_out def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None): """ Description: Method for calculating the median pixel value for a given dataset. ----- Input: dataset_in (xarray dataset) - the set of data with clouds and no data removed. 
Optional Inputs: no_data (int/float) - no data value. """ # Create clean_mask from cfmask if none given if clean_mask is None: cfmask = dataset_in.cf_mask clean_mask = utilities.create_cfmask_clean_mask(cfmask) dataset_in = dataset_in.drop('cf_mask') #required for np.nan dataset_in = dataset_in.astype("float64") for key in list(dataset_in.data_vars): dataset_in[key].values[np.invert(clean_mask)] = no_data dataset_out = dataset_in.isel(time=0).drop('time').copy(deep=True) dataset_out.attrs = OrderedDict() # Loop over every key. for key in list(dataset_in.data_vars): dataset_in[key].values[dataset_in[key].values==no_data] = np.nan dataset_out[key].values = np.nanmedian(dataset_in[key].values, axis=0) dataset_out[key].values[dataset_out[key].values==np.nan] = no_data return dataset_out.astype('int16') def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None): """ Description: Method for calculating the pixel value for the max ndvi value. ----- Input: dataset_in (xarray dataset) - the set of data with clouds and no data removed. Optional Inputs: no_data (int/float) - no data value. """ # Create clean_mask from cfmask if none given if clean_mask is None: cfmask = dataset_in.cf_mask clean_mask = utilities.create_cfmask_clean_mask(cfmask) dataset_in = dataset_in.drop('cf_mask') for key in list(dataset_in.data_vars): dataset_in[key].values[np.invert(clean_mask)] = no_data if intermediate_product is not None: dataset_out = intermediate_product.copy(deep=True) else: dataset_out = None for timeslice in range(clean_mask.shape[0]): dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time') ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red) ndvi.values[np.invert(clean_mask)[timeslice,::]] = -1000000000 dataset_slice['ndvi'] = ndvi if dataset_out is None: dataset_out = dataset_slice.copy(deep=True) #clear out the params as they can't be written to nc. 
dataset_out.attrs = OrderedDict() else: for key in list(dataset_slice.data_vars): dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] return dataset_out def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None): """ Description: Method for calculating the pixel value for the min ndvi value. ----- Input: dataset_in (xarray dataset) - the set of data with clouds and no data removed. Optional Inputs: no_data (int/float) - no data value. """ # Create clean_mask from cfmask if none given if clean_mask is None: cfmask = dataset_in.cf_mask clean_mask = utilities.create_cfmask_clean_mask(cfmask) dataset_in = dataset_in.drop('cf_mask') for key in list(dataset_in.data_vars): dataset_in[key].values[np.invert(clean_mask)] = no_data if intermediate_product is not None: dataset_out = intermediate_product.copy(deep=True) else: dataset_out = None for timeslice in range(clean_mask.shape[0]): dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time') ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red) ndvi.values[np.invert(clean_mask)[timeslice,::]] = 1000000000 dataset_slice['ndvi'] = ndvi if dataset_out is None: dataset_out = dataset_slice.copy(deep=True) #clear out the params as they can't be written to nc. dataset_out.attrs = OrderedDict() else: for key in list(dataset_slice.data_vars): dataset_out[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] return dataset_out
[ 198, 2, 15069, 1584, 1578, 1829, 5070, 355, 7997, 416, 262, 22998, 198, 2, 286, 262, 2351, 15781, 261, 2306, 873, 290, 4687, 8694, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 4347, 295, 286, 428, 2438, 318, 15069, 2269, 418, 4234, ...
2.636424
3,009
import cleaner as dataStream import plotly.graph_objects as go import plotly.io as pio #DONUT PLOT - CONDITIONS ----------------------------------------- labels = ['Diabetes','Hypertension','Coronary Heart(D)','Chronic Kidney(D)','No Conditions','Obstructive Pulmonary(D)'] values = dataStream.PIEList fig_cond = go.Figure(data=[go.Pie(labels=labels, values=values, hole=.3)]) #fig_cond.show() pio.write_html(fig_cond, file="templates/cond.html") #GROUP BAR PLOT - SYMPTOMS --------------------------------------- symplabel=['Symptoms'] fig_symp = go.Figure(data=[ go.Bar(name='Fever', x=symplabel, y=dataStream.Fever), go.Bar(name='Cough', x=symplabel, y=dataStream.Cough), go.Bar(name='Breathlessness', x=symplabel, y=dataStream.Breathlessness), go.Bar(name='Severe Acute Respiratory Syndrome', x=symplabel, y=dataStream.SARI), go.Bar(name='Influenza-like Illness', x=symplabel, y=dataStream.ILI), go.Bar(name='Asymptomatic', x=symplabel, y=dataStream.NONE_sym) ]) fig_symp.update_layout(barmode='group') #fig_symp.show() pio.write_html(fig_symp, file="templates/symp.html") #STACK BAR PLOT - AGE DATA ------------------------------------------ fig_age = go.Figure() fig_age.add_trace(go.Bar( y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'], x=dataStream.maleAgeList, name='Male Deaths', orientation='h', marker=dict( color='rgba(61, 112, 242, 0.6)', line=dict(color='rgba(61, 112, 242, 1.0)', width=2) ) )) fig_age.add_trace(go.Bar( y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'], x=dataStream.femaleAgeList, name='Female Deaths', orientation='h', marker=dict( color='rgba(242, 61, 221, 0.6)', line=dict(color='rgba(242, 61, 221, 1.0)', width=2) ) )) fig_age.update_layout(barmode='stack') #fig_age.show() pio.write_html(fig_age, file="templates/age.html")
[ 11748, 21723, 355, 1366, 12124, 198, 11748, 7110, 306, 13, 34960, 62, 48205, 355, 467, 198, 11748, 7110, 306, 13, 952, 355, 279, 952, 198, 198, 2, 41173, 3843, 9297, 2394, 532, 7102, 49828, 11053, 20368, 45537, 198, 198, 23912, 1424, ...
2.468137
816
from ..math import geometry as geo from ..image.color import Color import math
[ 6738, 11485, 11018, 1330, 22939, 355, 40087, 198, 6738, 11485, 9060, 13, 8043, 1330, 5315, 198, 11748, 10688, 628 ]
4.210526
19
import time from tkinter import * from PIL import Image, ImageTk from Configuration import config
[ 11748, 640, 198, 6738, 256, 74, 3849, 1330, 1635, 198, 6738, 350, 4146, 1330, 7412, 11, 7412, 51, 74, 198, 6738, 28373, 1330, 4566 ]
4.041667
24
import pytest from bank_ddd_es_cqrs.accounts import SocialSecurityNumber
[ 11748, 12972, 9288, 198, 6738, 3331, 62, 1860, 67, 62, 274, 62, 66, 80, 3808, 13, 23317, 82, 1330, 5483, 24074, 15057, 628, 198 ]
3.125
24
from setuptools import setup from setuptools import find_packages setup(name='rl_traders', version='0.1.0', description='Reinforcement Learning for Trading', url='https://github.com/jjakimoto/rl_traders.git', author='jjakimoto', author_email='f.j.akimoto@gmail.com', license='MIT', packages=find_packages() )
[ 6738, 900, 37623, 10141, 1330, 9058, 198, 6738, 900, 37623, 10141, 1330, 1064, 62, 43789, 198, 198, 40406, 7, 3672, 11639, 45895, 62, 2213, 9972, 3256, 198, 220, 220, 220, 220, 220, 2196, 11639, 15, 13, 16, 13, 15, 3256, 198, 220, 2...
2.503497
143
from pyrogram import Client import asyncio from Music.config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS from motor.motor_asyncio import AsyncIOMotorClient as MongoClient import time import uvloop from Music import config import importlib from pyrogram import Client as Bot from Music.config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID, OWNER_ID from pyrogram import Client from aiohttp import ClientSession from motor.motor_asyncio import AsyncIOMotorClient as MongoClient import time initialize() MONGODB_CLI = MongoClient(MONGO_DB_URI) db = MONGODB_CLI.wbb SUDOERS = SUDO_USERS OWNER = OWNER_ID loop = asyncio.get_event_loop() loop.run_until_complete(load_sudoers()) Music_START_TIME = time.time() loop = asyncio.get_event_loop() BOT_ID = 0 BOT_NAME = "" BOT_USERNAME = "" ASSID = 0 ASSNAME = "" ASSUSERNAME = "" ASSMENTION = "" app = Client( 'MusicBot', API_ID, API_HASH, bot_token=BOT_TOKEN, ) aiohttpsession = ClientSession() client = Client(config.SESSION_NAME, config.API_ID, config.API_HASH) app.start() client.start() all_info(app, client)
[ 6738, 12972, 39529, 1330, 20985, 198, 11748, 30351, 952, 198, 6738, 7849, 13, 11250, 1330, 7824, 62, 2389, 11, 7824, 62, 39, 11211, 11, 347, 2394, 62, 10468, 43959, 11, 25000, 11230, 62, 11012, 62, 47269, 11, 311, 8322, 46, 62, 2937, ...
2.648456
421
import random from random import shuffle import numpy as np import tensorflow as tf from tensorflow.python.tools import freeze_graph import datetime import time import queue import threading import logging from PIL import Image import itertools import yaml import re import os import glob import shutil import sys import copy import h5py from net_all import * from trainer_all import * season = None use_mask = True use_flip = False use_time = True model_name = 'neta' train_winter = ['-01-', '-02-', '-03-'] train_summer = ['-05-', '-04-', '-06-'] test_winter = ['-11-', '-12-'] test_summer = ['-07-', '-08-', '-09-', '-10-'] SEED = 0 num_train_file = 285 num_frame_per_day = 288 num_frame_before = 12 num_frame_sequence = 24 target_frames = [0, 1, 2, 5, 8, 11] num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1 height = 495 width = 436 num_channel = 9 num_channel_discretized = 8 # 4 * 2 visual_input_channels = 115 # 12 * 8 visual_output_channels = 6 * 8 # 6 * 8 vector_input_channels = 1 # start time point import json # n = 1 s = 255 e = 85 w = 170 tv = 16 ##############################Set the path############################################## data_root = './data' model_root = './jianjzhmodelstest' log_root = './output' ##############################Set the path############################################## # target_city = 'ISTANBUL' # ['BERLIN', 'MOSCOW', 'ISTANBUL'] # test_start_index_list = np.array([ 18, 57, 114, 174, 222], np.int32) # 'BERLIN' # test_start_index_list = np.array([ 45, 102, 162, 210, 246], np.int32) # 'Moscow' # 'Istanbul' input_static_data_path = data_root + '/' + target_city + '/' + target_city + '_static_2019.h5' input_mask_data_path = data_root + '/maskdata/' input_train_data_folder_path = data_root + '/' + target_city + '/training' input_val_data_folder_path = data_root + '/' + target_city + '/validation' input_test_data_folder_path = data_root + '/' + target_city + '/testing' save_model_path = model_root + '/' + target_city + 
str(season) + str(use_flip) + str(use_mask) summary_path = log_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask) # batch_size_test = 5 learning_rate = 3e-4 load_model_path = model_root + '/' + 'ISTANBULneta' # load_model_path = '' is_training = False # premodel = os.path.join(model_root, 'BERLINneta', 'model-58000.cptk') global_step = 60000 if __name__ == '__main__': random.seed(SEED) np.random.seed(SEED) tf.set_random_seed(SEED) trainer = Trainer(height, width, visual_input_channels, visual_output_channels, vector_input_channels, learning_rate, save_model_path, load_model_path, summary_path, is_training, use_mask, model_name) tf.reset_default_graph() test_data_filepath_list = get_data_filepath_list(input_test_data_folder_path) if season == 'winter': tmp = [] for i in test_data_filepath_list: if any([j in i for j in test_winter]): tmp.append(i) data_filepath_list = tmp elif season == 'summer': tmp = [] for i in test_data_filepath_list: if any([j in i for j in test_summer]): tmp.append(i) data_filepath_list = tmp print('test_data_filepath_list\t', len(test_data_filepath_list), ) test_output_filepath_list = list() for test_data_filepath in test_data_filepath_list: filename = test_data_filepath.split('/')[-1] test_output_filepath_list.append('output/' + target_city + '/' + target_city + '_test' + '/' + filename) static_data = get_static_data(input_static_data_path) mask_data = get_mask_data(input_mask_data_path, target_city) try: if not os.path.exists('output'): os.makedirs('output') if not os.path.exists('output/' + target_city): os.makedirs('output/' + target_city) if not os.path.exists('output/' + target_city + '/' + target_city + '_test'): os.makedirs('output/' + target_city + '/' + target_city + '_test') except Exception: print('output path not made') exit(-1) with open('test_data.json') as f: test_json = json.load(f) for i in range(len(test_data_filepath_list)): file_path = test_data_filepath_list[i] out_file_path = 
test_output_filepath_list[i] fr = h5py.File(file_path, 'r') a_group_key = list(fr.keys())[0] data = fr[a_group_key] # assert data.shape[0] == num_frame_per_day data = np.array(data, np.uint8) test_data_batch_list = [] test_data_time_list = [] test_data_mask_list = [] batch_size_test = data.shape[0] for j in range(batch_size_test): test_data_time_list.append(float(j) / float(num_frame_per_day)) data_sliced = data[:, :, :, :, :num_channel] if use_time: for time_dict in test_json: time_data = list(time_dict.keys())[0] if time_data in file_path: time_data = time_dict[time_data] break time_id = np.ones_like(data_sliced)[:, :, :, :, :1] for m in range(len(time_data)): for n in range(num_frame_before): time_id[m, n] = time_id[m, n] * (time_data[m] + n) / 288.0 * 255.0 data_sliced = np.concatenate([data_sliced, time_id], axis=-1) data_mask = (np.max(data_sliced, axis=4) == 0) test_data_mask_list = data_mask[:, :, :, :] test_data_batch_list.append(data_sliced) test_data_time_list = np.asarray(test_data_time_list, np.float32) input_time = np.reshape(test_data_time_list, (batch_size_test, 1)) test_data_mask = test_data_mask_list input_data = np.concatenate(test_data_batch_list, axis=0).astype(np.float32) input_data[:, :, :, :, :] = input_data[:, :, :, :, :] / 255.0 input_data = np.moveaxis(input_data, 1, -1).reshape((batch_size_test, height, width, -1)) static_data_tmp = np.tile(static_data, [batch_size_test, 1, 1, 1]) input_data = np.concatenate([input_data, static_data_tmp], axis=-1) # input_data_mask = np.zeros((batch_size_test, num_frame_before, height, width, num_channel_discretized), np.bool) # input_data_mask[test_data_mask[:, :num_frame_before, :, :], :] = True # input_data_mask = np.moveaxis(input_data_mask, 1, -1).reshape((batch_size_test, height, width, -1)) # input_data[input_data_mask] = -1.0 true_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32) if use_mask: orig_label_mask = np.tile(mask_data, [1, 1, 1, 
len(target_frames)]) else: orig_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32) prediction_list = [] # print(input_data.shape) # assert 0 import scipy.misc as misc # trainer.load_model(premodel) # print('load model') for b in range(batch_size_test): run_out_one = trainer.infer(input_data[b, :, :, :][np.newaxis, :, :, :], input_time[b, :][np.newaxis, :], true_label_mask[b, :, :, :][np.newaxis, :, :, :], global_step) prediction_one = run_out_one['predict'] prediction_list.append(prediction_one) # print(input_data[b,:,:,:].shape) # for t in range(3): # misc.imsave('output_'+str(b)+'_'+str(t)+'.png', np.reshape(prediction_one, [495, 436, 3, 8])[:, :, t, 0]) # assert 0 prediction = np.concatenate(prediction_list, axis=0) prediction = np.moveaxis(np.reshape(prediction, ( batch_size_test, height, width, num_channel_discretized, len(target_frames),)), -1, 1) prediction = prediction.astype(np.float32) * 255.0 prediction = np.rint(prediction) prediction = np.clip(prediction, 0.0, 255.0).astype(np.uint8) assert prediction.shape == (batch_size_test, len(target_frames), height, width, num_channel_discretized) write_data(prediction, out_file_path)
[ 11748, 4738, 198, 6738, 4738, 1330, 36273, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 11192, 273, 11125, 13, 29412, 13, 31391, 1330, 16611, 62, 34960, 198, 11748, 4818, 8079, 198, 11748, 640,...
2.225824
3,702
# Entry point for building an RDS CloudFormation template with troposphere.
from troposphere import Template, Ref, Parameter, GetAtt
from troposphere.ec2 import SecurityGroup
from troposphere.rds import DBSubnetGroup, DBInstance

if __name__ == '__main__':
    # NOTE(review): create_rds_template() is not defined in this view — it is
    # presumably defined elsewhere in this module; confirm before running.
    create_rds_template()
[ 6738, 14673, 22829, 1330, 37350, 11, 6524, 11, 25139, 2357, 11, 3497, 8086, 198, 6738, 14673, 22829, 13, 721, 17, 1330, 4765, 13247, 198, 6738, 14673, 22829, 13, 4372, 82, 1330, 360, 4462, 549, 3262, 13247, 11, 20137, 33384, 628, 198, ...
3.265625
64
# Generated by Django 3.0.7 on 2021-01-26 09:57 import django.contrib.postgres.fields from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 22, 319, 33448, 12, 486, 12, 2075, 7769, 25, 3553, 198, 198, 11748, 42625, 14208, 13, 3642, 822, 13, 7353, 34239, 13, 25747, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, ...
2.931818
44
from conans import ConanFile, CMake, tools
[ 6738, 369, 504, 1330, 31634, 8979, 11, 327, 12050, 11, 4899, 198 ]
3.583333
12
""" # Sample code to perform I/O: name = input() # Reading input from STDIN print('Hi, %s.' % name) # Writing output to STDOUT # Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail """ # Write your code here import bisect t = int(input()) for _ in range(t): n, k = map(int, input().strip().split()) stones = list(map(int, input().strip().split())) low = 1 high = stones[-1] - stones[0] # Location of all stones are given in ascending order. while low <= high: mid = (low + high) // 2 if check(stones, mid, n, k): high = mid - 1 else: low = mid + 1 print(low)
[ 37811, 198, 2, 27565, 2438, 284, 1620, 314, 14, 46, 25, 198, 198, 3672, 796, 5128, 3419, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 1303, 11725, 5128, 422, 48571, 1268, 198, 4798, 10786, 1...
2.452962
287
from django.urls import path, re_path

from . import views

app_name = "departments"

# Routes for the departments app.
# NOTE(review): the three sub-routes (staff/thesis/directions) all reuse the
# name "department_detail"; Django allows duplicates, but reverse() will only
# ever resolve the last route registered under that name — confirm whether
# distinct names were intended.
# NOTE(review): the list route's name contains a space ("energy department"),
# which is awkward with reverse()/{% url %} — verify where it is referenced
# before changing it.
urlpatterns = [path("", views.departmentList, name="energy department"),
               path(
                   "<slug:department>/",
                   views.department_detail,
                   name="department_detail",
               ),
               path(
                   "<slug:department>/staff/",
                   views.department_detail_staff,
                   name="department_detail",
               ),
               path(
                   "<slug:department>/thesis/",
                   views.department_detail_thesis,
                   name="department_detail",
               ),
               path(
                   "<slug:department>/directions/",
                   views.department_detail_directions,
                   name="department_detail",
               ),
               ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 302, 62, 6978, 198, 6738, 764, 1330, 5009, 628, 198, 1324, 62, 3672, 796, 366, 10378, 32514, 1, 198, 6371, 33279, 82, 796, 685, 6978, 7203, 1600, 5009, 13, 10378, 1823, 8053, 11, 143...
2.057803
346
# 5. How do you Count The Number Of Times Each Value Appears In An Array Of Integers?
# [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]
# Answer should be array([4, 2, 1, 1, 3, 2, 0, 0, 0, 1]) which means 0 comes 4
# times, 1 comes 2 times, 2 comes 1 time, 3 comes 1 time and so on.

array = [0, 5, 4, 0, 4, 4, 3, 0, 0, 5, 2, 1, 1, 9]

# BUG FIX: iterating over set(array) (as the original did) skips values that
# never occur (6, 7 and 8 here) and relies on set iteration order, so it could
# not reproduce the bincount-style answer documented above.  Counting every
# value from 0 through max(array) inclusive yields one slot per value, with 0
# for absent values — exactly np.bincount semantics.
count_array_elements = [array.count(value) for value in range(max(array) + 1)]
print(count_array_elements)
[ 2, 642, 13, 1374, 220, 466, 220, 345, 220, 2764, 220, 383, 220, 7913, 220, 3226, 220, 3782, 220, 5501, 220, 11052, 220, 31254, 220, 554, 220, 1052, 220, 15690, 220, 3226, 15995, 364, 30, 198, 2, 685, 15, 11, 642, 11, 604, 11, 65...
2.322581
186
from time import sleep

# Greet the player before the match starts.
print("Welcome to Tic Tac Toe! \nWe'll be playing in a sec, but, first..")

# Board cells keyed numpad-style ('7'..'9' top row, '1'..'3' bottom row);
# a single space marks an empty cell.
general_board = {cell: ' ' for cell in ('7', '8', '9', '4', '5', '6', '1', '2', '3')}

# prints board structure
# Choose which player goes first
# Clear the board and reset the game

if __name__ == "__main__":
    # NOTE(review): game() and restart() are not defined in this view — they
    # are presumably implemented elsewhere in this module.
    game()
    restart()
[ 6738, 640, 1330, 3993, 198, 198, 4798, 7203, 14618, 284, 309, 291, 26075, 1675, 68, 0, 3467, 77, 1135, 1183, 307, 2712, 287, 257, 792, 11, 475, 11, 717, 492, 4943, 198, 198, 24622, 62, 3526, 796, 1391, 6, 22, 10354, 705, 46083, 70...
2.353293
167
""" Auth Providers which provides LDAP login """ from typing import List, Dict from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE from ldap3.core.exceptions import LDAPSocketOpenError, LDAPBindError from ..login import LoginProvider from .. import APP, AUTH_LOGGER
[ 37811, 198, 30515, 7518, 4157, 543, 3769, 27178, 2969, 17594, 198, 37811, 198, 6738, 19720, 1330, 7343, 11, 360, 713, 198, 198, 6738, 300, 67, 499, 18, 1330, 26923, 11, 9652, 11, 47044, 46, 62, 33, 12115, 62, 51, 6561, 62, 12473, 30...
3.175824
91
#!/usr/bin/env python3 import re, os, glob template = """ <!doctype html> <html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes"> <style> body { font-family: PMingLiu, HanaMinA, HanaMinB, Helvetica, arial, sans-serif; writing-mode: vertical-rl; -webkit-writing-mode: vertical-rl; } .sm { margin: 20px 0 10px; padding: 0; font-weight: bold; font-size: 30px; border-left: 1px solid #cccccc; margin: 0 5px; cursor: text; position: static; clear: both; text-align: right; } .sd, .sd2, .zy, .zi, .zi1, .yi { font-size: 10px; text-align: center; cursor: text; float: left; margin-left: 10px; margin-right: 10px; line-height: 10px; letter-spacing: 0.35em; } .sd, .sd2 { margin-right: 25px; clear: both; } .sd2 { margin-right: 20px; } .zi, .zi1 { padding-top: 20px; padding-bottom: 10px; font-size: 20px; line-height: 20px; } .zi1 { padding-top: 10px; } .yi { min-height: 40px; text-align: left; line-height: 12px; margin-right: 8px; } .clear { clear: both; } </style> <title></title> </head> <body> %s </body> </html> """ lines = list() copy_readme() for filename in glob.glob("wiki/??.md"): md2html(filename)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 302, 11, 28686, 11, 15095, 198, 198, 28243, 796, 37227, 198, 27, 0, 4598, 310, 2981, 27711, 29, 198, 27, 6494, 29, 198, 27, 2256, 29, 198, 27, 28961, 34534, 316, 26...
2.256272
558
# -*- encoding: utf-8 -*- from PyQt5.QtCore import QCoreApplication from PyQt5.QtWidgets import QMainWindow, QFrame import mobase
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 14055, 1330, 1195, 14055, 23416, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 13383, 27703, 11, 1195...
2.481481
54
from ._inv_prop_lr import InvPropLR from ._constant_lr import ConstantLR from ._step_size_lr import StepSizeLR from ._dynamic_step_size_lr import DynamicStepSizeLR
[ 6738, 47540, 16340, 62, 22930, 62, 14050, 1330, 10001, 24331, 35972, 198, 6738, 47540, 9979, 415, 62, 14050, 1330, 20217, 35972, 198, 6738, 47540, 9662, 62, 7857, 62, 14050, 1330, 5012, 10699, 35972, 198, 6738, 47540, 67, 28995, 62, 9662,...
3.28
50
# -*- coding: utf-8 -*-
"""
utils/strava.py
=================
Utility class to Strava API
"""
import json
import time
from configparser import ConfigParser, NoOptionError
from datetime import datetime
from pathlib import Path
from typing import Tuple

from loguru import logger
from stravalib import Client, exc

from utils.parameters import SECRET
from utils.constants import CONFIG_PATH, CODE_ID_FILE_NAME, TOKEN_FILE_NAME
from utils.files_handler import check_folder
from utils.parameters import STRAVA, CLIENT_ID


def get_client_id(app_config: ConfigParser) -> int:
    """ Obtains the client ID from the configuration file.

    Args:
        app_config (ConfigParser): app configuration.

    Returns:
        int: client id from the configuration file.

    Raises:
        ValueError: If the `client_id` key is not present in the
            configuration or is not an integer.
    """
    try:
        client_id = app_config.getint(STRAVA, CLIENT_ID)
    except NoOptionError:
        raise ValueError('The client id has not been set in the configuration.')
    except ValueError:
        logger.exception('Invalid client id format.')
        raise
    return client_id


def get_secret(app_config: ConfigParser) -> str:
    """ Obtains the secret from the configuration file.

    Args:
        app_config (ConfigParser): app configuration.

    Returns:
        str: secret from the configuration file.

    Raises:
        ValueError: If the `secret` key is not present in the configuration.
    """
    try:
        secret = app_config.get(STRAVA, SECRET)
    except NoOptionError:
        # BUG FIX: the message was copy-pasted from get_client_id and wrongly
        # talked about the client id instead of the secret.
        raise ValueError('The secret has not been set in the configuration.')
    return secret


def get_strava_token_from_code_id(config: ConfigParser) -> str:
    """ Method that interchange the temporary authentication code obtained
    when `src/request_auth.py` is executed.

    The method reads the file `config/code_id.txt` that contains the temporal
    authentication and generates the POST request to obtain the final access
    token which is saved in `config/token.json`.

    This method requires the Strava application `client_id` and `secret` that
    has to be set in the configuration file (`config/config.ini`).

    Args:
        config (ConfigParser): app configuration.

    Returns:
        str: Strava access token.

    Raises:
        ValueError: If no token is found in the configuration.
    """
    code_id_path = Path(CONFIG_PATH, CODE_ID_FILE_NAME)
    if not code_id_path.is_file():
        raise ValueError('The file with the temporal authentication code (`config/code_id.txt`)'
                         'was NOT found. Execute `request_auth.py` to obtain the temporal access.')
    with open(code_id_path, 'r') as file:
        logger.debug('The file with the temporal authentication code (`config/code_id.txt`)'
                     'was found.')
        code_id = file.read()
    if not code_id:
        raise ValueError('No valid temporal code access found. Rerun `request_auth.py` '
                         'to obtain the temporal access.')

    # Exchange the temporal code for a long-lived access token.
    client = Client()
    token = client.exchange_code_for_token(client_id=get_client_id(config),
                                           client_secret=get_secret(config),
                                           code=code_id)
    logger.debug('Obtained access until {}:\n'
                 '- token: {}.'
                 '- refresh token: {}.',
                 datetime.utcfromtimestamp(int(token['expires_at'])).strftime('%d-%m-%Y %H:%M:%S'),
                 token['access_token'],
                 token['refresh_token'])

    # Save JSON with the response
    save_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
    with open(save_path, 'w') as file:
        logger.info('Writing token information to `{}`.', save_path)
        json.dump(token, file, indent=4)
    return token['access_token']


def get_strava_client(config: ConfigParser) -> Client:
    """ Checks the authentication token and generates the Strava client.

    Args:
        config (ConfigParser): app configuration.

    Returns:
        Client: strava client configured with the authentication token.
    """
    token_file_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
    if token_file_path.is_file():
        logger.debug('The token info file (`config/token.json`) was found.')
        with open(token_file_path, 'r') as file:
            token_data = json.load(file)
            token = token_data.get('access_token')
        # If the file exists but no access token found, check against the temporary auth
        if not token:
            logger.warning('The token info file (`config/token.json`) was found'
                           ' but the access token could not be read.')
            token = get_strava_token_from_code_id(config)
    else:
        logger.info('The token info file (`config/token.json`) was NOT found. '
                    'Retrieving from the temporal authentication code.')
        token = get_strava_token_from_code_id(config)

    client = Client(access_token=token)
    return client


def upload_activity(client: Client, activity_type: str, file_path: Path) -> bool:
    """ Helper method to upload the activity to Strava.

    This method will handle the different possibilities when uploading an
    activity.

    Args:
        client (Client): configured Strava client.
        activity_type (str): Strava activity string.
        file_path (Path): Path to the `*.tcx` activity file.

    Returns:
        bool: True if the activity have been uploaded successfully.
            False otherwise.

    Raises:
        RateLimitExceeded: When the API limits have been reached. Generally
            when more than 1000 petitions have been done during the day.
        ConnectionError: When it has been impossible to connect the Strava
            servers.
        Exception: Unknown exceptions that will be logged in detail.
    """
    try:
        # BUG FIX: the activity file was previously opened without ever being
        # closed — the context manager guarantees release on every path.
        with open(file_path, 'r') as activity_file:
            client.upload_activity(
                activity_file=activity_file,
                data_type='tcx',
                activity_type=activity_type,
                private=False
            )
    except exc.ActivityUploadFailed:
        logger.exception('Error uploading the activity `{}`.', file_path.stem)
        return False
    except exc.RateLimitExceeded:
        logger.exception('Exceeded the API rate limit.')
        raise
    except ConnectionError:
        logger.exception('No internet connection.')
        raise
    except Exception:
        logger.exception('Unknown exception')
        raise
    # If no error return true
    logger.debug('Activity `{}` uploaded sucessfully.', file_path.stem)
    return True


def handle_rate_limit(start_time: float, requests: int) -> Tuple[float, int]:
    """ Method to handle the 15 minutes API limit.

    This method will check the elapsed time since the first request and the
    number of them. Three cases are possible:

    - Less than 15 minutes elapsed from the first request and less than 100
      requests -> continue.
    - More than 15 minutes elapsed from the first request and less than 100
      requests -> reset timer and request number to count from 0 again.
    - Less than 15 minutes elapsed from the first request but more than 100
      requests -> sleep until the 15 minutes block is over and reset timer
      and request number to count from 0 again.

    Args:
        start_time (float): timestamp of the first request of the block.
        requests (int): number of request done in the block.

    Returns:
        float, int: updated start time and number of requests following the
            possible cases.
    """
    requests += 1
    elapsed_time = time.time() - start_time
    if elapsed_time <= 60 * 15:
        if requests >= 100:
            remaining_time_stopped = 60 * 15 - elapsed_time
            mins, secs = divmod(remaining_time_stopped, 60)
            logger.warning('The number of allowed request per 15 minutes have'
                           'been reached. Sleeping for {:0.0f} minutes, {:0.1f} seconds.',
                           mins, secs)
            time.sleep(remaining_time_stopped)
            # Reset values. Include petition to be processed
            logger.info('Waiting time elapsed. Continuing with the process.')
            requests = 1
            start_time = time.time()
    else:
        logger.debug('15 minutes have been elapsed. Resetting requests and time.')
        # Reset values. Include petition to be processed
        requests = 1
        start_time = time.time()
    return start_time, requests
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 26791, 14, 301, 4108, 64, 13, 9078, 198, 4770, 28, 198, 18274, 879, 1398, 284, 520, 4108, 64, 7824, 198, 37811, 198, 11748, 33918, 198, 11748, 640, 198, 67...
2.642115
3,272
""" Author: brooklyn train with synthText """ import torch import torch.nn as nn import torch.optim as optim import torchvision.transforms as transforms import os from net.craft import CRAFT import sys from utils.cal_loss import cal_synthText_loss from dataset.synthDataset import SynthDataset import argparse from eval import eval_net device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') parser = argparse.ArgumentParser(description='CRAFT Train Fine-Tuning') parser.add_argument('--gt_path', default='/media/brooklyn/EEEEE142EEE10425/SynthText/gt.mat', type=str, help='SynthText gt.mat') parser.add_argument('--synth_dir', default='/media/brooklyn/EEEEE142EEE10425/SynthText', type=str, help='SynthText image dir') parser.add_argument('--label_size', default=96, type=int, help='target label size') parser.add_argument('--batch_size', default=16, type=int, help='training data batch size') parser.add_argument('--test_batch_size', default=16, type=int, help='test data batch size') parser.add_argument('--test_interval', default=40, type=int, help='test interval') parser.add_argument('--max_iter', default=50000, type=int, help='max iteration') parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate') parser.add_argument('--epochs', default=500, type=int, help='training epochs') parser.add_argument('--test_iter', default=10, type=int, help='test iteration') args = parser.parse_args() image_transform = transforms.Compose([ transforms.Resize((args.label_size * 2, args.label_size * 2)), transforms.ToTensor() ]) label_transform = transforms.Compose([ transforms.Resize((args.label_size,args.label_size)), transforms.ToTensor() ]) if __name__ == "__main__": batch_size = args.batch_size test_batch_size = args.test_batch_size epochs = args.epochs # lr = args.lr # test_interval = args.test_interval # max_iter = args.max_iter net = CRAFT(pretrained=True) # craft net = net.to(device) model_save_prefix = 'checkpoints/craft_netparam_' try: 
train(net=net, batch_size=batch_size, test_batch_size=test_batch_size, lr=lr, test_interval=test_interval, max_iter=max_iter, epochs=epochs, model_save_path=model_save_prefix) except KeyboardInterrupt: torch.save(net.state_dict(), 'INTERRUPTED1.pth') print('Saved interrupt') try: sys.exit(0) except SystemExit: os._exit(0)
[ 37811, 198, 13838, 25, 1379, 482, 6213, 198, 198, 27432, 351, 33549, 8206, 198, 37811, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 11748, 28034, 10178, 13, 7645, 23914, 35...
2.587639
987
from django.db import models # Keeps track of FriendCircle memberships MATCH_STATUS = ( ('O', 'Not swiped',), ('V', 'Swiped Right',), ('X', 'Swiped Left',), ) # Keeps track of matches. If both parties swiped right, the user can be added to FriendCircleMembership
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 628, 198, 2, 9175, 82, 2610, 286, 9182, 31560, 293, 1866, 5748, 628, 198, 44, 11417, 62, 35744, 2937, 796, 357, 198, 220, 220, 220, 19203, 46, 3256, 705, 3673, 1509, 46647, 3256, 828, 198, 2...
2.896907
97
"""Unit tests for the Jira issues collector.""" from .base import JiraTestCase
[ 37811, 26453, 5254, 329, 262, 449, 8704, 2428, 22967, 526, 15931, 198, 198, 6738, 764, 8692, 1330, 449, 8704, 14402, 20448, 628 ]
3.681818
22
import numpy as np import pandas as pd import pytest import xarray as xr import cf_xarray as cfxr
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 12972, 9288, 198, 11748, 2124, 18747, 355, 2124, 81, 198, 198, 11748, 30218, 62, 87, 18747, 355, 269, 21373, 81, 628 ]
2.857143
35
"""The base class of Socket Mode client implementation. If you want to build asyncio-based ones, use `AsyncBaseSocketModeHandler` instead. """ import logging import signal import sys from threading import Event from slack_sdk.socket_mode.client import BaseSocketModeClient from slack_sdk.socket_mode.request import SocketModeRequest from slack_bolt import App from slack_bolt.util.utils import get_boot_message
[ 37811, 464, 2779, 1398, 286, 47068, 10363, 5456, 7822, 13, 198, 1532, 345, 765, 284, 1382, 30351, 952, 12, 3106, 3392, 11, 779, 4600, 42367, 14881, 39105, 19076, 25060, 63, 2427, 13, 198, 37811, 198, 11748, 18931, 198, 11748, 6737, 198,...
3.869159
107
# coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs


def to_sparse(D, format="csc"):
    """ Transform dense matrix to sparse matrix of return_type

        bsr_matrix(arg1[, shape, dtype, copy, blocksize])   Block Sparse Row matrix
        coo_matrix(arg1[, shape, dtype, copy])  A sparse matrix in COOrdinate format.
        csc_matrix(arg1[, shape, dtype, copy])  Compressed Sparse Column matrix
        csr_matrix(arg1[, shape, dtype, copy])  Compressed Sparse Row matrix
        dia_matrix(arg1[, shape, dtype, copy])  Sparse matrix with DIAgonal storage
        dok_matrix(arg1[, shape, dtype, copy])  Dictionary Of Keys based sparse matrix.
        lil_matrix(arg1[, shape, dtype, copy])  Row-based linked list sparse matrix

    :param D: Dense matrix
    :param format: how to save the sparse matrix
    :return: sparse version
    """
    if format == "bsr":
        return sprs.bsr_matrix(D)
    elif format == "coo":
        return sprs.coo_matrix(D)
    elif format == "csc":
        return sprs.csc_matrix(D)
    elif format == "csr":
        return sprs.csr_matrix(D)
    elif format == "dia":
        return sprs.dia_matrix(D)
    elif format == "dok":
        return sprs.dok_matrix(D)
    elif format == "lil":
        return sprs.lil_matrix(D)
    else:
        # NOTE(review): to_dense is not defined in this module, so an unknown
        # format currently raises NameError — confirm where to_dense lives.
        return to_dense(D)


def next_neighbors_periodic(p, ps, k, T=None):
    """ This function gives for a value p the k points next to it which are
    found in the vector ps and the points which are found periodically.

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :param T: period length (defaults to the span implied by ps)
    :return: list of the k neighbor indices and a list of their (unwrapped)
             positions relative to p
    """
    if T is None:
        T = ps[-1] - 2 * ps[0] + ps[1]
    p_bar = p - np.floor(p / T) * T
    ps = ps - ps[0]
    # Signed distance from each node to p, taking the shortest of the three
    # periodic images (left copy, direct, right copy).  min() keeps the
    # original tie-break order: left copy first, then direct, then right.
    distance_to_p = []
    for tk in ps:
        candidates = (tk + T - p_bar, tk - p_bar, tk - T - p_bar)
        distance_to_p.append(min(candidates, key=np.abs))
    value_index = [(d, i) for i, d in enumerate(distance_to_p)]
    # BUG FIX (Python 3): sorted() no longer accepts a `cmp` argument; sorting
    # by abs() of the signed distance via `key` is equivalent.
    value_index_sorted_by_abs = sorted(value_index, key=lambda s: abs(s[0]))
    if k % 2 == 1:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k + 1],
                                            key=lambda s: s[0])[:k]
    else:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k],
                                            key=lambda s: s[0])
    # BUG FIX (Python 3): return concrete lists; map() now returns a lazy
    # iterator, which breaks the fancy indexing done by the callers.
    return ([s[1] for s in value_index_sorted_by_sign],
            [s[0] + p for s in value_index_sorted_by_sign])


def next_neighbors(p, ps, k):
    """ This function gives for a value p the k points next to it which are
    found in the vector ps.

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :return: sorted list with the indices of the k next neighbors
    """
    distance_to_p = np.abs(ps - p)
    # Pair each distance with its index, stable-sort by distance, keep the k
    # closest, and return their indices in ascending order.
    value_index = [(d, i) for i, d in enumerate(distance_to_p)]
    value_index_sorted = sorted(value_index, key=lambda s: s[0])
    return sorted(s[1] for s in value_index_sorted[0:k])


def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc",
                          periodic=False, T=1.0):
    """ We construct the restriction matrix between two 1d grids, using
    lagrange interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the restriction
    :return: a restriction matrix
    """
    M = np.zeros((coarse_grid.size, fine_grid.size))
    n_g = coarse_grid.size

    for i, p in zip(range(n_g), coarse_grid):
        if periodic:
            nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = [intpl.lagrange(cont_arr, np.roll(circulating_one, l))
                       for l in range(k)]
            # BUG FIX (Python 3): np.asarray over a lazy map object yields a
            # 0-d object array; materialise the evaluations explicitly.
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
        else:
            nn = next_neighbors(p, fine_grid, k)
            # construct the lagrange polynomials for the k neighbors
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = [intpl.lagrange(fine_grid[nn], np.roll(circulating_one, l))
                       for l in range(k)]
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)


def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc",
                            periodic=False, T=1.0):
    """ We construct the interpolation matrix between two 1d grids, using
    lagrange interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the interpolation
    :return: a interpolation matrix
    """
    M = np.zeros((fine_grid.size, coarse_grid.size))
    n_f = fine_grid.size

    for i, p in zip(range(n_f), fine_grid):
        if periodic:
            nn, cont_arr = next_neighbors_periodic(p, coarse_grid, k, T)
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = [intpl.lagrange(cont_arr, np.roll(circulating_one, l))
                       for l in range(k)]
            # BUG FIX (Python 3): see restriction_matrix_1d.
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
        else:
            nn = next_neighbors(p, coarse_grid, k)
            # construct the lagrange polynomials for the k neighbors
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = [intpl.lagrange(coarse_grid[nn], np.roll(circulating_one, l))
                       for l in range(k)]
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)


def kron_on_list(matrix_list):
    """ Recursively Kronecker-multiply a list of sparse matrices.

    :param matrix_list: a list of sparse matrices
    :return: a matrix
    """
    if len(matrix_list) == 2:
        return sprs.kron(matrix_list[0], matrix_list[1])
    elif len(matrix_list) == 1:
        return matrix_list[0]
    else:
        return sprs.kron(matrix_list[0], kron_on_list(matrix_list[1:]))


def interpolate_to_t_end(nodes_on_unit, values):
    """ Assume a GaussLegendre nodes, we are interested in the value at the
    end of the interval, but we now only the values in the interior of the
    interval. We compute the value by legendre interpolation.

    :param nodes_on_unit: nodes transformed to the unit interval
    :param values: values on those nodes
    :return: interpolation to the end of the interval
    """
    n = nodes_on_unit.shape[0]
    circulating_one = np.asarray([1.0] + [0.0] * (n - 1))
    lag_pol = []
    result = np.zeros(values[0].shape)
    for i in range(n):
        lag_pol.append(intpl.lagrange(nodes_on_unit, np.roll(circulating_one, i)))
        result += values[i] * lag_pol[-1](1.0)
    return result
[ 2, 19617, 28, 40477, 12, 23, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 3849, 16104, 378, 355, 493, 489, 198, 11748, 629, 541, 88, 13, 82, 29572, 355, 599, 3808, 628, 198, 4299, 284, 62, 82, 29572, 7, 35, ...
2.246447
3,307
"""rbtree_graphviz.py - create a graphviz representation of a LLRBT. The purpose of this module is to visually show how the shape of a LLRBT changes when keys are inserted in it. For every insert, sub graph (tree) is added to the main graph. `initialization_list` holds the values that are inserted in the tree. This list can be changed for a list of anything that can be compared with > == <. For example, with `initialization_list = range(50)` keys from 0 to 49 will be inserted in the tree. Consider that for every key, a graph is going to be generated. """ from graphviz import Digraph from trees.rbtree import LLRBT, is_red NODE_SHAPE = "circle" NONE_NODE_SHAPE = "point" TITLE_SHAPE = "box" RED_COLOR = "#b8000f" DEFAULT_GRAPH_NODE_ATTR = { "shape": NODE_SHAPE, "color": "black", "style": "filled", "fillcolor": "#cfd3d6", } RED_NODE_ATTR = { "fontcolor": "white", "fillcolor": RED_COLOR } DEFAULT_GRAPH_EDGE_ATTR = { "color": "black", "arrowhead": "vee", "style": "solid", } def add_node(graph, node): """Add `node` to `graph`. `node` is a tuple with the following shape: (node_id, {<node attributes>}, {<graph's node attributes>}) ^ ^ ^ string see graphviz documentation""" node_id, node_attr, graph_node_attr = node graph.node(node_id, **node_attr, **graph_node_attr) return graph def add_edge(graph, edge): """Add edge from `edge[0]` to `edge[1]` to `graph`. 
`edge` is a tuple with the following shape: (source_node_id, destiny_node_id, {<graph's edge attributes>}) ^ ^ ^ string string see graphviz documentation""" source_node_id, destiny_node_id, graph_edge_attr = edge graph.edge(source_node_id, destiny_node_id, **graph_edge_attr) return graph if __name__ == "__main__": initialization_list = ["Z", "W", "F", "D", "S", "E", "A", "R", "C", "H", "X", "M", "P", "L"] # initialization_list = ["A", "B", "C", "D"] tree = LLRBT() # graph = generate_graph(tree, initialization_list) graph = generate_graph_per_insert(tree, initialization_list) print(graph.source) graph.render("trees/rbtree.gv", view=True)
[ 37811, 81, 18347, 631, 62, 34960, 85, 528, 13, 9078, 532, 2251, 257, 4823, 85, 528, 10552, 286, 257, 27140, 49, 19313, 13, 198, 198, 464, 4007, 286, 428, 8265, 318, 284, 22632, 905, 703, 262, 5485, 286, 257, 27140, 49, 19313, 198, ...
2.435432
937
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 201, 198, 201, 198 ]
3.083333
12
from pathlib import Path from typing import Generator from airflow.hooks.base_hook import BaseHook from azure.storage.filedatalake import FileSystemClient from azure.storage.filedatalake._generated.models._models_py3 import ( StorageErrorException, )
[ 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 35986, 198, 6738, 45771, 13, 25480, 82, 13, 8692, 62, 25480, 1330, 7308, 39, 566, 198, 6738, 35560, 495, 13, 35350, 13, 69, 3902, 10254, 539, 1330, 9220, 11964, 11792, 198, 6738, ...
3.555556
72
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-07-14 17:10 from __future__ import unicode_literals from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 18, 319, 2177, 12, 2998, 12, 1415, 1596, 25, 940, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.736842
57
from time import sleep import pyqtgraph as pg import threading from graphtiny.api import IChart, IDataStreamWindow from graphtiny.domain import DataStreamWindow, Chart
[ 6738, 640, 1330, 3993, 198, 11748, 12972, 80, 25297, 1470, 355, 23241, 198, 11748, 4704, 278, 198, 6738, 4823, 44152, 13, 15042, 1330, 314, 45488, 11, 4522, 1045, 12124, 27703, 198, 6738, 4823, 44152, 13, 27830, 1330, 6060, 12124, 27703, ...
3.822222
45
from typing import Generator, Dict, Any

from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from pydantic import ValidationError
from sqlalchemy.orm import Session
from tlbx import json, pp
from zillion.configs import load_warehouse_config, zillion_config
from zillion.model import Warehouses
from zillion.warehouse import Warehouse

from app import app
from app import crud, models, schemas
from app.core import security
from app.core.config import settings
from app.db.session import SessionLocal

# OAuth2 scheme pointing at the login endpoint that issues access tokens.
reusable_oauth2 = OAuth2PasswordBearer(
    tokenUrl=f"{settings.API_V1_STR}/login/access-token"
)

# Process-wide cache of loaded warehouses, keyed by warehouse id.
warehouses = {}


def get_db() -> Generator:
    """Yield a database session and guarantee it is closed afterwards.

    BUG FIX: `get_current_user` below depends on `get_db`, but no such
    dependency was defined or imported anywhere in this module (note the
    otherwise-unused `Generator` import), so resolving the dependency raised
    NameError. This restores the standard FastAPI session-per-request
    dependency — confirm it was not intended to be imported from elsewhere.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


def get_warehouses() -> Dict[str, Any]:
    """NOTE: this assumes Zillion Web DB is same as Zillion DB"""
    global warehouses
    if warehouses:
        # TODO: cache control?
        return warehouses

    print("Building warehouses...")
    db = SessionLocal()
    try:
        result = db.query(Warehouses).all()
        for row in result:
            warehouses[row.id] = Warehouse.load(row.id)
        pp(warehouses)
        return warehouses
    finally:
        db.close()


def get_current_user(
    db: Session = Depends(get_db), token: str = Depends(reusable_oauth2)
) -> models.User:
    """Decode the bearer token and return the matching user.

    Raises 403 when the token is invalid and 404 when no user matches.
    """
    try:
        payload = jwt.decode(
            token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
        )
        token_data = schemas.TokenPayload(**payload)
    except (jwt.JWTError, ValidationError):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Could not validate credentials",
        )
    user = crud.user.get(db, id=token_data.sub)
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    return user


def get_current_active_user(
    current_user: models.User = Depends(get_current_user),
) -> models.User:
    """Require that the authenticated user is active (400 otherwise)."""
    if not crud.user.is_active(current_user):
        raise HTTPException(status_code=400, detail="Inactive user")
    return current_user


def get_current_active_superuser(
    current_user: models.User = Depends(get_current_user),
) -> models.User:
    """Require that the authenticated user is a superuser (400 otherwise)."""
    if not crud.user.is_superuser(current_user):
        raise HTTPException(
            status_code=400,
            detail="The user doesn't have enough privileges"
        )
    return current_user
[ 6738, 19720, 1330, 35986, 11, 360, 713, 11, 4377, 198, 198, 6738, 3049, 15042, 1330, 2129, 2412, 11, 14626, 16922, 11, 3722, 198, 6738, 3049, 15042, 13, 12961, 1330, 440, 30515, 17, 35215, 3856, 11258, 198, 6738, 474, 577, 1330, 474, ...
2.665533
882
from BufferStockModel import BufferStockModelClass

# Grid-size overrides applied on top of the model defaults.
updpar = dict()
for grid_key in ("Np", "Nm", "Na"):
    updpar[grid_key] = 1500

# Build the baseline model with the endogenous-grid solution method ("egm")
# and run its self-test.
model = BufferStockModelClass(name="baseline", solmethod="egm", **updpar)
model.test()
[ 6738, 47017, 26207, 17633, 1330, 47017, 26207, 17633, 9487, 198, 929, 67, 1845, 796, 8633, 3419, 198, 929, 67, 1845, 14692, 45, 79, 8973, 796, 20007, 198, 929, 67, 1845, 14692, 45, 76, 8973, 796, 20007, 198, 929, 67, 1845, 14692, 2670...
2.826667
75
#! /usr/bin/env python3
#
## Copyright (C) 2015-2018 Rolf Neugebauer. All rights reserved.
## Copyright (C) 2015 Netronome Systems, Inc. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

"""A simple script to generate data for PCIe and ethernet bandwidth estimates"""

import sys
from optparse import OptionParser

from model import pcie, eth, mem_bw

# pylint: disable=too-many-locals

OUT_FILE = "pcie_bw.dat"


def main():
    """Parse command line options and write per-payload-size PCIe/Ethernet
    bandwidth estimates to a gnuplot-style data file."""
    usage = """usage: %prog [options]"""

    parser = OptionParser(usage)
    parser.add_option('--mps', dest='MPS', type="int", action='store',
                      default=256,
                      help='Set the maximum payload size of the link')
    parser.add_option('--mrrs', dest='MRRS', type="int", action='store',
                      default=512,
                      help='Set the maximum read request size of the link')
    parser.add_option('--rcb', dest='RCB', type="int", action='store',
                      default=64,
                      help='Set the read completion boundary of the link')
    parser.add_option('--lanes', dest='lanes', type="string", action='store',
                      default='x8',
                      help='Set num lanes (x2, x4, x8, x16, or x32)')
    parser.add_option('--gen', dest='gen', type="string", action='store',
                      default='gen3',
                      help='Set PCIe version (gen1, gen2, gen3, gen4, or gen5)')
    parser.add_option('--addr', dest='addr', type="int", action='store',
                      default=64,
                      help='Set the number of address bits (32 or 64)')
    parser.add_option('--ecrc', dest='ecrc', type="int", action='store',
                      default=0,
                      help='Use ECRC (0 or 1)')
    parser.add_option('-o', '--outfile', dest='FILE',
                      default=OUT_FILE, action='store',
                      help='File where to write the data to')
    (options, _) = parser.parse_args()

    pciecfg = pcie.Cfg(version=options.gen, lanes=options.lanes,
                       addr=options.addr, ecrc=options.ecrc,
                       mps=options.MPS, mrrs=options.MRRS, rcb=options.RCB)
    print("PCIe Config:")
    pciecfg.pp()

    ethcfg = eth.Cfg('40GigE')

    tlp_bw = pciecfg.TLP_bw
    bw_spec = pcie.BW_Spec(tlp_bw, tlp_bw, pcie.BW_Spec.BW_RAW)

    # BUG FIX: the output file was previously opened without a context
    # manager, so the handle leaked if anything inside the loop raised.
    with open(options.FILE, "w") as dat:
        dat.write("\"Payload(Bytes)\" "
                  "\"PCIe Write BW\" "
                  "\"PCIe Write Trans/s\" "
                  "\"PCIe Read BW\" "
                  "\"PCIe Read Trans/s\" "
                  "\"PCIe Read/Write BW\" "
                  "\"PCIe Read/Write Trans/s\" "
                  "\"40G Ethernet BW\" "
                  "\"40G Ethernet PPS\" "
                  "\"40G Ethernet Frame time (ns)\" "
                  "\n")

        for size in range(1, 1500 + 1):
            wr_bw = mem_bw.write(pciecfg, bw_spec, size)
            rd_bw = mem_bw.read(pciecfg, bw_spec, size)
            rdwr_bw = mem_bw.read_write(pciecfg, bw_spec, size)

            # Transactions/s = effective Gbit/s converted to bytes/s divided
            # by the transfer size.
            wr_trans = (wr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
            rd_trans = (rd_bw.rx_eff * 1000 * 1000 * 1000 / 8) / size
            rdwr_trans = (rdwr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size

            if size >= 64:
                # Ethernet columns only apply from the minimum frame size up.
                eth_bw = ethcfg.bps_ex(size) / (1000 * 1000 * 1000.0)
                eth_pps = ethcfg.pps_ex(size)
                eth_lat = 1.0 * 1000 * 1000 * 1000 / eth_pps
                dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f %.2f %d %.2f\n" %
                          (size, wr_bw.tx_eff, wr_trans,
                           rd_bw.rx_eff, rd_trans,
                           rdwr_bw.tx_eff, rdwr_trans,
                           eth_bw, eth_pps, eth_lat))
            else:
                dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f\n" %
                          (size, wr_bw.tx_eff, wr_trans,
                           rd_bw.rx_eff, rd_trans,
                           rdwr_bw.tx_eff, rdwr_trans))


if __name__ == '__main__':
    sys.exit(main())
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2235, 15069, 357, 34, 8, 1853, 12, 7908, 371, 4024, 3169, 2217, 65, 16261, 13, 220, 1439, 2489, 10395, 13, 198, 2235, 15069, 357, 34, 8, 1853, 3433, 1313, 462, 11998,...
1.949874
2,374
import subprocess
[ 11748, 850, 14681, 198 ]
4.5
4
# Copyright (c) 2021 kamyu. All rights reserved. # # Google Code Jam 2014 Round 2 - Problem A. Data Packing # https://codingcompetitions.withgoogle.com/codejam/round/0000000000432fed/0000000000432b8d # # Time: O(NlogN) # Space: O(1) # for case in xrange(input()): print 'Case #%d: %s' % (case+1, data_packing())
[ 2, 15069, 357, 66, 8, 33448, 479, 14814, 84, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 3012, 6127, 9986, 1946, 10485, 362, 532, 20647, 317, 13, 6060, 350, 5430, 198, 2, 3740, 1378, 66, 7656, 5589, 316, 1756, 13, 4480, 13297, 13, ...
2.628099
121
""" Description: Class for training CNNs using a nested cross-validation method. Train on the inner_fold to obtain optimized hyperparameters. Train outer_fold to obtain classification performance. """ from braindecode.datautil.iterators import BalancedBatchSizeIterator from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or from braindecode.torch_ext.util import set_random_seeds, np_to_var, var_to_np from braindecode.datautil.signal_target import SignalAndTarget from braindecode.torch_ext.functions import square, safe_log import torch as th from sklearn.model_selection import train_test_split from BiModNeuroCNN.training.training_utils import current_acc, current_loss from BiModNeuroCNN.data_loader.data_utils import smote_augmentation, multi_SignalAndTarget from BiModNeuroCNN.results.results import Results as res from torch.nn.functional import nll_loss, cross_entropy from BiModNeuroCNN.training.bimodal_training import Experiment import numpy as np import itertools as it import torch from torch import optim import logging from ast import literal_eval from BiModNeuroCNN.results.metrics import cross_entropy import warnings warnings.filterwarnings("ignore", category=UserWarning) log = logging.getLogger(__name__) torch.backends.cudnn.deterministic = True
[ 37811, 201, 198, 11828, 25, 5016, 329, 3047, 8100, 82, 1262, 257, 28376, 3272, 12, 12102, 341, 2446, 13, 16835, 319, 262, 8434, 62, 11379, 284, 7330, 201, 198, 40085, 1143, 8718, 17143, 7307, 13, 16835, 12076, 62, 11379, 284, 7330, 17...
3.336709
395
import datetime from collections import Counter from functools import wraps from dateparser import parse as parse_date from calmlib import get_current_date, get_current_datetime, to_date, trim from .base import Task from .str_database import STRDatabase from .telegram_bot import TelegramBot, command, catch_errors DEFAULT_PERIOD = 4 TASK_PER_DAY_LIMIT = 3 def run(self): with self.db: super().run() def actualize_tasks(self): if self._last_actualize_date < get_current_date(): self._actualize_tasks() self._last_actualize_date = get_current_date() def _actualize_tasks(self): """ Go over all tasks and update date/reschedule """ for user in self.db.user_names: for task in self.db.get_users_tasks(user): today = get_current_datetime() while to_date(task.date) < to_date(today): if task.reschedule: # if task is past due and to be rescheduled - reschedule it on today task.date = today else: task.date += datetime.timedelta(days=task.period) self.db.update_task(task)
[ 11748, 4818, 8079, 198, 6738, 17268, 1330, 15034, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 198, 6738, 3128, 48610, 1330, 21136, 355, 21136, 62, 4475, 198, 198, 6738, 2386, 4029, 571, 1330, 651, 62, 14421, 62, 4475, 11, 651, 62, ...
2.169284
573
# lower , title , upper operations on string x = "spider" y = "MAN" v=x.upper() # all letters will become uppercase w=y.lower() # all letters will become lowercase z=y.title() # only first letter will become upper and rest of all lowercase print(v,w,z)
[ 2, 2793, 837, 3670, 837, 6727, 4560, 319, 4731, 198, 198, 87, 796, 366, 2777, 1304, 1, 198, 88, 796, 366, 10725, 1, 198, 198, 85, 28, 87, 13, 45828, 3419, 1303, 477, 7475, 481, 1716, 334, 39921, 589, 198, 86, 28, 88, 13, 21037, ...
3.035294
85
"""Setup script for sfcpy""" import os.path from setuptools import setup # The directory containing this file HERE = os.path.abspath(os.path.dirname(__file__)) # The text of the README file with open(os.path.join(HERE, "README.md"), encoding='utf-8') as fid: README = fid.read() # This call to setup() does all the work setup( name="sfcpy", version="1.2.3", description="Space-Filling Curve library for image-processing tasks", long_description=README, long_description_content_type="text/markdown", url="https://github.com/adadesions/sfcpy", author="adadesions", author_email="adadesions@gmail.com", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", ], packages=["sfcpy"], include_package_data=True, tests_require=['pytest'], install_requires=[ "numpy", "matplotlib", "Pillow" ], entry_points={"console_scripts": ["sfcpy=sfcpy.__main__:main"]}, )
[ 37811, 40786, 4226, 329, 264, 16072, 9078, 37811, 198, 198, 11748, 28686, 13, 6978, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 2, 383, 8619, 7268, 428, 2393, 198, 39, 9338, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7, 418, ...
2.596529
461
#!/usr/bin/env python3 import csv import os from collections import namedtuple import string from nameparser import HumanName if __name__ == '__main__': issues_dict = csv_to_dict('3rdStageSourceCSVs/Interviews.csv') make_csv_data(issues_dict)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 269, 21370, 198, 11748, 28686, 198, 6738, 17268, 1330, 3706, 83, 29291, 198, 11748, 4731, 198, 198, 6738, 1438, 48610, 1330, 5524, 5376, 628, 628, 628, 628, 628, 628, 6...
2.77551
98
# -*- coding: utf-8 -*- from typing import Optional, Any import redis from pip_services3_commons.config import IConfigurable, ConfigParams from pip_services3_commons.errors import ConfigException, InvalidStateException from pip_services3_commons.refer import IReferenceable, IReferences from pip_services3_commons.run import IOpenable from pip_services3_components.auth import CredentialResolver from pip_services3_components.cache import ICache from pip_services3_components.connect import ConnectionResolver
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 19720, 1330, 32233, 11, 4377, 198, 198, 11748, 2266, 271, 198, 6738, 7347, 62, 30416, 18, 62, 9503, 684, 13, 11250, 1330, 314, 16934, 11970, 11, 17056, 10044, 4105...
3.58042
143
# coding: utf-8 from __future__ import absolute_import from google.appengine.ext import ndb from api import fields import model import util FIELDS = { 'auto_now': fields.Boolean, 'auto_now_add': fields.Boolean, 'autofocus': fields.Boolean, 'choices': fields.String, 'default': fields.String, 'description': fields.String, 'email_filter': fields.Boolean, 'field_property': fields.String, 'forms_property': fields.String, 'kind': fields.String, 'name': fields.String, 'ndb_property': fields.String, 'placeholder': fields.String, 'rank': fields.Integer, 'readonly': fields.Boolean, 'repeated': fields.Boolean, 'required': fields.Boolean, 'sort_filter': fields.Boolean, 'strip_filter': fields.Boolean, 'verbose_name': fields.String, 'wtf_property': fields.String, } FIELDS.update(model.Base.FIELDS)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 23645, 13, 1324, 18392, 13, 2302, 1330, 299, 9945, 198, 198, 6738, 40391, 1330, 7032, 198, 11748, 2746, 198, 11748, 7736, 628, ...
2.434896
384
import pytest from wired import ServiceContainer
[ 11748, 12972, 9288, 198, 6738, 28217, 1330, 4809, 29869, 628, 628 ]
4.727273
11
# -*- coding: utf-8 -*- """ Created on Mon May 17 21:24:53 2021 @author: Akshay Prakash """ import pandas as pd import numpy as np import matplotlib.pyplot as plt table = pd.read_csv(r'\1617table.csv') table.head() plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = 'skyblue') plt.plot(table['Pts'], np.arange(1,21), "o") plt.yticks(np.arange(1,21), table['team']) plt.show() teamColours = ['#034694','#001C58','#5CBFEB','#D00027', '#EF0107','#DA020E','#274488','#ED1A3B', '#000000','#091453','#60223B','#0053A0', '#E03A3E','#1B458F','#000000','#53162f', '#FBEE23','#EF6610','#C92520','#BA1F1A'] plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = teamColours) plt.plot(table['Pts'], np.arange(1,21), "o") plt.yticks(np.arange(1,21), table['team']) plt.xlabel('Points') plt.ylabel('Teams') plt.title("Premier league 16/17")
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 2892, 1737, 1596, 2310, 25, 1731, 25, 4310, 33448, 201, 198, 201, 198, 31, 9800, 25, 317, 50133, 323, 350, 17716, 1077, 201, 198, 378...
1.912176
501
#!/usr/bin/env python import sys import lxml.etree if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 198, 11748, 300, 19875, 13, 316, 631, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
2.384615
39
# -*- coding: utf-8 -*- DESC = "partners-2018-03-21" INFO = { "AgentPayDeals": { "params": [ { "name": "OwnerUin", "desc": "uin" }, { "name": "AgentPay", "desc": "10" }, { "name": "DealNames", "desc": "" } ], "desc": "/" }, "DescribeAgentBills": { "params": [ { "name": "SettleMonth", "desc": "2018-02" }, { "name": "ClientUin", "desc": "ID" }, { "name": "PayMode", "desc": "prepay/postpay" }, { "name": "OrderId", "desc": "" }, { "name": "ClientRemark", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "AgentTransferMoney": { "params": [ { "name": "ClientUin", "desc": "ID" }, { "name": "Amount", "desc": "" } ], "desc": "" }, "DescribeRebateInfos": { "params": [ { "name": "RebateMonth", "desc": "2018-02" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "ModifyClientRemark": { "params": [ { "name": "ClientRemark", "desc": "" }, { "name": "ClientUin", "desc": "ID" } ], "desc": "" }, "DescribeAgentClients": { "params": [ { "name": "ClientUin", "desc": "ID" }, { "name": "ClientName", "desc": "" }, { "name": "ClientFlag", "desc": "a/b" }, { "name": "OrderDirection", "desc": "ASC/DESC " }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "DescribeClientBalance": { "params": [ { "name": "ClientUin", "desc": "()ID" } ], "desc": "" }, "DescribeAgentAuditedClients": { "params": [ { "name": "ClientUin", "desc": "ID" }, { "name": "ClientName", "desc": "" }, { "name": "ClientFlag", "desc": "a/b" }, { "name": "OrderDirection", "desc": "ASC/DESC " }, { "name": "ClientUins", "desc": "ID" }, { "name": "HasOverdueBill", "desc": "01" }, { "name": "ClientRemark", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" }, { "name": "ClientType", "desc": "new()/assign()/old()/" }, { "name": "ProjectType", "desc": "self()/platform()/repeat( )/" } ], "desc": "" }, "AuditApplyClient": { "params": [ { "name": "ClientUin", "desc": "ID" }, { "name": 
"AuditResult", "desc": "accept/reject" }, { "name": "Note", "desc": "B" } ], "desc": "" } }
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 30910, 34, 796, 366, 3911, 2741, 12, 7908, 12, 3070, 12, 2481, 1, 198, 10778, 796, 1391, 198, 220, 366, 36772, 19197, 5005, 874, 1298, 1391, 198, 220, 220, 220, 366, ...
1.65493
1,988
""" The default configuration, from which all others should be derived """ import os, plugins, sandbox, console, rundependent, comparetest, batch, performance, subprocess, operator, logging from copy import copy from string import Template from fnmatch import fnmatch from threading import Thread # For back-compatibility from runtest import RunTest, Running, Killed from scripts import *
[ 198, 37811, 383, 4277, 8398, 11, 422, 543, 477, 1854, 815, 307, 10944, 37227, 198, 198, 11748, 28686, 11, 20652, 11, 35204, 11, 8624, 11, 374, 917, 8682, 11, 552, 8984, 395, 11, 15458, 11, 2854, 11, 850, 14681, 11, 10088, 11, 18931,...
4.268817
93
# This problem was recently asked by LinkedIn: # Given a non-empty array where each element represents a digit of a non-negative integer, add one to the integer. # The most significant digit is at the front of the array and each element in the array contains only one digit. # Furthermore, the integer does not have leading zeros, except in the case of the number '0'. num = [2, 9, 9] print(Solution().plusOne(num)) # [3, 0, 0]
[ 2, 770, 1917, 373, 2904, 1965, 416, 27133, 25, 198, 198, 2, 11259, 257, 1729, 12, 28920, 7177, 810, 1123, 5002, 6870, 257, 16839, 286, 257, 1729, 12, 31591, 18253, 11, 751, 530, 284, 262, 18253, 13, 198, 2, 383, 749, 2383, 16839, ...
3.724138
116
import subprocess import sys import os
[ 11748, 850, 14681, 198, 198, 11748, 25064, 198, 11748, 28686, 628, 628, 628 ]
3.461538
13
from pathlib import Path import altair as alt import folium import matplotlib.pyplot as plt import numpy as np import pandas as pd import plotly.graph_objects as p_go import pytest from bokeh.layouts import column from bokeh.models import ColumnDataSource from bokeh.plotting import figure from pandas.io.formats.style import Styler from datapane.client.api.files import save data = pd.DataFrame({"x": np.random.randn(20), "y": np.random.randn(20)}) # NOTE - test disabled until pip release of altair_pandas - however should work if altair test passes # NOTE - test disabled updated pip release of pdvega that tracks git upstream - however should work if altair test passes def test_save_table(tmp_path: Path): # tests saving a DF directly to a html file save(data) # save styled table save(Styler(data))
[ 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 5988, 958, 355, 5988, 198, 11748, 5955, 1505, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, ...
3.158491
265
NEWS_API_KEY= '138b22df68394ecbaa9c9af0d0377adb' SECRET_KEY= 'f9bf78b9a18ce6d46a0cd2b0b86df9da'
[ 49597, 62, 17614, 62, 20373, 28, 705, 20107, 65, 1828, 7568, 3104, 34626, 721, 7012, 64, 24, 66, 24, 1878, 15, 67, 15, 26514, 324, 65, 6, 198, 23683, 26087, 62, 20373, 28, 705, 69, 24, 19881, 3695, 65, 24, 64, 1507, 344, 21, 67,...
1.637931
58
import os import logging import sys import structlog from structlog.stdlib import LoggerFactory, add_log_level _configured = False def configure(force = False): """ Configures logging & structlog modules Keyword Arguments: force: Force to reconfigure logging. """ global _configured if _configured and not force: return # Check whether debug flag is set debug = os.environ.get('DEBUG_MODE', False) # Set appropriate log level if debug: log_level = logging.DEBUG else: log_level = logging.INFO # Set logging config logging.basicConfig( level = log_level, format = "%(message)s", ) # Configure structlog structlog.configure( logger_factory = LoggerFactory(), processors = [ add_log_level, # Add timestamp structlog.processors.TimeStamper('iso'), # Add stack information structlog.processors.StackInfoRenderer(), # Set exception field using exec info structlog.processors.format_exc_info, # Render event_dict as JSON structlog.processors.JSONRenderer() ] ) _configured = True def get_logger(**kwargs): """ Get the structlog logger """ # Configure logging modules configure() # Return structlog return structlog.get_logger(**kwargs)
[ 11748, 28686, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 11748, 2878, 6404, 198, 6738, 2878, 6404, 13, 19282, 8019, 1330, 5972, 1362, 22810, 11, 751, 62, 6404, 62, 5715, 198, 198, 62, 11250, 1522, 796, 10352, 628, 198, 4299, 17425,...
2.460069
576
# setup.py # Copyright (c) 2015-2017 Arkadiusz Bokowy # # This file is a part of pyexec. # # This project is licensed under the terms of the MIT license. from setuptools import setup import pyexec with open("README.rst") as f: long_description = f.read() setup( name="pyexec", version=pyexec.__version__, author="Arkadiusz Bokowy", author_email="arkadiusz.bokowy@gmail.com", url="https://github.com/Arkq/pyexec", description="Signal-triggered process reloader", long_description=long_description, license="MIT", py_modules=["pyexec"], classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries", "Topic :: Utilities", ], )
[ 2, 9058, 13, 9078, 198, 2, 15069, 357, 66, 8, 1853, 12, 5539, 9128, 324, 3754, 89, 47390, 322, 88, 198, 2, 198, 2, 770, 2393, 318, 257, 636, 286, 12972, 18558, 13, 198, 2, 198, 2, 770, 1628, 318, 11971, 739, 262, 2846, 286, 26...
2.668693
329
'''Functional tests for the equational constraint.''' import cytest # from ./lib; must be first
[ 7061, 6, 22203, 282, 5254, 329, 262, 1602, 864, 32315, 2637, 7061, 198, 11748, 3075, 9288, 1303, 422, 24457, 8019, 26, 1276, 307, 717, 628 ]
3.88
25
# # (c) 2022 Sven Lieber # KBR Brussels # #import xml.etree.ElementTree as ET import lxml.etree as ET import os import json import itertools import enchant import hashlib import csv from optparse import OptionParser import utils import stdnum NS_MARCSLIM = 'http://www.loc.gov/MARC21/slim' ALL_NS = {'marc': NS_MARCSLIM} # ----------------------------------------------------------------------------- def addAuthorityFieldsToCSV(elem, writer, natWriter, stats): """This function extracts authority relevant data from the given XML element 'elem' and writes it to the given CSV file writer.""" # # extract relevant data from the current record # authorityID = utils.getElementValue(elem.find('./marc:controlfield[@tag="001"]', ALL_NS)) namePerson = utils.getElementValue(elem.find('./marc:datafield[@tag="100"]/marc:subfield[@code="a"]', ALL_NS)) nameOrg = utils.getElementValue(elem.find('./marc:datafield[@tag="110"]/marc:subfield[@code="a"]', ALL_NS)) nationalities = utils.getElementValue(elem.findall('./marc:datafield[@tag="370"]/marc:subfield[@code="c"]', ALL_NS)) gender = utils.getElementValue(elem.find('./marc:datafield[@tag="375"]/marc:subfield[@code="a"]', ALL_NS)) birthDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="f"]', ALL_NS)) deathDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="g"]', ALL_NS)) isniRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and (text()="isni" or text()="ISNI")]/../marc:subfield[@code="a"]', namespaces=ALL_NS)) viafRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and text()="viaf"]/../marc:subfield[@code="a"]', namespaces=ALL_NS)) countryCode = utils.getElementValue(elem.find('./marc:datafield[@tag="043"]/marc:subfield[@code="c"]', ALL_NS)) (familyName, givenName) = utils.extractNameComponents(namePerson) birthDate = '' deathDate = '' datePatterns = ['%Y', '(%Y)', 
'[%Y]', '%Y-%m-%d', '%d/%m/%Y', '%Y%m%d'] if birthDateRaw: birthDate = utils.parseDate(birthDateRaw, datePatterns) if deathDateRaw: deathDate = utils.parseDate(deathDateRaw, datePatterns) name = f'{namePerson} {nameOrg}'.strip() if nationalities: nationalityURIString = utils.createURIString(nationalities, ';', 'http://id.loc.gov/vocabulary/countries/') for n in nationalityURIString.split(';'): natWriter.writerow({'authorityID': authorityID, 'nationality': n}) newRecord = { 'authorityID': authorityID, 'name': name, 'family_name': familyName, 'given_name': givenName, 'gender': gender, 'birth_date': birthDate, 'death_date': deathDate, 'isni_id': utils.extractIdentifier(authorityID, f'ISNI {isniRaw}', pattern='ISNI'), 'viaf_id': utils.extractIdentifier(authorityID, f'VIAF {viafRaw}', pattern='VIAF'), 'country_code': countryCode } writer.writerow(newRecord) # ----------------------------------------------------------------------------- def main(): """This script reads an XML file in MARC slim format and extracts several fields to create a CSV file.""" parser = OptionParser(usage="usage: %prog [options]") parser.add_option('-i', '--input-file', action='store', help='The input file containing MARC SLIM XML records') parser.add_option('-o', '--output-file', action='store', help='The output CSV file containing selected MARC fields') parser.add_option('-n', '--nationality-csv', action='store', help='The output CSV file containing the IDs of authorities and their nationality') (options, args) = parser.parse_args() # # Check if we got all required arguments # if( (not options.input_file) or (not options.output_file) or (not options.nationality_csv) ): parser.print_help() exit(1) # # Instead of loading everything to main memory, stream over the XML using iterparse # with open(options.output_file, 'w') as outFile, \ open(options.nationality_csv, 'w') as natFile: stats = {} outputFields = ['authorityID', 'name', 'family_name', 'given_name', 'gender', 'birth_date', 'death_date', 
'isni_id', 'viaf_id', 'country_code'] outputWriter = csv.DictWriter(outFile, fieldnames=outputFields, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) outputWriter.writeheader() nationalityWriter = csv.DictWriter(natFile, fieldnames=['authorityID', 'nationality'], delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) nationalityWriter.writeheader() for event, elem in ET.iterparse(options.input_file, events=('start', 'end')): # The parser finished reading one authority record, get information and then discard the record if event == 'end' and elem.tag == ET.QName(NS_MARCSLIM, 'record'): addAuthorityFieldsToCSV(elem, outputWriter, nationalityWriter, stats) main()
[ 2, 198, 2, 357, 66, 8, 33160, 44611, 12060, 527, 198, 2, 509, 11473, 16445, 198, 2, 198, 2, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 198, 11748, 300, 19875, 13, 316, 631, 355, 12152, 198, 11748, 28686, 198, 11748, ...
2.819804
1,737
#!/bin/python import time import random from multiprocessing import Pool, Process, Pipe, TimeoutError from multiprocessing.pool import ThreadPool
[ 2, 48443, 8800, 14, 29412, 198, 11748, 640, 198, 11748, 4738, 198, 6738, 18540, 305, 919, 278, 1330, 19850, 11, 10854, 11, 36039, 11, 3862, 448, 12331, 198, 6738, 18540, 305, 919, 278, 13, 7742, 1330, 14122, 27201, 628 ]
3.769231
39
from canvas.cache_patterns import CachedCall from drawquest import knobs from drawquest.apps.drawquest_auth.models import User from drawquest.apps.drawquest_auth.details_models import UserDetails from drawquest.pagination import FakePaginator def _paginate(redis_obj, offset, request=None): ''' items should already start at the proper offset. ''' if offset == 'top': items = redis_obj.zrevrange(0, knobs.FOLLOWERS_PER_PAGE, withscores=True) else: items = redis_obj.zrevrangebyscore('({}'.format(offset), '-inf', start=0, num=knobs.FOLLOWERS_PER_PAGE, withscores=True) try: next_offset = items[-1][1] next_offset = next_offset.__repr__() except IndexError: next_offset = None items = [item for item, ts in items] pagination = FakePaginator(items, offset=offset, next_offset=next_offset) return items, pagination def followers(user, viewer=None, offset='top', direction='next', request=None): """ The users who are following `user`. """ if direction != 'next': raise ValueError("Follwers only supports 'next' - scrolling in one direction.") if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)): user_ids = user.redis.new_followers.zrevrange(0, -1) pagination = None else: user_ids, pagination = _paginate(user.redis.new_followers, offset, request=request) users = UserDetails.from_ids(user_ids) if request is None or request.app_version_tuple < (3, 0): users = _sorted(users) return _for_viewer(users, viewer=viewer), pagination def following(user, viewer=None, offset='top', direction='next', request=None): """ The users that `user` is following. 
""" if direction != 'next': raise ValueError("Following only supports 'next' - scrolling in one direction.") if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)): user_ids = user.redis.new_following.zrange(0, -1) pagination = None else: user_ids, pagination = _paginate(user.redis.new_following, offset, request=request) users = UserDetails.from_ids(user_ids) if request is None or request.app_version_tuple < (3, 0): users = _sorted(users) return _for_viewer(users, viewer=viewer), pagination
[ 6738, 21978, 13, 23870, 62, 33279, 82, 1330, 327, 2317, 14134, 198, 6738, 3197, 6138, 1330, 638, 8158, 198, 6738, 3197, 6138, 13, 18211, 13, 19334, 6138, 62, 18439, 13, 27530, 1330, 11787, 198, 6738, 3197, 6138, 13, 18211, 13, 19334, ...
2.4666
1,003
import numpy as np
[ 11748, 299, 32152, 355, 45941, 628 ]
3.333333
6
from ._src.multiply import multiply_outer as outer # noqa: F401
[ 6738, 47540, 10677, 13, 16680, 541, 306, 1330, 29162, 62, 39605, 355, 12076, 220, 1303, 645, 20402, 25, 376, 21844, 198 ]
3.095238
21
#!/usr/bin/env python from os.path import join from setuptools import find_packages, setup # DEPENDENCIES core_deps = requirements_from_pip("requirements.txt") dev_deps = requirements_from_pip("requirements_dev.txt") # DESCRIPTION with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setup( author="Sharework", author_email="root@sharework.co", description="JSON API specification for Django services", extras_require={"all": dev_deps, "dev": dev_deps}, install_requires=core_deps, long_description=long_description, long_description_content_type="text/markdown", name="django-json-api", package_data={"django_json_api": ["resources/VERSION"]}, packages=find_packages(), python_requires=">=3.8", url="https://github.com/share-work/django-json-api", version=open(join("django_json_api", "resources", "VERSION")).read().strip(), classifiers=[ "Programming Language :: Python :: 3.8", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 28686, 13, 6978, 1330, 4654, 198, 198, 6738, 900, 37623, 10141, 1330, 1064, 62, 43789, 11, 9058, 628, 198, 2, 5550, 47, 10619, 24181, 11015, 628, 198, 7295, 62, 10378, 82, 796, ...
2.7325
400
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .jwplatform import JWPlatformIE
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 764, 11321, 1330, 14151, 11627, 40450, 198, 6738, 764, 73, 86, 24254, 1330, 449, 54, 37148, 10008, 628 ]
3.307692
39
import plotly.express as px import pandas as pd data = pd.read_csv('kc_house_data.csv') data_mapa = data[['id', 'lat', 'long', 'price']] grafico1 = px.scatter_mapbox(data_mapa, lat='lat', lon='long', hover_name='id', hover_data=['price'], color_discrete_sequence=['fuchsia'], zoom=3, height=300) grafico1.update_layout(mapbox_style='open-street-map') grafico1.update_layout(height=600, margin={'r': 0, 't': 0, 'l': 0, 'b': 0}) grafico1.show() grafico1.write_html('map_house_rocket.html')
[ 11748, 7110, 306, 13, 42712, 355, 279, 87, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 7890, 796, 279, 67, 13, 961, 62, 40664, 10786, 74, 66, 62, 4803, 62, 7890, 13, 40664, 11537, 198, 7890, 62, 8899, 64, 796, 1366, 58, 17816,...
1.972696
293