content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable, Union
from albumentations.augmentations.transforms import RandomFog as RandomFogAlb
from fastestimator.op.numpyop.univariate.univariate import ImageOnlyAlbumentation
from fastestimator.util.traceability_util import traceable
@traceable()
class RandomFog(ImageOnlyAlbumentation):
"""Add fog to an image.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
fog_coef_lower: Lower limit for fog intensity coefficient. Should be in the range [0, 1].
fog_coef_upper: Upper limit for fog intensity coefficient. Should be in the range [0, 1].
alpha_coef: Transparency of the fog circles. Should be in the range [0, 1].
Image types:
uint8, float32
"""
| [
2,
15069,
13130,
383,
12549,
22362,
320,
1352,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.252073 | 603 |
from locintel.traces.loaders import DruidLoader
import pytest
from unittest.mock import Mock, patch, call
@pytest.fixture
@pytest.fixture
| [
6738,
1179,
48779,
13,
2213,
2114,
13,
2220,
364,
1330,
32685,
17401,
198,
198,
11748,
12972,
9288,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
11,
8529,
11,
869,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
... | 3 | 48 |
#if length of string is even,all characters should occure even times. If its odd,only one should occur odd times to occupy space in middle bcz rest even characters will be in pairs ,making string as palindrome
from collections import Counter
print(gameOfThrones(input())) | [
2,
361,
4129,
286,
4731,
318,
772,
11,
439,
3435,
815,
1609,
495,
772,
1661,
13,
1002,
663,
5629,
11,
8807,
530,
815,
3051,
5629,
1661,
284,
22265,
2272,
287,
3504,
275,
26691,
1334,
772,
3435,
481,
307,
287,
14729,
837,
8601,
4731,... | 4.2 | 65 |
"""Search implementation using a database of Bluetooth networks."""
from collections import defaultdict
from ichnaea.api.locate.constants import (
MAX_BLUE_CLUSTER_METERS,
MAX_BLUES_IN_CLUSTER,
BLUE_MAX_ACCURACY,
BLUE_MIN_ACCURACY,
)
from ichnaea.api.locate.mac import (
aggregate_cluster_position,
cluster_networks,
query_macs,
)
from ichnaea.api.locate.result import (
Position,
PositionResultList,
Region,
RegionResultList,
)
from ichnaea.api.locate.score import station_score
from ichnaea.geocode import GEOCODER
from ichnaea.models import BlueShard
from ichnaea.models.constants import MIN_BLUE_SIGNAL
from ichnaea import util
class BluePositionMixin(object):
"""
A BluePositionMixin implements a position search using
the Bluetooth models and a series of clustering algorithms.
"""
raven_client = None
result_list = PositionResultList
result_type = Position
class BlueRegionMixin(object):
"""
A BlueRegionMixin implements a region search using our Bluetooth data.
"""
raven_client = None
result_list = RegionResultList
result_type = Region
| [
37811,
18243,
7822,
1262,
257,
6831,
286,
19263,
7686,
526,
15931,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
220,
488,
2616,
18213,
13,
15042,
13,
75,
13369,
13,
9979,
1187,
1330,
357,
198,
220,
220,
220,
25882,
62,
91... | 2.860697 | 402 |
from data.database import db, db_transaction
config = Config()
# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
# moving the minimal number of things to _basequery
from data.model import (
appspecifictoken,
blob,
build,
gc,
image,
label,
log,
message,
modelutil,
notification,
oauth,
organization,
permission,
repositoryactioncount,
repo_mirror,
release,
repo_mirror,
repository,
service_keys,
storage,
team,
token,
user,
)
| [
6738,
1366,
13,
48806,
1330,
20613,
11,
20613,
62,
7645,
2673,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
198,
11250,
796,
17056,
3419,
628,
198,
2,
1318,
17191,
5626,
307,
597,
18620,
20086,
1022,
777,
46... | 2.612335 | 227 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import omaha.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
11748,
39030,
12236,
13,
25747,
628
] | 2.953488 | 43 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simple IRC bot to query for the last entries in the CVE database
#
# current command supported is:
#
# last <max>
# cvetweet <max>
# browse
# search <vendor>\<product>
# get <cve>
#
# You need to connect the IRC bot to the IRC Server you want to access it from.
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2015 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Imports
import argparse
import irc.bot
import irc.strings
import json
import os
import signal
import ssl
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
# BSON MongoDB include ugly stuff that needs to be processed for standard JSON
from bson import json_util
from web.api import API
argParser = argparse.ArgumentParser(description='IRC bot to query cve-search')
argParser.add_argument('-s', type=str, help='server ip', default='localhost')
argParser.add_argument('-p', type=int, help='server port)', default=6667)
argParser.add_argument('-n', type=str, help='nickname', default='cve-search')
argParser.add_argument('-w', type=str, help='password')
argParser.add_argument('-u', type=str, help='username', default='cve-search')
argParser.add_argument('-c', nargs="*", help='channel list', default=['cve-search'])
argParser.add_argument('-t', type=str, help='trigger prefix', default='.')
argParser.add_argument('-v', action='store_true', help='channel list', default=['cve-search'])
argParser.add_argument('-m', type=int, help='maximum query amount', default=20)
argParser.add_argument('--ssl', action='store_true', help='Use SSL')
args = argParser.parse_args()
# signal handlers
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
17427,
30039,
10214,
284,
12405,
329,
262,
938,
12784,
287,
262,
24640,
6831,
198,
2,
198,
2,
1459,
... | 2.920923 | 607 |
'''
Few basic examples of the tools in TpTnOsc.utils.
See also osc_exp_examples.ipynb notebook for examples of tools in TpTnOsc.osc_exp.
Execute: python basic_examples.py
'''
import numpy as np
import TpTnOsc.utils as ut
# computing the p'th order multiplicative compound matrix
A = np.array([[1,6,0,0], [2,13,4,20], [2,13,5,25], [0,0,3,16]])
p = 2
mc, lp = ut.compute_MC_matrix( A, p )
print(f"mc=\n{mc}\nlp=\n{lp}")
print(f"\nA is I-TN: {ut.is_ITN(A)}")
# SEB factorization
Lmat, Dmat, Umat, Um, valsL, valsU = ut.EB_factorization_ITN(A)
print(f"\nvalsL={valsL}, valsU={valsU}")
# generare an oscillatory matrix
valsL = np.array([1,0,1,2,1,0])
valsU = np.array([1,3,2,3,0,0])
valsD = np.array([2,1,4,2])
B = ut.compute_matrix_from_EB_factorization( valsL, valsD, valsU )
print(f"\nB=\n{B}\nB is OSC={ut.is_OSC(B, tol=10*np.finfo(np.float).eps)}")
print(f"\nSEB factorization = {ut.show_EB_config(valsL, valsU, valsD, True)}")
# format matrix in latex form
print("\nB in Latex form:")
ut.show_mat_latex_format(B)
# sign variations
v = np.array([-1,2,-3,0])
print(f"\ns-(v)={ut.s_minus(v)}\ns+(v)={ut.s_plus(v)}")
print(f"\nsc-(v)={ut.sc_minus(v)}\nsc+(v)={ut.sc_plus(v)}")
# draw SEB factorization
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 6))
ut.draw_EB_factorization_ITN( valsL, valsD, valsU, ax)
plt.show()
# computing the exponent of an oscillatory matrix
import TpTnOsc.osc_exp as oscp
print("\n\nComputing families of vertex-disjoing paths and exponent (r()) of B:\n")
osc_cls = oscp.OSC_exp(B)
osc_cls.run()
print("lower-left and upper-right corner minors families of vertex-disjoint paths:")
osc_cls.display_results()
for k in osc_cls.G.keys():
print(f'Triangle graph of {k}:')
_, ax = plt.subplots(figsize=(9,6))
osc_cls.G[k].draw_me(ax, nd_offset=[0.3, 0.4])
ax.margins(.1, .1)
plt.show()
plt.close()
| [
7061,
6,
198,
32351,
4096,
6096,
286,
262,
4899,
287,
309,
79,
51,
77,
46,
1416,
13,
26791,
13,
198,
198,
6214,
635,
267,
1416,
62,
11201,
62,
1069,
12629,
13,
541,
2047,
65,
20922,
329,
6096,
286,
4899,
287,
309,
79,
51,
77,
46... | 2.141391 | 877 |
import json
import tornado.web
| [
11748,
33918,
198,
11748,
33718,
13,
12384,
198
] | 3.875 | 8 |
import pandas as pd
import pprint as pp
r=[1000,25000,100000,500000]
df=pd.read_csv("f.csv");
# df=df.sort(["金额"],ascending=False)
print(df)
# df.to_csv("f.csv",index=False,encoding="utf-8")
df_sum={"0~1000":{},"1000~25000":{},"25000~50000":{},"50000~500000":{},">500000":{}}
for index,rows in df.iterrows():
if rows.性别 == "男":
if rows.金额<r[0]:
if constrain_age(rows.年龄) in df_sum["0~1000"]:
df_sum["0~1000"][constrain_age(rows.年龄)]+=1#rows.金额
else:
df_sum["0~1000"][constrain_age(rows.年龄)]=1#rows.金额
elif (rows.金额>r[0])&(rows.金额<r[1]):
if constrain_age(rows.年龄) in df_sum["1000~25000"]:
df_sum["1000~25000"][constrain_age(rows.年龄)]+=1#rows.金额
else:
df_sum["1000~25000"][constrain_age(rows.年龄)]=1#rows.金额
elif (rows.金额>r[1])&(rows.金额<r[2]):
if constrain_age(rows.年龄) in df_sum["25000~50000"]:
df_sum["25000~50000"][constrain_age(rows.年龄)]+=1#rows.金额
else:
df_sum["25000~50000"][constrain_age(rows.年龄)]=1#rows.金额
elif (rows.金额>r[2])&(rows.金额<r[3]):
if constrain_age(rows.年龄) in df_sum["50000~500000"]:
df_sum["50000~500000"][constrain_age(rows.年龄)]+=1#rows.金额
else:
df_sum["50000~500000"][constrain_age(rows.年龄)]=1#rows.金额
elif rows.金额>r[3]:
if constrain_age(constrain_age(rows.年龄)) in df_sum[">500000"]:
# print("in")
df_sum[">500000"][constrain_age(rows.年龄)]+=1#rows.金额
else:
df_sum[">500000"][constrain_age(rows.年龄)]=1#rows.金额
s=pd.DataFrame(df_sum)
s=s.T
print(s)
s.to_csv("f_people_nan.csv",index=True,encoding="utf-8")
# pp.pprint(df_sum)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
279,
4798,
355,
9788,
628,
198,
81,
41888,
12825,
11,
1495,
830,
11,
3064,
830,
11,
4059,
830,
60,
198,
198,
7568,
28,
30094,
13,
961,
62,
40664,
7203,
69,
13,
40664,
15341,
628,
198,
2... | 1.671723 | 923 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
import numpy as np
import PropensityFunctions as Propensity
import CoreClasses as Core
import random
import math
import OutputFunctions as Out
import InitializeFunctions as Init
####################################################
### Load C library
####################################################
from ctypes import cdll
from ctypes import byref, c_int, c_ulong, c_double, POINTER
def get_libpath():
"""
Get the library path of the the distributed SSA library.
"""
import os
import re
from os.path import dirname, abspath, realpath, join
from platform import system
root = dirname(abspath(realpath(__file__)))
if system() == 'Linux':
library = 'Linux-SSA.so'
elif system() == 'Darwin':
library = 'OSX-SSA.so'
elif system() == 'Windows':
library = "Win-SSA.so"
else:
raise RuntimeError("unsupported platform - \"{}\"".format(system()))
return os.path.join(root, 'clibs', library)
_SSA_LIB = cdll.LoadLibrary(get_libpath())
# current_t, next_t, r_seed, max_x, max_y, num_m, num_r, concentrations, constants propensity_ints, reaction_arr, catalyst_arr
_SSA_LIB.SSA_update.argtypes = (c_double, c_double, c_int, c_int, c_int, c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_int), POINTER(c_int), POINTER(c_double))
_SSA_LIB.SSA_update.restype = c_double
SSA_update = _SSA_LIB.SSA_update ### Renaming function for convinence
####################################################
####################################################
def pick_reaction(dice_roll, CRS, concentrations, **kwargs):
''' Picks a reaction to occur stochastically
Arguements:
- dice_roll: float which should be a number between zero and the total propensity of reactions
- CRS: the CRS object which contains all possible reactions and molecules
- concentrations: the list of concentrations indexed by molecule ID
- propensity_function: which propensity function to use, default: standard
Return:
- rxn: a Reaction object'''
checkpoint = 0.0
for rxn in CRS.reactions:
reactant_concentrations = [concentrations[i] for i in rxn.reactants]
catalyst_concentrations = [concentrations[i] for i in rxn.catalysts]
reactant_coeff = rxn.reactant_coeff
catalyzed_constants = rxn.catalyzed_constants
#print rxn.catalysts
if rxn.prop == 'STD':
# print "Reactant concentrations: ", reactant_concentrations
# print 'Product ID numbers: ',rxn.products
checkpoint += Propensity.standard_propensity(rxn, CRS, concentrations)
#print "dice_roll: ", dice_roll, ' checkpoint: ', checkpoint
if checkpoint >= dice_roll:
break
elif rxn.prop == 'RM8':
mu = kwargs['mu']
checkpoint += Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
if checkpoint >= dice_roll:
mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
rxn = pick_replicator8(mutation_dice, rxn,CRS, concentrations, mu)
#print CRS.molecule_list[rxn.products[0]]
break
elif rxn.prop == 'RM2':
mu = kwargs['mu']
checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
if checkpoint >= dice_roll:
mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
#print CRS.molecule_list[rxn.products[0]]
break
elif rxn.prop == 'RM1':
mu = kwargs['mu']
checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
if checkpoint >= dice_roll:
mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
#print CRS.molecule_list[rxn.products[0]]
break
elif rxn.prop[:2] == 'MM':
expon = int(rxn.prop[2])
kcat = 10**expon
checkpoint += Propensity.MM_kinetics(rxn, CRS, concentrations, kcat)
if checkpoint >= dice_roll:
break
#raw_input("Enter")
return rxn
####################################################
def execute_rxn(rxn, CRS, concentrations):
''' Executes a single reaction instance
Arguements:
- rxn: Reaction object to execute_rxn
- CRS: CRS object containing the entire system
- concentrations: list of molecule concentrations indexed by ID
Return:
- concentrations: updated list of molecule concentrations indexed by ID '''
num_reactants = len(rxn.reactants)
num_products = len(rxn.products)
# Reduce Reactants
for i in range(num_reactants):
reactant_index = rxn.reactants[i]
concentrations[reactant_index] -= rxn.reactant_coeff[i]
# Increase Products
for i in range(num_products):
product_index =rxn.products[i]
concentrations[product_index] += rxn.product_coeff[i]
return concentrations
####################################################
####################################################
def pick_replicator8(dice_roll, rxn, CRS, concentrations, mu = 0.001):
'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
Arguements:
- dice_roll: random number between 0 and total mutation propensity
- rxn: original replication reaction
- CRS: CRS object
- concentrations: concentration array containing all replicators and monomer concentrations
- mu: per-base mutation rate
Return:
- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
If not enough resources present to replicate, a null Reaction object is returned
'''
checkpoint = 0.0
seq_found = False
seq = CRS.molecule_list[rxn.products[0]]
# print "Copying seqeunce: ", seq
# print "Dice Roll: ", dice_roll
R_L = len(seq)
reactant_concentrations = concentrations[rxn.reactants]
replicator_concentration = concentrations[rxn.products]
reactant_coeff = rxn.reactant_coeff
#catalyzed_constants = rxn.catalyzed_constants
#Calculate Propensity
Ap = rxn.constant
nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
binomialA = 0 #Used for calculating the contribution from copying A-residues
binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
q_error = 0.0
for eA in range(0, nA + 1):
#Here eA is the number of errors in copying A-residues
if seq_found == True:
break
binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], nA - eA)*pow(reactant_concentrations[1], eA) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
for eB in range(0, nB + 1):
# Here eB is the number of errors in copying B-residues
if eA == 0 and eB == 0:
# Keeps perfect copying probability seperate from copies made with errors
q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA)*pow(reactant_concentrations[1], nB)
checkpoint += rxn.constant*q_p*replicator_concentration
else:
binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], nB - eB)*pow(reactant_concentrations[0], eB) #adds number of mutants with eB B-errors
q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
checkpoint += rxn.constant*q_error*replicator_concentration
if checkpoint >= dice_roll:
A_errors = eA
B_errors = eB
seq_found = True
break
# print "eA, eB: ", eA, eB
# print "checkpoint: ", checkpoint
Astring = 'B'*A_errors + 'A'*(nA - A_errors)
Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
Alist = list(Astring)
Blist = list(Bstring)
random.shuffle(Alist)
random.shuffle(Blist)
new_seq = ''
for i in range(R_L):
if seq[i] == 'A':
new_seq += Alist.pop()
elif seq[i] == 'B':
new_seq += Blist.pop()
Acount = new_seq.count('A')
Bcount = new_seq.count('B')
# print "Mutated Seq: ", new_seq
if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
print 'New check: Not enough food to replicate'
picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
else:
new_seq_ID = CRS.molecule_dict[new_seq]
picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
#raw_input("Enter")
return picked_rxn
def pick_replicator2(dice_roll, rxn, CRS, concentrations, mu = 0.001):
'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
Arguements:
- dice_roll: random number between 0 and total mutation propensity
- rxn: original replication reaction
- CRS: CRS object
- concentrations: concentration array containing all replicators and monomer concentrations
- mu: per-base mutation rate
Return:
- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
If not enough resources present to replicate, a null Reaction object is returned
'''
checkpoint = 0.0
seq_found = False
seq = CRS.molecule_list[rxn.products[0]]
# print "Copying seqeunce: ", seq
# print "Dice Roll: ", dice_roll
R_L = len(seq)
reactant_concentrations = concentrations[rxn.reactants]
replicator_concentration = concentrations[rxn.products]
reactant_coeff = rxn.reactant_coeff
#catalyzed_constants = rxn.catalyzed_constants
#Calculate Propensity
Ap = rxn.constant
nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
binomialA = 0 #Used for calculating the contribution from copying A-residues
binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
q_error = 0.0
for eA in range(0, nA + 1):
if seq_found == True:
break
#Here eA is the number of errors in copying A-residues
binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))
for eB in range(0, nB + 1):
# Here eB is the number of errors in copying B-residues
if eA == 0 and eB == 0:
# Keeps perfect copying probability seperate from copies made with errors
q_p = pow(1 - mu, R_L)*(reactant_concentrations[0]*nA)*(reactant_concentrations[1]*nB)
checkpoint += rxn.constant*q_p*replicator_concentration
else:
binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB))) #adds number of mutants with eB B-errors
q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB*( reactant_concentrations[0]*(nA - eA +eB)*reactant_concentrations[1]*(nB - eB + eA ) )
checkpoint += rxn.constant*q_error*replicator_concentration
if checkpoint >= dice_roll:
A_errors = eA
B_errors = eB
seq_found = True
break
# print "eA, eB: ", eA, eB
# print "checkpoint: ", checkpoint
Astring = 'B'*A_errors + 'A'*(nA - A_errors)
Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
Alist = list(Astring)
Blist = list(Bstring)
random.shuffle(Alist)
random.shuffle(Blist)
new_seq = ''
for i in range(R_L):
if seq[i] == 'A':
new_seq += Alist.pop()
elif seq[i] == 'B':
new_seq += Blist.pop()
Acount = new_seq.count('A')
Bcount = new_seq.count('B')
# print "Mutated Seq: ", new_seq
if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
print 'New check: Not enough food to replicate'
picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
else:
new_seq_ID = CRS.molecule_dict[new_seq]
picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
#raw_input("Enter")
return picked_rxn
####################################################
def pick_replicator1(dice_roll, rxn, CRS, concentrations, mu = 0.001):
'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
Arguements:
- dice_roll: random number between 0 and total mutation propensity
- rxn: original replication reaction
- CRS: CRS object
- concentrations: concentration array containing all replicators and monomer concentrations
- mu: per-base mutation rate
Return:
- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
If not enough resources present to replicate, a null Reaction object is returned
'''
checkpoint = 0.0
seq_found = False
seq = CRS.molecule_list[rxn.products[0]]
# print "Copying seqeunce: ", seq
# print "Dice Roll: ", dice_roll
R_L = len(seq)
reactant_concentrations = concentrations[rxn.reactants]
replicator_concentration = concentrations[rxn.products]
reactant_coeff = rxn.reactant_coeff
#catalyzed_constants = rxn.catalyzed_constants
#Calculate Propensity
Ap = rxn.constant
nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
binomialA = 0 #Used for calculating the contribution from copying A-residues
binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
q_error = 0.0
for eA in range(0, nA + 1):
#Here eA is the number of errors in copying A-residues
if seq_found == True:
break
binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], (nA - eA)/R_L )*pow(reactant_concentrations[1], eA/R_L) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
for eB in range(0, nB + 1):
# Here eB is the number of errors in copying B-residues
if eA == 0 and eB == 0:
# Keeps perfect copying probability seperate from copies made with errors
q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA/R_L)*pow(reactant_concentrations[1], nB/R_L)
checkpoint += rxn.constant*q_p*replicator_concentration
else:
binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], (nB - eB)/R_L)*pow(reactant_concentrations[0], eB/R_L) #adds number of mutants with eB B-errors
q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
checkpoint += rxn.constant*q_error*replicator_concentration
if checkpoint >= dice_roll:
A_errors = eA
B_errors = eB
seq_found = True
break
# print "eA, eB: ", eA, eB
# print "checkpoint: ", checkpoint
Astring = 'B'*A_errors + 'A'*(nA - A_errors)
Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
Alist = list(Astring)
Blist = list(Bstring)
random.shuffle(Alist)
random.shuffle(Blist)
new_seq = ''
for i in range(R_L):
if seq[i] == 'A':
new_seq += Alist.pop()
elif seq[i] == 'B':
new_seq += Blist.pop()
Acount = new_seq.count('A')
Bcount = new_seq.count('B')
# print "Mutated Seq: ", new_seq
if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
print 'New check: Not enough food to replicate'
picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
else:
new_seq_ID = CRS.molecule_dict[new_seq]
picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
#raw_input("Enter")
return picked_rxn
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
8772,
6377,
24629,
2733,
355,
8772,
6377,
201,
198,
11748,
7231,
9487,
274,
355,
7231,
220,
201,
198,
11748,
4738,
201,
198,
11748,
10688,
201,
198,
11748,
25235,
24629,
2733,
355,
3806,
2... | 2.517577 | 6,571 |
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
from siamreppoints.core.config import cfg
from siamreppoints.models.backbone import get_backbone
from siamreppoints.models.head import get_rpn_head
from siamreppoints.models.neck import get_neck
| [
2,
15069,
357,
66,
8,
24956,
7575,
13,
1439,
6923,
33876,
13,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37... | 3.198529 | 136 |
import argparse
import sys
import time
from pathlib import Path
from sqlite3 import DatabaseError
from misc import update_check
from misc.version import VERSION
from pymatgen.core.lattice import Lattice
from searcher.database_handler import DatabaseRequest, StructureTable
from searcher.filecrawler import put_files_in_db
from searcher.misc import vol_unitcell
parser = argparse.ArgumentParser(description='Command line version of StructureFinder to collect .cif/.res files to a '
'database.\n'
'StructureFinder will search for cif files in the given directory(s) '
'recursively. (Either -c, -r or both options must be active!)')
parser.add_argument("-d",
dest="dir",
metavar='"directory"',
type=str,
action='append',
help='Directory(s) where cif files are located.')
parser.add_argument("-e",
dest="ex",
metavar='"directory"',
type=str,
action='append',
help='Directory names to be excluded from the file search. Default is:\n'
'"ROOT", ".OLEX", "TMP", "TEMP", "Papierkorb", "Recycle.Bin" '
'Modifying -e option discards the default.')
parser.add_argument("-o",
dest="outfile",
metavar='"file name"',
type=str,
help='Name of the output database file. Default: "structuredb.sqlite"')
parser.add_argument("-c",
dest="fillcif",
default=False,
action='store_true',
help='Add .cif files (crystallographic information file) to the database.')
parser.add_argument("-r",
dest="fillres",
default=False,
action='store_true',
help='Add SHELX .res files to the database.')
parser.add_argument("--delete",
dest="delete",
default=False,
action='store_true',
help="Delete and do not append to previous database.")
parser.add_argument("-f",
dest='cell',
#nargs=6,
type=lambda s: [float(item) for item in s.split()],
help='Search for the specified unit cell.'
)
def find_cell(cell: list):
"""
Searches for unit cells by command line parameters
"""
cell = [float(x) for x in cell]
no_result = '\nNo similar unit cell found.'
if args.outfile:
dbfilename = args.outfile
else:
dbfilename = 'structuredb.sqlite'
db, structures = get_database(dbfilename)
# if args.more_results:
# # more results:
# print('more results on')
# vol_threshold = 0.04
# ltol = 0.08
# atol = 1.8
# else:
# regular:
vol_threshold = 0.02
ltol = 0.03
atol = 1.0
volume = vol_unitcell(*cell)
# the fist number in the result is the structureid:
cells = structures.find_by_volume(volume, vol_threshold)
idlist = []
if not cells:
print(no_result)
sys.exit()
lattice1 = Lattice.from_parameters(*cell)
for num, curr_cell in enumerate(cells):
try:
lattice2 = Lattice.from_parameters(*curr_cell[1:7])
except ValueError:
continue
mapping = lattice1.find_mapping(lattice2, ltol, atol, skip_rotation_matrix=True)
if mapping:
idlist.append(curr_cell[0])
if not idlist:
print(no_result)
sys.exit()
else:
print('\n{} Structures found:'.format(len(idlist)))
searchresult = structures.get_all_structure_names(idlist)
print('ID | path | filename | data ')
print('-' * 130)
for res in searchresult:
Id = res[0]
path, filename, dataname = [x.decode('utf-8') for x in res if isinstance(x, bytes)]
print('{:} | {:70s} | {:<25s} | {:s}'.format(Id, path, filename, dataname, ))
if __name__ == '__main__':
args = parser.parse_args()
if args.cell:
find_cell(args.cell)
else:
try:
if not args.dir:
parser.print_help()
check_update()
sys.exit()
except IndexError:
print("No valid search directory given.\n")
print("Please run this as 'python3 stdb_cmd.py -d [directory]'\n")
print("stdb_cmd will search for .cif files in [directory] recoursively.")
run_index(args)
# find_cell('10.5086 20.9035 20.5072 90.000 94.130 90.000'.split())
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
44161,
578,
18,
1330,
24047,
12331,
198,
198,
6738,
12747,
1330,
4296,
62,
9122,
198,
6738,
12747,
13,
9641,
1330,
44156,
2849,
198,
... | 1.981025 | 2,477 |
#! /usr/bin/python3
# coding=utf-8
"""
把今天最好的表现当作明天最新的起点..~
いま 最高の表現 として 明日最新の始発..~
Today the best performance as tomorrow newest starter!
author: xiaomo
github: https://github.com/syoubaku
email: xiaomo@xiamoo.info
QQ_NO: 83387856
Date: 17/5/31 14:55
Description: 更新服务器
Copyright(©) 2017 by xiaomo.
"""
import os
import shutil
import sys
import time

# Require exactly one (optionally two) command-line arguments: the version.
if len(sys.argv) < 2 or len(sys.argv) > 3:
    print("Usage: python updateServer_lanyue.py version")
    exit(1)

# Directory holding the server start/stop scripts.
shell_path = "/data/game/server/s1/bin/"
# Deployment root directory on the target server.
target_base_url = "/data/game/server/s1/"
# Directory that receives the server jar.
target_jar_path = target_base_url + "core/"
# Root directory of the configuration-table data files.
target_data_base_path = target_base_url + "data/"
# Version number to deploy, taken from the command line.
version = sys.argv[1]

# Planned steps (no code shown here): stop the server,
# copy the new files,
# and update the version number.

if __name__ == '__main__':
    # NOTE(review): main() is not defined in the code shown here --
    # presumably defined elsewhere or lost in truncation; confirm.
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
198,
10545,
232,
232,
20015,
232,
25465,
17312,
222,
25001,
121,
21410,
26193,
101,
163,
236,
108,
37605,
241,
43291,
23626,
236,
25465,
... | 1.705376 | 465 |
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Dongwei Jiang; Xiaoning Lei
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" speech transformer implementation"""
from absl import logging
import tensorflow as tf
from .base import BaseModel
from ..loss import Seq2SeqSparseCategoricalCrossentropy
from ..metrics import Seq2SeqSparseCategoricalAccuracy
from ..utils.misc import generate_square_subsequent_mask, insert_sos_in_labels
from ..layers.commons import PositionalEncoding
from ..layers.transformer import Transformer
from ..utils.hparam import register_and_parse_hparams
from ..tools.beam_search import BeamSearchDecoder
from ..tools.lm_scorer import NGramScorer, RNNScorer
class SpeechTransformer(BaseModel):
    """ Standard implementation of a SpeechTransformer. Model mainly consists of three parts:
    the x_net for input preparation, the y_net for output preparation and the transformer itself
    """
    default_config = {
        "return_encoder_output": False,
        "num_filters": 512,
        "d_model": 512,
        "num_heads": 8,
        "num_encoder_layers": 12,
        "num_decoder_layers": 6,
        "dff": 1280,
        "rate": 0.1,
        "schedual_sampling_rate": 0.9,
        "label_smoothing_rate": 0.0
    }

    @staticmethod
    def _create_masks(x, input_length, y):
        r""" Generate a square mask for the sequence. The masked positions are
        filled with float(1.0). Unmasked positions are filled with float(0.0).
        """
        input_mask, output_mask = None, None
        if x is not None:
            # 1.0 marks padded frames beyond each utterance's input_length;
            # expand to [batch, 1, 1, time] so it broadcasts over heads/queries.
            input_mask = 1.0 - tf.sequence_mask(
                input_length, tf.shape(x)[1], dtype=tf.float32
            )
            input_mask = input_mask[:, tf.newaxis, tf.newaxis, :]
            input_mask.set_shape([None, None, None, None])
        if y is not None:
            # Label id 0 is padding; merge the padding mask with the causal
            # look-ahead mask so a position cannot attend to later labels.
            output_mask = tf.cast(tf.math.equal(y, 0), tf.float32)
            output_mask = output_mask[:, tf.newaxis, tf.newaxis, :]
            look_ahead_mask = generate_square_subsequent_mask(tf.shape(y)[1])
            output_mask = tf.maximum(output_mask, look_ahead_mask)
            output_mask.set_shape([None, None, None, None])
        return input_mask, output_mask

    def compute_logit_length(self, samples):
        """ used for get logit length """
        # Two ceil-divisions by 2 -- presumably matching two stride-2
        # layers in the x_net frontend (4x time downsampling); confirm.
        input_length = tf.cast(samples["input_length"], tf.float32)
        logit_length = tf.math.ceil(input_length / 2)
        logit_length = tf.math.ceil(logit_length / 2)
        logit_length = tf.cast(logit_length, tf.int32)
        return logit_length

    def time_propagate(self, history_logits, history_predictions, step, enc_outputs):
        """ Advance beam-search decoding by one time step.

        Args:
            history_logits: TensorArray of per-step logits written so far
            history_predictions: TensorArray of the predictions of history
                from 0 to time_step, [beam_size, time_steps]
            step: current decode step before the increment
            enc_outputs: tuple (encoder_output, memory_mask)
        Returns:
            (logits for the new step, updated history_logits, incremented step)
        """
        # merge
        (encoder_output, memory_mask) = enc_outputs
        step = step + 1
        output_mask = generate_square_subsequent_mask(step)
        # propagate 1 step
        logits = self.y_net(tf.transpose(history_predictions.stack()), training=False)
        logits = self.transformer.decoder(
            logits,
            encoder_output,
            tgt_mask=output_mask,
            memory_mask=memory_mask,
            training=False,
        )
        logits = self.final_layer(logits)
        # keep only the newest position's distribution
        logits = logits[:, -1, :]
        history_logits = history_logits.write(step - 1, logits)
        return logits, history_logits, step

    def decode(self, samples, hparams, lm_model=None, return_encoder=False):
        """ beam search decoding

        Args:
            samples: batch with an "input" feature tensor
            hparams: decoding options (beam_search, beam_size, lm_weight,
                lm_type, lm_path)
            lm_model: RNN language model, used when lm_type == "rnn"
            return_encoder: when True, return (encoder_output, input_mask)
                instead of running the beam search
        Raises:
            ValueError: when an LM is requested without a path, or when
                lm_type is not "ngram"/"rnn".
        """
        x0 = samples["input"]
        batch = tf.shape(x0)[0]
        x = self.x_net(x0, training=False)
        input_length = self.compute_logit_length(samples)
        input_mask, _ = self._create_masks(x, input_length, None)
        encoder_output = self.transformer.encoder(x, input_mask, training=False)
        if return_encoder:
            return encoder_output, input_mask
        # init op: every hypothesis starts from the start-of-sequence token
        last_predictions = tf.ones([batch], dtype=tf.int32) * self.sos
        history_predictions = tf.TensorArray(
            tf.int32, size=1, dynamic_size=True, clear_after_read=False
        )
        step = 0
        history_predictions.write(0, last_predictions)
        history_predictions = history_predictions.stack()
        init_cand_states = [history_predictions]
        beam_size = 1 if not hparams.beam_search else hparams.beam_size
        beam_search_decoder = BeamSearchDecoder(
            self.num_class, self.sos, self.eos, beam_size=beam_size
        )
        beam_search_decoder.build(self.time_propagate)
        if hparams.lm_weight != 0:
            if hparams.lm_path is None:
                raise ValueError("lm path should not be none")
            if hparams.lm_type == "ngram":
                lm_scorer = NGramScorer(
                    hparams.lm_path,
                    self.sos,
                    self.eos,
                    self.num_class,
                    lm_weight=hparams.lm_weight,
                )
            elif hparams.lm_type == "rnn":
                lm_scorer = RNNScorer(
                    lm_model,
                    lm_weight=hparams.lm_weight)
            else:
                # Fix: previously an unknown lm_type fell through both
                # branches and add_scorer raised a confusing NameError.
                raise ValueError(
                    "unsupported lm_type: %s (expected 'ngram' or 'rnn')"
                    % hparams.lm_type)
            beam_search_decoder.add_scorer(lm_scorer)
        predictions = beam_search_decoder(
            history_predictions, init_cand_states, step, (encoder_output, input_mask)
        )
        return predictions
class SpeechTransformer2(SpeechTransformer):
    """ Decoder for SpeechTransformer2 works for two pass schedual sampling"""

    def mix_target_sequence(self, gold_token, predicted_token, training, top_k=5):
        """ to mix gold token and prediction
        param gold_token: true labels
        param predicted_token: predictions by first pass
        param training: forwarded to the embedding/positional layers
        param top_k: number of top candidates whose embeddings are averaged
            when a predicted (rather than gold) token is used
        return: mix of the gold_token and predicted_token
        """
        # Accumulates one embedded token per position.
        mix_result = tf.TensorArray(
            tf.float32, size=1, dynamic_size=True, clear_after_read=False
        )
        for i in tf.range(tf.shape(gold_token)[-1]):
            if self.random_num([1]) > self.hparams.schedual_sampling_rate:  # scheduled sampling: sometimes feed the first-pass prediction back
                # Embed the top-k predicted ids and average their embeddings.
                # NOTE(review): assumes y_net.layers[1] is the token
                # embedding layer -- confirm against y_net's construction.
                selected_input = predicted_token[:, i, :]
                selected_idx = tf.nn.top_k(selected_input, top_k).indices
                embedding_input = self.y_net.layers[1](selected_idx, training=training)
                embedding_input = tf.reduce_mean(embedding_input, axis=1)
                mix_result = mix_result.write(i, embedding_input)
            else:
                # Use the gold label's embedding unchanged.
                selected_input = tf.reshape(gold_token[:, i], [-1, 1])
                embedding_input = self.y_net.layers[1](selected_input, training=training)
                mix_result = mix_result.write(i, embedding_input[:, 0, :])
        # Stack back to [batch, time, dim] and run the remaining y_net
        # layers (presumably positional encoding + dropout -- confirm).
        final_input = self.y_net.layers[2](tf.transpose(mix_result.stack(), [1, 0, 2]),
                                           training=training)
        final_input = self.y_net.layers[3](final_input, training=training)
        return final_input
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
357,
34,
8,
13130,
317,
4221,
45510,
37195,
20673,
26,
45641,
648,
7455,
26,
28831,
42990,
32294,
26,
22450,
12484,
48579,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
... | 2.303571 | 3,416 |
""" Test of bcforms.rest
:Author: Mike Zheng <xzheng20@colby.edu>
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2019-7-4
:Copyright: 2019, Karr Lab
:License: MIT
"""
import bcforms
from bcforms import core
from bcforms import rest
import unittest
| [
37811,
6208,
286,
47125,
23914,
13,
2118,
198,
198,
25,
13838,
25,
4995,
44583,
1279,
87,
89,
31753,
1238,
31,
4033,
1525,
13,
15532,
29,
198,
25,
13838,
25,
11232,
509,
3258,
1279,
74,
3258,
31,
76,
824,
76,
13,
15532,
29,
198,
2... | 2.777778 | 90 |
from django.conf import settings
from django.db.models.signals import post_save
from guardian.shortcuts import assign_perm
from timezone_field import TimeZoneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from task.models import Task
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions that uses
    a full-length email field as the username.

    Email and password are required. Other fields are optional.
    """
    # Login identifier; doubles as the username (see USERNAME_FIELD below).
    email = models.EmailField(_('email address'), max_length=254, unique=True)
    name = models.CharField(_('first name'), max_length=30, blank=True)
    # Soft-delete flag: deactivate instead of deleting accounts.
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
        'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    # NOTE(review): CustomUserManager is not defined/imported in the code
    # shown here -- confirm it is declared earlier in this module.
    objects = CustomUserManager()
    # Per-user timezone preference (django-timezone-field).
    timezone = TimeZoneField(default='UTC')

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
from django.contrib.auth.models import Group
from core.models import Department, Application, Environment
# Run DepartmentGroup hooks whenever one of these models is saved.
# NOTE(review): DepartmentGroup is not defined/imported in the code shown
# here -- confirm it is declared earlier in this module.
post_save.connect(DepartmentGroup.on_create_department, sender=Department)
post_save.connect(DepartmentGroup.on_create_application, sender=Application)
post_save.connect(DepartmentGroup.on_create_environment, sender=Environment)
post_save.connect(DepartmentGroup.on_create_task, sender=Task)
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
198,
6738,
21688,
13,
19509,
23779,
1330,
8333,
62,
16321,
198,
6738,
640,
11340,
62,
3245,
1330,
3862,
2696... | 3.109319 | 558 |
"""Utilities to support iteration."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterator, List, Tuple, TypeVar
import numpy as np
from landshark.basetypes import FixedSlice
T = TypeVar("T")
def batch(it: Iterator[T], batchsize: int) -> Iterator[List[T]]:
    """Group iterator into batches.

    Yields lists of at most ``batchsize`` elements; the final batch may be
    shorter. Generalized to accept any iterable: the argument is normalised
    with iter() so that a plain sequence cannot make islice restart from
    the beginning on every pass (which would loop forever).
    """
    it = iter(it)  # no-op for iterators; makes sequences safe
    while True:
        batch = list(itertools.islice(it, batchsize))
        if not batch:
            return
        yield batch
def batch_slices(batchsize: int, total_size: int) -> Iterator[FixedSlice]:
    """Split the range [0, total_size) into consecutive FixedSlice chunks.

    Every slice spans exactly ``batchsize`` indices except possibly the
    last, which covers the remainder when ``total_size`` is not an exact
    multiple of ``batchsize``.
    """
    full, remainder = divmod(total_size, batchsize)
    bounds = [(i * batchsize, (i + 1) * batchsize) for i in range(full)]
    if remainder:
        bounds.append((full * batchsize, total_size))
    for begin, end in bounds:
        yield FixedSlice(begin, end)
def with_slices(it: Iterator[np.ndarray]) -> Iterator[Tuple[FixedSlice, np.ndarray]]:
    """Pair each array in `it` with its row range in the vstacked whole.

    The n-th yielded FixedSlice covers the rows the n-th array would occupy
    if all arrays were stacked vertically in order.
    """
    offset = 0
    for arr in it:
        upper = offset + arr.shape[0]
        yield FixedSlice(offset, upper), arr
        offset = upper
| [
37811,
18274,
2410,
284,
1104,
24415,
526,
15931,
198,
198,
2,
15069,
13130,
9429,
43708,
357,
6601,
5333,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407... | 2.816225 | 604 |
"""
HAVE NOT TESTED FOR SPEED (but second one seems better)
Decent link here about ip spoofing -
http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
I think it has already been incorporated
and my code works on his committed change.
"""
__author__ = 'donal'
__project__ = 'ribcage'
from json import loads
from urllib2 import urlopen
from flask import request
from flask.ext.login import current_user
from config_vars import VALID_IP
# ========================
# PRIMARY CALL
# ========================
| [
37811,
198,
7801,
6089,
5626,
43001,
1961,
7473,
6226,
41841,
357,
4360,
1218,
530,
2331,
1365,
8,
198,
198,
10707,
298,
2792,
994,
546,
20966,
42078,
278,
532,
198,
4023,
1378,
274,
67,
13,
952,
14,
14036,
14,
2704,
2093,
12,
18211,
... | 3.331169 | 154 |
import os

# Print the contents of the working directory and the avo/ subtree.
for folder in ('.', 'avo', 'avo/pai', 'avo/mae'):
    print(os.listdir(folder))
11748,
28686,
198,
4798,
7,
418,
13,
4868,
15908,
10786,
2637,
4008,
198,
4798,
7,
418,
13,
4868,
15908,
10786,
615,
78,
6,
4008,
198,
4798,
7,
418,
13,
4868,
15908,
10786,
615,
78,
14,
49712,
6,
4008,
198,
4798,
7,
418,
13,
4868,... | 2.169811 | 53 |
def test_two_runs_no_clean():
    """
    >>> report_fixture = getfixture('allure_report_with_params')
    >>> allure_report_first_run = report_fixture(cache=False)
    >>> allure_report_second_run = report_fixture(cache=False)
    >>> assert_that(allure_report_second_run,
    ...             has_only_n_test_cases('test_two_runs_no_clean', 2)
    ... )
    """
    # The doctest in the docstring carries the real check: after two
    # uncached runs the allure report must contain this test exactly twice.
    # The body only needs to exist and pass so the test can be collected.
    assert True
| [
4299,
1332,
62,
11545,
62,
48381,
62,
3919,
62,
27773,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13163,
989,
62,
69,
9602,
796,
651,
69,
9602,
10786,
439,
495,
62,
13116,
62,
4480,
62,
37266,
11537,
198,
220,
220,
220,
... | 2.36875 | 160 |
import re
import pandas as pd
m = re.search('(?<=abc)def', 'abcdef')
print(m.group(0))
print("###### Example 1 ###########")
example_1 = """
1916-1918 subscales for a subject
1998-1914 subscales for a subject
subscales for a subject 1998-1920
"""
r = re.sub('(\d{2})(\d{2})', '20\\2', example_1)
print(r)
print("###### Example 2 ###########")
example_2 = """
1234
23
14a
1a3
234
1.39
"""
m = re.search('\\d', 'abcdef')
print(m)
print("###### Example 3 ###########")
example_3 = """
"CL_ID" = "ClientID"
, "RMSEQ" = "RemovedSequence"
, "RMVL_BEGDT" = "RemovalBeginDate"
, "RMVL_ENDDT" = "RemovalEndDate"
, "END_TYP_CDE" = "EndTypeID"
, "REMOVED_FROM_TYP_CDE" = "RemovedFromTypeID"
, "CURR_RMVL_TYP_CDE" = "RemovalTypeCurrentID"
, "ORIG_RMVL_TYP_CDE" = "RemovalTypeOriginalID"
, "FMLY_STRUCTURE_TYP_CDE" = "FamilyStructureTypeID"
, "ADDRESS" = "Address"
, "CITY" = "City"
, "STATE" = "StateID"
, "ZIP" = "ZipFull"
, "COUNTY_TYP_CDE" = "CountyOfficeID"
, "REFER_THAT_CAUSED_RMVL" = "ReferralCausedRemoval"
, "REFERRAL_DT" = "ReferralDate"
, "CARE_TAKER1_ID" = "CareTaker1ID"
, "CARE_TAKER2_ID" = "CareTaker2ID"
"""
print("###### Example 4 ###########")
example_4 = """
requireNamespace("dplyr", quietly=T) #hadley/dplyr
requireNamespace("lubridate")
requireNamespace("OuhscMunge", quietly=TRUE) #OuhscBbmc/OuhscMunge
"""
print("###### Example 5 ###########")
example_5 = """
9
4
34
3
62
43
1
"""
print("###### Example 6 ###########")
example_6 = """
Time,Gender,Genetype,Treatment,MouseID,OR-Recognition Index,FC-t-F %,FC-b-F %,FC-a-F %
4M,Male,WILD,Control,c9-1,0.32,11.9,0,25.7
4M,Male,WILD,Control,c13-2,0.47,23.7,0,11.
4M,Male,WILD,Prozac,c10-2,0.62,40.7,11.4,51.4
4M,Male,WILD,Prozac,c14-3,0.63,10.2,0,28.6
4M,Male,YFP,Control,c9-2,0.42,42.4,11.4,22.9
4M,Male,YFP,Control,c13-1,0.5,15.3,0,54.1
4M,Male,YFP,Control,c13-nm,1,27.1,0,31.4
4M,Male,YFP,Prozac,c10-1,0.65,20.3,17.1,54.3
4M,Male,YFP,Prozac,c10-4,0.43,44.1,5.7,40
4M,Male,YFP,Prozac,c10-nm,0.5,15.3,5.7,34.3
4M,Male,YFP,Prozac,c14-1,0.47,8.5,0,60
4M,Male,YFP,Prozac,c14-2,0.65,16.9,0,8.6
4M,Male,YFP,Prozac,c14-3,1,30.5,5.7,20
"""
| [
11748,
302,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
76,
796,
302,
13,
12947,
10786,
7,
30,
27,
28,
39305,
8,
4299,
3256,
705,
39305,
4299,
11537,
198,
4798,
7,
76,
13,
8094,
7,
15,
4008,
198,
198,
4798,
7203,
4242,
2235,
... | 1.756437 | 1,437 |
import cv2

# Read the image path interactively, e.g. ex_img/img1.png
target_imgPath = input("Introduce Path for Target Image: ") #ex_img/img1.png
# NOTE(review): cv2.imread returns None when the path cannot be read;
# imshow below would then fail -- consider validating. Confirm intent.
input_img = cv2.imread(target_imgPath, cv2.IMREAD_COLOR)

# Show the image in a resizable window until any key is pressed.
winName = 'Input Image'
cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
cv2.imshow(winName, input_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
11748,
269,
85,
17,
198,
198,
16793,
62,
9600,
15235,
796,
5128,
7203,
15005,
344,
10644,
329,
12744,
7412,
25,
366,
8,
1303,
1069,
62,
9600,
14,
9600,
16,
13,
11134,
198,
15414,
62,
9600,
796,
269,
85,
17,
13,
320,
961,
7,
16793,... | 2.42735 | 117 |
# Generated by Django 3.2.2 on 2021-09-17 07:04
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
17,
319,
33448,
12,
2931,
12,
1558,
8753,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
##########################################
############ Utility Commands ############
##########################################
import discord
import random
from discord.ext import commands
from .musicutils.paginator import Pages
from config import *
| [
29113,
7804,
2235,
198,
7804,
4242,
34030,
49505,
1303,
7804,
21017,
198,
29113,
7804,
2235,
198,
198,
11748,
36446,
198,
11748,
4738,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
764,
14664,
13554,
4487,
13,
79,
363,
20900,
1330,
... | 5.382979 | 47 |
import argparse
import tarfile
from pathlib import Path
import tqdm
import pandas as pd
route_path = Path('..')
"""
# Compress necessary files of the dataset
Source
* pdb / dataset_name / target / sampling
* fasta / dataset_name
* native_pdb / dataset_name
* score
Output
* README.md
write about dataset details
* pdb
* native_pdb
* fasta
* score
"""
# Source-CSV column names mapped to their canonical (CamelCase) names.
rename_columns_dict = {'model': 'Model', 'target': 'Target', 'template': 'Template', 'seq_len': 'SeqLength'}
# Columns written to label.csv (ground-truth quality scores per model).
label_columns = [
    'Model', 'Target', 'Template', 'GDT_TS', 'GDT_HA',
    'SeqLength', 'identity', 'positive', 'coverage',
    'identity(-misres)', 'positive(-misres)', 'coverage(-misres)', 'num_misres'
]
# Columns written to mqa_score.csv (model-quality-assessment predictions).
score_columns = [
    'Model', 'Target', 'identity(%)', 'positive(%)', 'coverage(%)',
    'identity(-misres)(%)', 'positive(-misres)(%)', 'coverage(-misres)(%)',
    'DOPE', 'SOAP', 'SBROD', 'ProQ2D', 'ProQRosCenD', 'ProQRosFAD', 'ProQ3D',
    'P3CMQA', 'DeepAccNet', 'DeepAccNet-Bert'
]
def make_scop_score(dataset_name: str, output_dir: Path) -> (str, str, str):
    """Split the final score table into target, label and MQA-score CSVs.

    Reads ``<dataset_name>_final_all_score.csv`` from the score directory,
    canonicalises its column names, and writes three files under
    ``output_dir/score``: ``label.csv``, ``target.csv`` (one row per
    target) and ``mqa_score.csv``.

    Args:
        dataset_name (str): Created dataset name
        output_dir (Path): Output directory path

    Return:
        (str): path to target.csv
        (str): path to label.csv
        (str): path to score.csv
    """
    src_csv = route_path / 'score' / dataset_name / (dataset_name + '_final_all_score.csv')
    table = pd.read_csv(src_csv, index_col=0)

    score_dir = output_dir / 'score'
    score_dir.mkdir(exist_ok=True)

    # Canonicalise column names once, up front.
    table = table.rename(rename_columns_dict, axis=1)

    # Label file: ground-truth quality scores per model.
    label_path = score_dir / 'label.csv'
    table[label_columns].to_csv(label_path)

    # Target metadata columns differ between the SCOP-based and the
    # PISCES-based datasets.
    if dataset_name.startswith('scop'):
        target_table = table[[
            'Target', 'SeqLength', 'SF-DOMID', 'SF', 'len_SF',
            'FA-DOMID', 'FA-PDBID', 'FA-PDBREG', 'FA-UNIID', 'FA-UNIREG', 'SF-PDBID',
            'SF-PDBREG', 'SF-UNIID', 'SF-UNIREG', 'TP', 'CL', 'CF', 'FA', 'Class'
        ]]
    elif dataset_name.startswith('pisces'):
        target_table = table[[
            'Target', 'SeqLength', 'IDs', 'Exptl.', 'resolution', 'R-factor',
            'FreeRvalue', 'PDB_ID', 'Chain', 'Domain_num'
        ]]
        target_table = target_table.rename({'Domain_num': 'DomainNum'}, axis=1)
    else:
        raise ValueError()
    # Keep a single row per target.
    target_table = target_table.groupby('Target').head(1).reset_index(drop=True)
    target_path = score_dir / 'target.csv'
    target_table.to_csv(target_path)

    # MQA-score file: predicted quality scores per model.
    score_path = score_dir / 'mqa_score.csv'
    table[score_columns].to_csv(score_path)

    return target_path, label_path, score_path
if __name__ == '__main__':
    # NOTE(review): main() is not defined in the code shown here --
    # presumably declared elsewhere in this script; confirm.
    main()
| [
11748,
1822,
29572,
198,
11748,
13422,
7753,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
256,
80,
36020,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
38629,
62,
6978,
796,
10644,
10786,
492,
11537,
198,
198,
37811,
198,
2,
... | 2.297976 | 1,235 |
"""Test pre running stuff"""
import warnings
from datetime import datetime
from datetime import timedelta
from typing import Any
from typing import AsyncGenerator
from typing import Dict
from typing import Optional
from uuid import UUID
from uuid import uuid4
import pytest
from asyncpg import ObjectInUseError
from fastapi import FastAPI
from tortoise import Tortoise
from tortoise.exceptions import DBConnectionError
from app.models.db import AuthAccount
from app.models.db import Challenge
from app.models.db import Playlist
from app.models.db import Submission
from app.models.db import Text
from app.models.db import Track
from app.models.db import User
from app.models.db import Vote
from app.models.db.user import AuthProvider
from app.services.auth.base import bearer_auth
from app.settings import APP_MODELS
from app.settings import TORTOISE_TEST_DB
from tests.test_services.test_auth.test_base import USER_UUID
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # Import the deprecated 'imp' module with its DeprecationWarning
    # silenced. NOTE(review): nothing in the code shown uses it --
    # presumably a dependency needs it imported; confirm before removing.
    import imp  # pylint: disable=unused-import
@pytest.fixture(scope="function", autouse=True)
@pytest.mark.asyncio
async def test_db() -> AsyncGenerator:  # type: ignore
    """Initialize db connection before run test."""
    # Connect to the test database; create it on first use.
    try:
        await Tortoise.init(db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS})
    except DBConnectionError:
        await Tortoise.init(
            db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS}, _create_db=True,
        )
    await Tortoise.generate_schemas()
    yield
    # Teardown: drop the database, tolerating it still being in use,
    # then always close the connections.
    try:
        await Tortoise._drop_databases()  # pylint: disable=protected-access
    except ObjectInUseError:
        pass
    await Tortoise.close_connections()
# Fixed track id shared by the track-population helpers below.
POPULATE_TRACK_ID: str = str(uuid4())
async def bearer_auth_mock() -> str:
    """Test double for bearer_auth: always authenticate as USER_UUID."""
    fixed_user = USER_UUID
    return str(fixed_user)
def mock_auth(application: FastAPI) -> FastAPI:
    """Mock auth dependency and token middleware."""
    # Swap the real bearer-token dependency for the stub above.
    application.dependency_overrides[bearer_auth] = bearer_auth_mock
    # Drop all user middleware, then rebuild the stack so the removal
    # actually takes effect.
    application.user_middleware = []
    application.middleware_stack = application.build_middleware_stack()
    return application
@pytest.fixture()
@pytest.mark.asyncio
async def populate_texts() -> Text:
    """Ensure a Text row with content 'test' exists and return it."""
    record, _created = await Text.get_or_create(content="test")
    return record
# Canonical track attributes shared by populate_track()/populate_challenge().
test_track_info: Dict[str, Any] = {
    "id": POPULATE_TRACK_ID,
    "name": "test",
    "author_name": "test",
    "cover_url": "test",
    "preview_url": "test",
    "youtube_id": "test",
    "spotify_id": "test",
    "recommended": True,
    "meta": {},
}
async def populate_track() -> Track:
    """Ensure the canonical test track exists and return it."""
    record, _created = await Track.get_or_create(**test_track_info)
    return record
@pytest.fixture()
@pytest.mark.asyncio
async def track_fixture() -> Track:
    """Fixture wrapper around populate_track()."""
    track = await populate_track()
    return track
async def populate_playlist(track: Optional[Track] = None) -> Playlist:
    """Ensure the test playlist exists, attach `track` and return it.

    When `track` is omitted, the canonical test track is created/fetched
    and attached instead.
    """
    record, _created = await Playlist.get_or_create(
        name="test",
        url="test",
        spotify_id="test",
        recommended=True,
    )
    if not track:
        track = await populate_track()
    await record.tracks.add(track)
    return record
@pytest.fixture()
@pytest.mark.asyncio
async def playlist_fixture() -> Playlist:
    """Fixture wrapper around populate_playlist()."""
    playlist = await populate_playlist()
    return playlist
async def populate_user(user_id: Optional[UUID] = USER_UUID) -> User:
    """Populate user for routes testing.

    Creates (or fetches) the user with `user_id` plus one default auth
    account, and returns the user with auth_accounts prefetched.

    Fix: the signature advertises Optional[UUID], but the old
    commented-out guard meant an explicit None reached
    User.get_or_create(id=None); generate a fresh random id instead.
    """
    if user_id is None:
        user_id = uuid4()
    user, _ = await User.get_or_create(id=user_id)
    await AuthAccount.get_or_create(
        _id="test",
        name="test",
        image="test",
        url="test",
        provider=AuthProvider.DEFAULT,
        access_token="test",
        refresh_token="test",
        expires=0,
        user=user,
    )
    await user.fetch_related("auth_accounts")
    return user
@pytest.fixture()
@pytest.mark.asyncio
async def user_fixture() -> User:
    """Default user tests fixture."""
    default_user = await populate_user()
    return default_user
# Well-known challenge ids/secrets shared by the fixtures below: one
# public challenge owned by the default user and one private "foreign" one.
POPULATE_CHALLENGE_ID = uuid4()
POPULATE_CHALLENGE_SECRET = Challenge(id=POPULATE_CHALLENGE_ID).secret_key()
POPULATE_CHALLENGE_FOREIGN_ID = uuid4()
POPULATE_CHALLENGE_FOREIGN_SECRET = Challenge(
    id=POPULATE_CHALLENGE_FOREIGN_ID, is_public=False,
).secret_key()
async def populate_challenge(
    challenge_status: str = "process",
    is_public: bool = True,
    user_id: Optional[UUID] = USER_UUID,
    challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
    """Populate challenge for routes testings.

    `challenge_status` is one of "process" (default), "vote" or "end" and
    controls the challenge_end/vote_end timestamps relative to now. A
    falsy `user_id` is replaced with a fresh random owner.
    """
    if not user_id:
        user_id = uuid4()
    user: User = await populate_user(user_id=user_id)
    # Fix: unpack the attributes as keyword filters. Passing the dict
    # positionally made it the `defaults` argument of get_or_create and
    # filtered on nothing (now consistent with populate_track()).
    track, _ = await Track.get_or_create(**test_track_info)
    await populate_playlist()
    # Default: challenge still running, voting window after it.
    challenge_end = datetime.utcnow() + timedelta(days=1)
    vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "vote":
        # Submission phase over, voting open.
        challenge_end = datetime.utcnow() - timedelta(days=1)
        vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "end":
        # Both phases in the past.
        challenge_end = datetime.utcnow() - timedelta(days=2)
        vote_end = datetime.utcnow() - timedelta(days=1)
    challenge, _ = await Challenge.get_or_create(
        id=challenge_id,
        name="test",
        challenge_end=challenge_end,
        vote_end=vote_end,
        is_public=is_public,
        owner=user,
        track=track,
    )
    await challenge.participants.add(user)
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_process_fixture() -> Challenge:
    """Open public challenge, owned by the default user, still in process."""
    challenge = await populate_challenge()
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_vote_fixture() -> Challenge:
    """Open public challenge, owned by the default user, in its voting phase."""
    challenge = await populate_challenge(challenge_status="vote")
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_end_fixture() -> Challenge:
    """Open public challenge, owned by the default user, already ended."""
    challenge = await populate_challenge(challenge_status="end")
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_private_fixture() -> Challenge:
    """Private challenge, owned by the default user, still in process."""
    challenge = await populate_challenge(is_public=False)
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_foreign_fixture() -> Challenge:
    """Private, in-process challenge owned by a random (non-default) user."""
    challenge = await populate_challenge(
        is_public=False,
        user_id=None,
        challenge_id=POPULATE_CHALLENGE_FOREIGN_ID,
    )
    return challenge
# Fixed submission id shared by the submission helpers below.
POPULATE_SUBMISSION_ID = uuid4()
async def populate_submission(
    challenge: Challenge,
    submission_id: Optional[UUID] = POPULATE_SUBMISSION_ID,
) -> Submission:
    """Ensure a submission by the challenge owner exists and return it.

    A falsy submission_id is replaced with a fresh random uuid.
    """
    if not submission_id:
        submission_id = uuid4()
    record, _created = await Submission.get_or_create(
        id=submission_id,
        url="test",
        challenge=challenge,
        user=challenge.owner,
    )
    return record
@pytest.fixture()
@pytest.mark.asyncio
async def submission_fixture() -> Submission:
    """Submission by the default user in an open, in-process challenge."""
    parent = await populate_challenge()
    return await populate_submission(challenge=parent)
@pytest.fixture()
@pytest.mark.asyncio
async def submission_vote_fixture() -> Submission:
    """Submission by the default user in an open challenge that is voting."""
    parent = await populate_challenge(challenge_status="vote")
    return await populate_submission(challenge=parent)
@pytest.fixture()
@pytest.mark.asyncio
async def submission_ended_fixture() -> Submission:
    """Submission by the default user in an open challenge that has ended."""
    parent = await populate_challenge(challenge_status="end")
    return await populate_submission(challenge=parent)
async def populate_vote(submission: Submission) -> Vote:
    """Ensure a vote on `submission` by the challenge owner and return it."""
    record, _created = await Vote.get_or_create(
        submission=submission,
        user=submission.challenge.owner,  # type: ignore
    )
    return record
@pytest.fixture()
@pytest.mark.asyncio
async def vote_fixture() -> Vote:
    """Vote on a submission in a challenge that is in its voting phase."""
    parent = await populate_challenge(challenge_status="vote")
    entry = await populate_submission(challenge=parent)
    return await populate_vote(submission=entry)
| [
37811,
14402,
662,
2491,
3404,
37811,
198,
11748,
14601,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
1081,
13361,
8645,
1352,
198,
6738,
19... | 2.653553 | 3,406 |
# QRFactors.py
#
# Daniel R. Reynolds
# SMU Mathematics
# Math 4315
# imports
import numpy
def QRFactors(A):
    """
    usage: Q, R = QRFactors(A)

    Function to compute the QR factorization of a (possibly rank-deficient)
    'thin' matrix A (m x n, with m >= n) using Householder reflection matrices.

    Input:   A - thin matrix
    Outputs: Q - orthogonal matrix (m x m)
             R - "upper triangular" matrix, i.e. R = [ Rhat ]
                                                     [  0   ]
             with Rhat an (n x n) upper-triangular matrix
    """
    # get dimensions of A
    m, n = numpy.shape(A)

    # initialize results; force a floating-point copy so the in-place
    # updates below are not truncated when A has an integer dtype
    Q = numpy.identity(m)
    R = numpy.array(A, dtype=float)

    # iterate over columns
    for k in range(n):

        # extract subvector from diagonal down
        z = R[k:m, k]

        # Householder vector v = -(z + sign(z[0]) * ||z|| * e1).
        # Fix: treat sign as +1 when z[0] == 0 -- numpy.sign(0) is 0,
        # which produced a reflector that failed to annihilate the
        # subdiagonal whenever the pivot entry was exactly zero.
        sgn = numpy.sign(z[0]) if z[0] != 0 else 1.0
        v = -z
        v[0] = -sgn * numpy.linalg.norm(z) - z[0]
        vnorm = numpy.linalg.norm(v)

        # if subvector has norm zero, continue to next column
        if vnorm < numpy.finfo(float).eps:
            continue

        # unit vector u = v/||v||; the Householder matrix is Qk = I - 2*u*u'
        u = v / vnorm

        # apply the reflection to rows k..m-1 of R (columns k..n-1)
        for j in range(k, n):
            utR = 2 * u.T @ R[k:m, j]
            R[k:m, j] -= u * utR

        # accumulate the same reflection into Q
        for j in range(m):
            utQ = 2 * u.T @ Q[k:m, j]
            Q[k:m, j] -= u * utQ

    # Q currently holds the product of reflectors applied row-wise;
    # transpose so that A = Q @ R
    Q = Q.T
    return [Q, R]
| [
2,
42137,
29054,
669,
13,
9078,
198,
2,
198,
2,
7806,
371,
13,
21995,
198,
2,
9447,
52,
39448,
198,
2,
16320,
5946,
1314,
198,
198,
2,
17944,
198,
11748,
299,
32152,
198,
198,
4299,
42137,
29054,
669,
7,
32,
2599,
198,
220,
220,
... | 1.989651 | 773 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2014, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
__package_name__ = u'nexusformat'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__documentation_author__ = u'Ray Osborn'
__documentation_copyright__ = u'2013-2016, Ray Osborn'
__license__ = u'BSD'
__author_name__ = u'NeXpy Development Team'
__author_email__ = u'nexpydev@gmail.com'
__author__ = __author_name__ + u' <' + __author_email__ + u'>'
__url__ = u'http://nexpy.github.io/nexpy/'
__download_url__ = u'https://github.com/nexpy/nexusformat/'
__description__ = u'nexusformat: Python API to access NeXus data'
__long_description__ = \
u"""
This package provides a Python API to open, create, and manipulate `NeXus data
<http://www.nexusformat.org/>`_ written in the HDF5 format. The 'nexusformat'
package provides the underlying API for `NeXpy
<http://nexpy.github.io/nexpy>`_, which provides a GUI interface. It also
contains a command-line script, `nxstack` for merging TIFF or CBF files into a
single HDF5 array.
The latest development version is always available from `NeXpy's GitHub
site <https://github.com/nexpy/nexusformat>`_.
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2211,
12,
4967,
11,
3169,
55,
9078,
7712,
4816,
13,
198,
2,
... | 3.276688 | 459 |
from flask import Flask
from flask import request
from flask_restful import Resource, Api, reqparse
from Calculator import Calculator
app = Flask(__name__)
api = Api(app)

# NOTE(review): 'Add' is not defined or imported in the code shown here
# (only Calculator is imported), so this line raises NameError as written;
# presumably a Resource subclass was lost in truncation -- confirm.
api.add_resource(Add, '/add')

if __name__ == '__main__':
    # Serve the REST API on port 5002.
    app.run(port='5002')
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
2581,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
11,
43089,
29572,
198,
6738,
43597,
1330,
43597,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
15042,
796,
59... | 3.023529 | 85 |
"""syphon.tests.test_context.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
from syphon import Context
| [
37811,
1837,
746,
261,
13,
41989,
13,
9288,
62,
22866,
13,
9078,
628,
220,
220,
15069,
357,
66,
8,
2177,
12,
7908,
14926,
1636,
43953,
11,
11419,
13,
198,
220,
220,
49962,
739,
17168,
357,
5450,
1378,
12567,
13,
785,
14,
17231,
439,... | 2.819444 | 72 |
__source__ = 'https://leetcode.com/problems/redundant-connection-ii/'
# Time:  O(V) number of vertices
# Space: O(V)
#
# Description: Leetcode # 685. Redundant Connection II
#
# In this problem, a rooted tree is a directed graph such that,
# there is exactly one node (the root) for which all other nodes are descendants of this node,
# plus every node has exactly one parent, except for the root node which has no parents.
#
# The given input is a directed graph that started as a rooted tree with N nodes (with distinct values 1, 2, ..., N),
# with one additional directed edge added. The added edge has two different vertices chosen from 1 to N,
# and was not an edge that already existed.
#
# The resulting graph is given as a 2D-array of edges.
# Each element of edges is a pair [u, v] that represents a directed edge connecting nodes u and v,
# where u is a parent of child v.
#
# Return an edge that can be removed so that the resulting graph is a rooted tree of N nodes.
# If there are multiple answers, return the answer that occurs last in the given 2D-array.
#
# Example 1:
# Input: [[1,2], [1,3], [2,3]]
# Output: [2,3]
# Explanation: The given directed graph will be like this:
# 1
# / \
# v v
# 2-->3
#
# Example 2:
# Input: [[1,2], [2,3], [3,4], [4,1], [1,5]]
# Output: [4,1]
# Explanation: The given directed graph will be like this:
# 5 <- 1 -> 2
# ^ |
# | v
# 4 <- 3
# Note:
# The size of the input 2D-array will be between 3 and 1000.
# Every integer represented in the 2D-array will be between 1 and N, where N is the size of the input array.
#
# Companies
# Google
# Related Topics
# Graph
# Similar Questions
# Redundant Connection
#
import collections
import unittest
# 40ms 24.16%
# NOTE(review): no TestCase subclasses are defined in this file, so running
# it discovers no tests; `collections` is imported but unused in this chunk.
# The module-level `Java` string that follows stores reference solutions and
# discussion as plain text -- it is data, not executable code.
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/redundant-connection-ii/solution/
# https://leetcode.com/problems/redundant-connection-ii/discuss/108058/one-pass-disjoint-set-solution-with-explai
# https://leetcode.com/problems/redundant-connection-ii/discuss/218692/Swift-union-find-solution
# summary:
# 1) Check whether there is a node having two parents.
# If so, store them as candidates A and B, and set the second edge invalid.
# 2) Perform normal union find.
# If the tree is now valid
# simply return candidate B
# else if candidates not existing
# we find a circle, return current edge;
# else
# remove candidate A instead of B.
#
# In the following code,
# last == -1 means "no cycle found" which is scenario 1 or 2
# second != -1 && last != -1 means "one edge removed and the result tree has cycle" which is scenario 3
# second == -1 means "no edge skipped or removed" which is scenario 4
#
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] roots = new int[edges.length + 1], ds = new int[edges.length + 1];
Arrays.fill(roots, -1);
int first = -1, second = -1, last = -1;
for (int i = 0; i < edges.length; i++) {
int parent = edges[i][0];
int child = edges[i][1];
if (roots[child] != -1) {
first = roots[child];
second = i;
continue;
}
roots[child] = i;
int x = find(ds, parent);
if (x == child) last = i;
else ds[child] = x;
}
if (last == -1) return edges[second];
if (second == -1) return edges[last];
return edges[first];
}
private int find(int[] ds, int x){
return ds[x] == 0 ? x : (ds[x] = find(ds, ds[x]));
}
}
Approach #1: Depth-First Search [Accepted]
Complexity Analysis
Time Complexity: O(N) where N is the number of vertices (and also the number of edges) in the graph.
We perform a depth-first search.
Space Complexity: O(N), the size of the graph.
# 11ms 12.82%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int N = edges.length;
Map<Integer, Integer> parent = new HashMap();
List<int[]> candidates = new ArrayList();
for (int[] edge: edges) {
if (parent.containsKey(edge[1])) {
candidates.add(new int[]{parent.get(edge[1]), edge[1]});
candidates.add(edge);
} else {
parent.put(edge[1], edge[0]);
}
}
int root = orbit(1, parent).node;
if (candidates.isEmpty()) {
Set<Integer> cycle = orbit(root, parent).seen;
int[] ans = new int[]{0, 0};
for (int[] edge: edges) {
if (cycle.contains(edge[0]) && cycle.contains(edge[1])) {
ans = edge;
}
}
return ans;
}
Map<Integer, List<Integer>> children = new HashMap();
for (int v: parent.keySet()) {
int pv = parent.get(v);
if (!children.containsKey(pv))
children.put(pv, new ArrayList<Integer>());
children.get(pv).add(v);
}
boolean[] seen = new boolean[N+1];
seen[0] = true;
Stack<Integer> stack = new Stack();
stack.add(root);
while (!stack.isEmpty()) {
int node = stack.pop();
if (!seen[node]) {
seen[node] = true;
if (children.containsKey(node)) {
for (int c: children.get(node))
stack.push(c);
}
}
}
for (boolean b: seen) if (!b)
return candidates.get(0);
return candidates.get(1);
}
public OrbitResult orbit(int node, Map<Integer, Integer> parent) {
Set<Integer> seen = new HashSet();
while (parent.containsKey(node) && !seen.contains(node)) {
seen.add(node);
node = parent.get(node);
}
return new OrbitResult(node, seen);
}
}
class OrbitResult {
int node;
Set<Integer> seen;
OrbitResult(int n, Set<Integer> s) {
node = n;
seen = s;
}
}
This problem is limited to a graph with N nodes and N edges.
No node is singled out if a edge is removed.
For example, [[1,2],[2,4],[3,4]], 4 nodes 3 edges, is not applicable to this problem.
You cannot remove [3,4] to single out node 3.
There are 3 cases:
Case 1) No loop, but there is one node who has 2 parents.
Case 2) A loop, and there is one node who has 2 parents, that node must be inside the loop.
Case 3) A loop, and every node has only 1 parent.
Case 1: e.g. [[1,2],[1,3],[2,3]] ,node 3 has 2 parents ([1,3] and [2,3]).
Return the edge that occurs last that is, return [2,3].
Case 2: e.g. [[1,2],[2,3],[3,1],[4,1]] , {1->2->3->1} is a loop, node 1 has 2 parents ([4,1] and [3,1]).
Return the edge that is inside the loop, that is, return [3,1].
Case 3: e.g. [[1,2],[2,3],[3,1],[1,4]] , {1->2->3->1} is a loop, you can remove any edge in a loop,
the graph is still valid. Thus, return the one that occurs last, that is, return [3,1].
Also, [[2,1],[3,1],[4,2],[1,4]] is a good example
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] ancestor = new int[edges.length + 1];
int[][] res = new int[2][2];
for(int[]node : edges) {
if(node[1] != getAncestor(ancestor, node[1]))
res[0] = node;
else if(getAncestor(ancestor, node[0]) == getAncestor(ancestor, node[1]))
res[1] = node;
else
ancestor[node[1]] = ancestor[node[0]];
if(res[0][0] != 0 && res[1][0] != 0)
return find(edges, ancestor, res[0], res[1]);
}
return res[0][0] == 0 ? res[1] : res[0];
}
public int getAncestor(int[] ancestor, int node) {
if(node != ancestor[node])
ancestor[node] = ancestor[node] == 0 ? node : getAncestor(ancestor, ancestor[node]);
return ancestor[node];
}
public int[] find(int[][] edges, int[] ancestor, int[] removed0, int[] removed1) {
for(int[] res : edges)
if(res[1] == removed0[1] && getAncestor(ancestor, res[1]) == getAncestor(ancestor, removed1[1]))
return res;
return new int[2];
}
}
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] parent = new int[edges.length+1];
for (int i = 0; i < parent.length; i++) {
parent[i] = i;
}
int[] cycleEdge = null;
int[] mParent = null;
for (int[] edge : edges) {
int x = find(parent, edge[0]);
int y = find(parent, edge[1]);
if (x == y)
cycleEdge = edge;
else {
if (y != edge[1])
mParent = edge;
else
parent[y] = x;
}
}
// means we only got the multiparent problem, and the edges we recorded using parent so far are good, so just return this one.
if (cycleEdge == null)
return mParent;
// means we only got the cycle problem, in this case we can remove any edge in the cycle, so just remove this one.
if (mParent == null)
return cycleEdge;
// now, it means we have both cycle and multi-parent problem.
// In my code, i didn't record an edge into parent if we think it's involved into the multi-parent problem,
// but we are still getting the cycle problem. Since in this problem we can only have edges point to the same
// node, so, current mParent edge is the wrong one, we need to remove the other one pointing to the same
// dest as mParent ex: [[2,1],[3,1],[4,2],[1,4]]
for (int[] edge : edges) {
if (edge[1] == mParent[1])
return edge;
}
return new int[2];
}
public int find(int[] parent, int x) {
if (parent[x] == x)
return x;
return find(parent, parent[x]);
}
}
'''
| [
834,
10459,
834,
796,
705,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
445,
917,
415,
12,
38659,
12,
4178,
14,
6,
198,
2,
3862,
25,
220,
440,
7,
53,
8,
1271,
83,
286,
9421,
1063,
198,
2,
4687,
25,
440,
7,
53,
8... | 2.27543 | 4,473 |
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
from datetime import datetime
import io
import os
import pathlib
import signal
import sys
from threading import Lock, Thread, Event
from threading import current_thread, main_thread
import time
from urllib.parse import unquote
import weakref
import zipfile
import tempfile
from zipfile import BadZipFile
# Related third party imports
import numpy as np
from genicam.genapi import NodeMap
from genicam.genapi import LogicalErrorException, RuntimeException
from genicam.genapi import ChunkAdapterGeneric, ChunkAdapterU3V, \
ChunkAdapterGEV
from genicam.gentl import TimeoutException, \
NotImplementedException, ParsingChunkDataException, NoDataException, \
ErrorException, InvalidBufferException, InvalidParameterException
from genicam.gentl import GenericException
from genicam.gentl import GenTLProducer, BufferToken, EventManagerNewBuffer
from genicam.gentl import DEVICE_ACCESS_FLAGS_LIST, EVENT_TYPE_LIST, \
ACQ_START_FLAGS_LIST, ACQ_STOP_FLAGS_LIST, ACQ_QUEUE_TYPE_LIST, \
PAYLOADTYPE_INFO_IDS
# Local application/library specific imports
from harvesters._private.core.port import ConcretePort
from harvesters._private.core.statistics import Statistics
from harvesters.util.logging import get_logger
from harvesters.util.pfnc import symbolics
from harvesters.util.pfnc import uint16_formats, uint32_formats, \
float32_formats, uint8_formats
from harvesters.util.pfnc import component_2d_formats
from harvesters.util.pfnc import lmn_444_location_formats, \
lmno_4444_location_formats, lmn_422_location_formats, \
lmn_411_location_formats, mono_location_formats, bayer_location_formats
_is_logging_buffer_manipulation = True if 'HARVESTERS_LOG_BUFFER_MANIPULATION' in os.environ else False
_sleep_duration_default = 0.000001 # s
class ThreadBase:
    """
    Abstract base for the threading model used by :class:`ImageAcquirer`.

    Harvester drives image acquisition with Python's built-in
    :mod:`threading` module by default, but you may prefer another framework
    (for example PyQt's :class:`QThread`) for technical reasons. Subclass
    this proxy and implement the abstract members to plug in your own
    threading machinery.
    """
    def __init__(self, *, mutex=None, logger=None):
        """
        :param mutex: A lock object guarding shared state.
        :param logger: An optional logger; a default one is created if omitted.
        """
        self._logger = logger or get_logger(name=__name__)
        super().__init__()
        self._is_running = False
        self._mutex = mutex

    def start(self):
        """
        Flag the worker as running, then launch it and log the event.

        :return: None.
        """
        self._is_running = True
        self._start()
        self._logger.debug(f'Started thread {self.id_:0X}.')

    def _start(self):
        """
        Launch the worker. Subclasses must override.

        :return: None.
        """
        raise NotImplementedError

    def stop(self):
        """
        Halt the worker. Subclasses must override.

        :return: None.
        """
        raise NotImplementedError

    def acquire(self):
        """
        Take the mutex. Subclasses must override.

        :return: None.
        """
        raise NotImplementedError

    def release(self):
        """
        Give the acquired mutex back. Subclasses must override.

        :return: None.
        """
        raise NotImplementedError

    def is_running(self):
        """
        :return: :const:`True` while the worker runs, otherwise :const:`False`.
        """
        return self._is_running

    @property
    def worker(self):
        """The callable executed by the thread. Subclasses must override."""
        raise NotImplementedError

    @worker.setter
    def worker(self, obj):
        raise NotImplementedError

    @property
    def mutex(self):
        """The mutex object. Subclasses must override."""
        raise NotImplementedError

    @property
    def id_(self):
        """An identifier of the underlying thread. Subclasses must override."""
        raise NotImplementedError
class ComponentBase:
    """
    Common base for the data-component wrapper types.
    """
    def __init__(self, *, buffer=None):
        """
        :param buffer: The GenTL buffer module that backs this component.
        """
        assert buffer
        super().__init__()
        self._buffer = buffer
        self._data = None

    @property
    def data(self):
        """
        :return: The component data.
        """
        return self._data

    @property
    def source_id(self):
        """
        :return: The source ID of the data component.
        """
        return self._buffer.source_id

    @property
    def data_format(self):
        """
        :return: The data type of the data component.
        """
        return self._buffer.data_format

    @property
    def data_format_namespace(self):
        """
        :return: The data type namespace of the data component.
        """
        # NOTE(review): this intentionally mirrors `data_format`; presumably
        # it should read a dedicated namespace attribute on the buffer --
        # confirm against the GenTL producer before changing.
        return self._buffer.data_format
class ComponentUnknown(ComponentBase):
    """
    Data-component wrapper for parts whose GenTL part data type is
    :const:`PART_DATATYPE_UNKNOWN`.
    """
class Component2DImage(ComponentBase):
    """
    Represents a data component that is classified as
    :const:`PART_DATATYPE_2D_IMAGE` by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, part=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module that holds the raw data.
        :param part: The GenTL "part" this component maps; None for
            single-part (whole-buffer) payloads.
        :param node_map: The remote device's node map, used as a fallback
            source for width/height/format when the buffer cannot report them.
        :param logger: An optional logger; a default one is created if omitted.
        """
        #
        assert buffer
        assert node_map
        #
        super().__init__(buffer=buffer)
        self._logger = logger or get_logger(name=__name__)
        #
        self._part = part
        self._node_map = node_map
        self._data = None
        self._num_components_per_pixel = 0
        symbolic = self.data_format
        # Determine the data type:
        if self.x_padding > 0:
            # In this case, the client will have to trim the padding part.
            # so we create a NumPy array that consists of uint8 elements
            # first. The client will interpret the array in an appropriate
            # dtype in the end once he trimmed:
            dtype = 'uint8'
            bytes_per_pixel_data_component = 1
        else:
            if symbolic in uint16_formats:
                dtype = 'uint16'
                bytes_per_pixel_data_component = 2
            elif symbolic in uint32_formats:
                dtype = 'uint32'
                bytes_per_pixel_data_component = 4
            elif symbolic in float32_formats:
                dtype = 'float32'
                bytes_per_pixel_data_component = 4
            elif symbolic in uint8_formats:
                dtype = 'uint8'
                bytes_per_pixel_data_component = 1
            else:
                # Sorry, Harvester can't handle this:
                # NOTE(review): on this early return `_symbolic` is never
                # set; callers appear expected to check `data is None`
                # before touching other state -- confirm.
                self._data = None
                return
        # Determine the number of components per pixel:
        if symbolic in lmn_444_location_formats:
            num_components_per_pixel = 3.
        elif symbolic in lmn_422_location_formats:
            num_components_per_pixel = 2.
        elif symbolic in lmn_411_location_formats:
            # 4:1:1 subsampling averages 1.5 components per pixel.
            num_components_per_pixel = 1.5
        elif symbolic in lmno_4444_location_formats:
            num_components_per_pixel = 4.
        elif symbolic in mono_location_formats or \
                symbolic in bayer_location_formats:
            num_components_per_pixel = 1.
        else:
            # Sorry, Harvester can't handle this:
            self._data = None
            return
        self._num_components_per_pixel = num_components_per_pixel
        self._symbolic = symbolic
        #
        width = self.width
        height = self.height
        #
        if self._part:
            # A part reports its size in bytes; convert to an element count.
            count = self._part.data_size
            count //= bytes_per_pixel_data_component
            data_offset = self._part.data_offset
        else:
            count = width * height
            count *= num_components_per_pixel
            count += self.y_padding
            data_offset = 0
        # Convert the Python's built-in bytes array to a Numpy array:
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Component 2D image ('
                'len(raw_buffer): {0}, '
                'int(count): {1}, '
                'dtype: {2}, '
                'offset: {3}, '
                'pixel format: {4},'
                'x padding: {5},'
                'y padding: {6}'
                ')'.format(
                    len(self._buffer.raw_buffer),
                    int(count),
                    dtype,
                    data_offset,
                    symbolic,
                    self.x_padding,
                    self.y_padding,
                )
            )
        # Zero-copy view over the raw buffer; the array is read-only.
        self._data = np.frombuffer(
            self._buffer.raw_buffer,
            count=int(count),
            dtype=dtype,
            offset=data_offset
        )
    def represent_pixel_location(self):
        """
        Returns a NumPy array that represents the 2D pixel location,
        which is defined by PFNC, of the original image data.
        You may use the returned NumPy array for a calculation to map the
        original image to another format.
        :return: A NumPy array that represents the 2D pixel location, or
            None when the pixel format could not be interpreted.
        """
        if self.data is None:
            return None
        #
        # One row per scan line (including padding rows), one column per
        # component sample (including horizontal padding bytes).
        return self._data.reshape(
            self.height + self.y_padding,
            int(self.width * self._num_components_per_pixel + self.x_padding)
        )
    @property
    def num_components_per_pixel(self):
        """
        :return: The number of data components per pixel.
        """
        return self._num_components_per_pixel
    @property
    def width(self):
        """
        :return: The width of the data component in the buffer in number of pixels.
        """
        # Prefer the part, then the buffer; fall back to the node map when
        # the GenTL module cannot report the value.
        try:
            if self._part:
                value = self._part.width
            else:
                value = self._buffer.width
        except GenericException:
            value = self._node_map.Width.value
        return value
    @property
    def height(self):
        """
        :return: The height of the data component in the buffer in number of pixels.
        """
        # Same fallback chain as `width`.
        try:
            if self._part:
                value = self._part.height
            else:
                value = self._buffer.height
        except GenericException:
            value = self._node_map.Height.value
        return value
    @property
    def data_format_value(self):
        """
        :return: The data type of the data component as integer value.
        """
        try:
            if self._part:
                value = self._part.data_format
            else:
                value = self._buffer.pixel_format
        except GenericException:
            value = self._node_map.PixelFormat.value
        return value
    @property
    def data_format(self):
        """
        :return: The data type of the data component as string.
        """
        # `symbolics` maps PFNC integer codes to their symbolic names.
        return symbolics[self.data_format_value]
    @property
    def delivered_image_height(self):
        """
        :return: The image height of the data component.
        """
        try:
            if self._part:
                value = self._part.delivered_image_height
            else:
                value = self._buffer.delivered_image_height
        except GenericException:
            # Unlike width/height there is no node-map fallback here.
            value = 0
        return value
    @property
    def x_offset(self): # TODO: Check the naming convention.
        """
        :return: The X offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.x_offset
            else:
                value = self._buffer.offset_x
        except GenericException:
            value = self._node_map.OffsetX.value
        return value
    @property
    def y_offset(self):
        """
        :return: The Y offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.y_offset
            else:
                value = self._buffer.offset_y
        except GenericException:
            value = self._node_map.OffsetY.value
        return value
    @property
    def x_padding(self):
        """
        Returns
        :return: The X padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.x_padding
            else:
                value = self._buffer.padding_x
        except GenericException:
            value = 0
        return value
    @property
    def y_padding(self):
        """
        :return: The Y padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.y_padding
            else:
                value = self._buffer.padding_y
        except GenericException:
            value = 0
        return value
class Buffer:
    """
    Is provided by an :class:`ImageAcquirer` object when you call its
    :meth:`~harvesters.core.ImageAcquirer.fetch_buffer` method. It provides
    you a way to access acquired data and its relevant information.

    Note that it will never be necessary to create this object by yourself
    in general.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The node map of the remote device that produced the
            buffer; consulted when the GenTL module cannot report a value.
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__()
        self._buffer = buffer
        self._node_map = node_map
        # NOTE(review): `_build_payload` is not defined in this chunk; it is
        # presumably a helper of this class defined elsewhere -- confirm.
        self._payload = self._build_payload(
            buffer=buffer,
            node_map=node_map,
            logger=self._logger
        )

    @property
    def timestamp_ns(self):
        """
        :return: The timestamp in nano-second.
        """
        return self._buffer.timestamp_ns

    @property
    def timestamp(self):
        """
        :return: The timestamp in the TL specific unit.
        """
        # Prefer the nanosecond timestamp; fall back to the TL-specific
        # timestamp only when a timestamp frequency is available, and to 0
        # when nothing can be read.
        timestamp = 0
        try:
            timestamp = self._buffer.timestamp_ns
        except GenericException:
            try:
                _ = self.timestamp_frequency
            except GenericException:
                pass
            else:
                try:
                    timestamp = self._buffer.timestamp
                except GenericException:
                    timestamp = 0
        return timestamp

    @property
    def timestamp_frequency(self):
        """
        :return: The timestamp frequency which is used to represent a timestamp.
        """
        # When the buffer reports nanoseconds the effective frequency is
        # 1 GHz; otherwise ask the data stream's parent device, then the
        # GEV tick-frequency node.
        frequency = 1000000000  # Hz
        try:
            _ = self._buffer.timestamp_ns
        except GenericException:
            try:
                frequency = self._buffer.parent.parent.timestamp_frequency
            except GenericException:
                try:
                    frequency = self._node_map.GevTimestampTickFrequency.value
                except GenericException:
                    pass
        return frequency

    @property
    def payload_type(self):
        """
        :return: The payload type that the :class:`Buffer` object contains.
        """
        return self._buffer.payload_type

    @property
    def payload(self):
        """
        :return: A containing object which derives from :class:`PayloadBase` class.
        """
        return self._payload

    def queue(self):
        """
        Queues the buffer to prepare for the upcoming image acquisition. Once
        the buffer is queued, the :class:`Buffer` object will be obsolete.
        You'll have nothing to do with it.

        Note that you have to return the ownership of the fetched buffers to
        the :class:`ImageAcquirer` object before stopping image acquisition
        calling this method because the :class:`ImageAcquirer` object tries
        to clear the self-allocated buffers when it stops image acquisition.
        """
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Queued Buffer module #{0}'
                ' containing frame #{1}'
                ' to DataStream module {2}'
                ' of Device module {3}'
                '.'.format(
                    self._buffer.context,
                    self._buffer.frame_id,
                    self._buffer.parent.id_,
                    self._buffer.parent.parent.id_
                )
            )
        self._buffer.parent.queue_buffer(self._buffer)
    # NOTE(review): the original ended with a dangling `@staticmethod` whose
    # decorated definition is missing from this chunk; as written it would
    # have decorated the next top-level class, so it has been removed.
class PayloadBase:
    """
    Common base for the payload wrapper types defined by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, logger=None):
        """
        :param buffer: The GenTL buffer module that backs this payload.
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        self._logger = logger or get_logger(name=__name__)
        super().__init__()
        self._buffer = buffer
        self._components = []

    @property
    def payload_type(self):
        """
        :return: The GenTL payload type reported by the underlying buffer.
        """
        return self._buffer.payload_type

    @property
    def components(self):
        """
        :return: A :class:`list` of :class:`ComponentBase`-derived objects.
        """
        return self._components
class PayloadUnknown(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_UNKNOWN`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadImage(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_IMAGE`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map, forwarded to component
            construction.
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
        # An image payload carries exactly one data component.
        component = self._build_component(buffer=buffer, node_map=node_map)
        self._components.append(component)
class PayloadRawData(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_RAW_DATA`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadFile(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_FILE`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_JPEG`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG2000(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_JPEG2000`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadH264(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_H264`.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map (required but not used
            by this payload type).
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadChunkOnly(PayloadBase):
    """
    Payload wrapper for buffers whose GenTL payload type is
    :const:`PAYLOAD_TYPE_CHUNK_ONLY`.
    """
class PayloadMultiPart(PayloadBase):
    """
    Represents a payload that is classified as
    :const:`PAYLOAD_TYPE_MULTI_PART` by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer module to wrap.
        :param node_map: The remote device's node map, forwarded to component
            construction.
        :param logger: An optional logger; a default one is created if omitted.
        """
        assert buffer
        assert node_map
        self._logger = logger or get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
        # Build one data component per GenTL "part" in the buffer.
        # (The original iterated with enumerate() but never used the index;
        # plain iteration is equivalent.)
        # NOTE(review): `_build_component` is not defined in this chunk; it
        # is presumably provided elsewhere in the hierarchy -- confirm.
        for part in self._buffer.parts:
            self._components.append(
                self._build_component(
                    buffer=buffer, part=part, node_map=node_map
                )
            )
class ImageAcquirer:
"""
Manages everything you need to acquire images from the connecting device.
"""
#
_event = Event()
_specialized_tl_type = ['U3V', 'GEV']
def __init__(
self, *, parent=None, device=None,
profiler=None, logger=None,
sleep_duration=_sleep_duration_default,
file_path=None
):
"""
:param parent:
:param device:
:param profiler:
:param logger:
:param sleep_duration:
:param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
"""
#
self._logger = logger or get_logger(name=__name__)
#
assert parent
assert device
#
super().__init__()
#
self._parent = parent
#
interface = device.parent
system = interface.parent
env_var = 'HARVESTERS_XML_FILE_DIR'
if env_var in os.environ:
self._xml_dir = os.getenv(env_var)
else:
self._xml_dir = None
#
try:
node_map = _get_port_connected_node_map(
port=system.port, logger=self._logger,
xml_dir=self._xml_dir
)
except GenericException as e:
self._logger.error(e, exc_info=True)
else:
self._system = System(module=system, node_map=node_map)
#
try:
node_map = _get_port_connected_node_map(
port=interface.port, logger=self._logger,
xml_dir=self._xml_dir
)
except GenericException as e:
self._logger.error(e, exc_info=True)
else:
self._interface = Interface(
module=interface, node_map=node_map, parent=self._system
)
#
try:
node_map = _get_port_connected_node_map(
port=device.local_port, logger=self._logger,
xml_dir=self._xml_dir
) # Local device's node map
except GenericException as e:
self._logger.error(e, exc_info=True)
else:
self._device = Device(
module=device, node_map=node_map, parent=self._interface
)
#
try:
node_map = _get_port_connected_node_map(
port=device.remote_port, logger=self._logger,
file_path=file_path, xml_dir=self._xml_dir
) # Remote device's node map
except GenericException as e:
self._logger.error(e, exc_info=True)
else:
self._remote_device = RemoteDevice(
module=self._device, node_map=node_map, parent=self._device
)
#
self._data_streams = []
self._event_new_buffer_managers = []
self._create_ds_at_connection = True
if self._create_ds_at_connection:
self._setup_data_streams()
#
self._profiler = profiler
#
self._mutex = Lock()
self._thread_image_acquisition = _BuiltInThread(
mutex=self._mutex,
worker=self._worker_image_acquisition,
logger=self._logger,
sleep_duration=sleep_duration
)
# Prepare handling the SIGINT event:
self._threads = []
self._threads.append(self._thread_image_acquisition)
# Create a signal handler if it's being run in the main thread:
self._sigint_handler = None
if current_thread() is main_thread():
self._sigint_handler = _SignalHandler(
event=self._event, threads=self._threads, logger=self._logger
)
signal.signal(signal.SIGINT, self._sigint_handler)
self._logger.info('Created a signal handler for SIGINT.')
#
self._num_filled_buffers_to_hold = 1
#
self._num_images_to_acquire = -1
#
self._timeout_for_image_acquisition = 1 # ms
#
self._statistics = Statistics()
#
self._announced_buffers = []
self._holding_filled_buffers = []
#
self._has_acquired_1st_image = False
self._is_acquiring_images = False
self._keep_latest = True
# Determine the default value:
num_buffers_default = 16
try:
self._min_num_buffers = self._data_streams[0].buffer_announce_min
except InvalidParameterException as e:
# In general, a GenTL Producer should not raise the
# InvalidParameterException to the inquiry for
# STREAM_INFO_BUF_ANNOUNCE_MIN because it is totally legal
# but we have observed a fact that there is at least one on
# the market. As a workaround we involve this try-except block:
self._logger.debug(e, exc_info=True)
self._min_num_buffers = num_buffers_default
self._num_buffers = num_buffers_default
else:
self._num_buffers = max(
num_buffers_default, self._min_num_buffers
)
#
self._signal_stop_image_acquisition = None
#
self._logger.info(
'Instantiated an ImageAcquirer object for {0}.'.format(
self._device.id_
)
)
#
self._chunk_adapter = self._get_chunk_adapter(
device=self.device, node_map=self.remote_device.node_map
)
# A callback method when it's called when a new buffer is delivered:
self._on_new_buffer_arrival = None
#
self._finalizer = weakref.finalize(self, self._destroy)
@staticmethod
@property
@on_new_buffer_arrival.setter
@property
@keep_latest.setter
@property
@num_buffers.setter
@property
@property
@num_filled_buffers_to_hold.setter
@property
@property
@property
def remote_device(self):
"""
:return: The remote device.
"""
return self._remote_device
@property
def device(self):
"""
:return: The proxy :class:`Device` module object of the connecting remote device.
"""
return self._device
@property
def interface(self):
"""
:return: The parent :class:`Interface` module object of the connecting remote device.
"""
return self._interface
@property
def system(self):
"""
:return: The parent :class:`System` module object of the connecting remote device.
"""
return self._system
def is_acquiring_images(self):
"""
:return: :const:`True` if it's acquiring images. Otherwise :const:`False`.
"""
return self._is_acquiring_images
@property
@timeout_for_image_acquisition.setter
@property
@thread_image_acquisition.setter
@property
@signal_stop_image_acquisition.setter
@property
@keep_latest.setter
def start_image_acquisition(self):
"""
Starts image acquisition.
:return: None.
"""
if not self._create_ds_at_connection:
self._setup_data_streams()
#
num_required_buffers = self._num_buffers
for data_stream in self._data_streams:
try:
num_buffers = data_stream.buffer_announce_min
if num_buffers < num_required_buffers:
num_buffers = num_required_buffers
except GenericException as e:
num_buffers = num_required_buffers
self._logger.debug(e, exc_info=True)
if data_stream.defines_payload_size():
buffer_size = data_stream.payload_size
else:
buffer_size = self.remote_device.node_map.PayloadSize.value
raw_buffers = self._create_raw_buffers(
num_buffers, buffer_size
)
buffer_tokens = self._create_buffer_tokens(
raw_buffers
)
self._announced_buffers = self._announce_buffers(
data_stream=data_stream, _buffer_tokens=buffer_tokens
)
self._queue_announced_buffers(
data_stream=data_stream, buffers=self._announced_buffers
)
# Reset the number of images to acquire.
try:
acq_mode = self.remote_device.node_map.AcquisitionMode.value
if acq_mode == 'Continuous':
num_images_to_acquire = -1
elif acq_mode == 'SingleFrame':
num_images_to_acquire = 1
elif acq_mode == 'MultiFrame':
num_images_to_acquire = self.remote_device.node_map.AcquisitionFrameCount.value
else:
num_images_to_acquire = -1
except GenericException as e:
# The node doesn't exist.
num_images_to_acquire = -1
self._logger.debug(e, exc_info=True)
self._num_images_to_acquire = num_images_to_acquire
try:
# We're ready to start image acquisition. Lock the device's
# transport layer related features:
self.remote_device.node_map.TLParamsLocked.value = 1
except GenericException:
# SFNC < 2.0
pass
# Start image acquisition.
self._is_acquiring_images = True
for data_stream in self._data_streams:
data_stream.start_acquisition(
ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,
self._num_images_to_acquire
)
#
if self.thread_image_acquisition:
self.thread_image_acquisition.start()
#
self.remote_device.node_map.AcquisitionStart.execute()
self._logger.info(
'{0} started image acquisition.'.format(self._device.id_)
)
if self._profiler:
self._profiler.print_diff()
def fetch_buffer(self, *, timeout=0, is_raw=False):
"""
Fetches the latest :class:`Buffer` object and returns it.
:param timeout: Set timeout value in second.
:param is_raw: Set :const:`True` if you need a raw GenTL Buffer module.
:return: A :class:`Buffer` object.
"""
if not self.is_acquiring_images():
raise TimeoutException
watch_timeout = True if timeout > 0 else False
buffer = None
base = time.time()
while buffer is None:
if watch_timeout and (time.time() - base) > timeout:
raise TimeoutException
else:
with MutexLocker(self.thread_image_acquisition):
if len(self._holding_filled_buffers) > 0:
if is_raw:
buffer = self._holding_filled_buffers.pop(0)
else:
# Update the chunk data:
_buffer = self._holding_filled_buffers.pop(0)
self._update_chunk_data(buffer=_buffer)
#
buffer = Buffer(
buffer=_buffer,
node_map=self.remote_device.node_map,
logger=self._logger
)
if _is_logging_buffer_manipulation:
self._logger.debug(
'Fetched Buffer module #{0}'
' containing frame #{1}'
' of DataStream module {2}'
' of Device module {2}'
'.'.format(
buffer._buffer.context,
buffer._buffer.frame_id,
buffer._buffer.parent.id_,
buffer._buffer.parent.parent.id_
)
)
return buffer
@staticmethod
@staticmethod
def stop_image_acquisition(self):
"""
Stops image acquisition.
:return: None.
"""
if self.is_acquiring_images():
#
self._is_acquiring_images = False
#
if self.thread_image_acquisition.is_running(): # TODO
self.thread_image_acquisition.stop()
with MutexLocker(self.thread_image_acquisition):
#
self.remote_device.node_map.AcquisitionStop.execute()
try:
# Unlock TLParamsLocked in order to allow full device
# configuration:
self.remote_device.node_map.TLParamsLocked.value = 0
except GenericException:
# SFNC < 2.0
pass
for data_stream in self._data_streams:
# Stop image acquisition.
try:
data_stream.stop_acquisition(
ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL
)
except GenericException as e:
self._logger.error(e, exc_info=True)
# Flash the queue for image acquisition process.
data_stream.flush_buffer_queue(
ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD
)
for event_manager in self._event_new_buffer_managers:
event_manager.flush_event_queue()
if self._create_ds_at_connection:
self._release_buffers()
else:
self._release_data_streams()
#
self._has_acquired_1st_image = False
#
self._chunk_adapter.detach_buffer()
#
self._logger.info(
'{0} stopped image acquisition.'.format(self._device.id_)
)
if self._profiler:
self._profiler.print_diff()
def _destroy(self):
"""
Destroys the :class:`ImageAcquirer` object. Once you called this
method, all allocated resources, including buffers and the remote
device, are released.
:return: None.
"""
# Ask its parent to destroy it:
if self._device:
self._parent._destroy_image_acquirer(self)
class Harvester:
"""
Is the class that works for you as Harvester Core. Everything begins with
this class.
"""
#
def __init__(self, *, profile=False, logger=None):
"""
:param profile:
:param logger:
"""
#
self._logger = logger or get_logger(name=__name__)
#
super().__init__()
#
self._cti_files = []
self._producers = []
self._systems = []
self._interfaces = []
self._device_info_list = []
self._ias = []
#
self._has_revised_device_list = False
self._timeout_for_update = 1000 # ms
#
if profile:
from harvesters._private.core.helper.profiler import Profiler
self._profiler = Profiler()
else:
self._profiler = None
if self._profiler:
self._profiler.print_diff()
#
self._finalizer = weakref.finalize(self, self._reset)
@property
def cti_files(self):
"""
:return: A :class:`list` object containing :class:`str` objects.
"""
return self._cti_files
@property
def device_info_list(self):
"""
:return: A :class:`list` object containing :class:`DeviceInfo` objects
"""
return self._device_info_list
@property
@timeout_for_update.setter
@property
@has_revised_device_info_list.setter
def create_image_acquirer(
self, list_index=None, *, id_=None,
vendor=None, model=None, tl_type=None, user_defined_name=None,
serial_number=None, version=None,
sleep_duration=_sleep_duration_default, file_path=None,
privilege='exclusive'
):
"""
Creates an image acquirer for the specified remote device and return
the created :class:`ImageAcquirer` object.
:param list_index: (Optional) Set an item index of the list of :class:`DeviceInfo` objects.
:param id_: (Optional) Set an index of the device information list.
:param vendor: (Optional) Set a vendor name of the target device.
:param model: (Optional) Set a model name of the target device.
:param tl_type: (Optional) Set a transport layer type of the target device.
:param user_defined_name: (Optional) Set a user defined name string of the target device.
:param serial_number: (Optional) Set a serial number string of the target device.
:param version: (Optional) Set a version number string of the target device.
:param sleep_duration: (Optional) Set a sleep duration in second that is inserted after the image acquisition worker is executed.
:param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
:param privilege: (Optional) Set an access privilege. `exclusive`, `contorl`, and `read_only` are supported. The default is `exclusive`.
:return: An :class:`ImageAcquirer` object that associates with the specified device.
Note that you have to close it when you are ready to release the
device that you have been controlled. As long as you hold it, the
controlled device will be not available from other clients.
"""
#
if self.device_info_list is None:
# TODO: Throw an exception to tell clients that there's no
# device to connect.
return
# Instantiate a GenTL Device module.
if list_index is not None:
device = self.device_info_list[list_index].create_device()
else:
keys = [
'id_', 'vendor', 'model', 'tl_type',
'user_defined_name', 'serial_number', 'version',
]
# Create a copy of the list. Do not use the original list:
candidates = self.device_info_list.copy()
for key in keys:
key_value = eval(key)
if key_value:
items_to_be_removed = []
# Find out the times to be removed from the candidates.
for item in candidates:
try:
if key_value != eval('item.' + key):
items_to_be_removed.append(item)
except GenericException as e:
# The candidate doesn't support the information.
self._logger.warn(e, exc_info=True)
pass
# Remove irrelevant items from the candidates.
for item in items_to_be_removed:
candidates.remove(item)
num_candidates = len(candidates)
if num_candidates > 1:
raise ValueError(
'You have two or more candidates. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
elif num_candidates == 0:
raise ValueError(
'You have no candidate. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
else:
device = candidates[0].create_device()
# Then open it.
try:
#
if privilege == 'exclusive':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_EXCLUSIVE
elif privilege == 'control':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_CONTROL
elif privilege == 'read_only':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_READONLY
else:
raise NotImplementedError(
'{0} is not supported.'.format(privilege)
)
#
device.open(_privilege)
except GenericException as e:
self._logger.debug(e, exc_info=True)
# Just re-throw the exception. The decision should be made by
# the client but not Harvester:
raise
else:
self._logger.info(
'Opened Device module, {0}.'.format(device.id_)
)
# Create an :class:`ImageAcquirer` object and return it.
ia = ImageAcquirer(
parent=self, device=device, profiler=self._profiler,
logger=self._logger, sleep_duration=sleep_duration,
file_path=file_path
)
self._ias.append(ia)
if self._profiler:
self._profiler.print_diff()
return ia
def add_cti_file(self, file_path: str):
"""
Adds a CTI file to work with to the CTI file list.
:param file_path: Set a file path to the target CTI file.
:return: None.
"""
if not os.path.exists(file_path):
self._logger.warning(
'Attempted to add {0} which does not exist.'.format(file_path)
)
if file_path not in self._cti_files:
self._cti_files.append(file_path)
self._logger.info(
'Added {0} to the CTI file list.'.format(file_path)
)
def remove_cti_file(self, file_path: str):
"""
Removes the specified CTI file from the CTI file list.
:param file_path: Set a file path to the target CTI file.
:return: None.
"""
if file_path in self._cti_files:
self._cti_files.remove(file_path)
self._logger.info(
'Removed {0} from the CTI file list.'.format(file_path)
)
def remove_cti_files(self):
"""
Removes all CTI files in the CTI file list.
:return: None.
"""
self._cti_files.clear()
#
self._logger.info('Removed the all CTI file from the list.')
def _reset(self):
"""
Initializes the :class:`Harvester` object. Once you reset the
:class:`Harvester` object, all allocated resources, including buffers
and remote device, will be released.
:return: None.
"""
#
for ia in self._ias:
ia._destroy()
self._ias.clear()
#
self._logger.info('Started resetting the Harvester object.')
self.remove_cti_files()
self._release_gentl_producers()
if self._profiler:
self._profiler.print_diff()
#
self._logger.info('Completed resetting the Harvester object.')
def update_device_info_list(self):
"""
Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None.
"""
#
self._release_gentl_producers()
try:
self._open_gentl_producers()
self._open_systems()
#
for system in self._systems:
#
system.update_interface_info_list(self.timeout_for_update)
#
for i_info in system.interface_info_list:
iface = i_info.create_interface()
try:
iface.open()
except GenericException as e:
self._logger.debug(e, exc_info=True)
else:
self._logger.info(
'Opened Interface module, {0}.'.format(iface.id_)
)
iface.update_device_info_list(self.timeout_for_update)
self._interfaces.append(iface)
for d_info in iface.device_info_list:
self.device_info_list.append(
DeviceInfo(device_info=d_info)
)
except GenericException as e:
self._logger.error(e, exc_info=True)
self._has_revised_device_list = False
else:
self._has_revised_device_list = True
#
self._logger.info('Updated the device information list.')
def _destroy_image_acquirer(self, ia):
"""
Releases all external resources including the controlling device.
"""
id_ = None
if ia.device:
#
ia.stop_image_acquisition()
#
ia._release_data_streams()
#
id_ = ia._device.id_
#
if ia.remote_device.node_map:
#
if ia._chunk_adapter:
ia._chunk_adapter.detach_buffer()
ia._chunk_adapter = None
self._logger.info(
'Detached a buffer from the chunk adapter of {0}.'.format(
id_
)
)
ia.device.node_map.disconnect()
self._logger.info(
'Disconnected the port from the NodeMap of {0}.'.format(
id_
)
)
#
if ia._device.is_open():
ia._device.close()
self._logger.info(
'Closed Device module, {0}.'.format(id_)
)
ia._device = None
#
if id_:
self._logger.info(
'Destroyed the ImageAcquirer object which {0} '
'had belonged to.'.format(id_)
)
else:
self._logger.info(
'Destroyed an ImageAcquirer.'
)
if self._profiler:
self._profiler.print_diff()
self._ias.remove(ia)
if __name__ == '__main__':
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
16529,
10541,
198,
2,
198,
2,
15069,
2864,
17228,
11731,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
74... | 2.077543 | 24,206 |
import os, sys
import time
import configparser
import argparse
sys.path.append('../src/')
from python import helper as hp
from python import fixed_parameters as FP
parser = argparse.ArgumentParser(description='Process generated data for proba extraction')
parser.add_argument('-c','--configfile', type=str, help='path to config file', required=True)
parser.add_argument('-f','--name_data', type=str, help='Name of the ft file', required=True)
parser.add_argument('-e','--epoch', type=str, help='Which epoch to sample from', required=True)
parser.add_argument('-r','--repeat', type=int, help='Number of repeats', required=True)
if __name__ == '__main__':
start = time.time()
####################################
# get back parameters
args = vars(parser.parse_args())
verbose = True
configfile = args['configfile']
config = configparser.ConfigParser()
config.read(configfile)
name_data = args['name_data']
epoch = args['epoch']
if len(epoch)==1:
epoch = f'0{epoch}'
repeat = args['repeat']
mode = str(config['EXPERIMENTS']['mode'])
if verbose: print('\nSTART PROCESSING')
####################################
####################################
# paths to save data and to generated smi
dir_exp = str(config['EXPERIMENTS']['dir'])
exp_name = configfile.split('/')[-1].replace('.ini','')
if repeat>0:
savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/{repeat}/'
dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/{repeat}/'
else:
savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/'
dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/'
os.makedirs(savepath, exist_ok=True)
####################################
####################################
# start
min_len = int(config['PROCESSING']['min_len'])
max_len = int(config['PROCESSING']['max_len'])
vocab = list(FP.CST_PRIOR.keys())
temp = float(config['SAMPLING']['temp'])
top_k = int(config['SAMPLING']['top_k'])
top_p = float(config['SAMPLING']['top_p'])
namefile = f'{epoch}_{temp}_{top_k}_{top_p}'
generated_smi = hp.load_obj(f'{dir_gen}{namefile}.pkl')
smis_for_extraction = {}
for i,smi in enumerate(generated_smi):
smi = smi.replace('G', '')
smi = smi.replace('E', '')
try:
tokenized_s, _ = hp.process_smi(smi, min_len, max_len, vocab)
if tokenized_s:
smis_for_extraction[i] = tokenized_s
except:
pass
hp.save_obj(smis_for_extraction, f'{savepath}{namefile}_for_extraction.pkl')
end = time.time()
if verbose: print(f'PROCESSING DONE in {end-start:.2f} seconds')
####################################
| [
11748,
28686,
11,
25064,
198,
11748,
640,
198,
11748,
4566,
48610,
198,
11748,
1822,
29572,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
10677,
14,
11537,
198,
6738,
21015,
1330,
31904,
355,
27673,
198,
6738,
21015,
1330,
5969,
62,... | 2.324154 | 1,271 |
from yacs.config import CfgNode as CN
_C = CN()
# directories
_C.ADDRESS = CN()
_C.ADDRESS.DATA = 'data/'
_C.ADDRESS.CHECK = 'checkpoints/'
# data
_C.DATA = CN()
_C.DATA.NUM_CONT_FEATURES = 7
# model
_C.MODEL = CN()
_C.MODEL.NAME = 'base_tab'
_C.MODEL.HIDDEN_SIZE = 32
_C.MODEL.NUM_LAYERS = 6
_C.MODEL.NUM_HEADS = 8
_C.MODEL.ATTN_DROP_RATE = 0.1
_C.MODEL.FF_DROP_RATE = 0.1
# train
_C.TRAIN = CN()
_C.TRAIN.RUN_NAME = 'v1'
_C.TRAIN.BATCH_SIZE = 64
_C.TRAIN.EPOCHS = 5
_C.TRAIN.PATIENCE = 2
_C.TRAIN.SCHEDULER = 'cos'
_C.TRAIN.FIRST_CYCLE_STEPS = 100
_C.TRAIN.CYCLE_MULT = 1.0
_C.TRAIN.MAX_LR = 0.1
_C.TRAIN.MIN_LR = 0.001
_C.TRAIN.WARMUP_STEPS = 0
_C.TRAIN.GAMMA = 1.0
def get_cfg_defaults():
"""
get a yacs CfgNode object with default values
"""
return _C.clone()
| [
6738,
331,
16436,
13,
11250,
1330,
327,
40616,
19667,
355,
31171,
628,
198,
62,
34,
796,
31171,
3419,
198,
198,
2,
29196,
198,
62,
34,
13,
2885,
7707,
7597,
796,
31171,
3419,
198,
62,
34,
13,
2885,
7707,
7597,
13,
26947,
796,
705,
... | 1.852459 | 427 |
"""Exceptions for release script"""
from subprocess import CalledProcessError
class InputException(Exception):
"""Exception raised for invalid input."""
class ReleaseException(Exception):
"""Exception raised for a command error due to some release status"""
class DependencyException(Exception):
"""Error if dependency is missing"""
class UpdateVersionException(Exception):
"""Error if the old version is invalid or cannot be found, or if there's a duplicate version"""
class VersionMismatchException(Exception):
"""Error if the version is unexpected"""
class StatusException(Exception):
"""Error if something happened when calculating the status"""
class AsyncCalledProcessError(CalledProcessError):
"""Extend CalledProcessError to print the stdout as well"""
| [
37811,
3109,
11755,
329,
2650,
4226,
37811,
198,
6738,
850,
14681,
1330,
34099,
18709,
12331,
628,
198,
4871,
23412,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
16922,
4376,
329,
12515,
5128,
526,
15931,
628,
198,
4871,
13868,
16922... | 4.160622 | 193 |
from flask import current_app
from flask import render_template
from app.custom_error import *
from app.template_filter import display_size
from app.models import Error
# error map
error_map = {
403: forbidden,
404: page_not_found,
413: file_is_too_big,
# custom error
FileIsEmpty: file_is_empty,
FileIsTooBig: file_is_too_big
}
| [
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
6738,
598,
13,
23144,
62,
18224,
1330,
1635,
198,
6738,
598,
13,
28243,
62,
24455,
1330,
3359,
62,
7857,
198,
6738,
598,
13,
27530,
1330,
13047... | 2.806202 | 129 |
#!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import torch
import pytest
import poptorch
import helpers
# Not need for mean or logsumexp
reduce_ops = [torch.sum, torch.prod]
test_tensors = [
torch.tensor([1.0, 2.0, 3.1]),
torch.tensor([1.1, 2.0, 3.0]),
torch.tensor([0.0, 0.0, 0.0])
]
@pytest.mark.parametrize("op", reduce_ops)
@pytest.mark.parametrize("t_1", test_tensors)
@pytest.mark.parametrize("t_2", test_tensors)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
33448,
29681,
7295,
12052,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
28034,
198,
11748,
12972,
9288,
198,
11748,
1461,
13165,
354,
198,
11748,
49385,
198,
1... | 2.297561 | 205 |
_base_ = [
'../_base_/datasets/minicoco500_detection_augm.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FOVEA',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs='on_input'),
bbox_head=dict(
type='FoveaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.50,
alpha=0.4,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
nms_pre=1000,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
# learning policy
lr_config = dict(step=[75, 95])
total_epochs = 100
| [
62,
8692,
62,
796,
685,
198,
220,
220,
220,
705,
40720,
62,
8692,
62,
14,
19608,
292,
1039,
14,
1084,
291,
25634,
4059,
62,
15255,
3213,
62,
7493,
76,
13,
9078,
3256,
198,
220,
220,
220,
705,
40720,
62,
8692,
62,
14,
1416,
704,
... | 1.893029 | 832 |
# -*- coding: utf-8 -*-
r"""
Frequency-dependent foreground components.
This module implements the frequency-dependent component of common foreground
contaminants.
This package draws inspiration from FGBuster (Davide Poletti and Josquin Errard)
and BeFoRe (David Alonso and Ben Thorne).
"""
import inspect
import types
import numpy as np
from scipy import constants
from .model import Model
T_CMB = 2.72548
H_OVER_KT_CMB = constants.h * 1e9 / constants.k / T_CMB
def _bandpass_integration():
''' Bandpass integrated version of the caller
The caller should have
if isinstance(nu, list):
return _bandpass_integration()
at the very beginning.
This function
* iterates over the ``nu`` argument of the caller
(while keeping all the other arguments fixed)
* splits each element of the iteration in ``nu_band, transmittance``
* integrates the caller function over the bandpass.
``np.trapz(caller(nu_band) * transmittance, nu_band)``
Note that no normalization nor unit conversion is done to the
transmittance
* stacks the output of the iteration (the frequency dimension is the last)
and returns it
'''
# This piece of code is fairly complicated, we did because:
# 1) We want to call eval on each element of the nu list (i.e. we iterate
# over the bandpasses) but we don't want to define a new eval_bandpass
# function for every class
# 2) We don't want to use a decorator because it breaks the signature
# handling of eval and the modification of its defaults.
# _bandpass_integration does from the insides of eval the same thing that
# a decorator would do from the outside. This is achieved through the
# following pretty ugly kludge
# Simpler code that achieve the same result is welcome
# You are here because this function was called inside eval before any other
# variable was defined.
# We now retrieve the keyword arguments that were passed to eval because we
# have to use them for the evaluation of eval on each bandpass
# It assumes that _bandpass_integration was called inside
# f(self, **kw) -- f is typically the eval method.
frame = inspect.currentframe().f_back
kw = frame.f_locals
self = kw['self']
del kw['self'] # self was in the locals but is not a keyword argument
# We create a copy of eval itself, we'll call it for each bandpass
f = types.FunctionType(frame.f_code, frame.f_globals)
# Store the nu-transmittance list because the nu keyword argumnt has to be
# modified with the frequencies of each bandpass
nus_transmittances = kw['nu']
# Get the shape of the output from the result of the first bandpass
kw['nu'] = nus_transmittances[0][0]
res = np.trapz(f(self, **kw) * nus_transmittances[0][1], kw['nu'])
# Append the frequency dimension and put res in its first entry
res = res[..., np.newaxis] * np.array([1.]+[0.]*(len(nus_transmittances)-1))
# Fill the remaining entries by iterating over the rest of the bandpasses
for i_band, (nu, transmittance) in enumerate(nus_transmittances[1:], 1):
kw['nu'] = nu
res[..., i_band] = np.trapz(f(self, **kw) * transmittance, nu)
return res
class PowerLaw(Model):
r""" Power Law
.. math:: f(\nu) = (\nu / \nu_0)^{\beta}
"""
def eval(self, nu=None, beta=None, nu_0=None):
""" Evaluation of the SED
Parameters
----------
nu: float or array
Frequency in the same units as `nu_0`. If array, the shape is
``(freq)``.
beta: float or array
Spectral index. If array, the shape is ``(...)``.
nu_0: float or array
Reference frequency in the same units as `nu`. If array, the shape
is ``(...)``.
Returns
-------
sed: ndarray
If `nu` is an array, the shape is ``(..., freq)``.
If `nu` is scalar, the shape is ``(..., 1)``.
Note that the last dimension is guaranteed to be the frequency.
Note
----
The extra dimensions ``...`` in the output are the broadcast of the
``...`` in the input (which are required to be broadcast-compatible).
Examples
--------
- T, E and B synchrotron SEDs with the same reference frequency but
different spectral indices. `beta` is an array with shape ``(3)``,
`nu_0` is a scalar.
- SEDs of synchrotron and dust (approximated as power law). Both `beta`
and `nu_0` are arrays with shape ``(2)``
"""
if isinstance(nu, list):
return _bandpass_integration()
beta = np.array(beta)[..., np.newaxis]
nu_0 = np.array(nu_0)[..., np.newaxis]
return (nu / nu_0)**beta * (_rj2cmb(nu) / _rj2cmb(nu_0))
class Synchrotron(PowerLaw):
""" Alias of :class:`PowerLaw`
"""
pass
class ModifiedBlackBody(Model):
r""" Modified black body in K_RJ
.. math:: f(\nu) = (\nu / \nu_0)^{\beta + 1} / (e^x - 1)
where :math:`x = h \nu / k_B T_d`
"""
def eval(self, nu=None, nu_0=None, temp=None, beta=None):
""" Evaluation of the SED
Parameters
----------
nu: float or array
Frequency in GHz.
beta: float or array
Spectral index.
temp: float or array
Dust temperature.
nu_0: float
Reference frequency in Hz.
Returns
-------
sed: ndarray
The last dimension is the frequency dependence.
The leading dimensions are the broadcast between the hypothetic
dimensions of `beta` and `temp`.
"""
if isinstance(nu, list):
return _bandpass_integration()
beta = np.array(beta)[..., np.newaxis]
temp = np.array(temp)[..., np.newaxis]
x = 1e+9 * constants.h * nu / (constants.k * temp)
x_0 = 1e+9 * constants.h * nu_0 / (constants.k * temp)
res = (nu / nu_0)**(beta + 1.0) * np.expm1(x_0) / np.expm1(x)
return res * (_rj2cmb(nu) / _rj2cmb(nu_0))
class CIB(ModifiedBlackBody):
""" Alias of :class:`ModifiedBlackBOdy`
"""
pass
class ThermalSZ(Model):
r""" Thermal Sunyaev-Zel'dovich in K_CMB
This class implements the
.. math:: f(\nu) = x \coth(x/2) - 4
where :math:`x = h \nu / k_B T_CMB`
"""
@staticmethod
def eval(self, nu=None, nu_0=None):
"""Compute the SED with the given frequency and parameters.
nu : float
Frequency in GHz.
T_CMB (optional) : float
"""
if isinstance(nu, list):
return _bandpass_integration()
return ThermalSZ.f(nu) / ThermalSZ.f(nu_0)
class FreeFree(Model):
r""" Free-free
.. math:: f(\nu) = EM * ( 1 + log( 1 + (\nu_{ff} / \nu)^{3/\pi} ) )
.. math:: \nu_{ff} = 255.33e9 * (Te / 1000)^{3/2}
"""
def eval(self, nu=None, EM=None, Te=None):
""" Evaluation of the SED
Parameters
----------
nu: float or array
Frequency in the same units as `nu_0`. If array, the shape is
``(freq)``.
EM: float or array
Emission measure in cm^-6 pc (usually around 300). If array, the shape is ``(...)``.
Te: float or array
Electron temperature (typically around 7000). If array, the shape is ``(...)``.
Returns
-------
sed: ndarray
If `nu` is an array, the shape is ``(..., freq)``.
If `nu` is scalar, the shape is ``(..., 1)``.
Note that the last dimension is guaranteed to be the frequency.
Note
----
The extra dimensions ``...`` in the output are the broadcast of the
``...`` in the input (which are required to be broadcast-compatible).
Examples
--------
- Free-free emission in temperature.
"""
if isinstance(nu, list):
return _bandpass_integration()
EM = np.array(EM)[..., np.newaxis]
Te = np.array(Te)[..., np.newaxis]
Teff = (Te / 1.e3)**(1.5)
nuff = 255.33e9 * Teff
gff = 1. + np.log(1. + (nuff / nu)**(np.sqrt(3) / np.pi))
print("warning: I need to check the units on this")
return EM * gff
class ConstantSED(Model):
"""Frequency-independent component."""
def eval(self, nu=None, amp=1.):
""" Evaluation of the SED
Parameters
----------
nu: float or array
It just determines the shape of the output.
amp: float or array
Amplitude (or set of amplitudes) of the constant SED.
Returns
-------
sed: ndarray
If `nu` is an array, the shape is ``amp.shape + (freq)``.
If `nu` is scalar, the shape is ``amp.shape + (1)``.
Note that the last dimension is guaranteed to be the frequency.
"""
if isinstance(nu, list):
return _bandpass_integration()
amp = np.array(amp)[..., np.newaxis]
return amp * np.ones_like(np.array(nu))
class Join(Model):
""" Join several SED models together
"""
def __init__(self, *seds, **kwargs):
""" Join several SED models together
Parameters
----------
*sed:
Sequence of SED models to be joined together
"""
self._seds = seds
self.set_defaults(**kwargs)
@property
def eval(self, kwseq=None):
"""Compute the SED with the given frequency and parameters.
*kwseq
The length of ``kwseq`` has to be equal to the number of SEDs
joined. ``kwseq[i]`` is a dictionary containing the keyword
arguments of the ``i``-th SED.
"""
if kwseq:
seds = [sed(**kwargs) for sed, kwargs in zip(self._seds, kwseq)]
else: # Handles the case in which no parameter has to be passed
seds = [sed() for sed in self._seds]
res = np.empty((len(seds),) + np.broadcast(*seds).shape)
for i in range(len(seds)):
res[i] = seds[i]
return res
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
81,
37811,
198,
37,
28707,
12,
21186,
36282,
6805,
13,
198,
198,
1212,
8265,
23986,
262,
8373,
12,
21186,
7515,
286,
2219,
36282,
198,
3642,
5669,
1187,
13,
198,
198,
... | 2.385781 | 4,290 |
import json
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.python import to_unicode
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
| [
11748,
33918,
198,
11748,
18931,
198,
198,
6738,
1332,
69,
25506,
1330,
5972,
49630,
198,
6738,
19074,
13,
37675,
1330,
29135,
198,
6738,
19074,
13,
45994,
13,
403,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
15881,
88,
13,
4023,
1330,... | 3.947826 | 115 |
"""Setup module for Robot Framework Docker Library package."""
import os
from setuptools import setup
# get absolute source directory path
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
long_description = readme_file.read().split('long_description split')[1].strip()
setup(
name='robotframework-docker',
version='1.3.0',
description='A Robot Framework Docker Library',
long_description=long_description,
url='https://github.com/vogoltsov/robotframework-docker',
author='Vitaly Ogoltsov',
author_email='vitaly.ogoltsov@me.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Robot Framework :: Library',
],
keywords='testing testautomation robotframework docker docker-compose',
package_dir={'': 'src'},
py_modules=['DockerComposeLibrary'],
install_requires=[
'robotframework>=4,<5',
'packaging',
],
)
| [
37811,
40786,
8265,
329,
16071,
25161,
25716,
10074,
5301,
526,
15931,
198,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
651,
4112,
2723,
8619,
3108,
198,
1456,
796,
28686,
13,
6978,
13,
397,
2777,
776,... | 2.811715 | 478 |
import codecs | [
11748,
40481,
82
] | 4.333333 | 3 |
import os
import random
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from facade_project import NUM_IMAGES, NUM_ROTATIONS, FACADE_ROT_IMAGES_TENSORS_DIR, FACADE_ROT_HEATMAPS_TENSORS_DIR
from facade_project.data.facade_heatmap_dataset import HEATMAP_INFOS_PER_ROT
from facade_project.geometry.heatmap import HeatmapsInfo
class FacadeRandomRotDataset(Dataset):
"""
Facade Random Rotations
A dataset which return rotated version of an image randomly,
useful to build batches for data augmentation
Items of the dataset are: tuple(image, mask) or tuple(image, dict) if add_aux_channels_fn is used
A demo can be found in "notebook/nb_demo_datasets.ipynb"
Note that this dataset cannot makes use the CachedDataset directly because it samples images within
the available rotations. Hence a caching is available directly and implemented here enable
sampling different rotations
"""
| [
11748,
28686,
198,
11748,
4738,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
80,
36020,
198,
198,
6738,
43562,
62,
16302,
1330,
36871,
62,
3955,
... | 3.19398 | 299 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 15:01:56 2018
@author: rick
"""
from sqlalchemy import create_engine
# eof | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2556,
1987,
1315,
25,
486,
25,
3980,
2864,
198,
198,
31,
9800,
25,
374,
624,
198,
... | 2.47541 | 61 |
import pandas as pd
classes = ['A', 'B', 'C', 'D', 'E']
sorted(classes)
df = pd.read_csv('topk_ids.csv', header=None)
top1_ans = dict(df[1].value_counts())
new_dict = {classes[key]:val for key,val in top1_ans.items()}
print (new_dict)
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
37724,
796,
37250,
32,
3256,
705,
33,
3256,
705,
34,
3256,
705,
35,
3256,
705,
36,
20520,
198,
82,
9741,
7,
37724,
8,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
10786,
4852,
74,
... | 2.298077 | 104 |
import os
from flask import Blueprint
from jmapp.lib.auth import jwt_required
from model import job_mod, apply_mod, offer_mod
fname = os.path.basename(__file__).split(".")[0]
job = Blueprint(fname, __name__)
job_m = job_mod()
apply_m = apply_mod()
offer_m = offer_mod()
@job.route("/", methods=["GET"])
@jwt_required
@job.route("/view", methods=["GET"])
@job.route("/view", methods=["POST"])
@job.route("/apply", methods=["GET"])
@job.route("/jobcat", methods=["POST"])
@job.route("/offer", methods=["GET"])
| [
11748,
28686,
198,
6738,
42903,
1330,
39932,
198,
6738,
474,
76,
1324,
13,
8019,
13,
18439,
1330,
474,
46569,
62,
35827,
198,
198,
6738,
2746,
1330,
1693,
62,
4666,
11,
4174,
62,
4666,
11,
2897,
62,
4666,
198,
198,
69,
3672,
796,
28... | 2.581281 | 203 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Hiroshi Murayama <opiopan@gmail.com>
import os
METADATA = {
'name': 'pcb-tools-extension',
'version': "0.9.3",
'author': 'Hiroshi Murayama <opiopan@gmail.com>',
'author_email': "opiopan@gmail.com",
'description': ("Extension for pcb-tools package to panelize gerber files"),
'license': "Apache",
'keywords': "pcb gerber tools extension",
'url': "http://github.com/opiopan/pcb-tools-extension",
'packages': ['gerberex'],
'long_description': read('README.md'),
'long_description_content_type': 'text/markdown',
'classifiers': [
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
}
SETUPTOOLS_METADATA = {
'install_requires': ['pcb-tools', 'dxfgrabber'],
}
def install():
""" Install using setuptools, fallback to distutils
"""
try:
from setuptools import setup
METADATA.update(SETUPTOOLS_METADATA)
setup(**METADATA)
except ImportError:
from sys import stderr
stderr.write('Could not import setuptools, using distutils')
stderr.write('NOTE: You will need to install dependencies manualy')
from distutils.core import setup
setup(**METADATA)
if __name__ == '__main__':
install()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
13130,
35763,
72,
5921,
323,
1689,
1279,
404,
14922,
272,
31,
14816,
13,
785,
29,
198,
198,
11748,
286... | 2.484761 | 689 |
"""
Module to plot outages data.
"""
from .animated_availability import *
from .evolution_mean_availability import *
from .expected_program import *
from .incremental_programs import *
from .regression_delays import * | [
198,
37811,
198,
220,
220,
220,
19937,
284,
7110,
503,
1095,
1366,
13,
198,
198,
37811,
628,
198,
6738,
764,
11227,
515,
62,
47274,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
1635,
198,
6738,
764,
1990,
2122,
62,
32604,
62,
47274... | 2.46789 | 109 |
import IPAM.CLASS
# NOTE: Blocks with a masklen set are supernets of various subnets
# within the originating parent supernet. Most class B and C
# addresing has been rulled up int supernets as /8s. This
# means in most cases, you use/allocate a NET-BLOCK allocation
# first with the correct subnetlen applied. Then when you
# assign segments out of this block, they'll be indexed as seen
# in modern whois implementations. The CLASSFUL allocations
# are purely here for the edge use-case that requires them.
RIPE.NET2 = IPAM.CLASS.A.subnet( index=2, subnetlen=12 )
| [
11748,
6101,
2390,
13,
31631,
198,
198,
2,
24550,
25,
35111,
351,
257,
9335,
11925,
900,
389,
2208,
45938,
286,
2972,
850,
45938,
198,
2,
220,
220,
220,
220,
220,
220,
1626,
262,
37962,
2560,
2208,
3262,
13,
4042,
1398,
347,
290,
32... | 2.990244 | 205 |
#!/usr/bin/env python3
# encoding: utf-8
# @Time : 2017/12/16 下午8:41
# @Author : yuchangqian
# @Contact : changqian_yu@163.com
# @File : BaseDataset.py
import os
import time
import cv2
import torch
import numpy as np
import torch.utils.data as data
if __name__ == "__main__":
data_setting = {'img_root': ''}
bd = TestData(data_setting, 'test/', None)
print(bd.get_class_names())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2177,
14,
1065,
14,
1433,
220,
10310,
233,
39355,
230,
23,
25,
3901,
198,
2,
2488,
13838,
220,
1058,
... | 2.343023 | 172 |
'''
Url shortener model
'''
from django.db import models
from django.urls import reverse_lazy
from .utils import create_shortened_url
# Create your models here.
class Shortener(models.Model):
'''
Creates a short url based on the long one
created -> Hour and date a shortener was created
times_followed -> Times the shortened link has been followed
long_url -> The original link
short_url -> shortened link https://domain/(short_url)
'''
created = models.DateTimeField(auto_now_add=True)
times_followed = models.PositiveIntegerField(default=0)
long_url = models.URLField()
short_url = models.CharField(max_length=15, unique=True, blank=True)
| [
7061,
6,
198,
28165,
1790,
877,
2746,
198,
7061,
6,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
62,
75,
12582,
198,
198,
6738,
764,
26791,
1330,
2251,
62,
19509,
2945,
... | 2.913934 | 244 |
from .linear_fused import LinearBn1d
from .deconv_fused import ConvTransposeBnReLU2d, ConvTransposeBn2d, ConvTransposeReLU2d
from .conv_fused import ConvBnReLU2d, ConvBn2d, ConvReLU2d
from .freezebn import ConvFreezebn2d, ConvFreezebnReLU2d, ConvTransposeFreezebn2d, ConvTransposeFreezebnReLU2d
| [
6738,
764,
29127,
62,
69,
1484,
1330,
44800,
33,
77,
16,
67,
198,
6738,
764,
12501,
261,
85,
62,
69,
1484,
1330,
34872,
8291,
3455,
33,
77,
3041,
41596,
17,
67,
11,
34872,
8291,
3455,
33,
77,
17,
67,
11,
34872,
8291,
3455,
3041,
... | 2.521368 | 117 |
# print(bin(10))
# print(bin(10)[::-1].index('1'))
from collections import Counter
a = [1, 1, 1, 2, 3, 4, 5]
t = Counter(a)
print(t) | [
2,
3601,
7,
8800,
7,
940,
4008,
198,
2,
3601,
7,
8800,
7,
940,
38381,
3712,
12,
16,
4083,
9630,
10786,
16,
6,
4008,
198,
6738,
17268,
1330,
15034,
198,
198,
64,
796,
685,
16,
11,
352,
11,
352,
11,
362,
11,
513,
11,
604,
11,
... | 2.216667 | 60 |
from abc import ABCMeta, abstractmethod
class FitPredictOutput(object):
'content of output file for program fit_predict.py'
__metaclass__ = ABCMeta
@abstractmethod
def as_dict(self):
'return a dict with all the fields'
pass
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
628,
198,
4871,
25048,
47,
17407,
26410,
7,
15252,
2599,
198,
220,
220,
220,
705,
11299,
286,
5072,
2393,
329,
1430,
4197,
62,
79,
17407,
13,
9078,
6,
198,
220,
220,
220,
11593,
41... | 2.726316 | 95 |
import iotbx.mtz
from cctbx.array_family import flex
if __name__ == "__main__":
import sys
data = {}
for f in sys.argv[1:]:
data[f] = get_I(f)
for ix in xrange(len(data)-1):
for iy in xrange(ix+1, len(data)):
x, y = data.keys()[ix], data.keys()[iy]
xd, yd = data[x].common_sets(data[y], assert_is_similar_symmetry=False)
corr = flex.linear_correlation(xd.data(), yd.data())
assert corr.is_well_defined()
print x, "vs", y, " cc=", corr.coefficient()
| [
11748,
1312,
313,
65,
87,
13,
16762,
89,
198,
6738,
269,
310,
65,
87,
13,
18747,
62,
17989,
1330,
7059,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1330,
25064,
628,
220,
220,
220,
1366,
... | 1.978261 | 276 |
#visualize the data that go produced
import os
import sys
import plotly
import plotly.offline as offline
import plotly.graph_objs as go
import helper as help
plotly.tools.set_credentials_file(username="<>", api_key="<>")
gprotocol = sys.argv[2] if len(sys.argv) > 2 and sys.argv[1] == "quic" else "quic"
latency = sys.argv[3] if len(sys.argv) > 3 else "30ms"
packet_loss = sys.argv[4] if len(sys.argv) > 4 else 1
save_file_name_lines = "../graphs/lines/" + gprotocol +"_"+ latency +"_"+ str(packet_loss) + ".html"
save_filename_histogram = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + ".html"
save_filename_histogram_real = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + "_hist.html"
#log_relative_file_path = "./data/creating_quic_graphs.txt"
data_file_name = sys.argv[1] if len(sys.argv) > 1 else "./data/Log.txt"
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if 'log_relative_file_path' in locals():
new_path = help.give_complete_file_path(log_relative_file_path)
f = open(new_path, 'w')
sys.stdout = f
sys.stderr = f
try:
print "Opening the file ..."
# open the file from the command line
print help.give_complete_file_path(data_file_name)
datafile = open(help.give_complete_file_path(data_file_name), "r+")
print "Reading the lines ..."
# read the lines of the file
lines = datafile.readlines()
numberOfPackets = int(lines[0][:-1])
print "Number of packets: {0}".format(numberOfPackets)
final_deltas = [None] * numberOfPackets
print "Converting strings to integers..."
# get the number of the data as an int
print lines[1]
allAsStrings = lines[1].split(' ')[:-1]
for x in allAsStrings:
pair = x.split(":")
print pair
sequence_number = int(pair[0])
delta = float(pair[1])
final_deltas[sequence_number-1] = delta
print "Got the integers..."
#print the delta array
print final_deltas
print "Starting to make graphs"
trace1 = go.Scatter(
x=range(0, numberOfPackets),
y=final_deltas,
mode = 'lines',
name = 'latency3'
)
missed_sequences_data = map( give_dropped , final_deltas)
trace2 = go.Scatter(
x = range(0, numberOfPackets),
y = missed_sequences_data,
mode = 'markers',
name = 'dropped'
)
data = [trace1]
layout_lines = dict(
font=dict(size=20),
xaxis = dict(title="Packet number"),
yaxis = dict(title="Latency (seconds)")
)
fig_lines = dict(data=data, layout=layout_lines)
print help.give_complete_file_path(save_file_name_lines)
offline.plot(fig_lines, filename=help.give_complete_file_path(save_file_name_lines), auto_open=False)
trace3 = go.Box(
x=final_deltas
)
layout_histogram = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_box = dict(data = [trace3], layout=layout_histogram)
print help.give_complete_file_path(save_filename_histogram)
offline.plot(fig_box, filename=help.give_complete_file_path(save_filename_histogram), auto_open=False)
trace4 = go.Histogram(
x=final_deltas,
nbinsx=10
)
layout_histogram_real = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_hist = dict(data = [trace4], layout=layout_histogram_real)
print help.give_complete_file_path(save_filename_histogram_real)
offline.plot(fig_hist, filename=help.give_complete_file_path(save_filename_histogram_real), auto_open=False)
datafile.close()
#if log_relative_file_path in locals():
#f.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except IOError:
print "Could not open the file"
print sys.exc_info()
except:
print "An error occured\n"
print sys.exc_info()
| [
2,
41464,
1096,
262,
1366,
326,
467,
4635,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
7110,
306,
198,
11748,
7110,
306,
13,
2364,
1370,
355,
18043,
198,
11748,
7110,
306,
13,
34960,
62,
672,
8457,
355,
467,
198,
11748,
319... | 2.549117 | 1,415 |
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from pydl.nn.layers import FC
from pydl.nn.nn import NN
from pydl.training.sgd import SGD
from pydl.training.momentum import Momentum
from pydl.training.rmsprop import RMSprop
from pydl.training.adam import Adam
if __name__ == '__main__':
main()
| [
2,
16529,
982,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
685,
1238,
2481,
60,
685,
32,
7114,
1077,
371,
37089,
776,
60,
198,
2,
198,
2,
770,
2438,
318,
636,
286,
262,
5888,
9485,
19260,
1279,
5450,
1378,
12567,
13... | 3.435233 | 193 |
import pytest
from pytest import raises
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import pyfar as pf
from pyfar.dsp import InterpolateSpectrum
# TODO: Finish `test_interpolation()` for 'magnitude_minimum'
def test_init():
"""Test return objects"""
fd = pf.FrequencyData([1, .5], [100, 200])
# interpolation object
interpolator = InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"))
assert isinstance(interpolator, InterpolateSpectrum)
# interpolation result
signal = interpolator(8, 44100)
assert isinstance(signal, pf.Signal)
def test_init_assertions():
"""Test if init raises assertions correctly"""
fd = pf.FrequencyData([1, .5], [100, 200])
# data (invalid type)
with raises(TypeError, match="data must be"):
InterpolateSpectrum(1, "complex", ("linear", "linear", "linear"))
# data (invalid FFT normalization)
with raises(ValueError, match="data.fft_norm is 'rms'"):
fd_rms = pf.FrequencyData([1, .5], [100, 200], 'rms')
InterpolateSpectrum(
fd_rms, "complex", ("linear", "linear", "linear"))
# data (not enough bins)
with raises(ValueError, match="data.n_bins must be at least 2"):
fd_short = pf.FrequencyData(1, 100)
InterpolateSpectrum(
fd_short, "complex", ("linear", "linear", "linear"))
# test invalid method
with raises(ValueError, match="method is 'invalid'"):
InterpolateSpectrum(fd, "invalid", ("linear", "linear", "linear"))
# test kind (invald type)
with raises(ValueError, match="kind must be a tuple of length 3"):
InterpolateSpectrum(fd, "complex", "linear")
# test kind (invalid length)
with raises(ValueError, match="kind must be a tuple of length 3"):
InterpolateSpectrum(fd, "complex", ("linear", "linear"))
# test kind (wrong entry)
with raises(ValueError, match="kind contains 'wrong'"):
InterpolateSpectrum(fd, "complex", ("linear", "linear", "wrong"))
# test fscale
with raises(ValueError, match="fscale is 'nice'"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), fscale="nice")
# test clip (wrong value of bool)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=True)
# test clip (invalid type)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=1)
# test clip (invalid length)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=(1, 2, 3))
@pytest.mark.parametrize(
"method, freq_in, frequencies, n_samples, sampling_rate, freq_out",
[
("complex", [1+2j, 2+1j], [1, 2], 12, 6,
[0+3j, 0.5+2.5j, 1+2j, 1.5+1.5j, 2+1j, 2.5+0.5j, 3+0j]),
("magnitude_phase",
# magnitude increases with 1 per Hz, phase with pi per Hz
[np.linspace(1, 2, 3) * np.exp(-1j * np.linspace(np.pi, np.pi*2, 3))],
[1, 1.5, 2], 24, 6,
# freq_out be means of magnitude and unwrapped phase response
[np.linspace(0, 3, 13), np.linspace(0, 3*np.pi, 13)]),
("magnitude", [1, 2], [1, 2], 12, 6,
[0, .5, 1, 1.5, 2, 2.5, 3])
])
def test_interpolation(
method, freq_in, frequencies, freq_out, n_samples, sampling_rate):
"""
Test the if the interpolated spectrum matches the reference across methods.
"""
# create test data
data = pf.FrequencyData(freq_in, frequencies)
interpolator = InterpolateSpectrum(
data, method, ("linear", "linear", "linear"))
signal = interpolator(n_samples, sampling_rate)
# check output depending on method
if method == "magnitude_phase":
# test magnitude and unwrapped phase response
npt.assert_allclose(np.abs(signal.freq), np.atleast_2d(freq_out[0]))
npt.assert_allclose(pf.dsp.phase(signal, unwrap=True),
np.atleast_2d(freq_out[1]))
else:
# test complex spectrum
npt.assert_allclose(signal.freq, np.atleast_2d(freq_out))
def test_clip():
"""Test if clipping the magnitude data works."""
data = pf.FrequencyData([1, 2], [1, 2])
# interpolate with and without clipping
interpolator = InterpolateSpectrum(
data, "magnitude", ("linear", "linear", "linear"))
signal_no_clip = interpolator(6, 6)
interpolator = InterpolateSpectrum(
data, "magnitude", ("linear", "linear", "linear"), clip=(1, 2))
signal_clip = interpolator(6, 6)
assert np.any(np.abs(signal_no_clip.freq) < 1) and \
np.any(np.abs(signal_no_clip.freq) > 2)
assert np.all(np.abs(signal_clip.freq) >= 1) and \
np.all(np.abs(signal_clip.freq) <= 2)
def test_fscale():
"""
Test frequency vectors for linear and logarithmic frequency interpolation.
"""
# test parametres and data
f_in_lin = [0, 10, 20]
f_in_log = np.log([10, 10, 20])
n_samples = 10
sampling_rate = 40
f_query_lin = pf.dsp.fft.rfftfreq(n_samples, sampling_rate)
f_query_log = f_query_lin.copy()
f_query_log[0] = f_query_log[1]
f_query_log = np.log(f_query_log)
data = pf.FrequencyData([1, 1, 1], f_in_lin)
# generate interpolator with linear frequency
interpolator_lin = InterpolateSpectrum(
data, "magnitude", ("linear", "linear", "linear"), fscale="linear")
_ = interpolator_lin(n_samples, sampling_rate)
# generate interpolator with logarithmic frequency
interpolator_log = InterpolateSpectrum(
data, "magnitude", ("linear", "linear", "linear"), fscale="log")
_ = interpolator_log(n_samples, sampling_rate)
# test frequency vectors
npt.assert_allclose(interpolator_lin._f_in, f_in_lin)
npt.assert_allclose(interpolator_lin._f_query, f_query_lin)
npt.assert_allclose(interpolator_log._f_in, f_in_log)
npt.assert_allclose(interpolator_log._f_query, f_query_log)
def test_show():
"""Test plotting the results.
This only tests if the code finishes without errors. Because the plot is
an informal plot for inspection, we don't test specifics of the figure and
axes for speed up the testing."""
data = pf.FrequencyData([1, 2], [1, 2])
interpolator = InterpolateSpectrum(
data, "magnitude", ("linear", "linear", "linear"))
_ = interpolator(10, 10, show=True)
plt.close()
| [
11748,
12972,
9288,
198,
6738,
12972,
9288,
1330,
12073,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
12972,
16370,
355,
... | 2.444362 | 2,705 |
"""Tests for execution of various command lines."""
import pytest
from almanac import (
MissingArgumentsError,
NoSuchArgumentError,
TooManyPositionalArgumentsError
)
from .utils import get_test_app
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
37811,
51,
3558,
329,
9706,
286,
2972,
3141,
3951,
526,
15931,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
435,
46870,
1330,
357,
198,
220,
220,
220,
25639,
28100,
2886,
12331,
11,
198,
220,
220,
220,
1400,
16678,
28100,
1713,
12331,
... | 2.639098 | 133 |
from rest_framework.viewsets import ModelViewSet
from meiduo_admin.utils import PageNum
from meiduo_admin.serializers.option import OptionSerialzier
from goods.models import SpecificationOption
class OptionsView(ModelViewSet):
"""
规格选项表的增删改查
"""
serializer_class = OptionSerialzier
queryset = SpecificationOption.objects.all()
pagination_class = PageNum
from rest_framework.generics import ListAPIView
from goods.models import GoodsSpecification
from meiduo_admin.serializers.option import OptionSpecificationSerializer
class OptionSimple(ListAPIView):
"""
获取规格信息
"""
serializer_class = OptionSpecificationSerializer
queryset = GoodsSpecification.objects.all()
| [
6738,
1334,
62,
30604,
13,
1177,
28709,
1330,
9104,
7680,
7248,
198,
6738,
502,
312,
20895,
62,
28482,
13,
26791,
1330,
7873,
33111,
198,
6738,
502,
312,
20895,
62,
28482,
13,
46911,
11341,
13,
18076,
1330,
16018,
32634,
89,
959,
198,
... | 2.772201 | 259 |
"""
79. Word Search
https://leetcode.com/problems/word-search/
"""
from typing import List
if __name__ == '__main__':
raise(SystemExit(main()))
| [
37811,
198,
3720,
13,
9678,
11140,
198,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
4775,
12,
12947,
14,
198,
37811,
198,
6738,
19720,
1330,
7343,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
... | 2.732143 | 56 |
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from ClockedObject import ClockedObject
from SimpleMemory import *
| [
2,
15069,
357,
66,
8,
3717,
13435,
4527,
29362,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
2... | 3.700441 | 454 |
import unittest
import json
import os
| [
11748,
555,
715,
395,
628,
198,
11748,
33918,
198,
11748,
28686,
628,
628
] | 3.307692 | 13 |
with open('input') as input:
lines = input.readlines()
number_sequence = lines[0].split(',')
board_numbers = []
called_indexes = []
# Flatten data structure for boards
for i, line in enumerate(lines):
if i == 0:
continue
if line == '\n':
continue
stripped_line = line.strip('\n')
num_list = line.split()
for num in num_list:
board_numbers.append(num)
# "Call" numbers and check for winner
winner = None
for num in number_sequence:
winner = checkForWin(board_numbers, called_indexes, num)
if winner != None:
board_start = winner*25
unmarked_sum = 0
for i in range(board_start, board_start+25):
if i not in called_indexes:
unmarked_sum += int(board_numbers[i])
print(f"SOLUTION = {unmarked_sum} * {num} = {int(unmarked_sum) * int(num)}")
break
| [
4480,
1280,
10786,
15414,
11537,
355,
5128,
25,
198,
220,
220,
220,
3951,
796,
5128,
13,
961,
6615,
3419,
198,
198,
17618,
62,
43167,
796,
3951,
58,
15,
4083,
35312,
7,
3256,
11537,
198,
3526,
62,
77,
17024,
796,
17635,
198,
7174,
6... | 2.374317 | 366 |
from nidm.experiment import Project, Session, AssessmentAcquisition, AssessmentObject, Acquisition, AcquisitionObject, Query
from nidm.core import Constants
import json
import re
from urllib import parse
import pprint
| [
6738,
299,
312,
76,
13,
23100,
3681,
1330,
4935,
11,
23575,
11,
25809,
12832,
421,
10027,
11,
25809,
10267,
11,
44564,
11,
44564,
10267,
11,
43301,
198,
6738,
299,
312,
76,
13,
7295,
1330,
4757,
1187,
198,
11748,
33918,
198,
11748,
30... | 4.055556 | 54 |
from os import path, mkdir
from pathlib import Path
import shutil
import tempfile
import types
import unittest
from submitty_utils import glob
if __name__ == '__main__':
unittest.main()
| [
6738,
28686,
1330,
3108,
11,
33480,
15908,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
11748,
3858,
198,
11748,
555,
715,
395,
198,
198,
6738,
850,
76,
9760,
62,
26791,
1330,
15095,
628,
198,... | 3.079365 | 63 |
from __future__ import unicode_literals, print_function, division
import math
import os
import time
import argparse
import tensorflow as tf
import torch
from model import Model
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adagrad
from torch.autograd import Variable
from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import calc_running_avg_loss
from train_util import get_input_from_batch, get_output_from_batch
use_cuda = config.use_gpu and torch.cuda.is_available()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train script")
parser.add_argument("-m",
dest="model_file_path",
required=False,
default=None,
help="Model file for retraining (default: None).")
args = parser.parse_args()
train_processor = Train()
train_processor.trainIters(config.max_iterations, args.model_file_path)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
11,
7297,
198,
11748,
10688,
198,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2... | 2.641944 | 391 |
"""Remove SiWay, add GoLab
Revision ID: 49b367d3d25f
Revises: 13f9fd64f85b
Create Date: 2017-04-07 01:07:29.653200
"""
# revision identifiers, used by Alembic.
revision = '49b367d3d25f'
down_revision = '13f9fd64f85b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
| [
37811,
27914,
15638,
25309,
11,
751,
1514,
17822,
198,
198,
18009,
1166,
4522,
25,
5125,
65,
27824,
67,
18,
67,
1495,
69,
198,
18009,
2696,
25,
1511,
69,
24,
16344,
2414,
69,
5332,
65,
198,
16447,
7536,
25,
2177,
12,
3023,
12,
2998,... | 2.495935 | 123 |
from sd2wiki.config import *
from sd2wiki.core import *
from sd2wiki.loc import getLoc
from sd2wiki.buildings import buildings
import csv, re, os
techs = {}
techFile = open(os.path.join(basedir, 'Techs', 'TechTree.txt'))
for building in buildings.values():
building.tech = None
for line in csv.reader(techFile, csv.excel_tab):
uid = line[0].strip()
if uid == '': continue
techs[uid] = Tech(*line)
techFile.close()
| [
6738,
45647,
17,
15466,
13,
11250,
1330,
1635,
198,
6738,
45647,
17,
15466,
13,
7295,
1330,
1635,
198,
6738,
45647,
17,
15466,
13,
17946,
1330,
651,
33711,
198,
6738,
45647,
17,
15466,
13,
11249,
654,
1330,
6832,
198,
11748,
269,
21370,... | 2.634731 | 167 |
'''
A game- player dodge the falling blocks
'''
import random
import math
import pyglet
from pyglet.window import key
from assets.entities import player, timer, block
class Window(pyglet.window.Window):
'''
The window class with custom draw function
'''
def __init__(self, width, height, **kawgs):
'''
Sets up the main window
'''
super().__init__(width, height, **kawgs)
def on_draw(self):
'''
Overwrites the main draw function
'''
self.clear()
# Draws all the other needed items
main_batch.draw()
def update(dt):
'''
Updates all the entities so they can move
'''
# Based off decay function- probability increases over time
# Stars in 1/72 with y asomote at 2
chance = random.randint(
1,
round(70 * (math.e/2)**((-1/20)*float(timer.text))+2)
)
if chance == 1:
falling = block.Block(batch=main_batch)
falling.speed = random.uniform(
30,
70+(math.e**(float(timer.text)*9/100))
)
enitity_list.append(falling)
for enitity in enitity_list:
enitity.update(dt)
window = Window(600, 600)
window.set_caption("Dodger")
main_batch = pyglet.graphics.Batch()
pressed = key.KeyStateHandler()
window.push_handlers(pressed)
enitity_list = []
player = player.Player(pressed, batch=main_batch)
enitity_list.append(player)
timer = timer.Timer(batch=main_batch)
enitity_list.append(timer)
enemy_list = []
if __name__ == "__main__":
pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run() | [
7061,
6,
198,
32,
983,
12,
2137,
24885,
262,
7463,
7021,
198,
7061,
6,
198,
11748,
4738,
198,
11748,
10688,
198,
11748,
12972,
70,
1616,
198,
6738,
12972,
70,
1616,
13,
17497,
1330,
1994,
198,
6738,
6798,
13,
298,
871,
1330,
2137,
1... | 2.323276 | 696 |
#!/usr/bin/python3
#Usage: lambda.py
#Author: David Caballero <d@dcaballero.net>
#Version: 1.0
import copy
my_dictionary = {'Key':'Value', ('K','E','Y'):5}
my_dictionary1 = copy.deepcopy(my_dictionary)
my_dictionary[1] = 1
print(my_dictionary)
print(my_dictionary1)
import math as m
print( m.cos(m.pi))
print( m.exp(m.pi))
print( m.ceil(m.pi))
import cmath as cm
print(dir(cm))
print(cm.sqrt(4))
print(cm.polar(complex(0,1)))
import random as ran
print(dir(ran))
print(ran.sample([1,2,3,4,5] ,3))
print(ran.random())
print(ran.randint(5,100))
import sys
print(sys.version)
print(sys.path)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
28350,
25,
37456,
13,
9078,
198,
2,
13838,
25,
3271,
15976,
439,
3529,
1279,
67,
31,
17896,
397,
439,
3529,
13,
3262,
29,
198,
2,
14815,
25,
352,
13,
15,
628,
198,
11748,
4866,
1... | 2.238971 | 272 |
"""
==============================================================
Controlling the position and size of colorbars with Inset Axes
==============================================================
This example shows how to control the position, height, and width of
colorbars using `~mpl_toolkits.axes_grid1.inset_locator.inset_axes`.
Controlling the placement of the inset axes is done similarly as that of the
legend: either by providing a location option ("upper right", "best", ...), or
by providing a locator with respect to the parent bbox.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
axins1 = inset_axes(ax1,
width="50%", # width = 50% of parent_bbox width
height="5%", # height : 5%
loc='upper right')
im1 = ax1.imshow([[1, 2], [2, 3]])
fig.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1, 2, 3])
axins1.xaxis.set_ticks_position("bottom")
axins = inset_axes(ax2,
width="5%", # width = 5% of parent_bbox width
height="50%", # height : 50%
loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax2.transAxes,
borderpad=0,
)
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im = ax2.imshow([[1, 2], [2, 3]])
fig.colorbar(im, cax=axins, ticks=[1, 2, 3])
plt.show()
| [
37811,
198,
10052,
4770,
25609,
855,
198,
4264,
18886,
262,
2292,
290,
2546,
286,
3124,
34046,
351,
554,
2617,
12176,
274,
198,
10052,
4770,
25609,
855,
198,
198,
1212,
1672,
2523,
703,
284,
1630,
262,
2292,
11,
6001,
11,
290,
9647,
2... | 2.373178 | 686 |
import re
from dataclasses import InitVar, asdict, dataclass, field
from datetime import datetime
from typing import Dict, Optional
REGEX = r'(?P<month>[A-Z][a-z]{2}) (?P<day>[0-9]{,2}) ' \
+ r'(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}) mail postfix/[a-z]+\[[0-9]+\]: ' \
+ r'(?P<mail_id>[A-Z0-9]+): to=<(?P<to_address>.*@.*)>, ' \
+ r'relay=(?P<relay>.*), delay=(?P<delay>[0-9.]+), ' \
+ r'delays=(?P<delays>[0-9][0-9/.]+), dsn=(?P<dsn>[0-9].[0-9].[0-9]), ' \
+ r'status=(?P<status>(sent|deferred|bounced)) \((?P<description>.*)\)'
PATTERN = re.compile(REGEX)
ParseResultType = Dict[str, str]
def parse(target: str) -> Optional[ParseResultType]:
"""Parse postfix maillog including send status
Args:
target (str): maillog
Returns:
Optional[ParseResultType]: return the following dict if match
{
'month': 'Aug',
'day': '1',
'time': '10:00:00',
'mail_id': '677RGS0',
'to_address': 'dummy@gmail.com',
'relay': 'local',
'delay': '0.06',
'delays': '0.06/0.01/0/0',
'dsn': '2.0.0',
'status': 'sent',
'description': 'delivered to maildir'
}
"""
match_obj = re.search(PATTERN, target)
if match_obj is None:
return None
result = match_obj.groupdict()
return ParseResult(**result).to_dict()
@dataclass
| [
11748,
302,
198,
6738,
4818,
330,
28958,
1330,
44707,
19852,
11,
355,
11600,
11,
4818,
330,
31172,
11,
2214,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
628,
198,
31553,
6369,
796,
374,
6,
7,
... | 1.946648 | 731 |
from datetime import timedelta
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.urls import path
from .base import BaseLoginView, BaseGetCodeView, DomainEmailValidator
urlpatterns = [
path('jlu/login/', LoginView.as_view()),
path('jlu/get_code/', GetCodeView.as_view()),
]
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
9570,
12837,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
8... | 3.027523 | 109 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Change Exploration Module
In this module we expose the Speculate function which generates a list of
potential Change instances for exploration in the effort to find candidate
Changes to identify culprits. We use a binary search within a range of changes
to identify potential culprits and expose the list of changes in between
revisions in a commit range.
If we think about the range of changes as a list of revisions, each represented
as a subscripted 'c' in the list below:
[c[0], c[1], c[2], ..., c[N]]
We can treat a binary search through the range c[0]..c[N] to be a binary tree of
subscripts in the range 0..N as shown below:
N1/2
N1/4 N3/4
N1/8 N3/8 N5/8 N7/8
....
This is useful when attempting to bisect a potentially large range of revisions
quickly when finding one or more culprit changes for performance regressions.
Being able to speculate levels ahead of the bisection range gives us a means for
trading machine resources to reduce overall time-to-culprits when bisecting
large revision ranges.
"""
import functools
__all__ = ['speculate']
def speculate(changes, change_detected, on_unknown, midpoint, levels=2):
"""Speculate on a range of changes.
This function yields a list of tuples with the following form:
(insertion index, change)
Where `insertion index` refers to an index in the `changes` argument. The
returned list is in insertion order with the property that if applied in the
given order to the `changes` list that the resulting `changes` list is in a
valid relative ordering of revisions to explore.
Arguments:
- changes: a list of Change instances.
- change_detected: a predicate returning True whether we can detect a change
between two Change instances, None if the result is inconclusive.
- on_unknown: a callable invoked when change_detected returns None
(or is inconclusive) taking both changes.
- midpoint: a callable invoked returning the midpoint between two changes,
returning an object of the same type as the arguments or None;
midpoint(a, b) -> m|None where type(m) == type(a) && type(m) == type(b).
- levels: the depth of the binary search to explore for speculation; default
is 2.
"""
if not changes:
return []
additional_changes = []
# We apply the speculator on each adjacent pair of Change elements in the
# changes we're provided.
functools.reduce(speculator, enumerate(changes))
# At this point in the function, we have the additional changes in infix
# traversal order (left, node, right), but we want to return the results in
# stable insertion order so we reverse this list. This way, we have the
# elements that will fall in the same insertion index to be inserted at the
# same index one after another, which will restore the traversal order in
# the final list.
#
# For example:
#
# c = [(0, change2), (0, change1), (0, change0)]
#
# When inserted to an empty list by insertion index:
#
# a = []
# for index, change in c:
# a.insert(index, change)
#
# We end up with:
#
# a = [change0, change1, change2]
return reversed(additional_changes)
| [
2,
15069,
13130,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
37811,
19... | 3.16819 | 1,094 |
# This program is by: Nidhi Patel
# It uses: Python & Tkinter
# It answers user's question using an api system.
from tkinter import *
import wolframalpha
# This program is for the main background of the function
root = Tk()
root.title("Chatbot")
root.geometry('400x400')
# tells the user to enter the question
theLabel = Label(root, text=" Enter your question here:")
theLabel.grid(row=1, column =1)
theLabel.config(font=("Times", 17))
entry = Entry(root, bg='light grey', font=35)
entry.place(x = 10, y= 50, height= 40, width = 290)
button = Button(root, text ="Enter", width = 8, font=20, height= 1, command=lambda:answer())
button.place(x=310,y=50)
# the output system of the code.
output = Text(bg='light grey')
output.config(state=DISABLED)
output.place(x=10, y= 100, height = 290, width= 360)
root.mainloop()
| [
2,
770,
1430,
318,
416,
25,
46798,
5303,
33110,
198,
2,
632,
3544,
25,
11361,
1222,
309,
74,
3849,
220,
198,
2,
632,
7429,
2836,
338,
1808,
1262,
281,
40391,
1080,
13,
628,
198,
6738,
256,
74,
3849,
1330,
1635,
220,
198,
11748,
17... | 2.957143 | 280 |
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.proto_builder."""
from google.apputils import basetest
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import proto_builder
from google.protobuf import text_format
if __name__ == '__main__':
basetest.main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
20497,
8792,
364,
532,
3012,
338,
1366,
26478,
5794,
198,
2,
15069,
3648,
3012,
3457,
13,
220,
1439,
2489,
10395,
13,
198,
2,
3740,
1378,
16244,
364,
13,
13297,
13,
785,
14,
... | 3.503584 | 558 |
from django import template
from ..models import Area
register = template.Library()
@register.assignment_tag(takes_context=True)
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
@register.inclusion_tag('home/include/side_menu_area.html', takes_context=True)
@register.filter
def url_param_dict_to_list(url_items_dict):
"""Turn this dictionary into a param list for the URL"""
params_list = ""
for key,value in url_items_dict:
if key != "page":
params_list += "&%s=%s" % (key, value)
return params_list
@register.filter
@register.inclusion_tag('home/include/blog_item.html', takes_context=True)
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
11485,
27530,
1330,
9498,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
562,
16747,
62,
12985,
7,
83,
1124,
62,
22866,
28,
17821,
8,
628,
198,
31,
30238,
13,
259,
... | 2.791045 | 268 |
#!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Last modified: 2020-04-13 15:34
# Filename : patent_crawber.py
# Description : res_kb_patent专利信息生成,目前是转移企业相关的专利信息,实际这一步是用爬虫替换
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
import pymysql
from dateutil import parser
from datetime import datetime, date, timedelta
import json
import logging
import re
import copy
import os
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
if __name__=="__main__":
pc = PatentCrawber()
pc.process()
| [
2,
48443,
11195,
14,
499,
15578,
14,
272,
330,
13533,
18,
14,
8800,
14,
29412,
18,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17174,
17174,
46068,
1174,
198,
2,
6434,
220,
220,
220,
220,
220,
220,
1058... | 2.457711 | 402 |
# URI Online Judge 1142
X = int(input())
count = 1
for x in range(1,X+1):
String = ''
for i in range(4):
if i == 0:
String += "{}".format(count)
elif i == 3:
String += " PUM"
else:
String += " {}".format(count)
count += 1
print(String)
| [
2,
43975,
7467,
8974,
1367,
3682,
198,
198,
55,
796,
493,
7,
15414,
28955,
198,
9127,
796,
352,
198,
198,
1640,
2124,
287,
2837,
7,
16,
11,
55,
10,
16,
2599,
198,
220,
220,
220,
10903,
796,
10148,
628,
220,
220,
220,
329,
1312,
... | 1.921687 | 166 |
from application import app
from application import decorators
from flask import request, session, redirect, url_for, Blueprint, render_template
from run import gt
import gorbin_tools2
page = Blueprint('user', __name__,
template_folder='templates')
@page.route('/user', methods = ['GET', 'POST'])
@page.route('/user/<user_id>', methods = ['GET', 'POST'])
@decorators.login_required
@decorators.check_session | [
6738,
3586,
1330,
598,
198,
6738,
3586,
1330,
11705,
2024,
198,
6738,
42903,
1330,
2581,
11,
6246,
11,
18941,
11,
19016,
62,
1640,
11,
39932,
11,
8543,
62,
28243,
198,
6738,
1057,
1330,
308,
83,
198,
11748,
30344,
8800,
62,
31391,
17,... | 2.965753 | 146 |
#!/usr/bin/env python3
# Adoptd from https://github.com/google/python-laurel/blob/master/laurel/__init__.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import getpass
import json
import random
API_TIMEOUT = 5
# https://github.com/unixpickle/cbyge/blob/main/login.go
# https://github.com/juanboro/cync2mqtt/blob/main/src/acync/__init__.py
def authenticate():
"""Authenticate with the API and get a token."""
API_AUTH = "https://api.gelighting.com/v2/two_factor/email/verifycode"
auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,"local_lang": "en-us"}
r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
code=input("Enter emailed code:")
API_AUTH = "https://api.gelighting.com/v2/user_auth/two_factor"
auth_data = {'corp_id': "1007d2ad150c4000", 'email': username, 'password': password, "two_factor": code, "resource": randomLoginResource()}
r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
try:
return (r.json()['access_token'], r.json()['user_id'])
except KeyError:
raise(LaurelException('API authentication failed'))
def get_devices(auth_token, user):
"""Get a list of devices for a particular user."""
API_DEVICES = "https://api2.xlink.cn/v2/user/{user}/subscribe/devices"
headers = {'Access-Token': auth_token}
r = requests.get(API_DEVICES.format(user=user), headers=headers,
timeout=API_TIMEOUT)
return r.json()
def get_properties(auth_token, product_id, device_id):
"""Get properties for a single device."""
API_DEVICE_INFO = "https://api2.xlink.cn/v2/product/{product_id}/device/{device_id}/property"
headers = {'Access-Token': auth_token}
r = requests.get(API_DEVICE_INFO.format(product_id=product_id, device_id=device_id), headers=headers, timeout=API_TIMEOUT)
return r.json()
username = input("Cync Username/Email:")
password=getpass.getpass()
access_token, user_id = authenticate()
print("light:")
devices = get_devices(access_token, user_id)
errormsg = ""
for device in devices:
product_id = device['product_id']
device_id = device['id']
username = device['mac']
access_key = device['access_key']
print(" - platform: gelight")
print(" password: {}".format(access_key))
print(" username: {}".format(username))
print(" lights:")
device_info = get_properties(access_token, product_id, device_id)
try:
for bulb in device_info['bulbsArray']:
id = int(bulb['deviceID']) % 1000
mac = [bulb['mac'][i:i+2] for i in range(0, 12, 2)]
mac = "%s:%s:%s:%s:%s:%s" % (mac[5], mac[4], mac[3], mac[2], mac[1], mac[0])
name = bulb['displayName']
device_type = bulb['deviceType']
print(" - id: {}".format(id))
print(" mac: {}".format(mac))
print(" name: {}".format(name))
print(" type: {}".format(device_type))
except KeyError:
errormsg+="Warning: Missing bulb info.\n"
print(errormsg)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
1215,
404,
8671,
422,
3740,
1378,
12567,
13,
785,
14,
13297,
14,
29412,
12,
5031,
495,
75,
14,
2436,
672,
14,
9866,
14,
5031,
495,
75,
14,
834,
15003,
834,
13,
9078,
198,
... | 2.543785 | 1,416 |
from os import getenv
from flask import Flask, render_template, request
from dotenv import load_dotenv
from .models import to_list
| [
6738,
28686,
1330,
651,
24330,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
764,
27530,
1330,
284,
62,
4868,
198
] | 3.852941 | 34 |
import numpy as np
A=np.array([[10,2,1],[0,4,2],[1,2,2]])
b=np.array([3,2,1])
x=my_back_solve(A,b)
print('x=',x)
print('A x=', np.dot(A,x))
| [
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
32,
28,
37659,
13,
18747,
26933,
58,
940,
11,
17,
11,
16,
38430,
15,
11,
19,
11,
17,
38430,
16,
11,
17,
11,
17,
11907,
8,
201,
198,
65,
28,
37659,
13,
18747,
26933,
... | 1.527778 | 108 |
"""Extract features and save as .mat files for ED-TCN. Only used for
spatial-temporal or appearance stream (in the case of 2 stream). Do NOT use
for motion stream.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')))
import numpy as np
import tensorflow as tf
import skimage.io as sio
import scipy.io
from skimage.transform import resize
from progressbar import ProgressBar
from data_utils import dataset_factory
from networks import networks_factory, networks_utils
from tensorflow.contrib.framework import get_variables_to_restore
flags = tf.app.flags
FLAGS = flags.FLAGS
# paths and directories
flags.DEFINE_string('segmented_dir', None,
'segmented frames, used for reference')
flags.DEFINE_string('pretrained_model', None,
'path to the pretrained model')
flags.DEFINE_string('lbl_dict_pth', None,
'path to label dictionary')
flags.DEFINE_string('outputdir', None,
'output directory')
flags.DEFINE_string('featname', None,
'name of the layer to extract features')
# other parameters
flags.DEFINE_string('datasetname', '50salads', 'name of the dataset')
flags.DEFINE_integer('frameskip', 5, 'frame skip for downsampling')
flags.DEFINE_integer('stride', 2, 'stride after downsampling (this is testing '
'stride, not training stride)')
flags.DEFINE_string('netname', None, 'Resnet50 without offsets')
flags.DEFINE_string('bg_lbl', 'background', 'name of the background class')
flags.DEFINE_string('ext', 'png', 'extension of frame file names')
flags.DEFINE_integer('snippet_len', 1, 'extract features frame by frame')
flags.DEFINE_integer('target_height', 224, 'target image height')
flags.DEFINE_integer('target_width', 224, 'target image width')
flags.DEFINE_integer('batch_size', 1, 'number of images to feed at a time')
flags.DEFINE_integer('max_time_gap', 1, 'maximum time gap for motion loss')
flags.DEFINE_boolean('usemotionloss', False, 'no need to use motion loss')
flags.DEFINE_boolean('has_bg_lbl', True, 'has background class or not. If'
'True, the number of classes will be'
'increased by 1 from the content of'
'`labels_fname`')
flags.DEFINE_boolean('use_single_mid', False, 'use a single middle frame. Used for vanilla')
# set up mean image
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
im_mean = np.array([_R_MEAN, _G_MEAN, _B_MEAN], dtype=np.float32)
def read_n_process(im_batch):
"""Read images from given path then preprocess them
Args:
im_batch: a list of image file names
Returns:
images: preprocessed images (central crop and mean removal)
"""
# allocate memory
target_shape = [FLAGS.target_height, FLAGS.target_width, 3]
images = np.zeros([len(im_batch)] + target_shape, dtype=np.float32)
# load each image
for i in range(len(im_batch)):
# load images from filenames
img = sio.imread(im_batch[i])
# resize image
img = resize(img, (FLAGS.target_height, FLAGS.target_width, 3),
mode='constant', preserve_range=True)
# mean removal
img -= im_mean
# append
images[i] = img
return images
def make_mat_file(output_fname, all_feat, lbl_lst, expected_length=None):
"""Create mat files from given feature and label list to match Lea's
file format
Args:
all_feat: all extracted feature, ndarray (N, feat_dim)
lbl_lst: list of all labels, length of N
"""
# Expand or reduce the feature array if needed
if expected_length is not None:
N = all_feat.shape[0]
if expected_length < N:
all_feat = all_feat[:expected_length]
lbl_lst = lbl_lst[:expected_length]
elif expected_length > N:
diff = expected_length - N
left = np.ceil(diff / 2.0).astype(np.int)
right = diff - left
# Expand features
left_feat = np.expand_dims(all_feat[0], axis=0)
left_pad = np.repeat(left_feat, left, axis=0)
right_feat = np.expand_dims(all_feat[-1], axis=0)
right_pad = np.repeat(right_feat, right, axis=0)
all_feat = np.concatenate([left_pad, all_feat, right_pad], axis=0)
# Expand labels
left_lbl = np.repeat(lbl_lst[0], left)
right_lbl = np.repeat(lbl_lst[-1], right)
lbl_lst = np.concatenate([left_lbl, lbl_lst, right_lbl])
assert len(all_feat) == len(lbl_lst), \
'features and labels list must have the same length'
# Save as matlab *mat file
mdict = {'A': all_feat,
'Y': np.expand_dims(lbl_lst, axis=1)}
scipy.io.savemat(os.path.join(FLAGS.outputdir, output_fname), mdict)
pass
def main(_):
"""Main function"""
if not os.path.exists(FLAGS.outputdir):
os.makedirs(FLAGS.outputdir)
# load video list
vid_lst = os.listdir(FLAGS.segmented_dir)
vid_lst.sort()
# load label dictionary
lbl_list = open(FLAGS.lbl_dict_pth).read().splitlines()
n_classes = len(lbl_list)
if FLAGS.has_bg_lbl:
n_classes += 1
# lbl_dict = {'background': 0}
# for i in range(len(lbl_list)):
# lbl_dict[lbl_list[i]] = i + 1
# lbl_dict[lbl_list[i]] = i
# use the load_snippet_pths_test in data writer to get frames and labels
dataset_writer = dataset_factory.get_writer(FLAGS.datasetname)
writer = dataset_writer()
# set default graph
with tf.Graph().as_default():
# build network
if FLAGS.use_single_mid:
real_snippet_len = 1
else:
real_snippet_len = FLAGS.snippet_len
net = networks_factory.build_net(
FLAGS.netname, n_classes, real_snippet_len,
FLAGS.target_height, FLAGS.target_width,
max_time_gap=FLAGS.max_time_gap,
trainable=False)
# extract features
feat = net.get_output(FLAGS.featname)
# load pretrained weights
if '.pkl' in FLAGS.pretrained_model:
assign_ops = networks_utils.load_pretrained(
FLAGS.pretrained_model, ignore_missing=True,
extension='pkl',
initoffset=FLAGS.usemotionloss)
else:
variables_to_restore = get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# create session
with tf.Session() as sess:
# initialization
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
if '.pkl' in FLAGS.pretrained_model:
sess.run(assign_ops)
else:
init_fn(sess)
# for each video in video list
n_vids = len(vid_lst)
for vid_id in range(n_vids):
# skip existing feature files
output_fname = '{}.avi.mat'.format(vid_lst[vid_id])
if os.path.exists(os.path.join(FLAGS.outputdir, output_fname)):
print('{} already exists'.format(output_fname))
continue
# load all file names and labels
vid = vid_lst[vid_id]
print('\nExtracting features for ' + vid)
fname_lst, lbl_lst = writer.load_snippet_pths_test(
FLAGS.segmented_dir, [vid], FLAGS.lbl_dict_pth,
FLAGS.bg_lbl, FLAGS.ext, FLAGS.frameskip)
fname_lst = [x[0] for x in fname_lst]
# prefetch all frames of a video
frames_all = read_n_process(fname_lst)
# prepare indices
n_frames = len(lbl_lst)
left = FLAGS.snippet_len // 2
right = FLAGS.snippet_len - left
# go through the video frames in acausal fashion
frame_id = left
feats_per_vid = []
groundtruths_per_vid = []
pbar = ProgressBar(max_value=n_frames)
while frame_id < n_frames-right+1:
# produce inputs
snippet_batch = []
lbl_batch = []
for _ in range(FLAGS.batch_size):
if frame_id+right > n_frames:
break
if FLAGS.use_single_mid:
snippet = np.expand_dims(frames_all[frame_id], axis=0)
else:
snippet = frames_all[frame_id-left:frame_id+right]
lbl = lbl_lst[frame_id]
snippet_batch.append(snippet)
lbl_batch.append(lbl)
frame_id += FLAGS.stride
feed_dict = {net.data_raw: snippet_batch,
net.labels_raw: lbl_batch}
# extract features
feat_ = sess.run(feat, feed_dict=feed_dict)
# append data
for i in range(feat_.shape[0]):
feats_per_vid.append(feat_[i])
groundtruths_per_vid.append(lbl_batch[i])
pbar.update(frame_id)
# produce mat file for a video
feats_per_vid = np.array(feats_per_vid, dtype=np.float32)
groundtruths_per_vid = np.array(groundtruths_per_vid)
make_mat_file(output_fname, feats_per_vid,
groundtruths_per_vid,
expected_length=n_frames//FLAGS.stride)
pass
pass
pass
if __name__ == '__main__':
tf.app.run()
| [
37811,
11627,
974,
3033,
290,
3613,
355,
764,
6759,
3696,
329,
8392,
12,
4825,
45,
13,
5514,
973,
329,
198,
2777,
34961,
12,
11498,
35738,
393,
5585,
4269,
357,
259,
262,
1339,
286,
362,
4269,
737,
2141,
5626,
779,
198,
1640,
6268,
... | 2.02342 | 4,953 |
from flask import Flask,jsonify,request,make_response
#from flask_httpauth import HTTPBasicAuth
import sqlite3 as sql
import requests
#auth = HTTPBasicAuth()
import datetime
import base64
import binascii
from flask_cors import CORS,cross_origin
app = Flask(__name__)
CORS(app)
#3
@app.route('/api/v1/categories',methods=['GET'])
#4
@app.route('/api/v1/categories',methods=['POST'])
#5
@app.route('/api/v1/categories/<categoryName>',methods=['DELETE'])
#6 or #8
@app.route('/api/v1/categories/<categoryName>/acts',methods=['GET'])
#7
@app.route('/api/v1/categories/<categoryName>/acts/size',methods=['GET'])
#9
@app.route('/api/v1/acts/upvote',methods=['POST'])
#10
@app.route('/api/v1/acts/<actId>',methods=['DELETE'])
#11
@app.route('/api/v1/acts',methods=['POST'])
# Get total number of acts
@app.route('/api/v1/count',methods=['GET'])
@app.errorhandler(405)
'''
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
'''
if __name__=='__main__':
app.run(debug=True,host="0.0.0.0",port=80)#port=8000)
| [
6738,
42903,
1330,
46947,
11,
17752,
1958,
11,
25927,
11,
15883,
62,
26209,
198,
2,
6738,
42903,
62,
4023,
18439,
1330,
14626,
26416,
30515,
198,
11748,
44161,
578,
18,
355,
44161,
198,
11748,
7007,
198,
2,
18439,
796,
14626,
26416,
305... | 2.351293 | 464 |
import csv
import collections
import os.path
import re
import operator
import sys
import datetime
import shutil
import ntpath
import EsoLuaFile
#from skipdict import SkipDict
OUTPUT_PATH = "d:\\temp\\esoglobals\\"
INPUT_FILENAME = "d:\\esoexport\\goodimages10\\globals_6b.txt"
LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\"
#LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\pregame\\console\\"
#INPUT_FILENAME = "d:\\src\\uesp\\eso\\parseGlobals\\globals_6b.txt"
#LUA_ROOT_PATH = "d:\\src\\esoui\\"
functionCalls = { }
luaFunctions = { }
GlobalData_Time = ""
GlobalData_Date = ""
GlobalData_Version = ""
totalLuaFunctions = 0
totalLuaDuplicates = 0
totalLuaCalls = 0
totalIgnoredLuaFiles = 0
#matchFunctions = re.compile("((?:local\s+)?function\s+.*)\s*\n")
matchFunctions = re.compile("((?:local\s+)?function\s+.*)")
#matchFunctions = re.compile("((?:local\s+)?function\s+.*\))\s*\n")
#matchFunctions = re.compile("((?:local\s+)?function\s+.*)\n")
matchFunctionName = re.compile("(local)?\s*function\s+([A-Za-z0-9_]+)?([:.])?([A-Za-z0-9_]+)\s*\(\s*(.*)\s*\)")
matchFunctionParams = re.compile("([A-Za-z0-9_]+)\s*,?")
matchFunctionCall = re.compile("(?:([A-Za-z_\[][A-Za-z0-9_,.\[\]\t ]*)\s*=\s*)?([A-Za-z_][A-Za-z0-9_.:\[\]]*)\s*\((.*)\)")
# function name()
# function name(var)
# function name(var1, var2)
# x, y, z = func()
callFuncs = matchFunctionCall.findall("x = y()")
callFuncs = matchFunctionCall.findall("x[0], y.z = self:zy(abc[1].t, 123)")
print callFuncs
luaFunctions = FindLuaFunctions(LUA_ROOT_PATH)
DumpLuaFunctionCalls(OUTPUT_PATH + "funccalls.txt")
#sys.exit()
parsedGlobalLog = ParseGlobalLogFile(INPUT_FILENAME)
print "Loaded " + str(len(parsedGlobalLog)) + " rows from " + INPUT_FILENAME
globalData = ParseGlobalData(parsedGlobalLog)
print "Parsed into " + str(len(globalData)) + " root global objects"
DumpGlobalData(globalData, OUTPUT_PATH + "test.txt")
CreateFunctionCallHTML(OUTPUT_PATH + "functioncalls/")
CreateGlobalHTML(globalData, OUTPUT_PATH + "all.html")
CreateGlobalHTML(globalData, OUTPUT_PATH + "func.html", [ "function" ])
CreateGlobalHTML(globalData, OUTPUT_PATH + "var.html", [ "number", "string", "boolean" ] )
CreateGlobalHTML(globalData, OUTPUT_PATH + "data.html", [ "userdata", "table" ])
CreateLuaSource(LUA_ROOT_PATH, OUTPUT_PATH + "src/")
| [
11748,
269,
21370,
201,
198,
11748,
17268,
201,
198,
11748,
28686,
13,
6978,
201,
198,
11748,
302,
201,
198,
11748,
10088,
201,
198,
11748,
25064,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
4423,
346,
201,
198,
11748,
299,
83,
6978,
... | 2.190476 | 1,134 |
import logging
import urllib.request
from urllib.error import (HTTPError, URLError)
LOG = logging.getLogger(__name__)
HPO_URL = ("http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/"
"lastStableBuild/artifact/annotation/{0}")
def fetch_resource(url, file_name=None):
"""Fetch a resource and return the resulting lines in a list
Send file_name to get more clean log messages
Args:
url(str)
file_name(str)
Returns:
lines(list(str))
"""
try:
LOG.info("Requesting %s", (file_name or url))
response = urllib.request.urlopen(url)
data = response.read() # a `bytes` object
lines = data.decode('utf-8').split('\n')
except HTTPError as err:
LOG.warning("Something went wrong, perhaps the api key is not valid?")
raise err
except URLError as err:
LOG.warning("Something went wrong, are you connected to internet?")
raise err
return lines
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
"""Fetch the necessary mim files using a api key
Args:
api_key(str): A api key necessary to fetch mim data
Returns:
mim_files(dict): A dictionary with the neccesary files
"""
LOG.info("Fetching OMIM files from https://omim.org/")
mim2genes_url = 'https://omim.org/static/omim/data/mim2gene.txt'
mimtitles_url= 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
morbidmap_url = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
genemap2_url = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)
mim_files = {}
mim_urls = {}
if mim2genes is True:
mim_urls['mim2genes'] = mim2genes_url
if mimtitles is True:
mim_urls['mimtitles'] = mimtitles_url
if morbidmap is True:
mim_urls['morbidmap'] = morbidmap_url
if genemap2 is True:
mim_urls['genemap2'] = genemap2_url
for file_name in mim_urls:
url = mim_urls[file_name]
mim_files[file_name] = fetch_resource(url, file_name)
return mim_files
def fetch_hpo_terms():
"""Fetch the latest version of the hpo terms in .obo format
Returns:
res(list(str)): A list with the lines
"""
url = "http://purl.obolibrary.org/obo/hp.obo"
return fetch_resource(url)
def fetch_hpo_to_genes():
"""Fetch the latest version of the map from phenotypes to genes
Returns:
res(list(str)): A list with the lines
"""
file_name = "ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt"
url = HPO_URL.format(file_name)
return fetch_resource(url, file_name)
def fetch_hpo_genes():
"""Fetch the latest version of the map from genes to hpo terms
Returns:
res(list(str)): A list with the lines
"""
file_name = "ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt"
url = HPO_URL.format(file_name)
return fetch_resource(url, file_name)
def fetch_hpo_phenotype_to_terms():
"""Fetch the latest version of the map from phenotype to terms
Returns:
res(list(str)): A list with the lines
"""
file_name = "ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt"
url = HPO_URL.format(file_name)
return fetch_resource(url, file_name)
| [
11748,
18931,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
357,
40717,
12331,
11,
37902,
2538,
81,
1472,
8,
198,
198,
25294,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
14... | 2.274536 | 1,508 |
class Solution:
"""
@param s: a string
@param words: a list of words
@return: all starting indices of substring(s)
"""
| [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
264,
25,
257,
4731,
198,
220,
220,
220,
2488,
17143,
2456,
25,
257,
1351,
286,
2456,
198,
220,
220,
220,
2488,
7783,
25,
477,
3599,
36525,
286,
3293,
1806,... | 2.673077 | 52 |
'''
script to train conditional normalizing flow to estimate p( theta | compressed
p0k ).
'''
import torch
import os, sys
import numpy as np
import pt_sbi as PTsbi
Nsims = int(sys.argv[1])
#############################################################################
# setup training and validation set
#############################################################################
dat_dir = PTsbi.dat_dir()
thetas = PTsbi.theta_samples()
wcp0ks = PTsbi.wcP0_samples()
thetas = thetas.astype(np.float32)[:Nsims]
condit = wcp0ks.astype(np.float32)[:Nsims]
#############################################################################
# train CNF
#############################################################################
Ntrain = int(np.floor(0.85 * thetas.shape[0]))
fcheck = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.check.pt' % Nsims)
fmodel = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.pt' % Nsims)
fbest = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.best.pt' % Nsims)
best_model, device = PTsbi.train_CNF(
thetas, condit,
Ntrain,
num_blocks=10,
fbest=fbest,
fcheck=fcheck,
fmodel=fmodel)
#############################################################################
# deploy on fiducial cP0k
#############################################################################
theta_fid = PTsbi.theta_fiducial()
wcp0k_fid = PTsbi.wcP0_fiducial().astype(np.float32)
condit = torch.from_numpy(wcp0k_fid)
best_model.eval()
with torch.no_grad():
post = np.array(best_model.sample(100000, cond_inputs=condit).detach().cpu())
fpost = fbest.replace('best.pt', 'posterior.npy')
np.save(fpost, post)
| [
7061,
6,
198,
198,
12048,
284,
4512,
26340,
3487,
2890,
5202,
284,
8636,
279,
7,
262,
8326,
930,
25388,
198,
79,
15,
74,
6739,
628,
198,
7061,
6,
198,
11748,
28034,
198,
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
1... | 2.702922 | 616 |
"""
السلاسل النصية / النصوص في لغة البايثون
"""
print("السلاسل النصية / النصوص في لغة البايثون")
print('Hello World') # ' '
print("Hello World") # " "
print("--------------------------")
# تعريف متغير يخزّن فيه نص
print("تعريف متغير يخزّن فيه نص ")
text = "Welcom To Python Lessons"
print(text)
print("--------------------------")
# تعريف متغير يخزّن فيه نص متعدد الاسطر
print(" تعريف متغير يخزّن فيه نص متعدد الاسطر")
install = """
Python source code and installers
are available for download for all
versions!
"""
print(install)
print("--------------------------")
# السلاسل النصية هي مصفوفات
# P y t h o n , Week 2, lesson
# 1 2 3 4 5 6
MyText = "Python, Week 2, lesson"
print(MyText) # All
print(MyText[0:6]) # From 0 To 6 Python Word
print(MyText[7:14]) # From 7 To 14 Week 2 Word
print(MyText[15:22]) # From 15 To 22 lesson Word
| [
37811,
198,
23525,
45692,
13862,
34247,
111,
13862,
28981,
23338,
148,
113,
22654,
45632,
1220,
28981,
23338,
148,
113,
30335,
148,
113,
18923,
223,
22654,
220,
13862,
148,
118,
45632,
28981,
39848,
12919,
22654,
148,
104,
30335,
23338,
198... | 1.877729 | 458 |
from scraper import *
s = Scraper(start=254826, end=256607, max_iter=30, scraper_instance=143)
s.scrape_letterboxd() | [
6738,
19320,
525,
1330,
1635,
220,
198,
82,
796,
1446,
38545,
7,
9688,
28,
1495,
2780,
2075,
11,
886,
28,
11645,
31980,
11,
3509,
62,
2676,
28,
1270,
11,
19320,
525,
62,
39098,
28,
21139,
8,
220,
198,
82,
13,
1416,
13484,
62,
9291... | 2.510638 | 47 |
import keras
import logging
| [
11748,
41927,
292,
198,
11748,
18931,
628,
628,
198
] | 3.555556 | 9 |
cipher_txt = open("ciphertext.txt").read()
cipher_num = []
for i in range(72):
cipher_num.append(ord(cipher_txt[i]) - 97)
p_num = ord("p") - 97
c_num = ord("c") - 97
t_num = ord("t") - 97
f_num = ord("f") - 97
# try all key
for a in range(26):
for b in range(26):
for c in range(26):
for d in range(26):
for i in range(34):
# make sure ciphertext matches
res1 = (a * p_num + b * c_num) % 26
res2 = (c * p_num + d * c_num) % 26
res3 = (a * t_num + b * f_num) % 26
res4 = (c * t_num + d * f_num) % 26
if (
(cipher_num[i * 2] == res1)
and (cipher_num[i * 2 + 1] == res2)
and (cipher_num[i * 2 + 2] == res3)
and (cipher_num[i * 2 + 3] == res4)
):
print("\nkey is", a, b, c, d)
# decode all ciphertext
for item in range(36):
for x in range(26):
for y in range(26):
if (
(cipher_num[item * 2] == (a * x + b * y) % 26)
and (cipher_num[item * 2 + 1] == (c * x + d * y) % 26)
):
print(chr(x + 97), chr(y + 97), end=" ")
| [
66,
10803,
62,
14116,
796,
1280,
7203,
66,
10803,
5239,
13,
14116,
11074,
961,
3419,
198,
66,
10803,
62,
22510,
796,
17635,
198,
198,
1640,
1312,
287,
2837,
7,
4761,
2599,
198,
220,
220,
220,
38012,
62,
22510,
13,
33295,
7,
585,
7,
... | 1.518 | 1,000 |
from ktd_q import *
import brl
import models
import matplotlib.pyplot as plt
import seeding
import numpy as np
import gym
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--kappa', type=int, default=10, help='kappa')
parser.add_argument('--epsilon', type=float, default=0.0, help= 'epsilon for covariance')
parser.add_argument('--gym', type=bool, default=False)
parser.add_argument('--scene', type=str, default='')
parser.add_argument('--iter', type=int, default=100, help='number of trials')
args = parser.parse_args()
if __name__ == "__main__":
main_brl() | [
198,
6738,
479,
8671,
62,
80,
1330,
1635,
198,
11748,
865,
75,
220,
198,
11748,
4981,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
8228,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11550,
198,
117... | 2.93 | 200 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2020-08-04 01:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
22,
319,
12131,
12,
2919,
12,
3023,
5534,
25,
2231,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.883117 | 77 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="anjie", # Replace with your own username
version="1.0.0",
author="Babatunde Adewole",
author_email="adewole63@gmail.com",
description="This python library provides corpus in English and various local african languages e.g(Youruba, Hausa, Pidgin), it also does sentiment analysis on brands",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Free-tek/Anjie_local_language_corpus_generator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.77592 | 299 |
import rospy, cv2, cv_bridge, numpy, math
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
rospy.init_node('follower')
follower = Follower()
rospy.spin()
| [
11748,
686,
2777,
88,
11,
269,
85,
17,
11,
269,
85,
62,
9458,
11,
299,
32152,
11,
10688,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
7412,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
48436,
2414,
628,
628,
198,
305,
... | 2.588235 | 68 |