content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
import os
from .kface import KFace
from .ms1m import MS1M
from .bin_datasets import BIN
from .ijb import IJB
| [
11748,
28686,
198,
198,
6738,
764,
74,
2550,
1330,
509,
32388,
198,
6738,
764,
907,
16,
76,
1330,
6579,
16,
44,
198,
6738,
764,
8800,
62,
19608,
292,
1039,
1330,
347,
1268,
198,
6738,
764,
2926,
65,
1330,
314,
47858,
628
] | 2.707317 | 41 |
""""""
# Standard library modules.
import abc
# Third party modules.
from django.core.mail import send_mail
from django.template import Engine, Context
# Local modules.
from .models import RunState
# Globals and constants variables.
| [
15931,
15931,
15931,
198,
198,
2,
8997,
5888,
13103,
13,
198,
11748,
450,
66,
198,
198,
2,
10467,
2151,
13103,
13,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
3758,
62,
4529,
198,
6738,
42625,
14208,
13,
28243,
1330,
7117,
11,
... | 3.567164 | 67 |
#!/usr/bin/env python2
import random
import itertools
import numpy
import sys
import json
import copy
def make_bins_ltpFR3(semArray):
"""
Creates four equal-width bins of WAS scores, identical to those used in ltpFR2. Then combine the middle two to give
three bins: low similarity, medium similarity, and high similarity.
A coordinate in semRows[i][j] and semCols[i][j] is the index of the jth word pair in semArray that falls in the ith
similarity bin.
"""
semArray_nondiag = semArray[numpy.where(semArray != 1)]
# Find lowest and highest similarity
min_sim = semArray_nondiag.min()
max_sim = semArray_nondiag.max()
# Split up the semantic space into four equal segments
semBins = list(numpy.linspace(min_sim, max_sim, 4))
# Combine the two middle bins by removing the bin boundary between them
# semBins = semBins[:2] + semBins[3:]
# Create bounds for the bins
semBins = zip(*[semBins[i:] + semBins[-1:i] for i in range(2)])
# For word pairs within the bounds of each bin, append the indices to semRows and semCols
semRows = []
semCols = []
for bin in semBins:
(i, j) = ((semArray > bin[0]) & (semArray < bin[1])).nonzero()
semRows.append(i)
semCols.append(j)
return semRows, semCols
def randomize_conditions_ltpFR3(config):
"""
Randomize the conditions for all sessions.
:param config: The imported configuration file, containing all parameters for the experiment
:return: A list of lists, where sublist n contains the ordering of list conditions for the nth session. cond[x][y][0]
defines the length of session x, list y; cond[x][y][1] defines the presentation rate of session x, list y;
cond[x][y][2] defines whether session x, list y uses visual or auditory presentation; cond[x][y][3] defines the
duration of the pre-list distractor task for session x, list y.
"""
options = [c for c in itertools.product(config.listLength, config.presRate, config.modality, config.distDur)]
cond = []
for i in range(config.nSessions):
sess = []
for j in range(config.reps):
random.shuffle(options)
sess += options[:]
cond.append(sess)
return cond
def choose_pairs_ltpFR3(wp_tot, cond, config, semRows, semCols):
"""
Selects word pairs to use in each list of each session.
:param wp_tot: A list containing all the words of the word pool. The order of the words is expected to correspond to
the indices used by semRows and semCols.
:param cond: A list of lists, where sublist n contains the ordering of list conditions for the nth session.
:param config: The imported configuration file, containing all parameters for the experiment.
:param semRows: See make_bins_ltpFR3()
:param semCols: See make_bins_ltpFR3()
:return: pairs - pairs[x][y][z] is the zth word pair in session x, list y
:return: pair_dicts - a list of dictionaries, where each dictionary contains all word pairs from a given session
:return: practice_lists - A list containing two practice lists, each with 18 words
"""
# pairs[x][y][z] will be the zth pair of words in the yth list on session x
pairs = []
# points to the other word in the pair for a given session
pair_dicts = []
# Deep copy the full word pool into full_wp_allowed, so it can be shuffled for each session without altering wp_tot
full_wp = wp_tot[:]
# Make word pairs for each session
session_num = 0
while session_num < config.nSessions:
#print 'Making session', session_num, ':',
#sys.stdout.flush()
# Shuffle the order of the word pool; I believe this is technically only necessary for the first session, in
# order to randomize which words are selected for the practice lists. All other lists have their items randomly
# chosen anyway
'''
IMPORTANT NOTE!!!:
Lists containing more than 2080 elements should not be randomized with shuffle, as explained here:
http://stackoverflow.com/questions/3062741/maximal-length-of-list-to-shuffle-with-python-random-shuffle
The full word pool contains 1638 words, so this is only a concern if the word pool is ever expanded.
'''
random.shuffle(full_wp)
# The first session has two 18-word practice lists
if session_num == 0:
practice_lists = [full_wp[:18], full_wp[18:36]]
sess_wp_allowed = full_wp[36:]
else:
sess_wp_allowed = full_wp[:]
# sess_pairs[x][y] will be the yth pair in the xth list on the current session
sess_pairs = []
# Track number of attempts to create the lists for the current session
sess_tries = 0
# Track whether the session completed successfully
goodSess = True
# Make word pairs for each list in the current session
list_num = 0
while list_num < len(cond[session_num]):
#print list_num,
#sys.stdout.flush()
# list_pairs[x] will be the xth pair in the current list on the current session
list_pairs = []
# Track number of attempts to create the current list
list_tries = 0
# Track whether the list completed successfully
goodList = True
# Retrieve the list length condition for the current list by looking in cond
listLength = cond[session_num][list_num][0]
# Length 12 lists have 2 pairs per bin, length 24 list have 4 pairs per bin
pairs_per_bin = 2 if listLength == 12 else 4
# Select two or four word pairs from each bin (based on list length)
for sem_i in range(len(semRows)):
# The pair for each semantic bin gets placed twice
pair_i = 0
while pair_i < pairs_per_bin:
# Get the indices (within the full word pool) of the words chosen for the current session
available_indices = [wp_tot.index(word) for word in sess_wp_allowed]
# Randomly choose indices/words from those in the current session until one is found that has one
# or more pairs in the current bin
index_word1 = random.choice(available_indices)
while index_word1 not in semRows[sem_i]:
index_word1 = random.choice(available_indices)
# Get the indices of all words whose pairing with the chosen word falls into the correct bin
good_second_indices = semCols[sem_i][semRows[sem_i] == index_word1]
# Eliminate the words that are not available in the session
good_second_indices = [i for i in good_second_indices if wp_tot[i] in sess_wp_allowed]
# Ensure that a word cannot be accidentally paired with itself
if index_word1 in good_second_indices:
del good_second_indices[good_second_indices.index(index_word1)]
# If there are no good words to choose from, restart
if len(good_second_indices) == 0:
list_tries += 1
if list_tries > 10:
goodList = False
break
else:
continue
# Choose the second word randomly
index_word2 = random.choice(good_second_indices)
# Add the pairs to list_pairs, delete them from the pool of allowed words
list_pairs.append([wp_tot[index_word1], wp_tot[index_word2]])
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word1])]
del sess_wp_allowed[sess_wp_allowed.index(wp_tot[index_word2])]
pair_i += 1
# If the list is bad, add the words back to the pool of allowed words
if not goodList:
sess_wp_allowed.extend([x[0] for x in list_pairs] + [x[1] for x in list_pairs])
break
# If the list is good, add the list_pairs to sess_pairs,
if goodList:
sess_pairs.append(list_pairs)
list_num += 1
else:
# Otherwise, try the session again (up to 50 times), then restart
list_pairs = []
sess_tries += 1
if sess_tries > 50:
goodSess = False
break
# If the whole session went successfully
if goodSess:
# Get the pairs from the lists, add them backwards and forwards to sess_pair_dict
sess_pair_dict = dict(itertools.chain(*sess_pairs))
sess_pair_dict.update(dict(zip(sess_pair_dict.values(), sess_pair_dict.keys())))
pair_dicts.append(sess_pair_dict)
pairs.append(sess_pairs)
session_num += 1
else: # If the session did not go well, try again.
sess_pairs = []
print ''
return pairs, pair_dicts, practice_lists
def place_pairs_ltpFR3(pairs, cond):
"""
:param pairs:
:param cond:
:param config:
:return:
"""
# Load all valid list compositions for 12-item lists (small lists are too restrictive to use trial and error)
with open('valid12.json', 'r') as f:
valid12 = json.load(f)['3bin-valid12']
# Loop through sessions
subj_wo = []
for (n, sess_pairs) in enumerate(pairs):
sess_wo = []
#print '\nPlacing session', n, ':',
#sys.stdout.flush()
# Loop through lists within each session
for (m, list_pairs) in enumerate(sess_pairs):
#print m,
#sys.stdout.flush()
# Create pairs of word pairs from the same bin -- one pair will have adjacent presentation, one distant
grouped_pairs = [list(group) for group in
zip([list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 0],
[list_pairs[i] for i in range(len(list_pairs)) if i % 2 == 1])]
# Retrieve list length for the current list
list_length = cond[n][m][0]
# For 12-item lists, select a random solution template and assign word pairs to the variables in the
# template, such that one pair from each bin has adjacent presentation and one pair from each bin has
# distant presentation
if list_length == 12:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents = ['a', 'b', 'c']
distants = ['d', 'e', 'f']
random.shuffle(adjacents)
random.shuffle(distants)
key = {}
for group in grouped_pairs:
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
key[adjacents.pop(0)] = group[0]
key[distants.pop(0)] = group[1]
# Choose a random valid solution
list_wo = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo)):
w = list_wo[i]
list_wo[i] = key[w[0]][int(w[1])]
# For 24-item lists, create two 12-item lists based on random solution templates and concatenate them.
elif list_length == 24:
# Randomize the ordering of the grouped pairs, as well as the orderings within each group and each pair
adjacents1 = ['a', 'b', 'c']
distants1 = ['d', 'e', 'f']
adjacents2 = ['a', 'b', 'c']
distants2 = ['d', 'e', 'f']
random.shuffle(adjacents1)
random.shuffle(distants1)
random.shuffle(adjacents2)
random.shuffle(distants2)
key1 = {}
key2 = {}
for group_num, group in enumerate(grouped_pairs):
random.shuffle(group)
random.shuffle(group[0])
random.shuffle(group[1])
if group_num % 2 == 0:
key1[adjacents1.pop(0)] = group[0]
key1[distants1.pop(0)] = group[1]
else:
key2[adjacents2.pop(0)] = group[0]
key2[distants2.pop(0)] = group[1]
# Choose a random valid solution
list_wo1 = copy.deepcopy(random.choice(valid12))
list_wo2 = copy.deepcopy(random.choice(valid12))
# Each entry in the solution list is a string containing a letter followed by 0 or 1
# The letter corresponds to the word pair and the number corresponds to the item in the pair.
# Letters a, b, and c are adjacent presentation pairs; d, e, and f are distant presentation pairs.
for i in range(len(list_wo1)):
w = list_wo1[i]
list_wo1[i] = key1[w[0]][int(w[1])]
w = list_wo2[i]
list_wo2[i] = key2[w[0]][int(w[1])]
list_wo = list_wo1 + list_wo2
else:
raise ValueError('Function place_pairs_ltpFR3() can only handle word lists of length 12 or 24!')
# Add finalized list to the session
sess_wo.append(list_wo)
subj_wo.append(sess_wo)
return subj_wo
def listgen_ltpFR3(n):
"""
Generate all lists for a participant, including the conditions, word pairs
and word ordering. This function saves the results to a json file labelled
with the participant's number.
"""
import config
# Read in the semantic association matrix
semMat = []
with open(config.w2vfile) as w2vfile:
for word in w2vfile:
wordVals = []
wordValsString = word.split()
for val in wordValsString:
thisVal = float(val)
wordVals.append(thisVal)
semMat.append(wordVals)
semArray = numpy.array(semMat)
# Create three semantic similarity bins and sort word pairs by bin
semRows, semCols = make_bins_ltpFR3(semArray)
# Read in the word pool
with open(config.wpfile) as wpfile:
wp_tot = [x.strip() for x in wpfile.readlines()]
counts = numpy.zeros(len(wp_tot))
for i in range(n):
print '\nSubject ' + str(i) + '\n'
# Randomize list conditions (list length, presentation rate, modality, distractor duration)
condi = randomize_conditions_ltpFR3(config)
# Choose all of the pairs to be used in the experiment
pairs, pair_dicts, practice_lists = choose_pairs_ltpFR3(wp_tot, condi, config, semRows, semCols)
# Create all lists by placing the word pairs in appropriate positions
subj_wo = place_pairs_ltpFR3(pairs, condi)
# Add practice lists
subj_wo[0] = practice_lists + subj_wo[0]
practice_condi = [[18, 1200, 'a', 18000], [18, 1200, 'v', 18000]]
random.shuffle(practice_condi)
condi[0] = practice_condi + condi[0]
d = {'word_order': subj_wo, 'pairs': pair_dicts, 'conditions': condi}
for sess_dict in pair_dicts:
counts[numpy.array([wp_tot.index(w) for w in sess_dict])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[0]])] += 1
counts[numpy.array([wp_tot.index(w) for w in practice_lists[1]])] += 1
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/%d.js' % i, 'w') as f:
s = 'var sess_info = ' + json.dumps(d) + ';'
f.write(s)
with open('/Users/jessepazdera/AtomProjects/ltpFR3_MTurk/static/pools/lists/counts.json', 'w') as f:
f.write(str([c for c in counts]))
print max(counts), min(counts), len([wp_tot[i] for i in range(len(counts)) if counts[i] == 0])
return counts
if __name__ == "__main__":
nsess = input('How many sessions would you like to generate? ')
counts = listgen_ltpFR3(nsess)
print counts.mean()
print counts.std()
print counts.max()
print counts.min()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
11748,
4738,
198,
11748,
340,
861,
10141,
198,
11748,
299,
32152,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
4866,
628,
198,
4299,
787,
62,
65,
1040,
62,
2528,
79,
10913,
18... | 2.229114 | 7,529 |
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple
import tensorflow as tf
from fastestimator.dataset.numpy_dataset import NumpyDataset
def load_data(image_key: str = "x", label_key: str = "y") -> Tuple[NumpyDataset, NumpyDataset]:
"""Load and return the CIFAR10 dataset.
Please consider using the ciFAIR10 dataset instead. CIFAR10 contains duplicates between its train and test sets.
Args:
image_key: The key for image.
label_key: The key for label.
Returns:
(train_data, eval_data)
"""
print("\033[93m {}\033[00m".format("FastEstimator-Warn: Consider using the ciFAIR10 dataset instead."))
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
train_data = NumpyDataset({image_key: x_train, label_key: y_train})
eval_data = NumpyDataset({image_key: x_eval, label_key: y_eval})
return train_data, eval_data
| [
2,
15069,
13130,
383,
12549,
22362,
320,
1352,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.092702 | 507 |
from pyais.messages import NMEAMessage
message = NMEAMessage(b"!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
# or
message = NMEAMessage.from_string("!AIVDM,1,1,,B,15M67FC000G?ufbE`FepT@3n00Sa,0*5C")
print(message.decode())
| [
6738,
279,
3972,
271,
13,
37348,
1095,
1330,
399,
11682,
2390,
7589,
198,
198,
20500,
796,
399,
11682,
2390,
7589,
7,
65,
40484,
32,
3824,
23127,
11,
16,
11,
16,
9832,
33,
11,
1314,
44,
3134,
4851,
830,
38,
30,
3046,
65,
36,
63,
... | 1.984375 | 128 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:02:33 2019
@author: sercangul
"""
if __name__ == '__main__':
n = int(input())
result = maxConsecutiveOnes(n)
print(result) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
7653,
220,
513,
678,
25,
2999,
25,
2091,
13130,
198,
198,
31,
9800,
25,
1055,
66,
... | 2.255319 | 94 |
import numpy as np
from .problem import Problem
from .algorithm_genetic import GeneralEvolutionaryAlgorithm
from .individual import Individual
from .operators import CustomGenerator, nondominated_truncate, RandomGenerator, UniformGenerator
import time
| [
11748,
299,
32152,
355,
45941,
198,
6738,
764,
45573,
1330,
20647,
198,
6738,
764,
282,
42289,
62,
5235,
5139,
1330,
3611,
15200,
2122,
560,
2348,
42289,
198,
6738,
764,
43129,
1330,
18629,
198,
6738,
764,
3575,
2024,
1330,
8562,
8645,
... | 4.216667 | 60 |
# -*- coding: utf-8 -*-
"""
APNS Proxy Server
"""
import time
import zmq
import simplejson as json
READ_TIMEOUT = 1500 # msec
FLUSH_TIMEOUT = 5000 # msec
COMMAND_ASK_ADDRESS = b'\1'
COMMAND_SEND = b'\2'
COMMAND_FEEDBACK = b'\3'
DEVICE_TOKEN_LENGTH = 64
JSON_ALERT_KEY_SET = set(['body', 'action_loc_key', 'loc_key', 'loc_args', 'launch_image'])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
2969,
8035,
38027,
9652,
198,
37811,
198,
198,
11748,
640,
198,
198,
11748,
1976,
76,
80,
198,
11748,
2829,
17752,
355,
33918,
628,
198,
15675,
62,
34694,
12... | 2.269231 | 156 |
import sys
sys.path.insert(0, '..')
import numpy
import time
import ConfigParser
import topicmodel
if __name__ == "__main__":
t0 = time.time()
main()
t1 = time.time()
print "finished"
print "time=", t1 - t0
| [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
705,
492,
11537,
628,
198,
11748,
299,
32152,
198,
11748,
640,
198,
11748,
17056,
46677,
198,
11748,
7243,
19849,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1... | 2.473118 | 93 |
#coding:utf-8
#
# id: bugs.core_4318
# title: Regression: Predicates involving PSQL variables/parameters are not pushed inside the aggregation
# decription:
# tracker_id: CORE-4318
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table t2 (
id integer not null,
t1_id integer
);
commit;
recreate table t1 (
id integer not null
);
commit;
set term ^;
execute block
as
declare variable i integer = 0;
begin
while (i < 1000) do begin
i = i + 1;
insert into t2(id, t1_id) values(:i, mod(:i, 10));
merge into t1 using (
select mod(:i, 10) as f from rdb$database
) src on t1.id = src.f
when not matched then
insert (id) values(src.f);
end -- while (i < 1000) do begin
end^
set term ;^
commit;
alter table t1 add constraint pk_t1 primary key (id);
alter table t2 add constraint pk_t2 primary key (id);
alter table t2 add constraint fk_t2_ref_t1 foreign key (t1_id) references t1(id);
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set explain on;
set planonly;
set term ^;
execute block
returns (
s integer
)
as
declare variable v integer = 1;
begin
with t as (
select t1_id as t1_id, sum(id) as s
from t2
group by 1
)
select s
from t
where t1_id = :v
into :s;
suspend;
end
^
set term ;^
-- In 3.0.0.30837 plan was:
-- Select Expression
-- -> Singularity Check
-- -> Filter
-- -> Aggregate
-- -> Table "T T2" Access By ID
-- -> Index "FK_T2_REF_T1" Scan
-- (i.e. there was NO "Filter" between "Aggregate" and "Table "T T2" Access By ID")
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Select Expression
-> Singularity Check
-> Filter
-> Aggregate
-> Filter
-> Table "T2" as "T T2" Access By ID
-> Index "FK_T2_REF_T1" Range Scan (full match)
"""
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
198,
2,
4686,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
11316,
13,
7295,
62,
3559,
1507,
198,
2,
3670,
25,
220,
220,
220,
220,
220,
220,
220,
3310,
2234,
25,
14322,
16856,
74... | 2.038715 | 1,214 |
#Import the json library to parse JSON file to Python
import json
#Import list of punctuation characters from the string library
from string import punctuation as p
#This method checks if the given word is a profanity
#This method calculates the degree of profanity for a list of strings | [
2,
20939,
262,
33918,
5888,
284,
21136,
19449,
2393,
284,
11361,
198,
11748,
33918,
198,
198,
2,
20939,
1351,
286,
21025,
2288,
3435,
422,
262,
4731,
5888,
198,
6738,
4731,
1330,
21025,
2288,
355,
279,
198,
198,
2,
1212,
2446,
8794,
6... | 4.461538 | 65 |
from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
#NEWS = open(os.path.join(here, 'NEWS.txt')).read()
rootdir = os.path.dirname(os.path.abspath(__file__))
exec(open(rootdir + '/cerridwen/version.py').read())
version = __VERSION__
setup(name='cerridwen',
version=version,
description='Accurate solar system data for everyone',
long_description=README,
author='Leslie P. Polzer',
author_email='polzer@fastmail.com',
url='http://cerridwen.bluemagician.vc/',
license='MIT',
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta"
, "Environment :: Console"
, "Intended Audience :: Science/Research"
, "Intended Audience :: Developers"
, "License :: OSI Approved :: MIT License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 3"
, "Topic :: Scientific/Engineering :: Astronomy"
, "Topic :: Other/Nonlisted Topic"
, "Topic :: Software Development :: Libraries :: Python Modules"
, "Topic :: Utilities"
],
maintainer='Leslie P. Polzer',
maintainer_email='polzer@fastmail.com',
packages=['cerridwen'],
requires=['pyswisseph', 'numpy', 'astropy(>=0.4)'],
extras_require={'Flask':['flask']},
entry_points={
'console_scripts':
['cerridwen = cerridwen.cli:main',
'cerridwen-server = cerridwen.api_server:main [Flask]']
})
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
11748,
28686,
198,
198,
1456,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
15675,
11682,
796,
1280,
7,
418,
13,
6978,
13,
22179,... | 2.440242 | 661 |
import datetime
from abc import ABC, abstractmethod
import pajbot
| [
11748,
4818,
8079,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
11748,
279,
1228,
13645,
628,
628
] | 3.5 | 20 |
#! /usr/bin/python3
# Description: Data_Ghost, concealing data into spaces and tabs making it imperceptable to human eyes.
# Author: Ajay Dyavathi
# Github: Radical Ajay
# USAGE:
# ghoster = Ghost('data.txt')
# ghoster.ghost('ghosted.txt')
# ghoster.unghost('ghosted.txt', 'unghosted.txt')
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
2,
12489,
25,
6060,
62,
32001,
11,
8571,
4272,
1366,
656,
9029,
290,
22524,
1642,
340,
11071,
984,
540,
284,
1692,
2951,
13,
198,
2,
6434,
25,
22028,
323,
23524,
615,
44202,
198,
2,... | 2.93 | 100 |
# Image classification using AWS Sagemaker and Linear Learner
# Program set up and import libraries
import numpy as np
import pandas as pd
import os
from sagemaker import get_execution_role
role = get_execution_role()
bucket = 'chi-hackathon-skin-images'
# Import Data
import boto3
from sagemaker import get_execution_role
role = get_execution_role()
bucket='chi-hackathon-skin-images'
data_key = 'ISIC_0000000.json' # need a way to go through entire library
data_location = 's3://{}/{}'.format(bucket, data_key)
metadata_set = pd.read_json(data_location)
image_set = np.asarray(data_location)
# TBD - transform json data to array
# TBD - transform image data to dataframe
train_set = zip(image_set, metadata_set)
# Split Data into Train and Validate
import random
random.seed(9001)
split = np.random.rand(len(df)) < 0.8
valid_set = train_set[split]
train_set = train_set[~split]
# Train Model
import boto
import sagemaker
data_location = 's3://{}/linearlearner_highlevel_example/data'.format(bucket)
output_location = 's3://{}/linearlearner_highlevel_example/output'.format(bucket)
print('training data will be uploaded to: {}'.format(data_location))
print('training artifacts will be uploaded to: {}'.format(output_location))
sess = sagemaker.Session()
linear = sagemaker.estimator.Estimator(container, role, train_instance_count=1, rain_instance_type='ml.c4.xlarge',
output_path=output_location, sagemaker_session=sess)
linear.set_hyperparameters(feature_dim=784, predictor_type='binary_classifier', mini_batch_size=200)
linear.fit({'train': train_set})
# Deploy Model
linear_predictor = linear.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# Validate
from sagemaker.predictor import csv_serializer, json_deserializer
linear_predictor.content_type = 'text/csv'
linear_predictor.serializer = csv_serializer
linear_predictor.deserializer = json_deserializer
result = linear_predictor.predict(train_set[0][30:31])
print(result)
| [
2,
7412,
17923,
1262,
30865,
25605,
32174,
290,
44800,
8010,
1008,
198,
2,
6118,
900,
510,
290,
1330,
12782,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
198,
6738,
45229,
32174,
1330... | 3.023077 | 650 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2015 Hamilton Kibbe <ham@hamiltonkib.be> and Paulo Henrique Silva
# <ph.silva@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import asin
import math
from .primitives import *
from .utils import validate_coordinates, inch, metric, rotate_point
# TODO: Add support for aperture macro variables
__all__ = ['AMPrimitive', 'AMCommentPrimitive', 'AMCirclePrimitive',
'AMVectorLinePrimitive', 'AMOutlinePrimitive', 'AMPolygonPrimitive',
'AMMoirePrimitive', 'AMThermalPrimitive', 'AMCenterLinePrimitive',
'AMLowerLeftLinePrimitive', 'AMUnsupportPrimitive']
def to_inch(self):
self.diameter = inch(self.diameter)
self.position = tuple([inch(x) for x in self.position])
def to_metric(self):
self.diameter = metric(self.diameter)
self.position = tuple([metric(x) for x in self.position])
def to_gerber(self, settings=None):
data = dict(code=self.code,
exposure='1' if self.exposure == 'on' else 0,
diameter=self.diameter,
x=self.position[0],
y=self.position[1])
return '{code},{exposure},{diameter},{x},{y}*'.format(**data)
def to_primitive(self, units):
return Circle((self.position), self.diameter, units=units, level_polarity=self._level_polarity)
class AMVectorLinePrimitive(AMPrimitive):
""" Aperture Macro Vector Line primitive. Code 2 or 20.
A vector line is a rectangle defined by its line width, start, and end
points. The line ends are rectangular.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.3:** Vector Line, primitive code 2 or 20.
Parameters
----------
code : int
Vector Line Primitive code. Must be either 2 or 20.
exposure : string
'on' or 'off'
width : float
Line width
start : tuple (<float>, <float>)
coordinate of line start point
end : tuple (<float>, <float>)
coordinate of line end point
rotation : float
Line rotation about the origin.
Returns
-------
LinePrimitive : :class:`gerbers.am_statements.AMVectorLinePrimitive`
An initialized AMVectorLinePrimitive
Raises
------
ValueError, TypeError
"""
def __init__(self, code, exposure, width, start, end, rotation):
validate_coordinates(start)
validate_coordinates(end)
if code not in (2, 20):
raise ValueError('VectorLinePrimitive codes are 2 or 20')
super(AMVectorLinePrimitive, self).__init__(code, exposure)
self.width = width
self.start = start
self.end = end
self.rotation = rotation
def to_inch(self):
self.width = inch(self.width)
self.start = tuple([inch(x) for x in self.start])
self.end = tuple([inch(x) for x in self.end])
def to_metric(self):
self.width = metric(self.width)
self.start = tuple([metric(x) for x in self.start])
self.end = tuple([metric(x) for x in self.end])
def to_gerber(self, settings=None):
fmtstr = '{code},{exp},{width},{startx},{starty},{endx},{endy},{rotation}*'
data = dict(code=self.code,
exp=1 if self.exposure == 'on' else 0,
width=self.width,
startx=self.start[0],
starty=self.start[1],
endx=self.end[0],
endy=self.end[1],
rotation=self.rotation)
return fmtstr.format(**data)
def to_primitive(self, units):
"""
Convert this to a primitive. We use the Outline to represent this (instead of Line)
because the behaviour of the end caps is different for aperture macros compared to Lines
when rotated.
"""
# Use a line to generate our vertices easily
line = Line(self.start, self.end, Rectangle(None, self.width, self.width))
vertices = line.vertices
aperture = Circle((0, 0), 0)
lines = []
prev_point = rotate_point(vertices[-1], self.rotation, (0, 0))
for point in vertices:
cur_point = rotate_point(point, self.rotation, (0, 0))
lines.append(Line(prev_point, cur_point, aperture))
return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMOutlinePrimitive(AMPrimitive):
    """ Aperture Macro Outline primitive. Code 4.

    An outline primitive is an area enclosed by an n-point polygon defined by
    its start point and n subsequent points. The outline must be closed, i.e.
    the last point must be equal to the start point. Self intersecting
    outlines are not allowed.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.6:** Outline, primitive code 4.

    Parameters
    ----------
    code : int
        OutlinePrimitive code. Must be 4.

    exposure : string
        'on' or 'off'

    start_point : tuple (<float>, <float>)
        coordinate of outline start point

    points : list of tuples (<float>, <float>)
        coordinates of subsequent points; the last point must equal
        start_point so the outline is closed.

    rotation : float
        outline rotation about the origin.

    Returns
    -------
    OutlinePrimitive : :class:`gerber.am_statements.AMOutlinePrimitive`
        An initialized AMOutlinePrimitive

    Raises
    ------
    ValueError, TypeError
    """

    def __init__(self, code, exposure, start_point, points, rotation):
        """ Initialize AMOutlinePrimitive
        """
        # Validate every coordinate pair before accepting the outline.
        validate_coordinates(start_point)
        for point in points:
            validate_coordinates(point)
        if code != 4:
            raise ValueError('OutlinePrimitive code is 4')
        super(AMOutlinePrimitive, self).__init__(code, exposure)
        self.start_point = start_point
        # The Gerber spec requires a closed outline: last point == start.
        if points[-1] != start_point:
            raise ValueError('OutlinePrimitive must be closed')
        self.points = points
        self.rotation = rotation

    def to_inch(self):
        # Convert stored coordinates to inch units in place.
        self.start_point = tuple([inch(x) for x in self.start_point])
        self.points = tuple([(inch(x), inch(y)) for x, y in self.points])

    def to_metric(self):
        # Convert stored coordinates to metric units in place.
        self.start_point = tuple([metric(x) for x in self.start_point])
        self.points = tuple([(metric(x), metric(y)) for x, y in self.points])

    def to_gerber(self, settings=None):
        # Serialize as: code,exposure,n_points,start_x,start_y,<points>,rotation*
        # Coordinates use %.6g so trailing zeros are dropped.
        data = dict(
            code=self.code,
            exposure="1" if self.exposure == "on" else "0",
            n_points=len(self.points),
            start_point="%.6g,%.6g" % self.start_point,
            points=",\n".join(["%.6g,%.6g" % point for point in self.points]),
            rotation=str(self.rotation)
        )
        return "{code},{exposure},{n_points},{start_point},{points},{rotation}*".format(**data)

    def to_primitive(self, units):
        """
        Convert this to a drawable primitive. This uses the Outline instead of Line
        primitive to handle differences in end caps when rotated.
        """
        lines = []
        # Rotate each vertex, then connect consecutive vertices with
        # zero-width lines (Circle aperture of radius 0).
        prev_point = rotate_point(self.start_point, self.rotation)
        for point in self.points:
            cur_point = rotate_point(point, self.rotation)
            lines.append(Line(prev_point, cur_point, Circle((0,0), 0)))
            prev_point = cur_point
        # Sanity check: rotation must not have broken closure.
        if lines[0].start != lines[-1].end:
            raise ValueError('Outline must be closed')
        return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMPolygonPrimitive(AMPrimitive):
    """ Aperture Macro Polygon primitive. Code 5.

    A polygon primitive is a regular polygon defined by the number of
    vertices, the center point, and the diameter of the circumscribed circle.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.8:** Polygon, primitive code 5.

    Parameters
    ----------
    code : int
        PolygonPrimitive code. Must be 5.

    exposure : string
        'on' or 'off'

    vertices : int, 3 <= vertices <= 12
        Number of vertices

    position : tuple (<float>, <float>)
        X and Y coordinates of polygon center

    diameter : float
        diameter of circumscribed circle.

    rotation : float
        polygon rotation about the origin.

    Returns
    -------
    PolygonPrimitive : :class:`gerbers.am_statements.AMPolygonPrimitive`
        An initialized AMPolygonPrimitive

    Raises
    ------
    ValueError, TypeError
    """

    def __init__(self, code, exposure, vertices, position, diameter, rotation):
        """ Initialize AMPolygonPrimitive
        """
        if code != 5:
            raise ValueError('PolygonPrimitive code is 5')
        super(AMPolygonPrimitive, self).__init__(code, exposure)
        # The Gerber spec only allows regular polygons of 3..12 vertices.
        if not 3 <= vertices <= 12:
            raise ValueError('Number of vertices must be between 3 and 12')
        self.vertices = vertices
        validate_coordinates(position)
        self.position = position
        self.diameter = diameter
        self.rotation = rotation

    def to_inch(self):
        # Convert the stored center and diameter to inch units in place.
        self.position = tuple(inch(coord) for coord in self.position)
        self.diameter = inch(self.diameter)

    def to_metric(self):
        # Convert the stored center and diameter to metric units in place.
        self.position = tuple(metric(coord) for coord in self.position)
        self.diameter = metric(self.diameter)

    def to_gerber(self, settings=None):
        # Serialize as: code,exposure,vertices,x,y,diameter,rotation*
        fields = (
            str(self.code),
            "1" if self.exposure == "on" else "0",
            str(self.vertices),
            "%.4g,%.4g" % self.position,
            "%.4g" % self.diameter,
            str(self.rotation),
        )
        return ",".join(fields) + "*"

    def to_primitive(self, units):
        # Radius is half the circumscribed-circle diameter; rotation is
        # stored in degrees but the Polygon primitive expects radians.
        return Polygon(self.position, self.vertices, self.diameter / 2.0, 0,
                       rotation=math.radians(self.rotation), units=units,
                       level_polarity=self._level_polarity)
class AMMoirePrimitive(AMPrimitive):
    """ Aperture Macro Moire primitive. Code 6.

    The moire primitive is a cross hair centered on concentric rings (annuli).
    Exposure is always on.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.9:** Moire, primitive code 6.

    Parameters
    ----------
    code : int
        Moire Primitive code. Must be 6.

    position : tuple (<float>, <float>)
        X and Y coordinates of moire center

    diameter : float
        outer diameter of outer ring.

    ring_thickness : float
        thickness of concentric rings.

    gap : float
        gap between concentric rings.

    max_rings : float
        maximum number of rings

    crosshair_thickness : float
        thickness of crosshairs

    crosshair_length : float
        length of crosshairs

    rotation : float
        moire rotation about the origin.

    Returns
    -------
    MoirePrimitive : :class:`gerbers.am_statements.AMMoirePrimitive`
        An initialized AMMoirePrimitive

    Raises
    ------
    ValueError, TypeError
    """

    def __init__(self, code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation):
        """ Initialize AMMoirePrimitive
        """
        if code != 6:
            raise ValueError('MoirePrimitive code is 6')
        # Moire exposure is always 'on' per the Gerber specification.
        super(AMMoirePrimitive, self).__init__(code, 'on')
        validate_coordinates(position)
        self.position = position
        self.diameter = diameter
        self.ring_thickness = ring_thickness
        self.gap = gap
        self.max_rings = max_rings
        self.crosshair_thickness = crosshair_thickness
        self.crosshair_length = crosshair_length
        self.rotation = rotation
class AMThermalPrimitive(AMPrimitive):
    """ Aperture Macro Thermal primitive. Code 7.

    The thermal primitive is a ring (annulus) interrupted by four gaps.
    Exposure is always on.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.10:** Thermal, primitive code 7.

    Parameters
    ----------
    code : int
        Thermal Primitive code. Must be 7.

    position : tuple (<float>, <float>)
        X and Y coordinates of thermal center

    outer_diameter : float
        outer diameter of thermal.

    inner_diameter : float
        inner diameter of thermal.

    gap : float
        gap thickness

    rotation : float
        thermal rotation about the origin.

    Returns
    -------
    ThermalPrimitive : :class:`gerbers.am_statements.AMThermalPrimitive`
        An initialized AMThermalPrimitive

    Raises
    ------
    ValueError, TypeError
    """

    def _approximate_arc_cw(self, start_angle, end_angle, radius, center):
        """
        Get an arc as a series of points

        Parameters
        ----------
        start_angle : The start angle in radians

        end_angle : The end angle in radians

        radius : Radius of the arc

        center : The center point of the arc (x, y) tuple

        Returns
        -------
        array of point tuples
        """
        # The total sweep of the arc. NOTE(review): despite the _cw name,
        # the sweep direction simply follows the sign of
        # (end_angle - start_angle) -- confirm against callers.
        sweep_angle = end_angle - start_angle
        # Fixed 10-segment approximation (11 points including both ends).
        num_steps = 10
        angle_step = sweep_angle / num_steps

        points = []
        for i in range(num_steps + 1):
            current_angle = start_angle + (angle_step * i)
            nextx = (center[0] + math.cos(current_angle) * radius)
            nexty = (center[1] + math.sin(current_angle) * radius)
            points.append((nextx, nexty))

        return points
class AMCenterLinePrimitive(AMPrimitive):
    """ Aperture Macro Center Line primitive. Code 21.

    The center line primitive is a rectangle defined by its width, height, and center point.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.4:** Center Line, primitive code 21.

    Parameters
    ----------
    code : int
        Center Line Primitive code. Must be 21.

    exposure : str
        'on' or 'off'

    width : float
        Width of rectangle

    height : float
        Height of rectangle

    center : tuple (<float>, <float>)
        X and Y coordinates of line center

    rotation : float
        rectangle rotation about its center.

    Returns
    -------
    CenterLinePrimitive : :class:`gerbers.am_statements.AMCenterLinePrimitive`
        An initialized AMCenterLinePrimitive

    Raises
    ------
    ValueError, TypeError
    """

    def __init__(self, code, exposure, width, height, center, rotation):
        if code != 21:
            raise ValueError('CenterLinePrimitive code is 21')
        super(AMCenterLinePrimitive, self).__init__(code, exposure)
        self.width = width
        self.height = height
        validate_coordinates(center)
        self.center = center
        self.rotation = rotation

    def to_inch(self):
        # Convert stored dimensions to inch units in place.
        self.center = tuple([inch(x) for x in self.center])
        self.width = inch(self.width)
        self.height = inch(self.height)

    def to_metric(self):
        # Convert stored dimensions to metric units in place.
        self.center = tuple([metric(x) for x in self.center])
        self.width = metric(self.width)
        self.height = metric(self.height)

    def to_gerber(self, settings=None):
        # Serialize as: code,exposure,width,height,x,y,rotation*
        data = dict(
            code=self.code,
            exposure='1' if self.exposure == 'on' else '0',
            width=self.width,
            height=self.height,
            center="%.4g,%.4g" % self.center,
            rotation=self.rotation
        )
        fmt = "{code},{exposure},{width},{height},{center},{rotation}*"
        return fmt.format(**data)

    def to_primitive(self, units):
        # Build the four corners of the axis-aligned rectangle, then
        # rotate each about the rectangle center and connect consecutive
        # corners with zero-width lines to form a closed outline.
        x = self.center[0]
        y = self.center[1]
        half_width = self.width / 2.0
        half_height = self.height / 2.0

        points = []
        points.append((x - half_width, y + half_height))
        points.append((x - half_width, y - half_height))
        points.append((x + half_width, y - half_height))
        points.append((x + half_width, y + half_height))

        aperture = Circle((0, 0), 0)

        lines = []
        prev_point = rotate_point(points[3], self.rotation, self.center)
        for point in points:
            cur_point = rotate_point(point, self.rotation, self.center)
            lines.append(Line(prev_point, cur_point, aperture))
            # BUGFIX: advance prev_point so consecutive corners are joined.
            # Previously every edge started at the same (rotated) corner,
            # producing a degenerate, unclosed outline. Mirrors the pattern
            # used by AMOutlinePrimitive.to_primitive.
            prev_point = cur_point

        return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMLowerLeftLinePrimitive(AMPrimitive):
    """ Aperture Macro Lower Left Line primitive. Code 22.

    The lower left line primitive is a rectangle defined by its width, height, and the lower left point.

    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.5:** Lower Left Line, primitive code 22.

    Parameters
    ----------
    code : int
        Lower Left Line Primitive code. Must be 22.

    exposure : str
        'on' or 'off'

    width : float
        Width of rectangle

    height : float
        Height of rectangle

    lower_left : tuple (<float>, <float>)
        X and Y coordinates of lower left corner

    rotation : float
        rectangle rotation about its origin.

    Returns
    -------
    LowerLeftLinePrimitive : :class:`gerbers.am_statements.AMLowerLeftLinePrimitive`
        An initialized AMLowerLeftLinePrimitive

    Raises
    ------
    ValueError, TypeError
    """
class AMUnsupportPrimitive(AMPrimitive):
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
6634,
1853,
11582,
509,
571,
1350,
1279,
2763,
31,
2763,
9044,
74,
571,
13,
1350,
29,
290,
34410,
6752,
33865... | 2.474655 | 7,536 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-19 17:47
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
24,
319,
1584,
12,
2919,
12,
1129,
1596,
25,
2857,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.719298 | 57 |
from distutils.version import LooseVersion
import difflib
import os
import numpy as np
from .core import Array
from ..async import get_sync
if LooseVersion(np.__version__) >= '1.10.0':
allclose = np.allclose
else:
| [
6738,
1233,
26791,
13,
9641,
1330,
6706,
577,
14815,
198,
11748,
814,
8019,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
7295,
1330,
15690,
198,
6738,
11485,
292,
13361,
1330,
651,
62,
27261,
198,
198,
361... | 2.973333 | 75 |
# more specific selections for Python 3 (ASkr, 2/2018)
from launchpad_py.launchpad import Launchpad
from launchpad_py.launchpad import LaunchpadMk2
from launchpad_py.launchpad import LaunchpadPro
from launchpad_py.launchpad import LaunchControlXL
from launchpad_py.launchpad import LaunchKeyMini
from launchpad_py.launchpad import Dicer
from launchpad_py import charset
| [
2,
517,
2176,
28224,
329,
11361,
513,
357,
1921,
38584,
11,
362,
14,
7908,
8,
198,
6738,
4219,
15636,
62,
9078,
13,
35681,
15636,
1330,
21225,
15636,
198,
6738,
4219,
15636,
62,
9078,
13,
35681,
15636,
1330,
21225,
15636,
44,
74,
17,
... | 3.592233 | 103 |
import base64
import math
import re
from io import BytesIO
import matplotlib.cm
import numpy as np
import torch
import torch.nn
from PIL import Image
# Compute edge magnitudes
from scipy import ndimage
# Demo Utilities
| [
11748,
2779,
2414,
201,
198,
11748,
10688,
201,
198,
11748,
302,
201,
198,
6738,
33245,
1330,
2750,
4879,
9399,
201,
198,
201,
198,
11748,
2603,
29487,
8019,
13,
11215,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28034,
20... | 2.55 | 100 |
"""In programming, a factory is a function that returns an object.
Functions are easy to understand because they have clear inputs and outputs.
Most gdsfactory functions take some inputs and return a Component object.
Some of these inputs parameters are also functions.
- Component: Object with.
- name.
- references: to other components (x, y, rotation).
- polygons in different layers.
- ports dict.
- Route: dataclass with 3 attributes.
- references: list of references (straights, bends and tapers).
- ports: dict(input=PortIn, output=PortOut).
- length: how long is this route?
Factories:
- ComponentFactory: function that returns a Component.
- RouteFactory: function that returns a Route.
Specs:
- ComponentSpec: Component, ComponentFactory or dict(component=mzi, settings=dict(delta_length=20)).
- LayerSpec: (3, 0), 3 (asumes 0 as datatype) or string.
"""
import json
import pathlib
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import numpy as np
from omegaconf import OmegaConf
from phidl.device_layout import Label as LabelPhidl
from phidl.device_layout import Path
from pydantic import BaseModel, Extra
from typing_extensions import Literal
from gdsfactory.component import Component, ComponentReference
from gdsfactory.cross_section import CrossSection
from gdsfactory.port import Port
Anchor = Literal[
"ce",
"cw",
"nc",
"ne",
"nw",
"sc",
"se",
"sw",
"center",
"cc",
]
Axis = Literal["x", "y"]
NSEW = Literal["N", "S", "E", "W"]
Float2 = Tuple[float, float]
Float3 = Tuple[float, float, float]
Floats = Tuple[float, ...]
Strs = Tuple[str, ...]
Int2 = Tuple[int, int]
Int3 = Tuple[int, int, int]
Ints = Tuple[int, ...]
Layer = Tuple[int, int]
Layers = Tuple[Layer, ...]
LayerSpec = NewType("LayerSpec", Union[Layer, int, str, None])
LayerSpecs = Tuple[LayerSpec, ...]
ComponentFactory = Callable[..., Component]
ComponentFactoryDict = Dict[str, ComponentFactory]
PathFactory = Callable[..., Path]
PathType = Union[str, pathlib.Path]
PathTypes = Tuple[PathType, ...]
ComponentOrPath = Union[PathType, Component]
ComponentOrReference = Union[Component, ComponentReference]
NameToFunctionDict = Dict[str, ComponentFactory]
Number = Union[float, int]
Coordinate = Tuple[float, float]
Coordinates = Tuple[Coordinate, ...]
ComponentOrPath = Union[Component, PathType]
CrossSectionFactory = Callable[..., CrossSection]
CrossSectionOrFactory = Union[CrossSection, Callable[..., CrossSection]]
PortSymmetries = Dict[str, Dict[str, List[str]]]
PortsDict = Dict[str, Port]
PortsList = Dict[str, Port]
ComponentSpec = NewType(
"ComponentSpec", Union[str, ComponentFactory, Component, Dict[str, Any]]
)
ComponentSpecOrList = Union[ComponentSpec, List[ComponentSpec]]
CellSpec = Union[str, ComponentFactory, Dict[str, Any]]
ComponentSpecDict = Dict[str, ComponentSpec]
CrossSectionSpec = NewType(
"CrossSectionSpec", Union[str, CrossSectionFactory, CrossSection, Dict[str, Any]]
)
MultiCrossSectionAngleSpec = List[Tuple[CrossSectionSpec, Tuple[int, ...]]]
RouteFactory = Callable[..., Route]
__all__ = (
"ComponentFactory",
"ComponentFactoryDict",
"ComponentSpec",
"ComponentOrPath",
"ComponentOrReference",
"Coordinate",
"Coordinates",
"CrossSectionFactory",
"CrossSectionOrFactory",
"MultiCrossSectionAngleSpec",
"Float2",
"Float3",
"Floats",
"Int2",
"Int3",
"Ints",
"Layer",
"Layers",
"NameToFunctionDict",
"Number",
"PathType",
"PathTypes",
"Route",
"RouteFactory",
"Routes",
"Strs",
)
if __name__ == "__main__":
write_schema()
import jsonschema
import yaml
from gdsfactory.config import CONFIG
schema_path = CONFIG["schema_netlist"]
schema_dict = json.loads(schema_path.read_text())
yaml_text = """
name: mzi
pdk: ubcpdk
settings:
dy: -90
info:
polarization: te
wavelength: 1.55
description: mzi for ubcpdk
instances:
yr:
component: y_splitter
yl:
component: y_splitter
placements:
yr:
rotation: 180
x: 100
y: 0
routes:
route_top:
links:
yl,opt2: yr,opt3
settings:
cross_section: strip
route_bot:
links:
yl,opt3: yr,opt2
routing_strategy: get_bundle_from_steps
settings:
steps: [dx: 30, dy: '${settings.dy}', dx: 20]
cross_section: strip
ports:
o1: yl,opt1
o2: yr,opt1
"""
yaml_dict = yaml.safe_load(yaml_text)
jsonschema.validate(yaml_dict, schema_dict)
# from gdsfactory.components import factory
# c = NetlistModel(factory=factory)
# c.add_instance("mmi1", "mmi1x2", length=13.3)
| [
37811,
818,
8300,
11,
257,
8860,
318,
257,
2163,
326,
5860,
281,
2134,
13,
198,
198,
24629,
2733,
389,
2562,
284,
1833,
780,
484,
423,
1598,
17311,
290,
23862,
13,
198,
6943,
308,
9310,
69,
9548,
5499,
1011,
617,
17311,
290,
1441,
2... | 2.627753 | 1,816 |
# -*- coding: utf-8 -*-
# Code will only work with Django >= 1.5. See tests/config.py
import re
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager
from oscar.apps.customer.abstract_models import AbstractUser
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
6127,
481,
691,
670,
351,
37770,
18189,
352,
13,
20,
13,
4091,
5254,
14,
11250,
13,
9078,
198,
11748,
302,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
... | 3.179245 | 106 |
import pickle
import numpy as np
INPUT_FILENAME = 'NP_WEIGHTS.pck'
PRECISION = 100
# Open weights
fc1_k, fc1_b, fc2_k, fc2_b = pickle.load(
open(INPUT_FILENAME, 'rb'))
# Round them
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k*PRECISION//1, fc1_b*PRECISION//1, fc2_k*PRECISION//1, fc2_b*PRECISION*PRECISION//1
fc1_k, fc1_b, fc2_k, fc2_b = fc1_k.astype(np.int), fc1_b.astype(np.int), fc2_k.astype(np.int), fc2_b.astype(np.int)
"""
0: GENERATE C++ ARRAYS, TO BE USED IN A STANDARD LOOP
"""
OUTPUT_FILENAME = 'fc_weights_arrays.cpp'
# Generate .cpp text
out = 'int fc1_k[' + str(fc1_k.shape[0]) + '][' + str(fc1_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc1_k) + ';\n\n'
out += 'int fc1_b[' + str(fc1_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc1_b) + ';\n\n'
out += 'int fc2_k[' + str(fc2_k.shape[0]) + '][' + str(fc2_k.shape[1]) + '] = '
out += to_cpp_2_dims(fc2_k) + ';\n\n'
out += 'int fc2_b[' + str(fc2_b.shape[0]) + '] = '
out += to_cpp_1_dim(fc2_b) + ';\n\n'
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
1: GENERATE C++ LOOP, USING THE ABOVE ARRAY
"""
OUTPUT_FILENAME = 'fc_loop_unrolled.cpp'
def to_cpp_function(k, b, function_name, in_dim, out_dim):
    """
    Generate C++ source for a fully connected layer over int values.

    The emitted function references the weight array named *k* and the bias
    array named *b* (both assumed to be declared elsewhere in the C++ file),
    reading ``in_dim`` inputs and writing ``out_dim`` outputs. The generated
    function is named *function_name*.
    """
    pieces = [f"inline void {function_name}(int in[{in_dim}], int out[{out_dim}]){{\n"]
    for j in range(out_dim):
        # One fully unrolled dot product per output neuron.
        pieces.append(f"\tout[{j}] = \n")
        for i in range(in_dim):
            pieces.append(f"\t\tin[{i}]*({k}[{i}][{j}]) +\n")
        # Bias term closes the expression.
        pieces.append(f"\t\t({b}[{j}]);\n")
    pieces.append("}\n\n")
    return "".join(pieces)
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function('fc1_k', 'fc1_b', 'fc_1', 27, 50)
# Second layer
out += to_cpp_function('fc2_k', 'fc2_b', 'fc_2', 50, 10)
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
"""
3: GENERATE C++ LOOP, WITH HARDCODED WEIGHTS
"""
OUTPUT_FILENAME = 'fc_loop_unrolled_hardcoded_weights.cpp'
def to_cpp_function(k, b, function_name):
    """
    Generate C++ source for a fully connected layer over int values.

    Unlike the array-reference variant, the individual weight values from
    matrix *k* (shape: in_dim x out_dim) and bias vector *b* are hardcoded
    as literals directly into the emitted code. The generated function is
    named *function_name*.
    """
    in_dim, out_dim = k.shape
    chunks = [f"inline void {function_name}(int in[{in_dim}], int out[{out_dim}]){{\n"]
    for j in range(out_dim):
        # One fully unrolled dot product per output neuron.
        chunks.append(f"\tout[{j}] = \n")
        for i in range(in_dim):
            chunks.append(f"\t\tin[{i}]*({k[i][j]}) +\n")
        # Bias literal closes the expression.
        chunks.append(f"\t\t({b[j]});\n")
    chunks.append("}\n\n")
    return "".join(chunks)
## Generate .cpp text
out = ""
# First layer
out += to_cpp_function(fc1_k, fc1_b, 'fc_1')
# Second layer
out += to_cpp_function(fc2_k, fc2_b, 'fc_2')
# Output it
with open(OUTPUT_FILENAME, 'w+', encoding='utf-8') as f:
f.write(out)
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
198,
1268,
30076,
62,
46700,
1677,
10067,
796,
705,
22182,
62,
8845,
34874,
13,
79,
694,
6,
198,
47,
38827,
42446,
796,
1802,
198,
198,
2,
4946,
19590,
198,
16072,
16,
62,
7... | 2.131124 | 1,388 |
import cv2
import numpy as np
import random
img = cv2.imread('../../Assets/Images/flower-white.jpeg', 1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
cv2.imshow('img', img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgG = cv2.GaussianBlur(gray, (3, 3), 0)
dst = cv2.Canny(img, 50, 50)
cv2.imshow('dst', dst)
cv2.waitKey(0) | [
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
10786,
40720,
40720,
8021,
1039,
14,
29398,
14,
25547,
12,
11186,
13,
73,
22071,
3256,
352,
8,
198,
198,
... | 2.09697 | 165 |
"""Detection model trainer.
This file provides a generic training method to train a
DetectionModel.
"""
import datetime
import os
import tensorflow as tf
import time
from avod.builders import optimizer_builder
from avod.core import trainer_utils
from avod.core import summary_utils
slim = tf.contrib.slim
def train(model, train_config):
    """Training function for detection models.

    Builds the model graph, sets up the optimizer/train op and summaries,
    restores the latest checkpoint (or optionally pretrained weights), then
    runs the main training loop, periodically saving checkpoints and writing
    TensorBoard summaries.

    Args:
        model: The detection model object.
        train_config: a train_*pb2 protobuf.
            training i.e. loading RPN weights onto AVOD model.

    Returns:
        None. Side effects: writes checkpoints under the configured
        checkpoint_dir and event files under <logdir>/train/<datetime>.
    """
    # NOTE(review): these two self-assignments are no-ops kept verbatim.
    model = model
    train_config = train_config
    # Get model configurations
    model_config = model.model_config

    # Create a variable tensor to hold the global step
    global_step_tensor = tf.Variable(
        0, trainable=False, name='global_step')

    #############################
    # Get training configurations
    #############################
    max_iterations = train_config.max_iterations
    summary_interval = train_config.summary_interval
    checkpoint_interval = train_config.checkpoint_interval
    max_checkpoints = train_config.max_checkpoints_to_keep

    paths_config = model_config.paths_config
    logdir = paths_config.logdir
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    checkpoint_dir = paths_config.checkpoint_dir
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    checkpoint_path = checkpoint_dir + '/' + \
        model_config.checkpoint_name
    # NOTE(review): pretrained checkpoint location is hardcoded relative to
    # checkpoint_dir -- confirm this layout matches the experiment setup.
    pretrained_checkpoint_dir = checkpoint_dir + '/../../' + \
        'pyramid_cars_with_aug_dt_5_tracking_corr_pretrained/checkpoints'

    global_summaries = set([])

    # The model should return a dictionary of predictions
    prediction_dict = model.build()

    summary_histograms = train_config.summary_histograms
    summary_img_images = train_config.summary_img_images
    summary_bev_images = train_config.summary_bev_images

    # get variables to train
    # When fine-tuning from a pretrained model, only a hardcoded subset of
    # variables is trained. NOTE(review): indices [68:72] and [96:] depend on
    # the exact graph construction order -- fragile, verify when the model
    # architecture changes.
    if not train_config.use_pretrained_model:
        variable_to_train = None
    else:
        trainable_variables = tf.trainable_variables()
        variable_to_train = trainable_variables[68:72] + \
            trainable_variables[96:]

    ##############################
    # Setup loss
    ##############################
    # losses_dict is unused here; only the scalar total_loss feeds the train op.
    losses_dict, total_loss = model.loss(prediction_dict)

    # Optimizer
    training_optimizer = optimizer_builder.build(
        train_config.optimizer,
        global_summaries,
        global_step_tensor)

    # Create the train op
    with tf.variable_scope('train_op'):
        train_op = slim.learning.create_train_op(
            total_loss,
            training_optimizer,
            variables_to_train=variable_to_train,
            clip_gradient_norm=1.0,
            global_step=global_step_tensor)

    # Add the result of the train_op to the summary
    tf.summary.scalar("training_loss", train_op)

    # Add maximum memory usage summary op
    # This op can only be run on device with gpu
    # so it's skipped on travis
    is_travis = 'TRAVIS' in os.environ
    if not is_travis:
        # tf.summary.scalar('bytes_in_use',
        #                   tf.contrib.memory_stats.BytesInUse())
        tf.summary.scalar('max_bytes',
                          tf.contrib.memory_stats.MaxBytesInUse())

    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
    summary_merged = summary_utils.summaries_to_keep(
        summaries,
        global_summaries,
        histograms=summary_histograms,
        input_imgs=summary_img_images,
        input_bevs=summary_bev_images
    )

    allow_gpu_mem_growth = train_config.allow_gpu_mem_growth
    if allow_gpu_mem_growth:
        # GPU memory config
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = allow_gpu_mem_growth
        sess = tf.Session(config=config)
    else:
        sess = tf.Session()

    # Create unique folder name using datetime for summary writer
    datetime_str = str(datetime.datetime.now())
    logdir = logdir + '/train'
    train_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,
                                         sess.graph)

    # Save checkpoints regularly.
    saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)

    # Create init op
    # if train_config.use_pretrained_model:
    #     init = tf.initialize_variables(variable_to_train)
    # else:
    #     init = tf.global_variables_initializer()
    init = tf.global_variables_initializer()

    # Continue from last saved checkpoint
    if not train_config.overwrite_checkpoints:
        trainer_utils.load_checkpoints(checkpoint_dir,saver)
        if len(saver.last_checkpoints) > 0:
            checkpoint_to_restore = saver.last_checkpoints[-1]
            saver.restore(sess, checkpoint_to_restore)
        else:
            # No previous checkpoint: initialize, then optionally restore
            # pretrained weights for the non-trained variable subset.
            sess.run(init)
            # load pretrained model
            if train_config.use_pretrained_model:
                variable_to_restore = tf.trainable_variables()
                variable_to_restore = variable_to_restore[:68] + \
                    variable_to_restore[72:96]
                variable_to_restore = {var.op.name: var for var in variable_to_restore}
                saver2 = tf.train.Saver(var_list=variable_to_restore)
                print('Loading pretrained model...')
                trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
                # NOTE(review): index 11 picks a specific pretrained
                # checkpoint -- confirm it exists in the directory.
                checkpoint_to_restore = saver2.last_checkpoints[11]
                saver2.restore(sess, checkpoint_to_restore)
    else:
        # Overwriting: start fresh, optionally from pretrained weights.
        # NOTE(review): this branch duplicates the pretrained-restore logic
        # above -- candidate for extraction into a helper.
        sess.run(init)
        # load pretrained model
        if train_config.use_pretrained_model:
            variable_to_restore = tf.trainable_variables()
            variable_to_restore = variable_to_restore[:68] + \
                variable_to_restore[72:96]
            variable_to_restore = {var.op.name: var for var in variable_to_restore}
            saver2 = tf.train.Saver(var_list=variable_to_restore)
            print('Loading pretrained model...')
            trainer_utils.load_checkpoints(pretrained_checkpoint_dir, saver2)
            checkpoint_to_restore = saver2.last_checkpoints[11]
            saver2.restore(sess, checkpoint_to_restore)

    # Read the global step if restored
    global_step = tf.train.global_step(sess, global_step_tensor)
    print('Starting from step {} / {}'.format(
        global_step, max_iterations))

    # Main Training Loop
    last_time = time.time()
    for step in range(global_step, max_iterations + 1):

        # Save checkpoint
        if step % checkpoint_interval == 0:
            global_step = tf.train.global_step(sess,
                                               global_step_tensor)

            saver.save(sess,
                       save_path=checkpoint_path,
                       global_step=global_step)

            print('Step {} / {}, Checkpoint saved to {}-{:08d}'.format(
                step, max_iterations,
                checkpoint_path, global_step))

        feed_dict = model.create_feed_dict()

        # Write summaries and train op
        if step % summary_interval == 0:
            current_time = time.time()
            time_elapsed = current_time - last_time
            last_time = current_time

            train_op_loss, summary_out = sess.run(
                [train_op, summary_merged], feed_dict=feed_dict)

            print('Step {}, Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
                step, train_op_loss, time_elapsed))
            train_writer.add_summary(summary_out, step)

        else:
            # Run the train op only
            sess.run(train_op, feed_dict)

    # Close the summary writers
    train_writer.close()
37811,
11242,
3213,
2746,
21997,
13,
198,
198,
1212,
2393,
3769,
257,
14276,
3047,
2446,
284,
4512,
257,
198,
11242,
3213,
17633,
13,
198,
37811,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
... | 2.285084 | 3,399 |
import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
| [
11748,
9874,
292,
979,
72,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
... | 3.395349 | 43 |
import os
from tempfile import NamedTemporaryFile
import boto3
from moto import mock_s3
import pandas as pd
import pandavro as pdx
import pickle
import pytest
| [
11748,
28686,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
198,
11748,
275,
2069,
18,
198,
6738,
285,
2069,
1330,
15290,
62,
82,
18,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
19798,
615,
305,
355,
279,
34350,
1... | 3.12963 | 54 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-08 11:13
from __future__ import unicode_literals
from django.db import migrations
from company import helpers
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
940,
319,
1584,
12,
1065,
12,
2919,
1367,
25,
1485,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.887097 | 62 |
import pytest
import asyncio
from system.utils import *
from random import randrange as rr
import hashlib
import time
from datetime import datetime, timedelta, timezone
from indy import payment
import logging
logger = logging.getLogger(__name__)
| [
11748,
12972,
9288,
198,
11748,
30351,
952,
198,
6738,
1080,
13,
26791,
1330,
1635,
198,
6738,
4738,
1330,
43720,
9521,
355,
374,
81,
198,
11748,
12234,
8019,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
1... | 3.661765 | 68 |
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
import click
from click import testing as clicktest
import time
from . import CuratorTestCase
from unittest.case import SkipTest
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
rhost, rport = os.environ.get('REMOTE_ES_SERVER', 'localhost:9201').split(':')
port = int(port) if port else 9200
rport = int(rport) if rport else 9201
| [
11748,
1034,
1072,
998,
9078,
198,
11748,
46132,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4731,
198,
11748,
4738,
198,
11748,
20218,
7753,
198,
11748,
3904,
198,
6738,
3904,
1330,
4856,
355,
3904,
9288,
198,
11748,
640,
198,
198,... | 3.01087 | 184 |
import libhustpass.sbDes as sbDes
import libhustpass.captcha as captcha
import requests
import re
import random
| [
11748,
9195,
71,
436,
6603,
13,
36299,
5960,
355,
264,
65,
5960,
198,
11748,
9195,
71,
436,
6603,
13,
27144,
11693,
355,
48972,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
4738,
628
] | 3.424242 | 33 |
import cv2 as cv
import numpy as np
img = cv.imread('./Images/sample-image.png')
sorted_contours = sortedContoursByArea(img, larger_to_smaller=True)
# print(areaFinder(contours))
print(areaFinder(sorted_contours))
for c in sorted_contours:
cv.drawContours(img, c, -1, 244, 3)
cv.imshow('img', img)
cv.waitKey(0)
cv.destroyAllWindows() | [
11748,
269,
85,
17,
355,
269,
85,
220,
198,
11748,
299,
32152,
355,
45941,
198,
9600,
796,
269,
85,
13,
320,
961,
7,
4458,
14,
29398,
14,
39873,
12,
9060,
13,
11134,
11537,
198,
82,
9741,
62,
3642,
4662,
796,
23243,
4264,
4662,
38... | 2.383562 | 146 |
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
| [
37811,
6719,
16005,
329,
12759,
526,
15931,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2872,
89,
2238,
13,
18392,
13,
8692,
62,
4164,
1173,
1330,
357,
198,
220,
220,
220,
7308,
9171,
1173,
11,
3297,
62,
392,
62,
66,
43846,
... | 2.877551 | 49 |
from mung.torch_ext.eval import Loss
from ltprg.model.seq import DataParameter, SequenceModelNoInput, SequenceModelInputToHidden, SequenceModelAttendedInput
from ltprg.model.seq import VariableLengthNLLLoss
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# }
# name : [ID FOR MODEL]
# arch_type : [SequenceModelNoInput|SequenceModelInputToHidden]
# dropout : [DROPOUT]
# rnn_layers : [RNN_LAYERS]
# rnn_size : [SIZE OF RNN HIDDEN LAYER]
# embedding_size : [EMBEDDING_SIZE]
# rnn_type : [RNN TYPE]
# (SequenceModelAttendedInput) attn_type : [EMBEDDING|OUTPUT]
# (SequenceModelInputToHidden) conv_input : [INDICATOR OF WHETHER OR NOT TO CONVOLVE THE INPUT]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_kernel : [KERNEL SIZE FOR CONVOLUTION]
# (SequenceModelInputToHidden|SequenceModelAttendedInput) conv_stride : [STRIDE LENGTH FOR CONVOLUTION]
# }
# Expects config of the form:
# {
# data_parameter : {
# seq : [SEQUENCE PARAMETER NAME]
# input : [INPUT PARAMETER NAME]
# },
# evaluations : [
# name : [NAME FOR EVALUATION]
# type : (VariableLengthNLLLoss)
# data : [NAME OF DATA SUBSET]
# (Optional) data_size : [SIZE OF RANDOM SUBET OF DATA TO TAKE]
# ]
# }
| [
6738,
285,
2150,
13,
13165,
354,
62,
2302,
13,
18206,
1330,
22014,
201,
198,
6738,
300,
83,
1050,
70,
13,
19849,
13,
41068,
1330,
6060,
36301,
11,
45835,
17633,
2949,
20560,
11,
45835,
17633,
20560,
2514,
41691,
11,
45835,
17633,
8086,
... | 2.422939 | 558 |
from shutil import move
import piexif
from PIL import Image
def delete_metadata(full_path_to_img):
"""
This function used for remove metadata only from documents, if you send image 'as image' Telegram automatically
removes all metadata at sending. This function removes all metadata via 'piexif' lib, saved image in '/app'
folder, and after that move it to 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.jpg'
"""
piexif.remove(full_path_to_img, "clean_image.jpg")
move("clean_image.jpg", "documents/clean_image.jpg")
def delete_metadata_from_png(full_path_to_img):
"""
This function used for remove metadata only from png documents, if you send image 'as image' Telegram
automatically removes all metadata at sending. This function removes all metadata via 'PIL' lib and saved image
in 'documents' folder.
:param full_path_to_img: path to folder with documents e.g.'documents/image.png'
"""
image = Image.open(full_path_to_img)
image.save("documents/clean_image.png")
| [
6738,
4423,
346,
1330,
1445,
198,
198,
11748,
2508,
87,
361,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
4299,
12233,
62,
38993,
7,
12853,
62,
6978,
62,
1462,
62,
9600,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
2163,... | 3.138728 | 346 |
#py_unit_2.py
import unittest
if __name__=='__main__':
unittest.main() | [
2,
9078,
62,
20850,
62,
17,
13,
9078,
198,
11748,
555,
715,
395,
198,
197,
197,
198,
197,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
197,
403,
715,
395,
13,
12417,
3419
] | 2.054054 | 37 |
from .scheduled_task import ScheduledTask | [
6738,
764,
1416,
704,
6309,
62,
35943,
1330,
27774,
6309,
25714
] | 3.727273 | 11 |
from sys import exit
# ------------------------------------------------------------------------------
global dev_name
global game_title
dev_name = "" # enter your name in the quotes!
game_title = "" # enter the game title in the quotes!
# ------------------------------------------------------------------------------
# ---------- initial values ----------
# these are used to define the starting values of your game variables
init_health = 100
init_mana = 200
init_boss_health = 50
# ---------- game variables ----------
# these will be used during the game
health = 0
mana = 0
boss_health = 0
# ---------- some useful functions ----------
# initialize game variables
# game over
# ---------- room definitions ----------
# here is where you'll create the flow of the game!
# room 0: where the game starts
# ---------- game start ----------
start()
| [
6738,
25064,
1330,
8420,
198,
198,
2,
16529,
26171,
198,
20541,
1614,
62,
3672,
198,
20541,
983,
62,
7839,
198,
7959,
62,
3672,
796,
13538,
1303,
3802,
534,
1438,
287,
262,
13386,
0,
198,
6057,
62,
7839,
796,
13538,
1303,
3802,
262,
... | 4.335 | 200 |
import os, zipfile
# Zip files.
| [
11748,
28686,
11,
19974,
7753,
198,
198,
2,
38636,
3696,
13,
198
] | 2.75 | 12 |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import sys
import pytest
from tornado import httpclient
from .... import oscar as mo
from ....utils import get_next_port
from .. import WebActor, web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from ..api.web import MarsApiEntryHandler
| [
2,
15069,
7358,
12,
1238,
2481,
41992,
4912,
31703,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.714894 | 235 |
"""
MNIST
hello world
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# API
"""
MNIST
"""
sess = tf.InteractiveSession()
# [batch_size, 784]
x = tf.placeholder('float', shape=[None, 784])
y_ = tf.placeholder('float', shape=[None, 10])
"""
"""
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# [batch_size, 28, 28, 1]
x_image = tf.reshape(x, [-1, 28, 28, 1])
# [batch_size, 28, 28, 32]
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# [batch_size, 14, 14, 32]
h_pool1 = max_pool_2x2(h_conv1)
"""
"""
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# [batch_size, 14, 14, 64]
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# [batch_size, 7, 7, 64]
h_pool2 = max_pool_2x2(h_conv2)
"""
"""
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
# [batch_size, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# [batch_size, 1024]
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
"""
dropout
"""
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
"""
"""
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# [batch_size, 10]
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_sum = tf.reduce_sum(y_conv[0])
#
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
#
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#
sess.run(tf.initialize_all_variables())
for i in range(1):
batch = mnist.train.next_batch(50)
# train_accuracy = accuracy.eval(feed_dict={x:batch[0],
# y_: batch[1],
# keep_prob: 1.0})
# print("step %d, training accuracy %g" % (i, train_accuracy))
y_conv_re = y_conv.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
# print(y_conv_re.shape)
print(y_conv_re)
y_sum_re = y_sum.eval(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 1.0})
print(y_sum_re)
train_step.run(feed_dict={x: batch[0],
y_: batch[1],
keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images,
y_: mnist.test.labels,
keep_prob: 1.0}))
| [
37811,
198,
39764,
8808,
198,
31373,
995,
198,
37811,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
1069,
12629,
13,
83,
44917,
82,
13,
10295,
396,
1330,
5128,
62,
7890,... | 1.822338 | 1,531 |
import pbge
from game.content.plotutility import LMSkillsSelfIntro
from game.content import backstory
from pbge.plots import Plot
from pbge.dialogue import Offer, ContextTag
from game.ghdialogue import context
import gears
import game.content.gharchitecture
import game.content.ghterrain
import random
from game import memobrowser
Memo = memobrowser.Memo
# *******************
# *** UTILITIES ***
# *******************
# **************************
# *** RANDOM_LANCEMATE ***
# **************************
# **************************
# *** RLM_Relationship ***
# **************************
# Elements:
# NPC: The NPC who needs a personality
# METROSCENE: The city or whatever that the NPC calls home
#
# These subplots contain a personality for a random (potential) lancemate.
# Also include a means for the lancemate to gain the "RT_LANCEMATE" tag.
| [
11748,
279,
65,
469,
198,
6738,
983,
13,
11299,
13,
29487,
315,
879,
1330,
406,
5653,
74,
2171,
24704,
5317,
305,
198,
6738,
983,
13,
11299,
1330,
37998,
198,
6738,
279,
65,
469,
13,
489,
1747,
1330,
28114,
198,
6738,
279,
65,
469,
... | 3.173145 | 283 |
import json
if __name__ == '__main__':
jsonFile = '/data00/home/zhangrufeng1/projects/detectron2/projects/detr/datasets/mot/mot17/annotations/mot17_train_half.json'
with open(jsonFile, 'r') as f:
infos = json.load(f)
count_dict = dict()
for info in infos["images"]:
if info["file_name"] in ["MOT17-02-FRCNN/img1/000091.jpg"]:
for ann in infos['annotations']:
if ann["image_id"] not in count_dict.keys() and ann["iscrowd"] == 0 and ann["bbox"][2] >= 1e-5 and ann["bbox"][3] >= 1e-5:
count_dict[ann["image_id"]] = 1
elif ann["image_id"] in count_dict.keys() and ann["iscrowd"] == 0:
count_dict[ann["image_id"]] += 1
max_count = 0
min_count = 999
num_freq = 0
for key, value in count_dict.items():
max_count = max(max_count, value)
min_count = min(min_count, value)
if value > 100:
num_freq += 1
print("max_count: {}".format(max_count))
print("min_count: {}".format(min_count))
print("num_freq: {}".format(num_freq))
| [
11748,
33918,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
33918,
8979,
796,
31051,
7890,
405,
14,
11195,
14,
23548,
648,
622,
69,
1516,
16,
14,
42068,
14,
15255,
478,
1313,
17,
14,
42068,
... | 2.178645 | 487 |
# -*- coding: utf-8 -*-
"""Archivo principal para el echobot. Main File for the echobot"""
from fbmq import Page
from flask import Flask, request
# Token generado por la pgina web. Generated token in the facebook web page
PAGE_ACCESS_TOKEN = "COPY_HERE_YOUR_PAGE_ACCES_TOKEN"
# Token generado por nosotros. Token generated by us
VERIFY_TOKEN = "EchoBotChido" # Si cambias este token, asegrate de cambiarlo tambin en la pgina de configuracin del webhook. If you change this token, verify that you changed it too in the webhook configuration.
app = Flask(__name__)
page = Page(PAGE_ACCESS_TOKEN) # Generamos la instancia de la pgina de facebook. We make the facebook page instance
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True, threaded=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
19895,
23593,
10033,
31215,
1288,
304,
354,
672,
313,
13,
8774,
9220,
329,
262,
304,
354,
672,
313,
37811,
198,
6738,
277,
20475,
80,
1330,
7873,
198,
6738,
42903... | 2.914179 | 268 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.misc
import random
import os
import imageio
#############################
# global variables #
#############################
root_dir = "/home/water/DATA/camvid-master"
data_dir = os.path.join(root_dir, "701_StillsRaw_full") # train data
label_dir = os.path.join(root_dir, "LabeledApproved_full") # train label
label_colors_file = os.path.join(root_dir, "label_colors.txt") # color to label
val_label_file = os.path.join(root_dir, "val.csv") # validation file
train_label_file = os.path.join(root_dir, "train.csv") # train file
# create dir for label index
label_idx_dir = os.path.join(root_dir, "Labeled_idx")
if not os.path.exists(label_idx_dir):
os.makedirs(label_idx_dir)
label2color = {}
color2label = {}
label2index = {}
index2label = {}
'''debug function'''
if __name__ == '__main__':
print("it starts working")
divide_train_val(random_seed=1)
parse_label()
print("process finished") | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29... | 2.525275 | 455 |
#!/usr/bin/env python
import websocket
import time
try:
import thread
except ImportError:
import _thread as thread
runs = 100
if __name__ == "__main__":
websocket.enableTrace(True)
url = "ws://localhost:8082"
ws = websocket.WebSocketApp(url, on_message = on_message, on_error = on_error, on_close = on_close)
ws.on_open = on_open
ws.run_forever()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
11748,
2639,
5459,
201,
198,
11748,
640,
201,
198,
201,
198,
28311,
25,
201,
198,
220,
220,
220,
1330,
4704,
201,
198,
16341,
17267,
12331,
25,
201,
198,
220,
220,
220... | 2.288136 | 177 |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
from matplotlib import rcParams
import statsmodels.api as sm
from statsmodels.formula.api import ols
df = pd.read_csv('kc_house_data.csv')
# print(df.head())
# print(df.isnull().any())
# print(df.describe())
# fig = plt.figure(figsize=(12, 6))
# sqft = fig.add_subplot(121)
# cost = fig.add_subplot(122)
#
# sqft.hist(df.sqft_living, bins=80)
# sqft.set_xlabel('Ft^2')
# sqft.set_title("Histogram of House Square Footage")
#
# cost.hist(df.price, bins=80)
# cost.set_xlabel('Price ($)')
# cost.set_title("Histogram of Housing Prices")
#
# plt.show()
# m = ols('price ~ sqft_living', df).fit()
# print(m.summary())
# m = ols('price ~ sqft_living + bedrooms + grade + condition',df).fit()
# print (m.summary())
sns.jointplot(x="sqft_living", y="price", data=df, kind='reg', fit_reg=True, size=7)
plt.show()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
355,
9756,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
... | 2.518919 | 370 |
# Dialogs for setting filter parameters.
from PyQt5.QtWidgets import QLabel, QGridLayout, QPushButton, \
QLineEdit, QVBoxLayout, QHBoxLayout, QDialog, QComboBox, QWidget
from PyQt5.QtCore import pyqtSignal
| [
2,
21269,
18463,
329,
4634,
8106,
10007,
13,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
33986,
11,
1195,
41339,
32517,
11,
1195,
49222,
21864,
11,
3467,
198,
197,
197,
9711,
500,
18378,
11,
1195,
53,
... | 2.65 | 80 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from .build import build, make_path
from parlai.utils.misc import warn_once
from parlai.core.teachers import ParlAIDialogTeacher
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,... | 3.43 | 100 |
from math import ceil, sqrt
from problem import Problem
from utils.math import gcd
| [
6738,
10688,
1330,
2906,
346,
11,
19862,
17034,
198,
198,
6738,
1917,
1330,
20647,
198,
6738,
3384,
4487,
13,
11018,
1330,
308,
10210,
628
] | 3.541667 | 24 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 10:37:04 2021
@author: martin urbanec
"""
#calculates trajectory of small mass positioned close to L4 Lagrange point
#creates gif as output
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, PillowWriter
DistanceJ = 778570000000. # m JUPITER FROM SUN
G = 6.67259*10**-11
Jupiter_mass = 1.8982*10**27 # kg
Sun_mass = 1.989*10**30 # kg
M1=Sun_mass
M2=Jupiter_mass
a=DistanceJ
Ang_vel=math.sqrt(G*(M1+M2)/(a**3)) #FROM KEPLER LAW
P=2.*math.pi/Ang_vel #Period
#center of mass is located at [0,0] massive object (Sun) is located at -r1, secondary object (Jupiter) is located at +r2
r2=M1*a/(M1+M2)
r1=M2*a/(M1+M2)
# Calculations are done in corotating frame
# s1, s2 are distances from sources of gravity (Sun, Jupiter)
#Force per unit mass (acceleration) in x direction
# ax = \partial pot(x,y) / \partial x - 2 \Omega \times v
# in our case \Omega=(0,0,\Omega) and v=(vx,vy,0)
# second term is corresponding to Coriolis force
pot2=np.vectorize(pot)
#TRAJECTORY OF ASTEROID CLOSE STARTING CLOSE TO L4 in rest with respect to the rotating frame
x0=a/2.-r1
y0=math.sqrt(3)*a/2.
x0=1.005*x0
y0=1.005*y0
vx0=0.
vy0=0.
steps=300000
#initialize arrays
x= np.linspace(0, 10, steps)
y= np.linspace(0, 10, steps)
vx=np.linspace(0, 10, steps)
vy=np.linspace(0, 10, steps)
t= np.linspace(0, 10, steps)
x[0]=x0
vx[0]=vx0
y[0]=y0
vy[0]=vy0
t[0]=0.
i=0
timescale = math.sqrt((a*a)**1.5 / G/(M1+M2))
dt=timescale/1000.
#using 4th order Runge-Kutta to solve the a_x= d v_x/ dt
# dt is constant set to timescale/1000
for i in range (1,steps):
t[i]=(t[i-1]+dt)
Kx1=dt*ax(x[i-1],y[i-1],vx[i-1],vy[i-1])
Kx2=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx1/2.,vy[i-1])
Kx3=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx2/2.,vy[i-1])
Kx4=dt*ax(x[i-1],y[i-1],vx[i-1]+Kx3,vy[i-1])
vx[i]=vx[i-1] + Kx1/6. + Kx2/3. + Kx3/3. + Kx4/6.
Ky1=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1])
Ky2=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky1/2.)
Ky3=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky2/2.)
Ky4=dt*ay(x[i-1],y[i-1],vx[i-1],vy[i-1]+Ky3)
vy[i]=vy[i-1] + Ky1/6. + Ky2/3. + Ky3/3. + Ky4/6.
x[i]=x[i-1] + (vx[i-1]+vx[i])*dt/2. #taking the average of velocities
y[i]=y[i-1] + (vy[i-1]+vy[i])*dt/2.
dt=timescale/1000.
#LAGRANGE POINTS
#L3, L1 and L2 points are lying on x-axis (left to right) for small values of alpha=M2/(M1+M2) the positions can are given analytically (to first order in alpha)
alpha=M2/(M1+M2)
L1X=a*(1.-(alpha/3.)**(1./3.))
L1Y=0.
P1=pot(L1X,L1Y)
L2X=a*(1.+(alpha/3.)**(1./3.))
L2Y=0.
P2=pot(L2X,L2Y)
L3X=-a*(1. + 5.*alpha/12)
L3Y=0.
P3=pot(L3X,L3Y)
L4X=a/2.-r1
L4Y=math.sqrt(3)*a/2.
P4=pot2(L4X,L4Y)
P0=pot(x0,y0)
steps=301
xx= np.arange(-2*a, 2.*a,a/steps)
yy= np.arange(-1.5*a, 1.5*a,a/steps)
X, Y = np.meshgrid(xx, yy)
Z1=pot2(X,Y)
fig, ax = plt.subplots()
ax.set_aspect('equal','box')
ln1, = plt.plot([],[], 'k+')
ln2, = plt.plot([], [], 'm*')
XXX,YYY=[],[]
zed= np.arange(60)
ani = FuncAnimation(fig, update, np.arange(300), init_func=init)
plt.show()
writer = PillowWriter(fps=25)
ani.save("Animation.gif", writer=writer)
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2365,
1478,
838,
25,
2718,
25,
3023,
33448,
198,
198,
31,
9800,
25,
11277,
259,
7876,
721,
198,
37811,
198,
198,
2,
9948,
3129,
689... | 1.855581 | 1,738 |
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
33918,
198,
198,
6738,
764,
11321,
1330,
14151,
11627,
40450,
198,
6738,
11485,
26791,
1330,
357,
198,
220,
220,
220,
29677,
273,
12331,
11,
198,
220,
220,
220... | 2.933333 | 60 |
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
import sys
from os.path import join, isdir, abspath, expanduser, exists
import shutil
from conda.cli.common import add_parser_prefix, get_prefix
from conda.cli.conda_argparse import ArgumentParser
from conda_build.main_build import args_func
from conda_build.post import mk_relative_osx
from conda_build.utils import _check_call, rec_glob
from conda.install import linked
def relink_sharedobjects(pkg_path, build_prefix):
'''
invokes functions in post module to relink to libraries in conda env
:param pkg_path: look for shared objects to relink in pkg_path
:param build_prefix: path to conda environment which contains lib/. to find
runtime libraries.
.. note:: develop mode builds the extensions in place and makes a link to
package in site-packages/. The build_prefix points to conda environment
since runtime libraries should be loaded from environment's lib/. first
'''
# find binaries in package dir and make them relocatable
bin_files = rec_glob(pkg_path, ['.so'])
for b_file in bin_files:
if sys.platform == 'darwin':
mk_relative_osx(b_file, build_prefix)
else:
print("Nothing to do on Linux or Windows.")
def write_to_conda_pth(sp_dir, pkg_path):
'''
Append pkg_path to conda.pth in site-packages directory for current
environment. Only add path if it doens't already exist.
:param sp_dir: path to site-packages/. directory
:param pkg_path: the package path to append to site-packes/. dir.
'''
c_file = join(sp_dir, 'conda.pth')
with open(c_file, 'a') as f:
with open(c_file, 'r') as cf:
# make sure file exists, before we try to read from it hence nested
# in append with block
# expect conda.pth to be small so read it all in at once
pkgs_in_dev_mode = cf.readlines()
# only append pkg_path if it doesn't already exist in conda.pth
if pkg_path + '\n' in pkgs_in_dev_mode:
print("path exits, skipping " + pkg_path)
else:
f.write(pkg_path + '\n')
print("added " + pkg_path)
def get_site_pkg(prefix, py_ver):
'''
Given the path to conda environment, find the site-packages directory
:param prefix: path to conda environment. Look here for current
environment's site-packages
:returns: absolute path to site-packages directory
'''
# get site-packages directory
stdlib_dir = join(prefix, 'Lib' if sys.platform == 'win32' else
'lib/python%s' % py_ver)
sp_dir = join(stdlib_dir, 'site-packages')
return sp_dir
def get_setup_py(path_):
''' Return full path to setup.py or exit if not found '''
# build path points to source dir, builds are placed in the
setup_py = join(path_, 'setup.py')
if not exists(setup_py):
sys.exit("No setup.py found in {0}. Exiting.".format(path_))
return setup_py
def clean(setup_py):
'''
This invokes:
$ python setup.py clean
:param setup_py: path to setup.py
'''
# first call setup.py clean
cmd = ['python', setup_py, 'clean']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def build_ext(setup_py):
'''
Define a develop function - similar to build function
todo: need to test on win32 and linux
It invokes:
$ python setup.py build_ext --inplace
:param setup_py: path to setup.py
'''
# next call setup.py develop
cmd = ['python', setup_py, 'build_ext', '--inplace']
_check_call(cmd)
print("Completed: " + " ".join(cmd))
print("===============================================")
def uninstall(sp_dir, pkg_path):
'''
Look for pkg_path in conda.pth file in site-packages directory and remove
it. If pkg_path is not found in conda.pth, it means package is not
installed in 'development mode' via conda develop.
:param sp_dir: path to site-packages/. directory
:param pkg_path: the package path to be uninstalled.
'''
o_c_pth = join(sp_dir, 'conda.pth')
n_c_pth = join(sp_dir, 'conda.pth.temp')
found = False
with open(n_c_pth, 'w') as new_c:
with open(o_c_pth, 'r') as orig_c:
for line in orig_c:
if line != pkg_path + '\n':
new_c.write(line)
else:
print("uninstalled: " + pkg_path)
found = True
if not found:
print("conda.pth does not contain path: " + pkg_path)
print("package not installed via conda develop")
shutil.move(n_c_pth, o_c_pth)
if __name__ == '__main__':
main()
| [
2,
357,
66,
8,
6389,
13814,
30437,
11,
3457,
13,
1220,
2638,
1378,
18487,
13814,
13,
952,
198,
2,
1439,
6923,
33876,
198,
2,
198,
2,
1779,
64,
318,
9387,
739,
262,
2846,
286,
262,
347,
10305,
513,
12,
565,
682,
5964,
13,
198,
2,... | 2.542218 | 1,966 |
#!/bin/env python3
from transformers import TFBertForTokenClassification
from data_preparation.data_preparation_pos import MBERTTokenizer as MBERT_Tokenizer_pos
import sys
if __name__ == "__main__":
if len(sys.argv) > 1:
modelname = sys.argv[1]
else:
modelname = "ltgoslo/norbert"
model = TFBertForTokenClassification.from_pretrained(modelname, from_pt=True)
tokenizer = MBERT_Tokenizer_pos.from_pretrained(modelname, do_lower_case=False)
print(tokenizer)
| [
2,
48443,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
6121,
364,
1330,
24958,
33,
861,
1890,
30642,
9487,
2649,
198,
6738,
1366,
62,
3866,
1845,
341,
13,
7890,
62,
3866,
1845,
341,
62,
1930,
1330,
10771,
1137,
15751,
4233,
7509,
355,
... | 2.572917 | 192 |
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import base
from nca47.common.i18n import _
from nca47.common.i18n import _LI, _LE
from nca47.common.exception import Nca47Exception
from oslo_log import log
from nca47.api.controllers.v1 import tools
from nca47.manager.central import CentralManager
from nca47.common.exception import ParamFormatError
from amqp.five import string
from nca47.common.exception import BadRequest
from oslo_messaging import RemoteError
from nca47.common import exception
LOG = log.getLogger(__name__)
| [
6738,
28686,
5439,
62,
46911,
1634,
1330,
33918,
26791,
355,
33918,
198,
6738,
299,
6888,
2857,
13,
15042,
13,
3642,
36667,
13,
85,
16,
1330,
2779,
198,
6738,
299,
6888,
2857,
13,
11321,
13,
72,
1507,
77,
1330,
4808,
198,
6738,
299,
... | 3.193182 | 176 |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
628,
628,
628,
628,
198
] | 3.5 | 28 |
from math import exp
from random import seed
from random import random
seed(1)
dataset = [[2.7810836,2.550537003,0],
[1.465489372,2.362125076,0],
[3.396561688,4.400293529,0],
[1.38807019,1.850220317,0],
[3.06407232,3.005305973,0],
[7.627531214,2.759262235,1],
[5.332441248,2.088626775,1],
[6.922596716,1.77106367,1],
[8.675418651,-0.242068655,1],
[7.673756466,3.508563011,1]]
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
network = initialize_network(n_inputs, 2, n_outputs)
train_network(network, dataset, 0.5, 30, n_outputs)
for layer in network:
print(layer) | [
6738,
10688,
1330,
1033,
198,
6738,
4738,
1330,
9403,
198,
6738,
4738,
1330,
4738,
198,
220,
198,
220,
198,
220,
198,
220,
628,
220,
198,
220,
198,
220,
198,
198,
28826,
7,
16,
8,
198,
19608,
292,
316,
796,
16410,
17,
13,
3695,
15... | 2.029316 | 307 |
"""\
Copyright (c) 2009 Paul J. Davis <paul.joseph.davis@gmail.com>
This file is part of hypercouch which is released uner the MIT license.
"""
import time
import unittest
import couchdb
COUCHURI = "http://127.0.0.1:5984/"
TESTDB = "hyper_tests"
| [
37811,
59,
198,
15269,
357,
66,
8,
3717,
3362,
449,
13,
7802,
1279,
79,
2518,
13,
73,
577,
746,
13,
67,
23401,
31,
14816,
13,
785,
29,
198,
1212,
2393,
318,
636,
286,
8718,
66,
7673,
543,
318,
2716,
555,
263,
262,
17168,
5964,
1... | 2.725275 | 91 |
from .sample_setup import *
ctr = setup_control()
eff = ColorMeanderEffect(ctr, "solid")
eff.launch_rt()
input()
eff.stop_rt()
ctr.turn_off()
| [
6738,
764,
39873,
62,
40406,
1330,
1635,
628,
198,
24087,
796,
9058,
62,
13716,
3419,
198,
14822,
796,
5315,
5308,
4066,
18610,
7,
24087,
11,
366,
39390,
4943,
198,
14822,
13,
35681,
62,
17034,
3419,
198,
15414,
3419,
198,
14822,
13,
... | 2.716981 | 53 |
"""Torch NodeEmbedding."""
from datetime import timedelta
import torch as th
from ...backend import pytorch as F
from ...utils import get_shared_mem_array, create_shared_mem_array
_STORE = None
def reset_trace(self):
"""Clean up the trace of the indices of embeddings
used in the training step(s).
"""
self._trace = []
| [
37811,
15884,
354,
19081,
31567,
6048,
278,
526,
15931,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
28034,
355,
294,
198,
6738,
2644,
1891,
437,
1330,
12972,
13165,
354,
355,
376,
198,
6738,
2644,
26791,
1330,
651,
62,
28710,
... | 2.767442 | 129 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberTeam, Team
from sentry.testutils import TestCase, PermissionTestCase
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
6738,
1908,
563,
13,
27530,
1330,
12275,
27608,
11,
12275,
27608,
15592,
11,
4816,
198,
6738,
1908,
... | 4.018182 | 55 |
import abc
import datetime
import enum
import logging
import time
import typing
import aysncio
import Layout as layouts
from decimal import Decimal
from pyserum.market import Market
from pyserum.open_orders_account import OpenOrdersAccount
from solana.account import Account
from solana.publickey import PublicKey
from solana.rpc.commitment import Single
from solana.rpc.types import MemcmpOpts, TokenAccountOpts, RPCMethod, RPCResponse
from spl.token.client import Token as SplToken
from spl.token.constants import TOKEN_PROGRAM_ID
from Constants import NUM_MARKETS, NUM_TOKENS, SOL_DECIMALS, SYSTEM_PROGRAM_ADDRESS, MAX_RATE,OPTIMAL_RATE,OPTIMAL_UTIL
from Context import Context
from Decoder import decode_binary, encode_binary, encode_key
class Token:
def __init__(self, name: str, mint: PublicKey, decimals: Decimal):
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.name: str = name.upper()
self.mint: PublicKey = mint
self.decimals: Decimal = decimals
# TokenMetadatas are equal if they have the same mint address.
def __eq__(self, other):
if hasattr(other, 'mint'):
return self.mint == other.mint
return False
def __str__(self) -> str:
return f" Token '{self.name}' [{self.mint} ({self.decimals} decimals)] "
def __repr__(self) -> str:
return f"{self}"
SolToken = Token("SOL", SYSTEM_PROGRAM_ADDRESS, SOL_DECIMALS)
def __str__(self) -> str:
return f""" BasketToken [{self.token}]:
Vault: {self.vault}
Index: {self.index}
"""
def __repr__(self) -> str:
return f"{self}"
class TokenValue:
def __init__(self, token: Token, value: Decimal):
self.token = token
self.value = value
def __str__(self) -> str:
return f" TokenValue: {self.value:>18,.8f} {self.token.name} "
def __repr__(self) -> str:
return f"{self}"
class OwnedTokenValue:
def __init__(self, owner: PublicKey, token_value: TokenValue):
self.owner = owner
self.token_value = token_value
def __str__(self) -> str:
return f"[{self.owner}]: {self.token_value}"
def __repr__(self) -> str:
return f"{self}"
class MarketMetadata:
def __init__(self, name: str, address: PublicKey, base: BasketToken, quote: BasketToken,
spot: PublicKey, oracle: PublicKey, decimals: Decimal):
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.name: str = name
self.address: PublicKey = address
self.base: BasketToken = base
self.quote: BasketToken = quote
self.spot: PublicKey = spot
self.oracle: PublicKey = oracle
self.decimals: Decimal = decimals
self._market = None
class Group(AddressableAccount):
#TODO Test this method, implement get_ui_total_borrow,get_ui_total_deposit
def get_deposit_rate(self,token_index: int):
borrow_rate = self.get_borrow_rate(token_index)
total_borrows = self.get_ui_total_borrow(token_index)
total_deposits = self.get_ui_total_deposit(token_index)
if total_deposits == 0 and total_borrows == 0: return 0
elif total_deposits == 0: return MAX_RATE
utilization = total_borrows / total_deposits
return utilization * borrow_rate
#TODO Test this method, implement get_ui_total_borrow, get_ui_total_deposit
class TokenAccount(AddressableAccount):
def __str__(self) -> str:
return f" Token: Mint: {self.mint}, Owner: {self.owner}, Amount: {self.amount} "
class OpenOrders(AddressableAccount):
    """Readable rendering of a Serum OpenOrders account."""

    # Sometimes pyserum wants to take its own OpenOrdersAccount as a parameter (e.g. in settle_funds())
    def __str__(self) -> str:
        # Substitute "None" when a list is empty so the template stays readable.
        orders = ", ".join(map(str, self.orders)) or "None"
        client_ids = ", ".join(map(str, self.client_ids)) or "None"
        return f""" OpenOrders:
    Flags: {self.account_flags}
    Program ID: {self.program_id}
    Address: {self.address}
    Market: {self.market}
    Owner: {self.owner}
    Base Token: {self.base_token_free:,.8f} of {self.base_token_total:,.8f}
    Quote Token: {self.quote_token_free:,.8f} of {self.quote_token_total:,.8f}
    Referrer Rebate Accrued: {self.referrer_rebate_accrued}
    Orders:
        {orders}
    Client IDs:
        {client_ids}
"""
class BalanceSheet:
    """Per-token balance summary: liabilities plus settled and unsettled assets.

    NOTE(review): __str__ references self.assets, self.value and
    self.collateral_ratio, which are not defined in this fragment --
    presumably properties on the full class; confirm.
    """

    def __init__(self, token: Token, liabilities: Decimal, settled_assets: Decimal, unsettled_assets: Decimal):
        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
        self.token: Token = token
        self.liabilities: Decimal = liabilities
        self.settled_assets: Decimal = settled_assets
        self.unsettled_assets: Decimal = unsettled_assets

    def __str__(self) -> str:
        # token may legitimately be None (e.g. an aggregate sheet).
        name = "Unspecified"
        if self.token is not None:
            name = self.token.name
        return f""" BalanceSheet [{name}]:
    Assets : {self.assets:>18,.8f}
    Settled Assets : {self.settled_assets:>18,.8f}
    Unsettled Assets : {self.unsettled_assets:>18,.8f}
    Liabilities : {self.liabilities:>18,.8f}
    Value : {self.value:>18,.8f}
    Collateral Ratio : {self.collateral_ratio:>18,.2%}
"""

    def __repr__(self) -> str:
        return f"{self}"
class MarginAccount(AddressableAccount):
    """Partial view of a Mango margin account (only open-orders loading is visible)."""

    def load_open_orders_accounts(self, context: Context, group: Group) -> None:
        """Fetch and cache the OpenOrders account for every populated slot."""
        for index, oo in enumerate(self.open_orders):
            key = oo
            # The system program address marks an unused open-orders slot.
            if key != SYSTEM_PROGRAM_ADDRESS:
                self.open_orders_accounts[index] = OpenOrders.load(context, key, group.basket_tokens[index].token.decimals, group.shared_quote_token.token.decimals)
class MarginAccountMetadata:
    """Bundles a margin account with its computed balance sheet and per-token balances."""

    def __init__(self, margin_account: MarginAccount, balance_sheet: BalanceSheet, balances: typing.List[TokenValue]):
        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
        self.margin_account = margin_account
        self.balance_sheet = balance_sheet
        self.balances = balances
def _notebook_tests():
    """Import-time smoke test of LiquidationEvent rendering (notebook style).

    Logging is silenced for the duration and restored afterwards.
    """
    log_level = logging.getLogger().level
    try:
        logging.getLogger().setLevel(logging.CRITICAL)
        from Constants import SYSTEM_PROGRAM_ADDRESS
        from Context import default_context
        balances_before = [
            TokenValue(TokenLookup.find_by_name(default_context, "ETH"), Decimal(1)),
            TokenValue(TokenLookup.find_by_name(default_context, "BTC"), Decimal("0.1")),
            TokenValue(TokenLookup.find_by_name(default_context, "USDT"), Decimal(1000))
        ]
        balances_after = [
            TokenValue(TokenLookup.find_by_name(default_context, "ETH"), Decimal(1)),
            TokenValue(TokenLookup.find_by_name(default_context, "BTC"), Decimal("0.05")),
            TokenValue(TokenLookup.find_by_name(default_context, "USDT"), Decimal(2000))
        ]
        timestamp = datetime.datetime(2021, 5, 17, 12, 20, 56)
        event = LiquidationEvent(timestamp, "signature", SYSTEM_PROGRAM_ADDRESS, SYSTEM_PROGRAM_ADDRESS,
                                 balances_before, balances_after)
        # NOTE(review): "Liqudation" below presumably mirrors the same typo in
        # LiquidationEvent.__str__ -- fix both together if it is corrected there.
        assert(str(event) == """ Liqudation Event at 2021-05-17 12:20:56
    Signature: signature
    Wallet: 11111111111111111111111111111111
    Margin Account: 11111111111111111111111111111111
    Changes:
             0.00000000 ETH
            -0.05000000 BTC
         1,000.00000000 USDT
""")
    finally:
        # Always restore the caller's log level.
        logging.getLogger().setLevel(log_level)


_notebook_tests()
del _notebook_tests
# Manual smoke test: decodes a hard-coded group account blob and prints it.
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)

    import base64
    from Constants import SYSTEM_PROGRAM_ADDRESS
    from Context import default_context

    # Just use any public key here
    fake_public_key = SYSTEM_PROGRAM_ADDRESS
encoded = "AwAAAAAAAACCaOmpoURMK6XHelGTaFawcuQ/78/15LAemWI8jrt3SRKLy2R9i60eclDjuDS8+p/ZhvTUd9G7uQVOYCsR6+BhmqGCiO6EPYP2PQkf/VRTvw7JjXvIjPFJy06QR1Cq1WfTonHl0OjCkyEf60SD07+MFJu5pVWNFGGEO/8AiAYfduaKdnFTaZEHPcK5Eq72WWHeHg2yIbBF09kyeOhlCJwOoG8O5SgpPV8QOA64ZNV4aKroFfADg6kEy/wWCdp3fv0O4GJgAAAAAPH6Ud6jtjwAAQAAAAAAAADiDkkCi9UOAAEAAAAAAAAADuBiYAAAAACNS5bSy7soAAEAAAAAAAAACMvgO+2jCwABAAAAAAAAAA7gYmAAAAAAZFeDUBNVhwABAAAAAAAAABtRNytozC8AAQAAAAAAAABIBGiCcyaEZdNhrTyeqUY692vOzzPdHaxAxguht3JQGlkzjtd05dX9LENHkl2z1XvUbTNKZlweypNRetmH0lmQ9VYQAHqylxZVK65gEg85g27YuSyvOBZAjJyRmYU9KdCO1D+4ehdPu9dQB1yI1uh75wShdAaFn2o4qrMYwq3SQQEAAAAAAAAAAiH1PPJKAuh6oGiE35aGhUQhFi/bxgKOudpFv8HEHNCFDy1uAqR6+CTQmradxC1wyyjL+iSft+5XudJWwSdi7wvphsxb96x7Obj/AgAAAAAKlV4LL5ow6r9LMhIAAAAADvsOtqcVFmChDPzPnwAAAE33lx1h8hPFD04AAAAAAAA8YRV3Oa309B2wGwAAAAAA+yPBZRlZz7b605n+AQAAAACgmZmZmZkZAQAAAAAAAAAAMDMzMzMzMwEAAAAAAAAA25D1XcAtRzSuuyx3U+X7aE9vM1EJySU9KprgL0LMJ/vat9+SEEUZuga7O5tTUrcMDYWDg+LYaAWhSQiN2fYk7aCGAQAAAAAAgIQeAAAAAAAA8gUqAQAAAAYGBgICAAAA"
    decoded = base64.b64decode(encoded)
    # Wrap the raw bytes in a fake AccountInfo so Group.parse can consume it.
    group_account_info = AccountInfo(fake_public_key, False, Decimal(0), fake_public_key, Decimal(0), decoded)
    group = Group.parse(default_context, group_account_info)
    print("\n\nThis is hard-coded, not live information!")
    print(group)

    print(TokenLookup.find_by_name(default_context, "ETH"))
    print(TokenLookup.find_by_name(default_context, "BTC"))
    # USDT
    print(TokenLookup.find_by_mint(default_context, PublicKey("Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB")))

    # Live RPC round-trips: single and batched account fetches.
    single_account_info = AccountInfo.load(default_context, default_context.dex_program_id)
    print("DEX account info", single_account_info)
    multiple_account_info = AccountInfo.load_multiple(default_context, [default_context.program_id, default_context.dex_program_id])
    print("Mango program and DEX account info", multiple_account_info)

    # Render a sample liquidation event (BTC halved, USDT doubled).
    balances_before = [
        TokenValue(TokenLookup.find_by_name(default_context, "ETH"), Decimal(1)),
        TokenValue(TokenLookup.find_by_name(default_context, "BTC"), Decimal("0.1")),
        TokenValue(TokenLookup.find_by_name(default_context, "USDT"), Decimal(1000))
    ]
    balances_after = [
        TokenValue(TokenLookup.find_by_name(default_context, "ETH"), Decimal(1)),
        TokenValue(TokenLookup.find_by_name(default_context, "BTC"), Decimal("0.05")),
        TokenValue(TokenLookup.find_by_name(default_context, "USDT"), Decimal(2000))
    ]
    timestamp = datetime.datetime(2021, 5, 17, 12, 20, 56)
    event = LiquidationEvent(timestamp, "signature", SYSTEM_PROGRAM_ADDRESS, SYSTEM_PROGRAM_ADDRESS,
                             balances_before, balances_after)
    print(event)
11748,
450,
66,
198,
11748,
4818,
8079,
198,
11748,
33829,
198,
11748,
18931,
198,
11748,
640,
198,
11748,
19720,
198,
198,
11748,
257,
893,
10782,
952,
198,
11748,
47639,
355,
38489,
198,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
... | 2.296288 | 4,580 |
# Generated by Django 3.2.8 on 2021-11-29 05:47
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
1959,
8870,
25,
2857,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.metric.v1 import capabilities
# Canned capabilities-API response body used as a fixture by the tests below.
BODY = {
    'aggregation_methods': ['mean', 'max', 'avg'],
}
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.630435 | 184 |
import unittest
import os
from pyxdsm.XDSM import XDSM, __file__
from numpy.distutils.exec_command import find_executable
# Allow running this test module directly: `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
6738,
12972,
24954,
5796,
13,
55,
5258,
44,
1330,
1395,
5258,
44,
11,
11593,
7753,
834,
198,
6738,
299,
32152,
13,
17080,
26791,
13,
18558,
62,
21812,
1330,
1064,
62,
18558,
18187,
628,
19... | 2.6875 | 64 |
# -*- coding: utf-8 -*-
from wecom_sdk.base.crypt import encrypt_msg, decrypt_msg
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
356,
785,
62,
21282,
74,
13,
8692,
13,
29609,
1330,
34117,
62,
19662,
11,
42797,
62,
19662,
198
] | 2.441176 | 34 |
import psycopg2
from decouple import config
import pandas as pd

import dbconnect

# Ad-hoc introspection script: dump every column of every table in the
# connected database's `public` schema as a pandas DataFrame.
cursor, connection = dbconnect.connect_to_db()
try:
    sql = """
    SELECT "table_name","column_name", "data_type", "table_schema"
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE "table_schema" = 'public'
    ORDER BY table_name
    """
    df = pd.read_sql(sql, con=connection)
    print(df.to_string())
finally:
    # Release the DB handles even if the query fails (the original leaked both).
    cursor.close()
    connection.close()
11748,
17331,
22163,
70,
17,
198,
6738,
875,
43846,
1330,
4566,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
20613,
8443,
198,
198,
66,
21471,
11,
4637,
796,
20613,
8443,
13,
8443,
62,
1462,
62,
9945,
3419,
198,
25410,
796,
37227... | 2.830645 | 124 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
from agate import Table
from agate.aggregations import Sum
from agate.computations import Percent
from agate.data_types import Number, Text
from agate.testcase import AgateTestCase
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
269,
12501,
4402,
1330,
4280,
4402,
198,
16341,
17267,
123... | 3.106195 | 113 |
import os
os.makedirs(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'build')), exist_ok=True)
from .chamfer_distance import ChamferDistance
| [
11748,
28686,
198,
418,
13,
76,
4335,
17062,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
492,
3256,
705,
11249,
11537,
828,
2152,
62,
482,
28... | 2.644068 | 59 |
from functools import partial
from impartial import impartial
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
32521,
1330,
32521,
628,
628,
628,
628,
628,
628,
198
] | 3.947368 | 19 |
'''
Python script for uploading/downloading scripts for use with the game Screeps.
http://support.screeps.com/hc/en-us/articles/203022612-Commiting-scripts-using-direct-API-access
Usage:
#
# general help/usage
#
python3 manager.py --help
#
# retrieve all scripts from the game and store them
# in the folder "some_folder"
#
python3 manager.py from_game some_folder
#
# send all *.js files to the game
#
python3 manager.py to_game some_folder
WARNING: Use at your own risk! Make backups of all your game content!
'''
import sys
import os
import argparse
import json
import requests
from requests.auth import HTTPBasicAuth
# REST endpoint for code upload/download, and the environment variables
# holding the account credentials.
SCREEPS_ENDPOINT = 'https://screeps.com/api/user/code'
USER_ENV = 'SCREEPS_USER'
PASSWORD_ENV = 'SCREEPS_PASSWORD'

# Sub-command names accepted on the command line.
TO_SCREEPS = 'to_game'
FROM_SCREEPS = 'from_game'

# NOTE(review): main() is defined elsewhere in this file (not visible in this fragment).
if __name__ == '__main__':
    main()
| [
7061,
6,
198,
37906,
4226,
329,
33794,
14,
15002,
278,
14750,
329,
779,
351,
262,
983,
1446,
260,
25386,
13,
198,
198,
4023,
1378,
11284,
13,
1416,
260,
25386,
13,
785,
14,
71,
66,
14,
268,
12,
385,
14,
26845,
14,
1238,
1270,
2490... | 2.673591 | 337 |
import logging
from django.views.generic import TemplateView
from ...models import Feedback
from ..chat.chat_context_mixin import ChatContextMixin
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
198,
6738,
2644,
27530,
1330,
37774,
198,
6738,
11485,
17006,
13,
17006,
62,
22866,
62,
19816,
259,
1330,
24101,
21947,
35608,
259,
198,
198,
6404,
... | 3.54717 | 53 |
from typing import Optional
import bisect
from tangled_up_in_unicode.u14_0_0_data.prop_list_to_property import prop_list_to_property
from tangled_up_in_unicode.u14_0_0_data.blocks_to_block_start import blocks_to_block_start
from tangled_up_in_unicode.u14_0_0_data.blocks_to_block_end import blocks_to_block_end
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_age_short_to_long import property_value_alias_age_short_to_long
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_bc_short_to_long import property_value_alias_bc_short_to_long
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_blk_long_to_short import property_value_alias_blk_long_to_short
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_ccc_short_to_long import property_value_alias_ccc_short_to_long
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_ea_short_to_long import property_value_alias_ea_short_to_long
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_gc_short_to_long import property_value_alias_gc_short_to_long
from tangled_up_in_unicode.u14_0_0_data.property_value_alias_sc_long_to_short import property_value_alias_sc_long_to_short
from tangled_up_in_unicode.u14_0_0_data.scripts_to_script_start import scripts_to_script_start
from tangled_up_in_unicode.u14_0_0_data.scripts_to_script_end import scripts_to_script_end
from tangled_up_in_unicode.u14_0_0_data.east_asian_width_to_east_asian_width_start import east_asian_width_to_east_asian_width_start
from tangled_up_in_unicode.u14_0_0_data.east_asian_width_to_east_asian_width_end import east_asian_width_to_east_asian_width_end
from tangled_up_in_unicode.u14_0_0_data.derived_age_to_age_start import derived_age_to_age_start
from tangled_up_in_unicode.u14_0_0_data.derived_age_to_age_end import derived_age_to_age_end
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_name_start import unicode_data_to_name_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_category_start import unicode_data_to_category_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_category_end import unicode_data_to_category_end
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_bidirectional_start import unicode_data_to_bidirectional_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_bidirectional_end import unicode_data_to_bidirectional_end
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_decimal_start import unicode_data_to_decimal_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_digit_start import unicode_data_to_digit_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_numeric_start import unicode_data_to_numeric_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_combining_start import unicode_data_to_combining_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_mirrored_start import unicode_data_to_mirrored_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_mirrored_end import unicode_data_to_mirrored_end
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_decomposition_start import unicode_data_to_decomposition_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_uppercase_start import unicode_data_to_uppercase_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_lowercase_start import unicode_data_to_lowercase_start
from tangled_up_in_unicode.u14_0_0_data.unicode_data_to_titlecase_start import unicode_data_to_titlecase_start
unidata_version = "14.0.0"
def name(chr: str, default=None) -> str:
    """Returns the name assigned to the character chr as a string.

    If no name is defined, default is returned, or, if not given,
    ValueError is raised.
    """
    codepoint = ord(chr)
    if codepoint in unicode_data_to_name_start:
        return unicode_data_to_name_start[codepoint]
    if default is None:
        raise ValueError("no such name")
    return default
def category(chr: str) -> str:
    """Returns the general category assigned to the character chr as string."""
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(unicode_data_to_category_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = unicode_data_to_category_start[key_start]
    # Range ends map back to their start key; anything else means idx sits on a
    # single-codepoint entry.
    end_keys = sorted(unicode_data_to_category_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = unicode_data_to_category_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = unicode_data_to_category_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            # NOTE(review): "Zzzz" is the *script* Unknown code; "Cn" would be the
            # conventional unknown General_Category -- possibly copied from script().
            return "Zzzz"
    except IndexError:
        # idx lies beyond the last table entry.
        return "Zzzz"
def bidirectional(chr: str) -> str:
    """Returns the bidirectional class assigned to the character chr as string.

    If no such value is defined, an empty string is returned.
    """
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(unicode_data_to_bidirectional_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = unicode_data_to_bidirectional_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(unicode_data_to_bidirectional_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = unicode_data_to_bidirectional_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = unicode_data_to_bidirectional_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            return ""
    except IndexError:
        # idx lies beyond the last table entry.
        return ""
def decimal(chr: str, default=None) -> int:
    """Returns the decimal value assigned to the character chr as integer.

    If no such value is defined, default is returned, or, if not given,
    ValueError is raised.
    """
    codepoint = ord(chr)
    if codepoint in unicode_data_to_decimal_start:
        return unicode_data_to_decimal_start[codepoint]
    if default is None:
        raise ValueError("not a decimal")
    return default
def digit(chr: str, default=None) -> int:
    """Returns the digit value assigned to the character chr as integer.

    If no such value is defined, default is returned, or, if not given,
    ValueError is raised.
    """
    codepoint = ord(chr)
    if codepoint in unicode_data_to_digit_start:
        return unicode_data_to_digit_start[codepoint]
    if default is None:
        raise ValueError("not a digit")
    return default
def numeric(chr: str, default=None) -> float:
    """Returns the numeric value assigned to the character chr as float.

    If no such value is defined, default is returned, or, if not given,
    ValueError is raised.
    """
    codepoint = ord(chr)
    if codepoint in unicode_data_to_numeric_start:
        return unicode_data_to_numeric_start[codepoint]
    if default is None:
        raise ValueError("not a numeric character")
    return default
def combining(chr: str) -> int:
    """Returns the canonical combining class of chr as integer (0 if undefined)."""
    return unicode_data_to_combining_start.get(ord(chr), 0)
def mirrored(chr: str) -> int:
    """Returns the mirrored property assigned to the character chr as integer.

    Returns 1 if the character has been identified as a "mirrored" character in
    bidirectional text, 0 otherwise.
    """
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(unicode_data_to_mirrored_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = unicode_data_to_mirrored_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(unicode_data_to_mirrored_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = unicode_data_to_mirrored_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = unicode_data_to_mirrored_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            return 0
    except IndexError:
        # idx lies beyond the last table entry.
        return 0
def decomposition(chr: str) -> str:
    """Returns the character decomposition mapping of chr ("" if undefined)."""
    return unicode_data_to_decomposition_start.get(ord(chr), "")
def uppercase(chr: str) -> str:
    """Returns the uppercase mapping of chr, or "" if there is none."""
    return unicode_data_to_uppercase_start.get(ord(chr), "")
def lowercase(chr: str) -> str:
    """Returns the lowercase mapping of chr, or "" if there is none."""
    return unicode_data_to_lowercase_start.get(ord(chr), "")
def titlecase(chr: str) -> str:
    """Returns the titlecase mapping of chr, or "" if there is none."""
    return unicode_data_to_titlecase_start.get(ord(chr), "")
def east_asian_width(chr: str, default=None) -> str:
    """Returns the east asian width assigned to the character chr as string.

    If no width is defined, *default* is returned, or, if not given,
    ValueError is raised.
    """
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(east_asian_width_to_east_asian_width_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = east_asian_width_to_east_asian_width_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(east_asian_width_to_east_asian_width_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = east_asian_width_to_east_asian_width_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = east_asian_width_to_east_asian_width_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
    except IndexError:
        # BUGFIX: every sibling lookup guards this access; without it, codepoints
        # past the last table entry raised IndexError instead of taking the
        # default / ValueError path below.
        pass
    if default is None:
        raise ValueError("no east asian width")
    return default
def age(chr: str) -> str:
    """Return the Unicode version in which chr was introduced ("1.0" fallback)."""
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(derived_age_to_age_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = derived_age_to_age_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(derived_age_to_age_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = derived_age_to_age_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = derived_age_to_age_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            return "1.0"
    except IndexError:
        # idx lies beyond the last table entry.
        return "1.0"
def block(chr: str) -> str:
    """Return the name of the Unicode block containing chr ("Unknown" fallback)."""
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(blocks_to_block_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = blocks_to_block_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(blocks_to_block_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = blocks_to_block_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = blocks_to_block_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            return "Unknown"
    except IndexError:
        # idx lies beyond the last table entry.
        return "Unknown"
def script(chr: str) -> str:
    """Return the name of the script of chr ("Unknown" fallback)."""
    idx = ord(chr)
    # Find the range start: greatest table key <= idx.
    start_keys = sorted(scripts_to_script_start.keys())
    insertion_point = bisect.bisect_left(start_keys, idx)
    if insertion_point == len(start_keys) or start_keys[insertion_point] != idx:
        insertion_point -= 1
    key_start = start_keys[insertion_point]
    result_start = scripts_to_script_start[key_start]
    # Range ends map back to their start key.
    end_keys = sorted(scripts_to_script_end.keys())
    insertion_point = bisect.bisect_left(end_keys, idx)
    try:
        key_end = end_keys[insertion_point]
        result_end = scripts_to_script_end[key_end]
        if result_end != key_start:
            result_end = result_start
            key_end = key_start
        else:
            result_end = scripts_to_script_start[result_end]
        if key_start <= idx <= key_end and result_start == result_end:
            return result_start
        else:
            return "Unknown"
    except IndexError:
        # idx lies beyond the last table entry.
        return "Unknown"
def prop_list(chr: str) -> list:
    """Return the Unicode property list for chr (empty when none is recorded).

    NOTE(review): the annotation says ``list`` but the fallback returns an empty
    ``set`` -- presumably the table values are sets too; confirm and fix the
    annotation.
    """
    idx = ord(chr)
    try:
        return prop_list_to_property[idx]
    except KeyError:
        return set()
def age_long(value: str) -> Optional[str]:
    """Expand a short Age alias to its long form, or None if unknown."""
    return property_value_alias_age_short_to_long.get(value)
def category_long(value: str) -> Optional[str]:
    """Expand a short General_Category alias to its long form, or None."""
    return property_value_alias_gc_short_to_long.get(value)
def east_asian_width_long(value: str) -> Optional[str]:
    """Expand a short East_Asian_Width alias to its long form, or None."""
    return property_value_alias_ea_short_to_long.get(value)
def bidirectional_long(value: str) -> Optional[str]:
    """Expand a short Bidi_Class alias to its long form, or None."""
    return property_value_alias_bc_short_to_long.get(value)
def combining_long(value: str) -> Optional[str]:
    """Expand a short Canonical_Combining_Class alias to its long form, or None."""
    return property_value_alias_ccc_short_to_long.get(value)
def block_abbr(value: str) -> Optional[str]:
    """Abbreviate a long Block name to its short alias, or None."""
    return property_value_alias_blk_long_to_short.get(value)
def script_abbr(value: str) -> Optional[str]:
    """Abbreviate a long Script name to its short alias, or None."""
    return property_value_alias_sc_long_to_short.get(value)
| [
6738,
19720,
1330,
32233,
198,
198,
11748,
47457,
478,
198,
198,
6738,
41431,
62,
929,
62,
259,
62,
46903,
1098,
13,
84,
1415,
62,
15,
62,
15,
62,
7890,
13,
22930,
62,
4868,
62,
1462,
62,
26745,
1330,
2632,
62,
4868,
62,
1462,
62,... | 2.395444 | 6,322 |
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import sys
import ST7735

# Create ST7735 LCD display class object and set pin numbers and display hardware information.
disp = ST7735.ST7735(
    dc=24,
    cs=ST7735.BG_SPI_CS_BACK,
    rst=25,
    port=0,
    width=122,  # NOTE(review): ST7735 panels are commonly 128 px wide -- confirm 122 is intended.
    height=160,
    rotation=270
)

# Initialize display.
disp.begin()

WIDTH = disp.width
HEIGHT = disp.height

# Start from a black canvas matching the panel size.
img = Image.new('RGB', (WIDTH, HEIGHT), color=(0, 0, 0))
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSans-Regular.ttf", 12)

# Initialize a secondary text with the empty string
text2 = ""

# Print test-output on the display if no argument is given
if len(sys.argv) == 1:
    text = "Temperature:\nHumidity:\nUV:\nRain:\nLight:"
    text2 = "20C\n50 %\n42\nyes\nOn"
# Print the argument if only one is given
elif len(sys.argv) == 2:
    text = sys.argv[1]
# If 2 arguments are given use the second as the secondary text
elif len(sys.argv) == 3:
    text = sys.argv[1]
    text2 = sys.argv[2]
# For any other number of arguments draw them in one line each
else:
    text = ''.join(i + "\n" for i in sys.argv[1:])

# Print both texts, with the secondary one starting with an 100 px offset
draw.text((10, 10), text, font=font, fill=(255, 255, 255))
draw.text((110, 10), text2, font=font, fill=(255, 255, 255))

# Push the rendered frame to the panel.
disp.display(img)
| [
6738,
350,
4146,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
25302,
198,
6738,
350,
4146,
1330,
7412,
23252,
198,
11748,
25064,
198,
11748,
3563,
3324,
2327,
628,
198,
2,
13610,
3563,
3324,
2327,
23598,
3359,
1398,
2134,
290,
900,
675... | 2.644914 | 521 |
import sqlite3

# Bulk-load rows into the `data` table of Survey.db. Each line of
# insertcommand.txt is expected to hold a parenthesised SQL tuple literal,
# e.g. "(1, 'answer')".
# Fixes vs. original: no longer shadows the builtin `str`, and both the
# connection and the file are closed even if an INSERT fails.
with sqlite3.connect('Survey.db') as conn, open('insertcommand.txt') as fo:
    for line in fo:
        # NOTE(review): values are spliced directly into the SQL text. That is
        # tolerable for a trusted local file, but switch to parameterised
        # queries if this input can ever come from an untrusted source.
        conn.execute("INSERT INTO data VALUES" + line)
    # The connection context manager commits the transaction on success.
conn.close()
| [
11748,
44161,
578,
18,
198,
198,
37043,
28,
25410,
578,
18,
13,
8443,
10786,
14214,
3304,
13,
9945,
11537,
198,
198,
6513,
28,
9654,
10786,
28463,
21812,
13,
14116,
11537,
198,
2536,
28,
6513,
13,
961,
1370,
3419,
198,
4514,
965,
25,
... | 2.378641 | 103 |
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: HeadlessExperimental (experimental)
from __future__ import annotations
from cdp.util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
| [
2,
8410,
5626,
48483,
12680,
45811,
0,
198,
2,
198,
2,
770,
2393,
318,
7560,
422,
262,
327,
6322,
20855,
13,
1002,
345,
761,
284,
787,
198,
2,
2458,
11,
4370,
262,
17301,
290,
43519,
477,
286,
262,
13103,
13,
198,
2,
198,
2,
327... | 3.676768 | 99 |
import unittest
from wvflib.geometry import Face
# Allow running this test module directly: `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
266,
85,
2704,
571,
13,
469,
15748,
1330,
15399,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.5 | 40 |
import pytest
from detectem.core import Detector, Result, ResultCollection
from detectem.plugin import Plugin, PluginCollection
from detectem.settings import INDICATOR_TYPE, HINT_TYPE, MAIN_ENTRY, GENERIC_TYPE
from detectem.plugins.helpers import meta_generator
class TestResultCollection():
| [
11748,
12972,
9288,
198,
198,
6738,
4886,
368,
13,
7295,
1330,
4614,
9250,
11,
25414,
11,
25414,
36307,
198,
6738,
4886,
368,
13,
33803,
1330,
42636,
11,
42636,
36307,
198,
6738,
4886,
368,
13,
33692,
1330,
24413,
2149,
25633,
62,
25216... | 3.654321 | 81 |
from django.dispatch.dispatcher import receiver
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, logout as auth_logout, login as auth_login
from django.contrib import messages
from django.db.models import Count
from django.template.loader import render_to_string
from django.http import HttpResponseRedirect, JsonResponse
from twitter.models import Tweet, Follow, Notification, Comment
from twitter.myDecor import check_if_user_logged
from twitter.forms import SignUpForm
# Create your views here.
def logout(request):
    """Log the current user out and redirect to the index page.

    (Renames the misspelled ``reqeuest`` parameter; Django invokes views with
    the request passed positionally, so the rename is safe for URL-conf callers.)
    """
    auth_logout(request)
    return redirect('index')
def profile(request, username):
    """Show a user's profile page, or update bio/pictures on POST."""
    if request.method == 'POST':
        user = User.objects.get(username=username)
        user.profile.bio = request.POST['bio']
        # Keep the existing picture when no new file was uploaded.
        user.profile.profilePic = request.FILES['pic'] if 'pic' in request.FILES else user.profile.profilePic
        user.profile.backgroundPic = request.FILES['banner'] if 'banner' in request.FILES else user.profile.backgroundPic
        # NOTE(review): user.save() writes the User row; the modified
        # user.profile may need its own .save() unless a signal handles it -- verify.
        user.save()
        return redirect('profile', username=username)
    else:
        try:
            userProfile = User.objects.get(username=username)
        except User.DoesNotExist:
            return HttpResponse('User Not Found')
        tweets = Tweet.objects.filter(author__exact=userProfile).order_by('-timeStamp')
        # Does the signed-in user already follow this profile?
        # NOTE(review): assumes an authenticated user (no @login_required here);
        # confirm the URL conf guards this view.
        is_following = False
        for follow in request.user.followers.all():
            if userProfile.id == follow.user_id_id:
                is_following=True
        # Up to five suggested accounts: least-followed users the viewer does not
        # already follow, excluding themselves and the profile being viewed.
        rec_profiles = User.objects.annotate(count=Count('followers')).order_by('followers').exclude(username=request.user.username).exclude(username=username).exclude(id__in=request.user.followers.all().values_list('user_id', flat=True))[:5]
        return render(request, 'profile.html', {'userProfile':userProfile, 'tweets':tweets, 'is_following':is_following, 'rec_profiles':rec_profiles})
def notifications(request):
    """Render the user's ten newest notifications, marking unread ones as seen."""
    notifics = request.user.your_notifications.all()
    # Mark everything seen -- one UPDATE per row.
    # NOTE(review): a single queryset .update(seen=True) would avoid N queries,
    # but bypasses per-instance save() logic and signals; confirm before changing.
    for notific in notifics:
        notific.seen = True
        notific.save()
    # Re-query for the ten newest, now flagged as seen.
    notifics = request.user.your_notifications.all().order_by('-id')[:10]
    return render(request, 'notifications.html', {'notifics':notifics})
def tweet_details(request, tweetID):
    """Render a single tweet together with its comments, newest first."""
    tweet = Tweet.objects.get(id=tweetID)
    ordered_comments = tweet.main_tweet.all().order_by('-timeStamp')
    return render(request, 'tweet_details.html',
                  {'tweet': tweet, 'comments': ordered_comments})
def comment(request, tweetID):
    """Attach a POSTed comment to a tweet and notify the tweet's author."""
    if request.method == 'POST':
        author = request.user
        content = request.POST['comment']
        tweet = Tweet.objects.get(id=tweetID)
        Comment.objects.create(author=author, main_tweet=tweet, content=content)
        # Notification on post comment -- skipped when users comment on their own tweet.
        if(request.user != tweet.author):
            Notification.objects.create(sender = request.user, receiver = tweet.author, target = tweet, type = 'C')
        return redirect(tweet_details, tweetID=tweetID)
    else:
        # Non-POST access falls back to the timeline.
        # NOTE(review): `home` is defined elsewhere in this module (not visible here).
        return redirect(home)
6738,
42625,
14208,
13,
6381,
17147,
13,
6381,
8071,
2044,
1330,
9733,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
11,
8543,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12... | 2.756944 | 1,152 |
# Copyright (c) 2022, momscode and Contributors
# See license.txt
# import frappe
import unittest
| [
2,
15069,
357,
66,
8,
33160,
11,
1995,
1416,
1098,
290,
25767,
669,
198,
2,
4091,
5964,
13,
14116,
198,
198,
2,
1330,
5306,
27768,
198,
11748,
555,
715,
395,
198
] | 3.193548 | 31 |
"""Python APIs for STIX 2.
.. autosummary::
:toctree: api
confidence
datastore
environment
equivalence
exceptions
markings
parsing
pattern_visitor
patterns
properties
serialization
utils
v20
v21
versioning
workbench
"""
# flake8: noqa
DEFAULT_VERSION = '2.1' # Default version will always be the latest STIX 2.X version
from .confidence import scales
from .datastore import CompositeDataSource
from .datastore.filesystem import (
FileSystemSink, FileSystemSource, FileSystemStore,
)
from .datastore.filters import Filter
from .datastore.memory import MemorySink, MemorySource, MemoryStore
from .datastore.taxii import (
TAXIICollectionSink, TAXIICollectionSource, TAXIICollectionStore,
)
from .environment import Environment, ObjectFactory
from .markings import (
add_markings, clear_markings, get_markings, is_marked, remove_markings,
set_markings,
)
from .parsing import _collect_stix2_mappings, parse, parse_observable
from .patterns import (
AndBooleanExpression, AndObservationExpression, BasicObjectPathComponent,
BinaryConstant, BooleanConstant, EqualityComparisonExpression,
FloatConstant, FollowedByObservationExpression,
GreaterThanComparisonExpression, GreaterThanEqualComparisonExpression,
HashConstant, HexConstant, InComparisonExpression, IntegerConstant,
IsSubsetComparisonExpression, IsSupersetComparisonExpression,
LessThanComparisonExpression, LessThanEqualComparisonExpression,
LikeComparisonExpression, ListConstant, ListObjectPathComponent,
MatchesComparisonExpression, ObjectPath, ObservationExpression,
OrBooleanExpression, OrObservationExpression, ParentheticalExpression,
QualifiedObservationExpression, ReferenceObjectPathComponent,
RepeatQualifier, StartStopQualifier, StringConstant, TimestampConstant,
WithinQualifier,
)
from .v21 import * # This import will always be the latest STIX 2.X version
from .version import __version__
from .versioning import new_version, revoke
_collect_stix2_mappings()
| [
37811,
37906,
23113,
329,
3563,
10426,
362,
13,
198,
198,
492,
44619,
388,
6874,
3712,
198,
220,
220,
1058,
1462,
310,
631,
25,
40391,
628,
220,
220,
6628,
198,
220,
220,
4818,
459,
382,
198,
220,
220,
2858,
198,
220,
220,
6854,
594... | 3.184211 | 646 |
# Read how many matches to find, then walk the positive integers in order,
# counting those whose decimal form contains the substring "666"; print the
# last (i.e. the count-th) such number.
count = int(input())

title = 0
while count > 0:
    title += 1
    if str(title).find('666') != -1:
        count -= 1

print(title)
| [
9127,
796,
493,
7,
15414,
28955,
198,
198,
7839,
796,
657,
198,
4514,
954,
1875,
657,
25,
198,
220,
220,
220,
3670,
15853,
352,
198,
220,
220,
220,
611,
705,
27310,
6,
287,
965,
7,
7839,
2599,
198,
220,
220,
220,
220,
220,
220,
... | 2.272727 | 55 |
# This file is automatically generated by tools/idna-data
# vim: set fileencoding=utf-8 :
"""IDNA Mapping Table from UTS46."""
__version__ = "11.0.0"
# Concatenate the generated per-range mapping segments (_seg_0 .. _seg_78,
# defined elsewhere in this auto-generated module) into one immutable
# IDNA/UTS#46 lookup table.
uts46data = tuple(
    _seg_0()
    + _seg_1()
    + _seg_2()
    + _seg_3()
    + _seg_4()
    + _seg_5()
    + _seg_6()
    + _seg_7()
    + _seg_8()
    + _seg_9()
    + _seg_10()
    + _seg_11()
    + _seg_12()
    + _seg_13()
    + _seg_14()
    + _seg_15()
    + _seg_16()
    + _seg_17()
    + _seg_18()
    + _seg_19()
    + _seg_20()
    + _seg_21()
    + _seg_22()
    + _seg_23()
    + _seg_24()
    + _seg_25()
    + _seg_26()
    + _seg_27()
    + _seg_28()
    + _seg_29()
    + _seg_30()
    + _seg_31()
    + _seg_32()
    + _seg_33()
    + _seg_34()
    + _seg_35()
    + _seg_36()
    + _seg_37()
    + _seg_38()
    + _seg_39()
    + _seg_40()
    + _seg_41()
    + _seg_42()
    + _seg_43()
    + _seg_44()
    + _seg_45()
    + _seg_46()
    + _seg_47()
    + _seg_48()
    + _seg_49()
    + _seg_50()
    + _seg_51()
    + _seg_52()
    + _seg_53()
    + _seg_54()
    + _seg_55()
    + _seg_56()
    + _seg_57()
    + _seg_58()
    + _seg_59()
    + _seg_60()
    + _seg_61()
    + _seg_62()
    + _seg_63()
    + _seg_64()
    + _seg_65()
    + _seg_66()
    + _seg_67()
    + _seg_68()
    + _seg_69()
    + _seg_70()
    + _seg_71()
    + _seg_72()
    + _seg_73()
    + _seg_74()
    + _seg_75()
    + _seg_76()
    + _seg_77()
    + _seg_78()
)
| [
2,
770,
2393,
318,
6338,
7560,
416,
4899,
14,
312,
2616,
12,
7890,
201,
198,
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
1058,
201,
198,
201,
198,
37811,
2389,
4535,
337,
5912,
8655,
422,
471,
4694,
3510,
526,
15931,
... | 1.483366 | 1,022 |
# -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import nn
from kbcr.kernels import GaussianKernel
from kbcr.smart import NeuralKB
import pytest
if __name__ == '__main__':
    # Allow this test module to be executed directly; pytest collects and
    # runs the tests defined in this file.
    pytest.main([__file__])
    # test_smart_v1()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
47823,
6098,
13,
74,
44930,
1330,
12822,
31562,
42,
7948,
19... | 2.572917 | 96 |
import tkinter

# Build a resizable 640x400 window at screen offset (100, 100) that shows a
# single captured frame image.
root = tkinter.Tk()
root.title("YUN DAE HEE")
root.geometry("640x400+100+100")
root.resizable(True, True)

# Keep a module-level reference to the PhotoImage so it is not
# garbage-collected while the window is open.
frame_image = tkinter.PhotoImage(file="opencv_frame_0.png")
tkinter.Label(root, image=frame_image).pack()

root.mainloop()
11748,
256,
74,
3849,
198,
198,
17497,
28,
30488,
3849,
13,
51,
74,
3419,
198,
17497,
13,
7839,
7203,
56,
4944,
360,
14242,
367,
6500,
4943,
198,
17497,
13,
469,
15748,
7203,
31102,
87,
7029,
10,
3064,
10,
3064,
4943,
198,
17497,
13... | 2.591837 | 98 |
from django.urls import path, include
from .views import main_view, PredictionView
#router = routers.DefaultRouter(trailing_slash=False)
#router.register('years', YearView, basename='years')
#router.register('predict', PredictionView, basename='predict')
# Only the last two routes are active; the commented entries are retained
# from earlier iterations of this API.
urlpatterns = [
    #path('api/', get_dummy_data),
    #path('pollution/predict', get_prediction, name='test_predict'),
    #path('myform/', api_form_view, name='year_form'),
    #path('api/', include(router.urls)),
    # Site root -> main page view.
    path(r'', main_view, name="main"),
    # JSON prediction endpoint.
    path(r'api/v1/predict', PredictionView.as_view(), name='predict')
]
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
33571,
1330,
1388,
62,
1177,
11,
46690,
7680,
198,
198,
2,
472,
353,
796,
41144,
13,
19463,
49,
39605,
7,
9535,
4386,
62,
6649,
1077,
28,
25101,
8,
198,
2,
47... | 2.728972 | 214 |
from django.conf import settings
from termcolor import colored
# Color-name constants accepted by the colorize helpers below; the string
# values are the color names understood by :func:`termcolor.colored`.

#: Red color constant for :func:`.ievv_colorize`.
COLOR_RED = 'red'

#: Blue color constant for :func:`.ievv_colorize`.
COLOR_BLUE = 'blue'

#: Yellow color constant for :func:`.ievv_colorize`.
COLOR_YELLOW = 'yellow'

#: Grey color constant for :func:`.ievv_colorize`.
COLOR_GREY = 'grey'

#: Green color constant for :func:`.ievv_colorize`.
COLOR_GREEN = 'green'
def colorize(text, color, bold=False):
    """
    Colorize a string for stdout/stderr via :func:`termcolor.colored`.

    Colors are applied only when the :setting:`IEVV_COLORIZE_USE_COLORS`
    setting is ``True`` or unset (it defaults to ``True``) *and* ``color``
    is non-empty.

    Examples:

        Print blue text::

            from ievv_opensource.utils import ievv_colorize
            print(ievv_colorize('Test', color=ievv_colorize.COLOR_BLUE))

        Print bold red text::

            print(ievv_colorize('Test', color=ievv_colorize.COLOR_RED, bold=True))

    Args:
        text: The text (string) to colorize.
        color: One of the ``COLOR_*`` constants in this module, or ``None``
            for no color.
        bold: Set this to ``True`` to use bold font.
    """
    use_colors = getattr(settings, 'IEVV_COLORIZE_USE_COLORS', True)
    if not (use_colors and color):
        return text
    return colored(text, color=color, attrs=['bold'] if bold else [])
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
3381,
8043,
1330,
16396,
628,
198,
2,
25,
2297,
3124,
6937,
329,
1058,
20786,
25,
44646,
11203,
85,
62,
8043,
1096,
44646,
198,
46786,
62,
22083,
796,
705,
445,
6,
198,
198,
2,
2... | 2.268328 | 682 |
# coding: utf-8
# In[1]:
import baostock as bs
import pandas as pd
import numpy as np
import talib as ta
import matplotlib.pyplot as plt
import KlineService
import BaoStockUtil
import math
import datetime
from scipy import integrate
from RSI import DayRSI,WeekRSI,MonthRSI,SixtyMinRSI
from concurrent.futures import ThreadPoolExecutor, as_completed
from Stock import Stock
import dbutil
from IPython.core.debugger import set_trace
#
# Number of trailing samples used when integrating the RSI curve
# (presumably — the original comment was lost; TODO confirm).
INTEGRATE_CALC_RANGE = 4
# Overbought / oversold thresholds for the base RSI.
RSI_OVER_BUY = 80
RSI_OVER_SELL = 20
# Thresholds for the 12-period RSI (per the _12 suffix; TODO confirm).
RSI_OVER_BUY_12 = 75
RSI_OVER_SELL_12 = 25
# Thresholds for the 24-period RSI (per the _24 suffix; TODO confirm).
RSI_OVER_BUY_24 = 70
RSI_OVER_SELL_24 = 30
# Neutral RSI midpoint.
RSI_MIDDLE = 50
# Threshold on the integrated daily RSI used for the oversold signal
# (name suggests; original comment was lost).
RSI_INTE_OVERSELL_THRESHOLD_DAY = 50
# In[3]:
##
# RSI
#
#
##
# RSI
#
#
##
# Kline Collection
#
##
# RSI
#
#
##
# RSI
#
#
#
#4424
#4RSI4*60=240
#
## TODO
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
628,
198,
11748,
26605,
455,
735,
355,
275,
82,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
3305,
571,
355,
20486,
198,... | 2.169399 | 366 |
# Copyright 2016 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
# Logging-related configuration options registered by osnoise.
# Bug fix: the help strings for 'log_maxBytes' and 'log_backupCount'
# were copy-pasted as 'Log level.' — they now describe the rotation
# parameters they actually configure.
base_options = [
    cfg.StrOpt(
        'log_file_name',
        default='osnoise.log',
        help='Osnoise file name.'),
    cfg.StrOpt(
        'log_dir',
        default='/var/log/osnoise/',
        help='Osnoise log directory.'),
    cfg.StrOpt(
        'log_level',
        default='info',
        help='Log level.'),
    cfg.StrOpt(
        'log_file',
        default='/var/log/osnoise/osnoise.log',
        help='Log file'),
    cfg.IntOpt(
        'log_maxBytes',
        default=1000000,
        min=1000,
        help='Maximum size of a log file, in bytes, before it is rotated.'),
    cfg.IntOpt(
        'log_backupCount',
        default=5,
        min=1,
        help='Number of rotated log files to keep.'),
    cfg.BoolOpt('log_config_append',
                default=False,
                deprecated_group='DEFAULT',
                help='To append logs to existent log file or not.'),
]
| [
2,
15069,
1584,
11942,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,... | 2.355155 | 611 |
from kivy.properties import NumericProperty
from gui.sensors.sensor import Sensor
import config
| [
6738,
479,
452,
88,
13,
48310,
1330,
399,
39223,
21746,
198,
198,
6738,
11774,
13,
82,
641,
669,
13,
82,
22854,
1330,
35367,
198,
11748,
4566,
628
] | 3.62963 | 27 |
#
import discord
from discord.ext import commands
from discord.ext.commands import Context
from define import * | [
2,
198,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
30532,
198,
198,
6738,
8160,
1330,
1635
] | 4.035714 | 28 |
import cProfile
import pstats
import os
#
def do_cprofile(filename):
    """
    Decorator for function profiling.

    NOTE(review): this excerpt references ``wrapper`` without ever defining
    it, so calling ``do_cprofile`` raises ``NameError``. The decorator body
    (which presumably defined ``wrapper`` and used ``cProfile``/``pstats``
    to profile the wrapped call and write stats to *filename*) appears to
    have been lost — restore it before use.
    """
    return wrapper
11748,
269,
37046,
198,
11748,
279,
34242,
198,
11748,
28686,
628,
198,
2,
220,
198,
4299,
466,
62,
66,
13317,
7,
34345,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4280,
273,
1352,
329,
2163,
31582,
13,
198,
220,
220,
220,... | 2.88 | 50 |
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between cole Polytechnique Fdrale de Lausanne (EPFL) and
# Eidgenssische Technische Hochschule Zrich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for providers."""
import abc
| [
2,
15069,
13130,
532,
14780,
6060,
5800,
3337,
357,
50,
5258,
34,
8,
198,
2,
317,
10413,
1022,
763,
293,
12280,
23873,
2350,
376,
7109,
1000,
390,
4689,
385,
21952,
357,
8905,
3697,
8,
290,
198,
2,
31022,
70,
641,
82,
46097,
5429,
... | 3.641148 | 209 |
# -*- coding: utf-8 -*-
from .converters import Converter, UnstructureStrategy
# Public star-import API; note the register_* aliases defined below are not
# listed here, so `from <package> import *` does not export them.
__all__ = ('global_converter', 'unstructure', 'structure',
           'structure_attrs_fromtuple', 'structure_attrs_fromdict',
           'UnstructureStrategy')
# NOTE(review): the author's name appears mis-encoded (diacritics lost).
__author__ = 'Tin Tvrtkovi'
__email__ = 'tinchester@gmail.com'
# Shared module-level converter; the names below are thin aliases bound to
# its methods for convenience.
global_converter = Converter()
unstructure = global_converter.unstructure
structure = global_converter.structure
structure_attrs_fromtuple = global_converter.structure_attrs_fromtuple
structure_attrs_fromdict = global_converter.structure_attrs_fromdict
register_structure_hook = global_converter.register_structure_hook
register_structure_hook_func = global_converter.register_structure_hook_func
register_unstructure_hook = global_converter.register_unstructure_hook
register_unstructure_hook_func = \
    global_converter.register_unstructure_hook_func
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
764,
1102,
332,
1010,
1330,
35602,
353,
11,
791,
301,
5620,
13290,
4338,
198,
198,
834,
439,
834,
796,
19203,
20541,
62,
1102,
332,
353,
3256,
705,
403,
301,
562... | 2.739683 | 315 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run dask scheduler and worker."""
import os
import subprocess
import shutil
import logging
import socket
import random
from distributed import Client
from distributed.security import Security
from .conf import get_config
from .verify_cert import verify_cert
# Server security settings (CA cert, client/server cert and key paths)
# shared by all of the helpers in this module.
sec_cfg = get_config('server')
def get_client_security(address):
    """Return a dask ``Client`` connected to *address* over TLS.

    The scheduler address is rewritten from the ``tcp`` scheme to ``tls``.
    A failed certificate check is only logged — the connection is still
    attempted.
    """
    tls_address = address.replace("tcp", "tls")
    if not verify_cert(sec_cfg.ca_cert, sec_cfg.client_cert_dask):
        logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.client_cert_dask} are invalid, please check.")
    security = Security(
        tls_ca_file=sec_cfg.ca_cert,
        tls_client_cert=sec_cfg.client_cert_dask,
        tls_client_key=sec_cfg.client_secret_key_dask,
        require_encryption=True)
    return Client(tls_address, security=security)
def get_address_security(master_host, master_port):
    """Build the TLS scheduler address string for the given host and port."""
    return f"tls://{master_host}:{master_port}"
def run_scheduler_security(ip, port, tmp_file):
    """Launch a TLS-secured dask scheduler as a child process.

    A failed certificate check is only logged; the scheduler is started
    regardless. Returns the ``subprocess.Popen`` handle.
    """
    if not verify_cert(sec_cfg.ca_cert, sec_cfg.server_cert_dask):
        logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.server_cert_dask} are invalid, please check.")
    command = [
        "dask-scheduler",
        "--no-dashboard",
        "--no-show",
        f"--tls-ca-file={sec_cfg.ca_cert}",
        f"--tls-cert={sec_cfg.server_cert_dask}",
        f"--tls-key={sec_cfg.server_secret_key_dask}",
        f"--host={ip}",
        "--protocol=tls",
        f"--port={port}",
        f"--scheduler-file={tmp_file}",
        # The scheduler file's directory doubles as the working directory.
        f"--local-directory={os.path.dirname(tmp_file)}",
    ]
    return subprocess.Popen(command, env=os.environ)
def run_local_worker_security(slave_ip, address, local_dir):
    """Start a TLS-secured dask-worker on the local node.

    Rewrites *address* from ``tcp`` to ``tls``, picks free nanny/worker
    ports from fixed ranges, and returns the ``subprocess.Popen`` handle.
    """
    tls_address = address.replace("tcp", "tls")
    nanny_port = _available_port(30000, 30999)
    worker_port = _available_port(29000, 29999)
    command = [
        "dask-worker",
        tls_address,
        '--nthreads=1',
        '--nprocs=1',
        '--memory-limit=0',
        f"--local-directory={local_dir}",
        f"--tls-ca-file={sec_cfg.ca_cert}",
        f"--tls-cert={sec_cfg.client_cert_dask}",
        f"--tls-key={sec_cfg.client_secret_key_dask}",
        "--no-dashboard",
        f"--host={slave_ip}",
        "--protocol=tls",
        f"--nanny-port={nanny_port}",
        f"--worker-port={worker_port}",
    ]
    return subprocess.Popen(command, env=os.environ)
def run_remote_worker_security(slave_ip, address, local_dir):
    """Start a TLS-secured dask-worker on a remote node over ssh.

    Mirrors :func:`run_local_worker_security`, but executes the
    ``dask-worker`` binary (resolved on the *local* machine via
    ``shutil.which``) on ``slave_ip`` through ssh. Returns the
    ``subprocess.Popen`` handle for the ssh process.
    """
    tls_address = address.replace("tcp", "tls")
    nanny_port = _available_port(30000, 30999)
    worker_port = _available_port(29000, 29999)
    command = [
        "ssh",
        slave_ip,
        shutil.which("dask-worker"),
        tls_address,
        '--nthreads=1',
        '--nprocs=1',
        '--memory-limit=0',
        f"--local-directory={local_dir}",
        f"--tls-ca-file={sec_cfg.ca_cert}",
        f"--tls-cert={sec_cfg.client_cert_dask}",
        f"--tls-key={sec_cfg.client_secret_key_dask}",
        "--no-dashboard",
        f"--host={slave_ip}",
        "--protocol=tls",
        f"--nanny-port={nanny_port}",
        f"--worker-port={worker_port}",
    ]
    return subprocess.Popen(command, env=os.environ)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
34,
8,
12131,
13,
43208,
21852,
1766,
1539,
12052,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.135593 | 1,947 |
"""
StarGAN v2
Copyright (c) 2020-present NAVER Corp.
This work is licensed under the Creative Commons Attribution-NonCommercial
4.0 International License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""
import os
from os.path import join as ospj
import time
import datetime
from munch import Munch
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.model import build_model
from core.checkpoint import CheckpointIO
from core.data_loader import InputFetcher
import core.utils as utils
from metrics.eval import calculate_metrics
| [
37811,
198,
8248,
45028,
410,
17,
198,
15269,
357,
66,
8,
12131,
12,
25579,
11746,
5959,
11421,
13,
198,
198,
1212,
670,
318,
11971,
739,
262,
17404,
13815,
45336,
12,
15419,
48401,
198,
19,
13,
15,
4037,
13789,
13,
1675,
1570,
257,
... | 3.437186 | 199 |
# Uses python3
import sys
if __name__ == '__main__':
    # input = sys.stdin.read()
    # Read two integers from a single line of stdin.
    a, b = map(int, input().split())
    # print(lcm_naive(a, b))
    # NOTE(review): `lcm` is not defined in this excerpt — presumably its
    # definition sits above this block; confirm it exists before running.
    print(lcm(a, b))
| [
2,
36965,
21015,
18,
198,
11748,
25064,
628,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
5128,
796,
25064,
13,
19282,
259,
13,
961,
... | 2.043956 | 91 |
import random
highscore = []
def not_in_range(guess_it):
    """Warn the player when ``guess_it`` is outside the valid 1-10 range.

    Prints a message for out-of-range guesses; in-range guesses pass
    silently. Always returns ``None``.
    """
    if 1 <= guess_it <= 10:
        return
    if guess_it < 1:
        print('I am not thinking of negative numbers!')
    else:
        print('That number is way bigger than 10!')
def new_game(tries):
    """Ask the player whether to play again after a finished round.

    'Yes' records this round's try count in the global ``highscore`` list
    and restarts the game; 'No' exits. The displayed highscore is the
    lowest recorded number of tries.
    """
    play_again = input('Would you like to play again? (Yes/No) ')
    # Bug fix: the original asked for a corrected answer once and then fell
    # off the end of the function (the game ended silently); keep asking
    # until we get a recognizable answer.
    while play_again.upper() not in ('YES', 'NO'):
        play_again = input('Please let me know by typing yes or no: ')
    if play_again.upper() == 'YES':
        highscore.append(tries)
        # Bug fix: was `highscore.sort` — the method was referenced but
        # never called, so highscore[0] was just the oldest entry rather
        # than the best (lowest) score.
        highscore.sort()
        print('The highscore is {}.'.format(highscore[0]))
        start_game()
    else:
        exit()
def start_game(): # title screen of the game
    """Run one round of the number-guessing game.

    Prints the banner, picks a random 1-10 target, and keeps prompting
    until the player guesses it, then hands off to ``new_game``.
    Note: ``tries`` counts only *wrong* guesses, so a first-try win
    reports "0 tries".
    """
    print('-' * 40)
    print('Welcome to the Number Guessing Game!!!')
    print('-' * 40)
    print('I am thinking of a number between 1-10.')
    random_number = random.randint(1, 10)
    tries = 0
    while True:
        try:
            guess_it = int(input('Can you guess it?: '))
        except ValueError:
            # Non-numeric input: complain and re-prompt.
            print('I said number, not gibberish!')
        else:
            # The inner `while` ends with an unconditional `break`, so it
            # behaves like an `if wrong-guess:` block; its `else` clause
            # runs only when the condition is false on entry, i.e. the
            # guess was correct, and then breaks the outer prompt loop.
            while guess_it != random_number:
                not_in_range(guess_it)
                tries += 1
                if guess_it > random_number:
                    print('That is too high!')
                elif guess_it < random_number:
                    print('That is too low')
                break
            else:
                print('You guessed it right! Your number was {}.'.format(random_number))
                print('It took you {} tries.'.format(tries))
                break
    # Reached only after a correct guess breaks the loop above.
    new_game(tries)
if __name__ == '__main__':
    # Kick off the program by calling the start_game function; new rounds
    # are chained from new_game().
    start_game()
| [
11748,
4738,
198,
198,
8929,
26675,
796,
17635,
628,
198,
4299,
407,
62,
259,
62,
9521,
7,
5162,
408,
62,
270,
2599,
198,
220,
220,
220,
37227,
1212,
318,
284,
2198,
326,
262,
3146,
5128,
1513,
416,
262,
2836,
389,
287,
2837,
11,
... | 2.395127 | 944 |
#!/usr/bin/env python3
# Placeholder connection settings — replace every value below with the real
# credentials/hosts for your environment before running.
USER = r'server\user'
PASSWORD = 'server_password'
HOSTNAME = 'hostname.goes.here.com'
DOMAIN = 'domain.goes.here.com'
FROM_ADDR = 'emailyouwanttosendmessagesfrom@something.com'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29904,
796,
374,
338,
18497,
59,
7220,
6,
198,
47924,
54,
12532,
796,
705,
15388,
62,
28712,
6,
198,
39,
10892,
20608,
796,
705,
4774,
3672,
13,
2188,
274,
13,
1456,
13,
785,
6... | 2.45122 | 82 |
#!/usr/bin/python3
#
# MIT License
#
# Copyright (c) 2021 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
try:
    ## The following import succeeds only when this file is executed
    ## directly from the command line; it raises ImportError when run
    ## via "python -m".
    # pylint: disable=W0611
    import __init__
except ImportError as error:
    ## Import failure means the script was executed indirectly; in that
    ## case the package __init__ is already loaded.
    pass

import sys
import argparse

import rsscast.logger as logger

from rsscast.rss.ytconverter import convert_yt

# This file is a command-line example, not a library: bail out at import
# time when it is not the main module.
if __name__ != '__main__':
    sys.exit(0)

parser = argparse.ArgumentParser(description='YouTube convert example')
args = parser.parse_args()

logger.configure_console()

# Convert a sample YouTube video to MP3 and report whether it succeeded.
converted = convert_yt( "https://www.youtube.com/watch?v=BLRUiVXeZKU", "/tmp/yt_example.mp3" )
print("converted:", converted)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
9128,
324,
3754,
89,
3433,
26691,
2724,
1279,
7959,
13,
1501,
316,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
2448,
... | 3.443662 | 568 |