content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
'''
urlcanon/rules.py - url matching rules
Copyright (C) 2017 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import urlcanon
import re
import logging
try:
unicode
except NameError:
unicode = str
def host_matches_domain(host, domain):
'''
Returns true if
- domain is an ip address and host is the same ip address
- domain is a domain and host is the same domain
- domain is a domain and host is a subdomain of it
Does not do any normalization. Probably a good idea to call
`host_matches_domain(
urlcanon.normalize_host(host), urlcanon.normalize_host(domain))`.
'''
if isinstance(domain, unicode):
domain = domain.encode('utf-8')
if isinstance(host, unicode):
host = host.encode('utf-8')
if domain == host:
return True
if (urlcanon.parse_ipv4or6(domain) != (None, None)
or urlcanon.parse_ipv4or6(host) != (None, None)):
# if either of self.domain or host is an ip address and they're
# not identical (the first check, above), not a match
return False
return urlcanon.reverse_host(host).startswith(urlcanon.reverse_host(domain))
def url_matches_domain(url, domain):
'''
Returns true if
- domain is an ip address and url.host is the same ip address
- domain is a domain and url.host is the same domain
- domain is a domain and url.host is a subdomain of it
Does not do any normalization/canonicalization. Probably a good idea to
call `host_matches_domain(
canonicalize(url), urlcanon.normalize_host(domain))`.
'''
if not isinstance(url, urlcanon.ParsedUrl):
url = urlcanon.parse_url(url)
return host_matches_domain(url.host, domain)
class MatchRule:
'''
A url-matching rule, with one or more conditions.
All conditions must match for a url to be considered a match.
The supported conditions are `surt`, `ssurt`, `regex`, `domain`,
`substring`, `parent_url_regex`. Values should be bytes objects. If they
are unicode strings, they will be utf-8 encoded.
No canonicalization is performed on any of the conditions. It's the
caller's responsibility to make sure that `domain` is in a form that their
urls can match.
The url passed to `MatchRule.applies` is not canonicalized either. The
caller should canonicalize it first. Same with `parent_url`. See also
`urlcanon.Canonicalizer.rule_applies`.
Here are some examples of valid rules expressed as yaml.
- domain: bad.domain.com
# preferred:
- domain: monkey.org
substring: bar
# deprecated version of the same:
- domain: monkey.org
url_match: STRING_MATCH
value: bar
# preferred:
- surt: http://(com,woop,)/fuh/
# deprecated version of the same:
- url_match: SURT_MATCH
value: http://(com,woop,)/fuh/
# preferred:
- regex: ^https?://(www.)?youtube.com/watch?.*$
parent_url_regex: ^https?://(www.)?youtube.com/user/.*$
# deprecated version of the same:
- url_match: REGEX_MATCH
value: ^https?://(www.)?youtube.com/watch?.*$
parent_url_regex: ^https?://(www.)?youtube.com/user/.*$
'''
def __init__(
self, surt=None, ssurt=None, regex=None, domain=None,
substring=None, parent_url_regex=None,
url_match=None, value=None):
'''
Args:
surt (bytes or str):
ssurt (bytes or str):
regex (bytes or str):
domain (bytes or str):
substring (bytes or str):
parent_url_regex (bytes or str):
url_match (str, deprecated):
value (bytes, deprecated):
'''
self.surt = surt.encode('utf-8') if isinstance(surt, unicode) else surt
self.ssurt = ssurt.encode('utf-8') if isinstance(ssurt, unicode) else ssurt
self.ssurt = ssurt
self.domain = domain.encode('utf-8') if isinstance(domain, unicode) else domain
self.substring = substring.encode('utf-8') if isinstance(substring, unicode) else substring
# append \Z to get a full match (py2 doesn't have re.fullmatch)
# (regex still works in case of \Z\Z)
if isinstance(regex, unicode):
regex = regex.encode('utf-8')
self.regex = regex and re.compile(regex + br'\Z')
if isinstance(parent_url_regex, unicode):
parent_url_regex = parent_url_regex.encode('utf-8')
self.parent_url_regex = parent_url_regex and re.compile(
parent_url_regex + br'\Z')
if url_match:
if isinstance(value, unicode):
value = value.encode('utf-8')
if url_match == 'REGEX_MATCH':
assert not self.regex
self.regex = re.compile(value + br'\Z')
elif url_match == 'SURT_MATCH':
assert not self.surt
self.surt = value
elif url_match == 'STRING_MATCH':
assert not self.substring
self.substring = value
else:
raise Exception(
'invalid scope rule with url_match '
'%s' % repr(url_match))
def applies(self, url, parent_url=None):
'''
Returns true if `url` matches `match_rule`.
All conditions must match for a url to be considered a match.
The caller should normally canonicalize before `url` and `parent_url`
passing them to this method.
Args:
url (urlcanon.ParsedUrl or bytes or str): already canonicalized url
parent_url (urlcanon.ParsedUrl or bytes or str, optional): parent
url, should be supplied if the rule has a `parent_url_regex`
Returns:
bool: True if the rule matches, False otherwise
'''
if not isinstance(url, urlcanon.ParsedUrl):
url = urlcanon.parse_url(url)
if self.domain and not url_matches_domain(url, self.domain):
return False
if self.surt and not url.surt().startswith(self.surt):
return False
if self.ssurt and not url.ssurt().startswith(self.ssurt):
return False
if self.substring and not url.__bytes__().find(self.substring) >= 0:
return False
if self.regex:
if not self.regex.match(url.__bytes__()):
return False
if self.parent_url_regex:
if not parent_url:
return False
if isinstance(parent_url, urlcanon.ParsedUrl):
parent_url = parent_url.__bytes__()
elif isinstance(parent_url, unicode):
parent_url = parent_url.encode('utf-8')
if not self.parent_url_regex.match(parent_url):
return False
return True
| [
7061,
6,
198,
6371,
49883,
14,
38785,
13,
9078,
532,
19016,
12336,
3173,
198,
198,
15269,
357,
34,
8,
2177,
4455,
20816,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
... | 2.372378 | 3,099 |
import pandas as pd
x=pd.read_pickle("c:/temp/ffMonthly.pkl")
print(x.head())
print(x.tail()) | [
11748,
19798,
292,
355,
279,
67,
198,
87,
28,
30094,
13,
961,
62,
27729,
293,
7203,
66,
14079,
29510,
14,
487,
31948,
306,
13,
79,
41582,
4943,
198,
4798,
7,
87,
13,
2256,
28955,
198,
4798,
7,
87,
13,
13199,
28955
] | 2.268293 | 41 |
import json
import unittest
from unittest.mock import patch
from api.app import app
from api.models import Planet, db
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
40391,
13,
1324,
1330,
598,
198,
6738,
40391,
13,
27530,
1330,
11397,
11,
20613,
628
] | 3.361111 | 36 |
import numpy as np
import math
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
X_ = np.genfromtxt('data_nextyear.csv',delimiter=',',skip_header = 2,dtype=float,usecols=(range(2,50)))
scaler = preprocessing.StandardScaler().fit(X_)
X = scaler.transform(X_)
Y = np.genfromtxt('data_nextyear.csv',delimiter=',',skip_header = 2,dtype=float,usecols=(51))
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.3, random_state = 42)
log_r = LogisticRegression(class_weight='balanced',penalty='l2')
linear_svm = SVC(kernel='linear', class_weight='balanced')
nn = MLPClassifier(hidden_layer_sizes=(25,),max_iter=1000, solver='sgd', momentum=0.95)
r_forest = RandomForestClassifier(n_estimators=100, max_features=7)
models = {"Logistic Regression":log_r, "SVM":linear_svm, "Neural Network":nn, "Random Forest":r_forest}
y_base = base_model(X_test)
print("Base rate accuracy (frequency of zeros):")
print(accuracy_score(Y_test,y_base))
print()
for name, model in models.items():
if name == 'Logistic Regression':
printout(add_intercept(X_test), Y_test, name, model, add_intercept(X_train), Y_train)
else:
printout(X_test, Y_test, name, model, X_train, Y_train)
X_addendum = np.genfromtxt('addendum_test.csv',delimiter=',', dtype=float,usecols=(range(48)))
X_addendum = scaler.transform(X_addendum)
Y_addendum = np.genfromtxt('addendum_test.csv',delimiter=',',dtype=float,usecols=(49))
for name, model in models.items():
if name == 'Logistic Regression':
printout(add_intercept(X_addendum), Y_addendum, name, model)
else:
printout(X_addendum, Y_addendum, name, model)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
198,
6738,
1341,
35720,
13,
4164,
10466,
13... | 2.795455 | 748 |
obj1 = Car("Suzuki", "Grey", "2015", 4)
obj1.printCarDetails()
| [
628,
198,
26801,
16,
796,
1879,
7203,
5606,
89,
11308,
1600,
366,
49141,
1600,
366,
4626,
1600,
604,
8,
198,
26801,
16,
13,
4798,
9914,
24259,
3419,
198
] | 2.357143 | 28 |
import sys, os
import math
import collections
import re
import multiprocessing
import time
import contextlib
import json
import tqdm
import nltk
import numpy as np
import tensorflow as tf
import pandas as pd
import sentencepiece as spm
GLOVE_PATH = "../input/embeddings/glove.840B.300d/glove.840B.300d.txt"
PARAGRAM_PATH = "../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt"
MAX_SEQ_LEN = 400
USE_CHARACTER = False
USE_REPLACE_TOKEN = False
USE_POS = False
USE_HOMEBREW = False
USE_SENTENCE_PIECE = False
SAVE = True
assert not USE_REPLACE_TOKEN or not USE_SENTENCE_PIECE
# preload
if USE_POS:
nltk.pos_tag(["this", "is", "test"])
nltk.stem.WordNetLemmatizer().lemmatize("test")
#----------------------------------------------------------------------------
print("load csv", end="...", flush=True)
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
print("done.")
#/---------------------------------------------------------------------------
#----------------------------------------------------------------------------
NUM_KEYS = [str(i) for i in range(10)] + ["="]
RE_SINGLE_NUM = re.compile("[0-9]")
MATH_TOKEN = "MATHTOKEN"
FWORD_TOKEN = "FWORDTOKEN"
ONLY_STAR_TOKEN = "ONLYSTARTOKEN"
print("tokenize", end="...", flush=True)
s = time.time()
with multiprocessing.Pool(8) as pool:
if not USE_POS:
all_train_sents, train_token_map = zip(*pool.map(tokenize, train_df.question_text))
else:
all_train_sents, all_train_pos_tags, all_train_lemmas, train_token_map = zip(*pool.map(tokenize, train_df.question_text))
with multiprocessing.Pool(8) as pool:
if not USE_POS:
test_sents, test_token_map = zip(*pool.map(tokenize, test_df.question_text))
else:
test_sents, test_pos_tags, test_lemmas, test_token_map = zip(*pool.map(tokenize, test_df.question_text))
print("done.", time.time() - s)
#/---------------------------------------------------------------------------
print("build vocab", end="...", flush=True)
train_vocab_counter = collections.Counter([word for sent in all_train_sents for word in sent])
test_only_vocab = {word for sent in test_sents for word in sent} - set(train_vocab_counter)
word_to_id = {word:id_+1 for id_,word in enumerate(sorted(set(train_vocab_counter) | test_only_vocab))}
word_to_id["$$UNK$$"] = 0
id_to_word = [word for word,id_ in sorted(word_to_id.items(), key=lambda x:x[1])]
print("done.", flush=True)
print("load glove", end="...", flush=True)
s = time.time()
glove_emb, glove_oov = load_embedding(GLOVE_PATH, word_to_id, train_vocab_counter, logarithm=True)
e = time.time()
print("done.", e-s, flush=True)
print("load paragram", end="...", flush=True)
s = time.time()
paragram_emb, paragram_oov = load_embedding(PARAGRAM_PATH, word_to_id, train_vocab_counter, logarithm=True, paragram=True)
e = time.time()
print("done.", e-s, flush=True)
# character
if USE_CHARACTER:
train_char_counter = collections.Counter()
for word, count in train_vocab_counter.items():
sub_counter = collections.Counter(word * count)
train_char_counter.update(sub_counter)
MIN_CHAR_FREQUENCY = 1000
char_to_id = {char:i+3 for i,char in enumerate(sorted([char for char,count in train_char_counter.items() if count >= MIN_CHAR_FREQUENCY]))}
char_to_id["$$PAD$$"] = 0
char_to_id["$$CENTER$$"] = 1
char_to_id["$$UNK$$"] = 2
unk_char_id = char_to_id["$$UNK$$"]
id_to_char = [char for char,id_ in sorted(char_to_id.items(), key=lambda x:x[1])]
MAX_WORD_LEN = 13
word_to_chars = {word:func_word_to_chars(word) for word in sorted(set(train_vocab_counter) | test_only_vocab)}
# homebrew
if USE_HOMEBREW:
print("homebrew")
dim_homebrew = 50 # 66.5% (default) 1epoch目で65.91、2epoch目で66.53
glove_window = 15
glove_iter = 15
glove_min = 5
glove_lower = False
#dim_homebrew = 300 # 66.5% 学習が早い。66.32->66.49。word-simはぱっと見変わらないがロスは小さい
#dim_homebrew, glove_iter = 300, 50 # 66.4% 66.40->66.16
#dim_homebrew = 150 # 66.6% 66.04->66.58
#glove_window = 7 # 66.4% 65.51->66.41。word-simは強く関連してそうなものだけ残って変な単語が減る。
#glove_window = 11 # 66.4% 65.54->66.35
#glove_iter = 50 # 66.4% 65.69->66.36 it15とword-simはスコア含めほぼ変わらないように見える。ロスは1割ほど落ちた。(iter15=0.040320, iter50=0.036944)
#glove_min = 50 # 66.2% 悪い。ねばる。64.98->66.20->66.21。word-simはぱっと見変わらない。よく見るとレアワードでちゃんと変わってるかも。
#glove_lower = True # 66.7% 65.61->66.68
#dim_homebrew, glove_lower = 300, True # 66.18->66.69
homebrew_word_to_id = {"<unk>":0}
homebrew_id_to_word = ["<unk>"]
homebrew_new_id = 1
homebrew_init_emb = []
with open("../homebrew/glove-homebrew{}.{}d.win{}-it{}-min{}.txt".format((".lower" if glove_lower else ""), dim_homebrew, glove_window, glove_iter, glove_min)) as f:
for line in f:
line = line.strip()
if len(line) == 0: continue
word, *vec = line.split(" ")
assert len(vec) == dim_homebrew
vec = np.array([float(v) for v in vec], dtype=np.float32)
if word == "<unk>":
homebrew_init_emb = [vec] + homebrew_init_emb
continue
homebrew_word_to_id[word] = homebrew_new_id
homebrew_new_id += 1
homebrew_id_to_word.append(word)
homebrew_init_emb.append(vec)
homebrew_init_emb = np.stack(homebrew_init_emb, axis=0)
if USE_POS:
pos_tag_set = {pos_tag for sents in [all_train_pos_tags, test_pos_tags] for sent in sents for pos_tag in sent}
#id_to_pos_tag = ["$$UNK$$"] + list(pos_tag_set)
id_to_pos_tag = list(pos_tag_set)
pos_tag_to_id = {t:i for i,t in enumerate(id_to_pos_tag)}
all_train_pos_tags = [[pos_tag_to_id[pos_tag] for pos_tag in sent] for sent in all_train_pos_tags]
test_pos_tags = [[pos_tag_to_id[pos_tag] for pos_tag in sent] for sent in test_pos_tags]
if USE_SENTENCE_PIECE:
with open("sentences.txt", "w") as f:
for sents in [all_train_sents, test_sents]:
for words in sents:
print(" ".join(words), file=f)
SP_VOCAB_SIZE = 2048
spm.SentencePieceTrainer.Train('--input=sentences.txt --model_prefix=sp{vocab} --vocab_size={vocab} --character_coverage=0.9995'.format(vocab=SP_VOCAB_SIZE))
sp = spm.SentencePieceProcessor()
sp.Load('sp{}.model'.format(SP_VOCAB_SIZE))
with ctqdm(sorted(set(train_vocab_counter) | test_only_vocab), desc="build sp map") as vocab:
word_to_sp = {word:sp.EncodeAsIds(word) for word in vocab}
if not USE_POS:
all_train_instances = [to_instance(idx, sent, label) for idx, [sent,label] in enumerate(zip(all_train_sents, train_df.target))]
test_instances = [to_instance(idx, sent, 0) for idx, sent in enumerate(test_sents)]
else:
all_train_instances = [to_instance(idx, sent, label, pos, lemma) for idx, [sent,label,pos,lemma] in enumerate(zip(all_train_sents, train_df.target, all_train_pos_tags, all_train_lemmas))]
test_instances = [to_instance(idx, sent, 0, pos, lemma) for idx, [sent,pos,lemma] in enumerate(zip(test_sents, test_pos_tags, test_lemmas))]
all_train_instances = np.array(all_train_instances)
test_instances = np.array(test_instances)
save = {"all_train_instances":all_train_instances,
"test_instances":test_instances,
"id_to_word":id_to_word,
"glove_emb":glove_emb,
"glove_oov":glove_oov,
"paragram_emb":paragram_emb,
"mean_emb":np.mean([glove_emb, paragram_emb], axis=0),
}
if USE_HOMEBREW:
save["homebrew_init_emb"] = homebrew_init_emb
if USE_POS:
save["id_to_pos_tag"] = id_to_pos_tag
if USE_CHARACTER:
save["id_to_char"] = id_to_char
if USE_SENTENCE_PIECE:
save["sp_bos_eos"] = [sp.bos_id(), sp.eos_id()]
if SAVE:
np.save("preprocessed", np.array(save))
exit(0)
| [
11748,
25064,
11,
28686,
198,
11748,
10688,
198,
11748,
17268,
198,
11748,
302,
198,
11748,
18540,
305,
919,
278,
198,
11748,
640,
198,
11748,
4732,
8019,
198,
11748,
33918,
198,
11748,
256,
80,
36020,
198,
198,
11748,
299,
2528,
74,
19... | 2.27005 | 3,429 |
# -*- coding: utf-8 -*-
'''
Neste problema foi usada uma lógica simples em que cada palavra do texto é colocado em uma lista. Depois disso é
executado o loop que itera por estas palavras. Se a palavra é igual a palavra procurada então a posição é
adicionada em uma lista. A posição é calculada somando o comprimento de cada palavra e mais "1" para contabilizar
os espaços.
Caso a lista de posições esteja vazias é adicionado "-1". Significando que a palavra não está no texto.
'''
n = int(input()) # Entrada de n
for _ in range(n): # Loop para cada caso
txt = input().split() # Entrada do texto
w = input() # Entrada da palavra a ser procurada
p = 0 # Variável para a posição
v = [] # Lista para as posições das palavras
for t in txt: # Para cada palavra no texto
if t == w: # Caso a palavra seja igual a procurada
v.append(p) # Adiciona a posição na lista
p += len(t) + 1 # Calcula a posição
if len(v) == 0: # Caso a palavra não esteja no texto é adicionado "-1" na lista
v.append(-1)
print(*v, sep = ' ') # Exibe o resultado | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
45,
29872,
1917,
64,
11511,
72,
514,
4763,
334,
2611,
300,
10205,
70,
3970,
985,
2374,
795,
8358,
269,
4763,
6340,
615,
430,
466,
2420,
78,
38251,
... | 2.32563 | 476 |
#! python3
import re
from os import path
from io import open
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
setup(
name = "safeprint",
version = find_version("safeprint/__init__.py"),
description = 'A printer suppressing UnicodeEncodeError',
long_description = read("README.rst"),
url = 'https://github.com/eight04/safeprint',
author = 'eight',
author_email = 'eight04@gmail.com',
license = 'MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
'Development Status :: 5 - Production/Stable',
"Environment :: Console",
"Environment :: Win32 (MS Windows)",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: Chinese (Traditional)",
"Operating System :: Microsoft :: Windows :: Windows 7",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Terminals"
],
keywords = 'windows cmd unicode print',
packages = find_packages(),
install_requires = [
"win-unicode-console >= 0.4; sys_platform == 'win32' and python_version < '3.6'"
],
entry_points = {
"console_scripts": [
]
}
)
| [
2,
0,
21015,
18,
198,
198,
11748,
302,
198,
6738,
28686,
1330,
3108,
198,
6738,
33245,
1330,
1280,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908... | 2.921241 | 419 |
# part 1
prog = [int(s) for s in open('input.txt', 'r').readline().split(',')]
print(prog)
prog[1] = 12
prog[2] = 2
p = 0
while True:
op = prog[p]
if op == 99:
break
val1, val2, wPos = prog[p+1], prog[p+2], prog[p+3]
if op == 1: # add
prog[wPos] = prog[val1] + prog[val2]
elif op == 2: # multiply
prog[wPos] = prog[val1] * prog[val2]
else:
raise ValueError('Encountered invalid opcode')
p += 4
print(prog)
| [
2,
636,
352,
198,
198,
1676,
70,
796,
685,
600,
7,
82,
8,
329,
264,
287,
220,
1280,
10786,
15414,
13,
14116,
3256,
705,
81,
27691,
961,
1370,
22446,
35312,
7,
3256,
11537,
60,
198,
4798,
7,
1676,
70,
8,
198,
198,
1676,
70,
58,
... | 1.987342 | 237 |
"""
Read, write and manipulate VESTA save files
"""
import re
import shutil
from typing import Tuple, List, Dict, Union
from collections import OrderedDict
from pathlib import Path
from collections import Counter
from yaml import safe_load
class DotVesta:
"""
Representation of a VESTA save file
"""
def __init__(self, path: str):
"""
Instantiate from an existing .vesta file
"""
if isinstance(path, (str, Path)):
self._content = Path(path).read_text().split('\n')
elif hasattr(path, 'readlines'):
self._content = path.readlines()
else:
raise ValueError(
'<path> should a path-like object or a file object.')
self.entries = read_content(self._content[2:])
self.unqiue_fields, self.duplicated_fields = self._find_unique_and_duplicated_fields()
def write(self, outfile: str) -> None:
"""
Write the output file
Args:
outfile (str): Name of the output file.
"""
with open(outfile, 'w') as fhandle:
fhandle.write('#VESTA_FORMAT_VERSION 3.5.0\n\n')
for entry in self.entries:
for name, item in entry.items():
fhandle.write(name + ' ' + item[0] + '\n') # Write the title line
for line in item[1]:
fhandle.write(line + '\n')
def _find_unique_and_duplicated_fields(self):
"""Locate the fields that are 'per-phase'"""
fields = []
for entry in self.entries:
for key in entry.keys():
fields.append(key)
counts = Counter(fields)
unique = []
duplicated = []
for key in counts.keys():
if counts[key] == 1:
unique.append(key)
else:
duplicated.append(key)
return unique, duplicated
def apply_colour_mapping(self, mapping: dict, tetra_mapping=None) -> None:
"""
Apply a userdefined colour mapping for each atoms
Args:
mapping (dict): A dictionary of the mappings with the RGB in the 'rgb' key and alpha under the 'alpha' key (for tetrahedron) for each specie
tetra_mapping (dict): A dictionary of the mappings for the tetrahedron with the RGB in the 'rgb' key.
"""
# More sure the values are in RGB numerical tuple
for _mapping in [mapping, tetra_mapping]:
if not _mapping:
continue
for key in _mapping:
value = _mapping[key]['rgb']
if isinstance(value, str):
_mapping[key]['rgb'] = hex2rgb(value)
lines = self.entries[-1]['ATOMT'][1]
self.entries[-1]['ATOMT'][1] = update_colour_lines(lines, mapping,
tetra_mapping)
for entry in self.entries:
lines = entry['SITET'][1]
entry['SITET'][1] = update_colour_lines(lines,
mapping,
tetra_mapping,
is_sitet=True)
def read_content(content: list) -> Dict[str, Tuple[str, List[str]]]:
"""
Read each entry of the VESTA files
Args:
content (list): A list of lines read from a .vesta file
Returns:
A dictionary of name of values of each field.
"""
all_entries = []
current_name = None
current_lines = []
entries = OrderedDict()
icrystal = 0
for line in content:
if line.endswith('\n'):
line = line[:-1]
# CRYSTAL marks the begin of a phase
if line.startswith("CRYSTAL"):
# The second entry - reset the entries
if icrystal != 0:
entries[current_name] = (tagline, current_lines)
current_lines = []
entries = OrderedDict()
entries["CRYSTAL"] = ["", [""]]
else:
# First encouter - keep using the initial entries
entries["CRYSTAL"] = ["", [""]]
# Push the entries in the list of all entries
all_entries.append(entries)
current_name = None
icrystal += 1
continue
if not line:
if current_name is not None:
current_lines.append('')
continue
if line[0].isupper() and line[1].isupper():
if current_name:
entries[current_name] = [tagline, current_lines]
current_lines = []
# Get the new tag and tag line part
tag = line.split()[0]
current_name = tag
tagline = line[len(tag) + 1:]
continue
current_lines.append(line)
# remove the last empty line
if not current_lines[-1]:
current_lines.pop()
entries[current_name] = (tagline, current_lines)
return all_entries
def update_colour_lines(lines, mapping, tetra_mapping=None, is_sitet=False) -> List[str]:
"""
Update the colour mapping for dotvesta for the ATOMT and SITET
"""
new_lines = []
for line in lines:
tokens = line.split()
# Use re to match sites like Li1, Fe2 etc.
orig_name = tokens[1]
match = re.match(r'([A-Za-z]+)\d*', tokens[1])
# If not matching, just skip the line. The last line of the section will never match
if not match:
new_lines.append(line)
continue
atom_name = match.group(1)
if atom_name in mapping:
radius = tokens[2]
r, g, b = mapping[atom_name]['rgb']
# Assign the tetragonal mapping
if tetra_mapping is not None and atom_name in tetra_mapping:
tr, tg, tb = tetra_mapping[atom_name]['rgb']
else:
tr, tg, tb = r, g, b
# Alpha for the tetrahedral
if is_sitet:
alpha = mapping[atom_name].get('alpha', int(tokens[-2]))
line = f'{int(tokens[0]):>3d}{orig_name:>12}{radius:>8}{r:>4d}{g:>4d}{b:>4d}{tr:>4d}{tg:>4d}{tb:>4d}{alpha:>4d} 0'
else:
alpha = mapping[atom_name].get('alpha', int(tokens[-1]))
line = f'{int(tokens[0]):>3d}{orig_name:>11}{radius:>8}{r:>4d}{g:>4d}{b:>4d}{tr:>4d}{tg:>4d}{tb:>4d}{alpha:>4d}'
new_lines.append(line)
else:
new_lines.append(line)
return new_lines
def hex2rgb(hex_string: str) -> Tuple[int, int, int]:
"""Convert a hexstring to RGB tuple"""
hex_string = hex_string.lstrip('#')
return tuple(int(hex_string[i:i + 2], 16) for i in (0, 2, 4))
def apply_colour_scheme(file: Union[str, Path], scheme: str) -> None:
"""
Shortcut function for applying a colour scheme
"""
file = Path(file)
obj = DotVesta(file)
with open(scheme) as fhandle:
colours = safe_load(fhandle)
if 'tetra_mapping' in colours:
obj.apply_colour_mapping(**colours)
else:
obj.apply_colour_mapping(colours)
shutil.move(file, file.with_suffix('.vesta.bak'))
obj.write(file) | [
37811,
198,
5569,
11,
3551,
290,
18510,
569,
1546,
5603,
3613,
3696,
198,
37811,
198,
11748,
302,
198,
11748,
4423,
346,
198,
6738,
19720,
1330,
309,
29291,
11,
7343,
11,
360,
713,
11,
4479,
198,
6738,
17268,
1330,
14230,
1068,
35,
71... | 2.011357 | 3,610 |
import os
from glob import glob
from setuptools import setup
from setuptools.config import read_configuration
config = read_configuration('setup.cfg')
config_dict = {}
for section in config:
for k in config[section]:
config_dict[k] = config[section][k]
if os.path.exists('scripts'):
config_dict['scripts'] = glob(os.path.join('scripts', '*'))
setup(**config_dict)
| [
11748,
28686,
201,
198,
6738,
15095,
1330,
15095,
201,
198,
6738,
900,
37623,
10141,
1330,
9058,
201,
198,
6738,
900,
37623,
10141,
13,
11250,
1330,
1100,
62,
11250,
3924,
201,
198,
201,
198,
11250,
796,
1100,
62,
11250,
3924,
10786,
40... | 2.702703 | 148 |
import os, pprint, platform;
comp = platform.system()
user = "ghost"
print comp
try:
cmd = os.popen("whoami")
try:
user = cmd.readlines()
user = user[0].strip("\n")
if 'Windows' == comp:
user = user.split("\\")[1]
finally:
cmd.close()
except IOError:
print "Error: can't use CMD"
print user
if 'Windows' == comp:
sav_dir = "C:/Users/"+user+"/.config/EasyXdcc/"
else:
sav_dir = "/home/"+user+"/.config/EasyXdcc/"
check_dirs(sav_dir)
sav_file = sav_dir + "queue"
try:
file = open(sav_file,'rb')
try:
for line in file.readlines():
print line
finally:
file.close()
except IOError:
print "Error: can\'t find file or read data"
| [
11748,
28686,
11,
279,
4798,
11,
3859,
26,
198,
198,
5589,
796,
3859,
13,
10057,
3419,
198,
7220,
796,
366,
38933,
1,
198,
198,
4798,
552,
198,
198,
28311,
25,
198,
220,
220,
220,
23991,
796,
28686,
13,
79,
9654,
7203,
8727,
6277,
... | 2.179941 | 339 |
#!/usr/bin/env python3
try:
import sys
from os import system, popen
except:
raise
sys.exit(1)
class ProgressBar(object):
"""print a progress bar with:
1. processes count given
2. do a loop in processes count and call "mark_as_done" method
"""
def mark_as_done(self):
"""mark the active process as done.
"""
self.step += 1
self.percentage = int((self.step * 100) / self.processes_count)
self.term_step = int((self.percentage * self.term_lenght()) / 100)
system(f'echo \033[A{self.command()}')
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
422,
28686,
1330,
1080,
11,
1461,
268,
198,
16341,
25,
198,
220,
220,
220,
5298,
198,
220,
220,
220,
25064,
... | 2.360902 | 266 |
from __future__ import absolute_import
import struct
import io
import zlx.int
import zlx.record
import zlx.io
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
PACK_FMT_DICT = {
'u8': 'B',
'i8': 'b',
'u16le': '<H',
'u16be': '>H',
'i16le': '<h',
'i16be': '>h',
'u32le': '<I',
'u32be': '>I',
'i32le': '<i',
'i32be': '>i',
'u64le': '<Q',
'u64be': '>Q',
'i64le': '<q',
'i64be': '>q',
}
CODEC_REGISTRY = {}
INT_CODECS = []
for codec_name in PACK_FMT_DICT:
codec = stream_codec(
name = codec_name,
decode = lambda stream, pack_fmt=PACK_FMT_DICT[codec_name], pack_len=len(struct.pack(PACK_FMT_DICT[codec_name], 0)): stream_decode_unpack(stream, pack_fmt, pack_len),
encode = lambda stream, value, pack_fmt=PACK_FMT_DICT[codec_name]: stream_encode_pack(stream, value, pack_fmt),
desc = dec_hex_int_desc)
globals()[codec_name] = codec
INT_CODECS.append(codec)
stream_record_field = zlx.record.make('record_field', 'name codec desc')
#* stream_record_codec ******************************************************/
#* encoded_stream ***********************************************************/
#* stream *******************************************************************/
#__slots__ = 'stream codec_streams'.split()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
2878,
198,
11748,
33245,
198,
198,
11748,
1976,
75,
87,
13,
600,
198,
11748,
1976,
75,
87,
13,
22105,
198,
11748,
1976,
75,
87,
13,
952,
198,
198,
36078,
42,
62,
28480,
796... | 2.267918 | 586 |
#!/usr/bin/env python3
"""
Extract content of different types of tag from an html or xml file matching
regular expressions and save the output to a file.
There are other methods but this can be used to use more powerful regex.
"""
import re
source_file = 'source.html'
destination_file = 'output.html'
f = open(source_file, 'r')
content = f.read()
f.close()
rx = re.compile('<a href="(.*?)".*?(?:title="(.*?)").*?>(.*?)</a>|'
'<li>(.*?)</li>') # for multiline add ', re.DOTALL)' after the regex
with open(destination_file, 'w') as quiz:
quiz.write('<html><body>\n')
for i in rx.findall(content):
if i[0]:
quiz.write("HREF : " + i[0] + '\n')
if i[1]:
quiz.write("TITLE : " + i[1] + '\n')
if i[2]:
quiz.write("TEXT : " + i[2] + '\n')
if i[3]:
quiz.write("ITEM : " + i[3] + '\n')
quiz.write('</body></html>')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
11627,
974,
2695,
286,
1180,
3858,
286,
7621,
422,
281,
27711,
393,
35555,
2393,
12336,
198,
16338,
14700,
290,
3613,
262,
5072,
284,
257,
2393,
13,
198,
1858,
389,
584... | 2.193317 | 419 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OcrResult(Model):
"""OcrResult.
:param language: The BCP-47 language code of the text in the image.
:type language: str
:param text_angle: The angle, in degrees, of the detected text with
respect to the closest horizontal or vertical direction. After rotating
the input image clockwise by this angle, the recognized text lines become
horizontal or vertical. In combination with the orientation property it
can be used to overlay recognition results correctly on the original
image, by rotating either the original image or recognition results by a
suitable angle around the center of the original image. If the angle
cannot be confidently detected, this property is not present. If the image
contains text at different angles, only part of the text will be
recognized correctly.
:type text_angle: float
:param orientation: Orientation of the text recognized in the image. The
value (up, down, left, or right) refers to the direction that the top of
the recognized text is facing, after the image has been rotated around its
center according to the detected text angle (see textAngle property).
:type orientation: str
:param regions: An array of objects, where each object represents a region
of recognized text.
:type regions:
list[~azure.cognitiveservices.vision.computervision.models.OcrRegion]
"""
_attribute_map = {
'language': {'key': 'language', 'type': 'str'},
'text_angle': {'key': 'textAngle', 'type': 'float'},
'orientation': {'key': 'orientation', 'type': 'str'},
'regions': {'key': 'regions', 'type': '[OcrRegion]'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 3.607201 | 611 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @File : spider_test_selenium.py
# @Time : 2018/8/2 23:03
# @Author : dong
'''
测试火狐浏览器驱动geckodriver
selenium切换和定位iframe
模拟登陆QQ空间
'''
from selenium import webdriver
from bs4 import BeautifulSoup
import time
driver = webdriver.Firefox()
# 登录QQ空间
if __name__ == '__main__':
login_qzone() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
2488,
8979,
220,
220,
220,
1058,
19230,
62,
9288,
62,
741,
47477,
13,
9078,
198,
2,
2488,
7575,
220,
220,
220,
... | 1.757576 | 198 |
##
# AmberLeafBox
# Soup - 2014
##
import gtk
import webkit
from time import time
from gobject import timeout_add_seconds, timeout_add
import pickle
| [
2235,
198,
2,
21896,
3123,
1878,
14253,
198,
2,
34011,
532,
1946,
198,
2235,
198,
198,
11748,
308,
30488,
198,
11748,
3992,
15813,
198,
6738,
640,
1330,
640,
198,
6738,
48484,
752,
1330,
26827,
62,
2860,
62,
43012,
11,
26827,
62,
2860... | 3.1875 | 48 |
from . import csv_loader
from . import xlsx_loader
from . import yaml_loader
data_plugins = {
"csv": csv_loader.load,
"xlsx": xlsx_loader.load,
"yaml": yaml_loader.load,
"yml": yaml_loader.load
} | [
6738,
764,
1330,
269,
21370,
62,
29356,
198,
6738,
764,
1330,
2124,
7278,
87,
62,
29356,
198,
6738,
764,
1330,
331,
43695,
62,
29356,
198,
198,
7890,
62,
37390,
796,
1391,
198,
220,
220,
220,
366,
40664,
1298,
269,
21370,
62,
29356,
... | 2.27957 | 93 |
# ============================================================================
# FILE: sorter/sublime.py
# AUTHOR: Tomoki Ohno <wh11e7rue@icloud.com>
# DESCRIPTION: Base code is from
# https://github.com/forrestthewoods/lib_fts/blob/master/code/fts_fuzzy_match.js
# See explanation in
# http://bit.ly/reverse-engineering-sublime-text-s-fuzzy-match
# License: MIT license
# ============================================================================
from pynvim import Nvim
from unicodedata import category
from denite.base.filter import Base
from denite.util import UserContext, Candidates
# Score consts
# bonus for adjacent matches
ADJACENCY_BONUS = 5
# bonus if match occurs after a separato
SEPARATOR_BONUS = 10
# bonus if match is uppercase and prev is lower
CAMEL_BONUS = 10
# penalty applied for every letter in str before the first match
LEADING_LETTER_PENALTY = -3
# maximum penalty for leading letters
MAX_LEADING_LETTER_PENALTY = -9
# penalty for every letter that doesn't matter
UNMATCHED_LETTER_PENALTY = -1
| [
2,
38093,
2559,
18604,
198,
2,
45811,
25,
264,
4337,
14,
7266,
27299,
13,
9078,
198,
2,
44746,
25,
4186,
18228,
3966,
3919,
1279,
1929,
1157,
68,
22,
24508,
31,
291,
75,
2778,
13,
785,
29,
198,
2,
22196,
40165,
25,
7308,
2438,
318... | 3.203647 | 329 |
#
# This is the Robotics Language compiler
#
# parsing.py: Implements Error Handling functions
#
# Created on: September 26, 2018
# Author: Gabriel A. D. Lopes
# Licence: Apache 2.0
# Copyright: 2014-2017 Robot Care Systems BV, The Hague, The Netherlands. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
from RoboticsLanguage.Base import Utilities
| [
2,
198,
2,
220,
220,
770,
318,
262,
47061,
15417,
17050,
198,
2,
198,
2,
220,
220,
32096,
13,
9078,
25,
1846,
1154,
902,
13047,
49500,
5499,
198,
2,
198,
2,
220,
220,
15622,
319,
25,
2693,
2608,
11,
2864,
198,
2,
220,
220,
220,
... | 3.394161 | 274 |
import matplotlib.pyplot as plt
def plot_surf_stat_map(coords, faces, stat_map=None,
elev=0, azim=0,
cmap='jet',
threshold=None, bg_map=None,
mask=None,
bg_on_stat=False,
alpha='auto',
vmax=None, symmetric_cbar="auto", returnAx=False,
figsize=(14,11), label=None, lenient=None,
**kwargs):
''' Visualize results on cortical surface using matplotlib'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from mpl_toolkits.mplot3d import Axes3D
# load mesh and derive axes limits
faces = np.array(faces, dtype=int)
limits = [coords.min(), coords.max()]
# set alpha if in auto mode
if alpha == 'auto':
if bg_map is None:
alpha = .5
else:
alpha = 1
# if cmap is given as string, translate to matplotlib cmap
if type(cmap) == str:
cmap = plt.cm.get_cmap(cmap)
# initiate figure and 3d axes
if figsize is not None:
fig = plt.figure(figsize=figsize)
else:
fig = plt.figure()
fig.patch.set_facecolor('white')
ax1 = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits)
# ax1._axis3don = False
ax1.grid(False)
ax1.set_axis_off()
ax1.w_zaxis.line.set_lw(0.)
ax1.set_zticks([])
ax1.view_init(elev=elev, azim=azim)
# plot mesh without data
p3dcollec = ax1.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=faces, linewidth=0.,
antialiased=False,
color='white')
if mask is not None:
cmask = np.zeros(len(coords))
cmask[mask] = 1
cutoff = 2
if lenient:
cutoff = 0
fmask = np.where(cmask[faces].sum(axis=1) > cutoff)[0]
# If depth_map and/or stat_map are provided, map these onto the surface
# set_facecolors function of Poly3DCollection is used as passing the
# facecolors argument to plot_trisurf does not seem to work
if bg_map is not None or stat_map is not None:
face_colors = np.ones((faces.shape[0], 4))
face_colors[:, :3] = .5*face_colors[:, :3]
if bg_map is not None:
bg_data = bg_map
if bg_data.shape[0] != coords.shape[0]:
raise ValueError('The bg_map does not have the same number '
'of vertices as the mesh.')
bg_faces = np.mean(bg_data[faces], axis=1)
bg_faces = bg_faces - bg_faces.min()
bg_faces = bg_faces / bg_faces.max()
face_colors = plt.cm.gray_r(bg_faces)
# modify alpha values of background
face_colors[:, 3] = alpha*face_colors[:, 3]
if stat_map is not None:
stat_map_data = stat_map
stat_map_faces = np.mean(stat_map_data[faces], axis=1)
if label:
stat_map_faces = np.median(stat_map_data[faces], axis=1)
# Call _get_plot_stat_map_params to derive symmetric vmin and vmax
# And colorbar limits depending on symmetric_cbar settings
cbar_vmin, cbar_vmax, vmin, vmax = \
_get_plot_stat_map_params(stat_map_faces, vmax,
symmetric_cbar, kwargs)
if threshold is not None:
kept_indices = np.where(abs(stat_map_faces) >= threshold)[0]
stat_map_faces = stat_map_faces - vmin
stat_map_faces = stat_map_faces / (vmax-vmin)
if bg_on_stat:
face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices]
else:
face_colors[kept_indices] = cmap(stat_map_faces[kept_indices])
else:
stat_map_faces = stat_map_faces - vmin
stat_map_faces = stat_map_faces / (vmax-vmin)
if bg_on_stat:
if mask is not None:
face_colors[fmask,:] = cmap(stat_map_faces)[fmask,:] * face_colors[fmask,:]
else:
face_colors = cmap(stat_map_faces) * face_colors
else:
face_colors = cmap(stat_map_faces)
p3dcollec.set_facecolors(face_colors)
if returnAx == True:
return fig, ax1
else:
return fig
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
4299,
7110,
62,
11793,
69,
62,
14269,
62,
8899,
7,
1073,
3669,
11,
6698,
11,
1185,
62,
8899,
28,
14202,
11,
198,
220,
220,
220,
220,
220,
220,
220,
7662,
28,
15,
... | 1.911866 | 2,326 |
# unit tests for consistent model outputs
import os
import platform
import shutil
from pathlib import Path
import numpy as np
import pytest
from netCDF4 import Dataset
import unittest.mock as mock
from pyDeltaRCM import DeltaModel
from pyDeltaRCM import preprocessor
from .. import utilities
@mock.patch(
'pyDeltaRCM.iteration_tools.iteration_tools.solve_water_and_sediment_timestep',
new=utilities.FastIteratingDeltaModel.solve_water_and_sediment_timestep)
class TestCheckpointingIntegrations:
"""
The above patch implements an augmented DeltaModel from `utilities`. In
this modified DeltaModel, the `solve_water_and_sediment_timestep`
operations (i.e., the time consuming part of the model) is replaced with
an updating random field. This guarantees that the random-repeatedness of
checkpointing is validated, but it is much faster and easier to isolate
checkpointing-related issues from model issues.
"""
def test_simple_checkpoint(self, tmp_path: Path) -> None:
"""Test checkpoint vs a base run.
Also, checks resumed model against another checkpoint run.
"""
# define a yaml for the longer model run
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(base_f, 'save_checkpoint', True)
base_f.close()
longModel = DeltaModel(input_file=base_p)
# run for some number of updates
for _ in range(0, 50):
longModel.update()
longModel.finalize()
# try defining a new model but plan to load checkpoint from longModel
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
base_f.close()
resumeModel = DeltaModel(input_file=base_p)
# advance the resumed model until it catch up to longModel
assert resumeModel.time < longModel.time
while resumeModel._time < longModel._time:
resumeModel.update()
resumeModel.finalize()
# the longModel and resumeModel should match
assert longModel.time == resumeModel.time
assert np.all(longModel.eta == resumeModel.eta)
assert np.all(longModel.uw == resumeModel.uw)
assert np.all(longModel.ux == resumeModel.ux)
assert np.all(longModel.uy == resumeModel.uy)
assert np.all(longModel.depth == resumeModel.depth)
assert np.all(longModel.stage == resumeModel.stage)
assert np.all(longModel.sand_frac == resumeModel.sand_frac)
assert np.all(longModel.active_layer == resumeModel.active_layer)
# define another model that loads the checkpoint
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
base_f.close()
resumeModel2 = DeltaModel(input_file=base_p)
# advance the resumed model until it catch up to longModel
while resumeModel2._time < resumeModel._time:
resumeModel2.update()
resumeModel2.finalize()
# the two models that resumed from the checkpoint should be the same
assert resumeModel2.time == resumeModel.time
assert np.all(resumeModel2.uw == resumeModel.uw)
assert np.all(resumeModel2.ux == resumeModel.ux)
assert np.all(resumeModel2.uy == resumeModel.uy)
assert np.all(resumeModel2.depth == resumeModel.depth)
assert np.all(resumeModel2.stage == resumeModel.stage)
assert np.all(resumeModel2.sand_frac == resumeModel.sand_frac)
assert np.all(resumeModel2.active_layer == resumeModel.active_layer)
def test_checkpoint_nc(self, tmp_path: Path) -> None:
"""Test the netCDF that is written to by the checkpointing."""
# define a yaml for the base model run
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_depth_grids', True)
utilities.write_parameter_to_file(base_f, 'save_discharge_grids', True)
utilities.write_parameter_to_file(base_f, 'save_sandfrac_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', True)
base_f.close()
baseModel = DeltaModel(input_file=base_p)
# run for some base number of steps
nt_base = 50
for _ in range(0, 50):
baseModel.update()
# force the model run to end immmediately after exporting a checkpoint
nt_var = 0
while (baseModel._save_time_since_checkpoint != 0):
baseModel.update()
nt_var += 1
# then finalize
baseModel.finalize()
# check that the time makes sense
assert baseModel.time == baseModel._dt * (nt_base + nt_var)
# try defining a new model but plan to load checkpoint from baseModel
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_depth_grids', True)
utilities.write_parameter_to_file(base_f, 'save_discharge_grids', True)
utilities.write_parameter_to_file(base_f, 'save_sandfrac_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', False)
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
base_f.close()
resumeModel = DeltaModel(input_file=base_p)
assert resumeModel.time == baseModel.time # same when resumed
# advance it until output_data has been called again
nt_resume = 0
while (resumeModel._save_time_since_data != 0) or (nt_resume < 50):
resumeModel.update()
nt_resume += 1
resumeModel.finalize()
assert nt_resume > 0
assert resumeModel.time > baseModel.time
# assert that output netCDF4 exists
exp_path_nc = os.path.join(tmp_path / 'test', 'pyDeltaRCM_output.nc')
assert os.path.isfile(exp_path_nc)
# load it into memory and check values in the netCDF4
output = Dataset(exp_path_nc, 'r', allow_pickle=True)
out_vars = output.variables.keys()
# check that expected variables are in the file
assert 'x' in out_vars
assert 'y' in out_vars
assert 'time' in out_vars
assert 'eta' in out_vars
assert 'depth' in out_vars
assert 'discharge' in out_vars
assert 'sandfrac' in out_vars
# check attributes of variables
assert output['time'][0].tolist() == 0.0
assert output['time'][-1] == resumeModel.time
assert output['time'][-1].tolist() == resumeModel._dt * \
(nt_base + nt_var + nt_resume)
assert output['eta'][0].shape == resumeModel.eta.shape
assert output['eta'][-1].shape == resumeModel.eta.shape
assert output['depth'][-1].shape == resumeModel.eta.shape
assert output['discharge'][-1].shape == resumeModel.eta.shape
assert output['sandfrac'][-1].shape == resumeModel.eta.shape
# check the metadata
assert output['meta']['L0'][:] == resumeModel.L0
assert output['meta']['N0'][:] == resumeModel.N0
assert output['meta']['CTR'][:] == resumeModel.CTR
assert output['meta']['dx'][:] == resumeModel.dx
assert output['meta']['h0'][:] == resumeModel.h0
assert np.all(output['meta']['cell_type'][:] == resumeModel.cell_type)
assert output['meta']['H_SL'][-1].data == resumeModel.H_SL
assert output['meta']['f_bedload'][-1].data == resumeModel.f_bedload
C0_from_file = float(output['meta']['C0_percent'][-1].data)
assert pytest.approx(C0_from_file) == resumeModel.C0_percent
assert output['meta']['u0'][-1].data == resumeModel.u0
# checkpoint interval aligns w/ timestep dt so these should match
assert output['time'][-1].tolist() == resumeModel.time
def test_checkpoint_diff_dt(self, tmp_path: Path) -> None:
"""Test when checkpoint_dt does not match dt or save_dt."""
# define a yaml for the base model run
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_depth_grids', True)
utilities.write_parameter_to_file(base_f, 'save_discharge_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
base_f.close()
baseModel = DeltaModel(input_file=base_p)
# modify the checkpoint dt to be different than save_dt
baseModel._checkpoint_dt = (baseModel.save_dt * 0.65)
for _ in range(0, 50):
baseModel.update()
baseModel.finalize()
assert baseModel.time == baseModel._dt * 50
baseModelSavedTime = (baseModel.time -
baseModel._save_time_since_checkpoint)
assert baseModelSavedTime > 0
# try defining a new model but plan to load checkpoint from baseModel
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_depth_grids', True)
utilities.write_parameter_to_file(base_f, 'save_discharge_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', False)
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
base_f.close()
resumeModel = DeltaModel(input_file=base_p)
assert resumeModel.time == baseModelSavedTime
# advance until some steps and just saved
nt_resume = 0
while (resumeModel._save_time_since_data != 0) or (nt_resume < 50):
resumeModel.update()
nt_resume += 1
resumeModel.finalize()
# assert that output netCDF4 exists
exp_path_nc = os.path.join(tmp_path / 'test', 'pyDeltaRCM_output.nc')
assert os.path.isfile(exp_path_nc)
# load it into memory and check values in the netCDF4
output = Dataset(exp_path_nc, 'r', allow_pickle=True)
out_vars = output.variables.keys()
# check that expected variables are in the file
assert 'x' in out_vars
assert 'y' in out_vars
assert 'time' in out_vars
assert 'eta' in out_vars
assert 'depth' in out_vars
assert 'discharge' in out_vars
# check attributes of variables
assert output['time'][0].tolist() == 0.0
assert output['time'][-1].tolist() == resumeModel.time
def test_multi_checkpoints(self, tmp_path: Path) -> None:
"""Test using checkpoints multiple times for a given model run."""
# define a yaml for the base model run
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
base_f.close()
baseModel = DeltaModel(input_file=base_p)
# run base for 2 timesteps
for _ in range(0, 50):
baseModel.update()
baseModel.finalize()
# try defining a new model but plan to load checkpoint from baseModel
file_name = 'base_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'save_eta_grids', True)
utilities.write_parameter_to_file(base_f, 'save_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
base_f.close()
resumeModel = DeltaModel(input_file=base_p)
assert resumeModel.time <= baseModel.time
# advance it more steps
for _ in range(0, 25):
resumeModel.update()
resumeModel.finalize()
# create another resume model
resumeModel02 = DeltaModel(input_file=base_p)
assert resumeModel02.time <= resumeModel.time # should be same
# step it some more
nt_resume02 = 0
while (resumeModel02._save_time_since_data != 0) or (nt_resume02 < 50):
resumeModel02.update()
nt_resume02 += 1
# assert that output netCDF4 exists
exp_path_nc = os.path.join(tmp_path / 'test', 'pyDeltaRCM_output.nc')
assert os.path.isfile(exp_path_nc)
# load it into memory and check values in the netCDF4
output = Dataset(exp_path_nc, 'r', allow_pickle=True)
out_vars = output.variables.keys()
# check that expected variables are in the file
assert 'x' in out_vars
assert 'y' in out_vars
assert 'time' in out_vars
assert 'eta' in out_vars
# check attributes of variables
assert output['time'][0].tolist() == 0.0
assert output['time'][-1].tolist() == resumeModel02.time
def test_load_nocheckpoint(self, tmp_path: Path) -> None:
"""Try loading a checkpoint file when one doesn't exist."""
# define a yaml
file_name = 'trial_run.yaml'
base_p, base_f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(base_f, 'resume_checkpoint', True)
utilities.write_parameter_to_file(base_f, 'out_dir', tmp_path / 'test')
base_f.close()
# try loading the model yaml despite no checkpoint existing
with pytest.raises(FileNotFoundError):
_ = DeltaModel(input_file=base_p)
@pytest.mark.skipif(
platform.system() != 'Linux',
reason='Parallel support only on Linux OS.')
def test_py_hlvl_parallel_checkpoint(self, tmp_path: Path) -> None:
"""Test checkpointing in parallel."""
file_name = 'user_parameters.yaml'
p, f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(f, 'ensemble', 2)
utilities.write_parameter_to_file(f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(f, 'parallel', 2)
utilities.write_parameter_to_file(f, 'save_checkpoint', True)
utilities.write_parameter_to_file(f, 'save_eta_grids', True)
f.close()
pp = preprocessor.Preprocessor(input_file=p, timesteps=50)
# assertions for job creation
assert len(pp.file_list) == 2
assert pp._is_completed is False
# run the jobs, mocked deltas
pp.run_jobs()
# compute the expected final time recorded
_dt = pp.job_list[1].deltamodel._dt
_checkpoint_dt = pp.job_list[1].deltamodel._checkpoint_dt
expected_save_interval = (((_checkpoint_dt // _dt) + 1) * _dt)
expected_last_save_time = (((50 * _dt) // expected_save_interval) *
expected_save_interval)
# assertions after running jobs
assert isinstance(pp.job_list[0], preprocessor._ParallelJob)
assert pp._is_completed is True
exp_path_nc0 = os.path.join(
tmp_path / 'test', 'job_000', 'pyDeltaRCM_output.nc')
exp_path_nc1 = os.path.join(
tmp_path / 'test', 'job_001', 'pyDeltaRCM_output.nc')
assert os.path.isfile(exp_path_nc0)
assert os.path.isfile(exp_path_nc1)
# check that checkpoint files exist
exp_path_ckpt0 = os.path.join(
tmp_path / 'test', 'job_000', 'checkpoint.npz')
exp_path_ckpt1 = os.path.join(
tmp_path / 'test', 'job_001', 'checkpoint.npz')
assert os.path.isfile(exp_path_ckpt0)
assert os.path.isfile(exp_path_ckpt1)
# load one output files and check values
out_old = Dataset(exp_path_nc1)
assert 'meta' in out_old.groups.keys()
assert out_old['time'][0].tolist() == 0.0
assert out_old['time'][-1].tolist() == expected_last_save_time
# close netCDF file
out_old.close()
# try to resume jobs
file_name = 'user_parameters.yaml'
p, f = utilities.create_temporary_file(tmp_path, file_name)
utilities.write_parameter_to_file(f, 'ensemble', 2)
utilities.write_parameter_to_file(f, 'out_dir', tmp_path / 'test')
utilities.write_parameter_to_file(f, 'parallel', 2)
utilities.write_parameter_to_file(f, 'resume_checkpoint', True)
utilities.write_parameter_to_file(f, 'save_eta_grids', True)
f.close()
pp = preprocessor.Preprocessor(input_file=p, timesteps=50)
# assertions for job creation
assert len(pp.file_list) == 2
assert pp._is_completed is False
# run the jobs, mocked deltas
pp.run_jobs()
# assertions after running jobs
assert isinstance(pp.job_list[0], preprocessor._ParallelJob)
assert pp._is_completed is True
exp_path_nc0 = os.path.join(
tmp_path / 'test', 'job_000', 'pyDeltaRCM_output.nc')
exp_path_nc1 = os.path.join(
tmp_path / 'test', 'job_001', 'pyDeltaRCM_output.nc')
assert os.path.isfile(exp_path_nc0)
assert os.path.isfile(exp_path_nc1)
# check that checkpoint files still exist
exp_path_ckpt0 = os.path.join(
tmp_path / 'test', 'job_000', 'checkpoint.npz')
exp_path_ckpt1 = os.path.join(
tmp_path / 'test', 'job_001', 'checkpoint.npz')
assert os.path.isfile(exp_path_ckpt0)
assert os.path.isfile(exp_path_ckpt1)
# load one output file to check it out
out_fin = Dataset(exp_path_nc1)
assert 'meta' in out_old.groups.keys()
assert out_fin['time'][0].tolist() == 0
assert out_fin['time'][-1].tolist() == expected_last_save_time * 2
# close netcdf file
out_fin.close()
| [
2,
4326,
5254,
329,
6414,
2746,
23862,
198,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
4423,
346,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
2010,
34,
8068,
19,... | 2.326932 | 8,176 |
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import opstestfw
def lagHeartbeat(**kwargs):
"""
Library function to configure heartbeat speed on a LAG
:param deviceObj: device object
:type deviceObj: VSwitch device object
:param lagId: LAG identifier
:type lagId: int
:param lacpFastFlag: True for LACP fast heartbeat, false for slow heartbeat
:type lacpFastFlag: boolean
:return: returnStruct object
:rtype: object
"""
# Params
lagId = kwargs.get('lagId', None)
deviceObj = kwargs.get('deviceObj', None)
lacpFastFlag = kwargs.get('lacpFastFlag', True)
# Variables
overallBuffer = []
finalReturnCode = 0
# If device, LAG Id or lacpFastFlag are not passed, return an error
if deviceObj is None or lagId is None or lacpFastFlag is None:
opstestfw.LogOutput('error',
"Need to pass deviceObj and lagId to use "
"this routine")
returnCls = opstestfw.returnStruct(returnCode=1)
return returnCls
# Get into vtyshelll
returnStructure = deviceObj.VtyshShell(enter=True)
overallBuffer.append(returnStructure.buffer())
returnCode = returnStructure.returnCode()
if returnCode != 0:
opstestfw.LogOutput('error', "Failed to get vtysh prompt")
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=returnCode,
buffer=bufferString)
return returnCls
# Get into config context
returnStructure = deviceObj.ConfigVtyShell(enter=True)
returnCode = returnStructure.returnCode()
overallBuffer.append(returnStructure.buffer())
if returnCode != 0:
opstestfw.LogOutput('error', "Failed to get vtysh config prompt")
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=returnCode,
buffer=bufferString)
return returnCls
# enter LAG configuration context
command = "interface lag %s" % str(lagId)
returnDevInt = deviceObj.DeviceInteract(command=command)
returnCode = returnDevInt['returnCode']
overallBuffer.append(returnDevInt['buffer'])
if returnCode != 0:
opstestfw.LogOutput('error', "Failed to create LAG " + str(lagId)
+ " on device " + deviceObj.device)
else:
opstestfw.LogOutput('debug', "Created LAG " + str(lagId)
+ " on device " + deviceObj.device)
# configure LAG heartbeat settings
command = ""
if lacpFastFlag is False:
command = "no "
command += "lacp rate fast"
returnDevInt = deviceObj.DeviceInteract(command=command)
finalReturnCode = returnDevInt['returnCode']
overallBuffer.append(returnDevInt['buffer'])
if finalReturnCode != 0:
if lacpFastFlag is True:
opstestfw.LogOutput('error',
"Failed to configure LACP fast heartbeat on "
"interface lag " + str(lagId) + " on device "
+ deviceObj.device)
else:
opstestfw.LogOutput('error',
"Failed to configure LACP slow heartbeat on "
"interface lag " + str(lagId) + " on device "
+ deviceObj.device)
else:
if lacpFastFlag is True:
opstestfw.LogOutput('debug',
"Configured LACP fast heartbeat on interface"
" lag " + str(lagId) + " on device "
+ deviceObj.device)
else:
opstestfw.LogOutput('debug',
"Configure LACP slow heartbeat on interface"
" lag " + str(lagId) + " on device "
+ deviceObj.device)
# exit LAG configuration context
command = "exit"
returnDevInt = deviceObj.DeviceInteract(command=command)
returnCode = returnDevInt['returnCode']
overallBuffer.append(returnDevInt['buffer'])
if returnCode != 0:
opstestfw.LogOutput('error', "Failed to exit LAG " + str(lagId)
+ " configuration context")
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=returnCode,
buffer=bufferString)
return returnCls
# Get out of config context
returnStructure = deviceObj.ConfigVtyShell(enter=False)
returnCode = returnStructure.returnCode()
overallBuffer.append(returnStructure.buffer())
if returnCode != 0:
opstestfw.LogOutput('error',
"Failed to get out of vtysh config context")
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=returnCode,
buffer=bufferString)
return returnCls
# Get out of vtyshell
returnStructure = deviceObj.VtyshShell(enter=False)
returnCode = returnStructure.returnCode()
overallBuffer.append(returnStructure.buffer())
if returnCode != 0:
opstestfw.LogOutput('error', "Failed to exit vty shell")
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=returnCode,
buffer=bufferString)
return returnCls
# Compile information to return
bufferString = ""
for curLine in overallBuffer:
bufferString += str(curLine)
returnCls = opstestfw.returnStruct(returnCode=finalReturnCode,
buffer=bufferString)
return returnCls
| [
2,
357,
34,
8,
15069,
1853,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
... | 2.279362 | 2,946 |
import sublime, sublime_plugin
import string
# In Sublime Text 3 things are loaded async, using plugin_loaded() callback before try accessing.
pleasurazy = PleasurazyAPICompletionsPackage()
if int(sublime.version()) < 3000:
pleasurazy.init()
else:
| [
11748,
41674,
11,
41674,
62,
33803,
198,
11748,
4731,
628,
628,
198,
2,
554,
3834,
27299,
8255,
513,
1243,
389,
9639,
30351,
11,
1262,
13877,
62,
14578,
3419,
23838,
878,
1949,
22534,
13,
198,
1154,
292,
333,
12582,
796,
18063,
292,
3... | 3.381579 | 76 |
#--------------------------------------------------
# Blender Python API Script
# Converts and .obj file to a .stl file in Blender
# Usage: blender -b -P blenderObjToStl.py -- [inputfile]
#--------------------------------------------------
import bpy
import sys
import time
argv = sys.argv
# Blender passes everything after "--" through untouched; argv[0] is the
# input file's base name (no extension).
argv = argv[argv.index("--") + 1:]
#Delete all objects in the scene
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
#import .obj file
bpy.ops.import_scene.obj(filepath=argv[0]+'.obj', axis_forward='Z', axis_up='Y')
#make imported object active
# NOTE(review): ``bpy.context.scene.objects.active`` is the pre-2.80 Blender
# API (2.80+ uses ``view_layer.objects.active``) -- confirm target version.
bpy.context.scene.objects.active = bpy.data.objects[0]
bpy.ops.object.select_all(action='SELECT')
#export scene to .stl
bpy.ops.export_mesh.stl(filepath=argv[0]+'.stl', axis_forward='Z', axis_up='Y')
2,
47232,
438,
198,
2,
1086,
2194,
11361,
7824,
12327,
198,
2,
1482,
24040,
290,
764,
26801,
2393,
284,
257,
764,
301,
75,
2393,
287,
1086,
2194,
198,
2,
29566,
25,
40047,
532,
65,
532,
47,
40047,
49201,
2514,
1273,
75,
13,
9078,
... | 2.949416 | 257 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-22 23:21
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django_markdown.models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
18,
319,
1584,
12,
1065,
12,
1828,
2242,
25,
2481,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.885714 | 70 |
"""Generic support for serial connections."""
from typing import Optional
import attr
from serial.tools import list_ports # type: ignore
def find_port(device_filter: str) -> str:
    """Find the name of a serial port whose metadata matches *device_filter*.

    First tries a plain substring match against each port's human-readable
    description; if none matches, falls back to pyserial's regex-based
    ``grep`` over device name / description / hwid.

    :param device_filter: substring (or regex for the fallback) to match.
    :returns: the device name string (e.g. ``/dev/ttyACM0``).
    :raises IOError: if no matching port is found.
    """
    for port in list_ports.comports():
        if device_filter in port.description:
            return str(port.device)
    try:
        # grep() yields ListPortInfo objects; extract the device name so this
        # path also honors the declared ``-> str`` return type.  (The original
        # returned the ListPortInfo object itself here, inconsistent with the
        # first branch above.)
        return str(next(list_ports.grep(device_filter)).device)
    except StopIteration:
        pass
    raise IOError(f'No {device_filter} ports found.')
@attr.s(auto_attribs=True)
class SerialProps:
    """Defines driver properties for serial devices."""

    # Device node to open; default targets the first USB CDC-ACM device.
    port: str ='/dev/ttyACM0'
    # When non-empty, used to locate the port by description instead of path.
    port_filter: str = ''
    # Line speed in bits per second.
    baud_rate: int = 115200
    # Maximum size of a single read, in bytes.
    message_length: int = 4096
    # Byte sequence terminating each message (NUL by default).
    message_delimiter: bytes = b'\0'
    # Seconds to wait after opening the port before first use.
    open_delay: float = 1.0
    # Read/write timeouts in seconds; None means block indefinitely.
    read_timeout: Optional[float] = None
    write_timeout: Optional[float] = None
| [
37811,
46189,
1104,
329,
11389,
8787,
526,
15931,
198,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
708,
81,
198,
198,
6738,
11389,
13,
31391,
1330,
1351,
62,
3742,
220,
1303,
2099,
25,
8856,
628,
198,
4299,
1064,
62,
634,
7,
25202... | 2.661808 | 343 |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import MindMap, MindMapComponent
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
764,
27530,
1330,
10175,
13912,
... | 3.122449 | 49 |
from unittest import mock
from py42.exceptions import Py42NotFoundError
from pytest import fixture
from tests.conftest import (
assert_success,
create_fake_connector,
create_mock_response,
assert_successful_single_data,
assert_successful_message,
assert_successful_summary,
assert_fail_message,
attach_client,
TEST_USER_UID,
)
# Canned API payload: GET departing-employee response for a single user.
_MOCK_GET_DEPARTING_EMPLOYEE_RESPONSE = {
    "type$": "DEPARTING_EMPLOYEE_V2",
    "tenantId": "11114444-2222-3333-4444-666634888863",
    "userId": TEST_USER_UID,
    "userName": "test@example.com",
    "displayName": "Test Testerson",
    "notes": "Test test test",
    "createdAt": "2021-05-24T17:19:06.2830000Z",
    "status": "OPEN",
    "cloudUsernames": ["alias1"],
    "departureDate": "2021-02-02",
}
# Canned API payload: paged list of departing employees (two entries).
_MOCK_LIST_DEPARTING_EMPLOYEES_RESPONSE = {
    "totalCount": 2,
    "items": [
        {
            "type$": "DEPARTING_EMPLOYEE_V2",
            "tenantId": "11114444-2222-3333-4444-666634888863",
            "userId": TEST_USER_UID,
            "userName": "test@example.com",
            "displayName": "Test Testerson",
            "notes": "Test test test",
            "createdAt": "2021-04-22T00:00:00.0000000Z",
            "status": "OPEN",
            "cloudUsernames": ["alias1",],
            "totalBytes": 0,
            "numEvents": 3,
        },
        {
            "type$": "DEPARTING_EMPLOYEE_V2",
            "tenantId": "11114444-2222-3333-4444-666634888863",
            "userId": "id2",
            "userName": "test2@example.com",
            "displayName": "Test2 Testerson",
            "notes": "Test test test2",
            "createdAt": "2021-04-22T00:00:00.0000000Z",
            "status": "OPEN",
            "cloudUsernames": ["alias2",],
            "totalBytes": 0,
            "numEvents": 6,
        },
    ],
}
# Canned API payload: GET high-risk-employee response for a single user.
_MOCK_GET_HIGH_RISK_EMPLOYEE_RESPONSE = {
    "type$": "HIGH_RISK_EMPLOYEE_V2",
    "tenantId": "11114444-2222-3333-4444-666634888863",
    "userId": TEST_USER_UID,
    "userName": "test@example.com",
    "displayName": "Test Testerson",
    "notes": "Test test test",
    "createdAt": "2021-05-25T18:43:29.6890000Z",
    "status": "OPEN",
    "cloudUsernames": ["alias1"],
    "riskFactors": ["FLIGHT_RISK", "CONTRACT_EMPLOYEE"],
}
# Canned API payload: paged list of high-risk employees (two entries).
_MOCK_LIST_HIGH_RISK_EMPLOYEES_RESPONSE = {
    "totalCount": 2,
    "items": [
        {
            "type$": "HIGH_RISK_EMPLOYEE_V2",
            "tenantId": "11114444-2222-3333-4444-666634888863",
            "userId": TEST_USER_UID,
            "userName": "test@example.com",
            "displayName": "Test Testerson",
            "notes": "Test test test",
            "createdAt": "2021-04-22T00:00:00.0000000Z",
            "status": "OPEN",
            "cloudUsernames": ["alias1",],
            "totalBytes": 0,
            "numEvents": 3,
            "riskFactors": ["FLIGHT_RISK", "CONTRACT_EMPLOYEE"],
        },
        {
            "type$": "HIGH_RISK_EMPLOYEE_V2",
            "tenantId": "11114444-2222-3333-4444-666634888863",
            "userId": "id2",
            "userName": "test2@example.com",
            "displayName": "Test2 Testerson",
            "notes": "Test test test2",
            "createdAt": "2021-04-22T00:00:00.0000000Z",
            "status": "OPEN",
            "cloudUsernames": ["alias2",],
            "totalBytes": 0,
            "numEvents": 6,
        },
    ],
}
# Canned API payload: response after adding risk tags to a user.
_MOCK_ADD_RISK_TAGS_RESPONSE = {
    "type$": "USER_V2",
    "tenantId": "11114444-2222-3333-4444-666634888863",
    "userId": TEST_USER_UID,
    "userName": "test@example.com",
    "displayName": "Test Testerson",
    "cloudUsernames": ["test@example.com"],
    "riskFactors": ["FLIGHT_RISK", "HIGH_IMPACT_EMPLOYEE",],
}
# Canned API payload: response after removing risk tags from a user.
_MOCK_REMOVE_RISK_TAGS_RESPONSE = {
    "type$": "USER_V2",
    "tenantId": "11114444-2222-3333-4444-666634888863",
    "userId": TEST_USER_UID,
    "userName": "test@example.com",
    "displayName": "Test Testerson",
    "cloudUsernames": ["test@example.com"],
    "riskFactors": ["ELEVATED_ACCESS_PRIVILEGES"],
}
@fixture
@fixture
@fixture
| [
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
12972,
3682,
13,
1069,
11755,
1330,
9485,
3682,
3673,
21077,
12331,
198,
6738,
12972,
9288,
1330,
29220,
198,
198,
6738,
5254,
13,
1102,
701,
395,
1330,
357,
198,
220,
220,
220,
6818,
... | 1.904695 | 2,130 |
"""empty message
Revision ID: 12c0f685cde
Revises: 5a44cbcf5e2
Create Date: 2015-10-05 14:42:20.631202
"""
# revision identifiers, used by Alembic.
revision = '12c0f685cde'
down_revision = '5a44cbcf5e2'
from alembic import op
import sqlalchemy as sa
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
1105,
66,
15,
69,
35978,
66,
2934,
198,
18009,
2696,
25,
642,
64,
2598,
21101,
12993,
20,
68,
17,
198,
16447,
7536,
25,
1853,
12,
940,
12,
2713,
1478,
25,
3682,
25,
1238,
13,
50... | 2.392523 | 107 |
import os
import json
import statistics
from urllib.parse import urljoin
import encode_utils as eu
from encode_utils.connection import Connection
# Unpack snakemake-injected outputs/config/params.
sample_data_file, = snakemake.output
dcc_mode = snakemake.config["dcc_mode"]
experiment = snakemake.params["experiment"]
replicate_num = snakemake.params["replicate"]
modality = snakemake.params["modality"]
assembly = snakemake.params["assembly"]
log_dir, = snakemake.log
# encode_utils reads the DCC credentials from the environment.
os.environ["DCC_API_KEY"] = snakemake.params["dcc_api_key"]
os.environ["DCC_SECRET_KEY"] = snakemake.params["dcc_secret_key"]
eu.connection.LOG_DIR = log_dir
conn = Connection(dcc_mode)
server = conn.dcc_url
data = conn.get(experiment)
r1 = {}  # read-1 FASTQ records, keyed by @id
r2 = {}  # read-2 FASTQ records, keyed by @id
bc = {}  # index-read (barcode) FASTQ records, keyed by @id
# Resolve the replicate UUID for the requested biological replicate number.
replicate_id = None
for rep in data["replicates"]:
    if rep["biological_replicate_number"] == replicate_num:
        replicate_id = rep["uuid"]
# NOTE(review): replicate_id stays None when no replicate matches --
# presumably upstream guarantees a match; confirm.
platform = None
read_lengths = []
files = data["files"]
for f in files:
    file_id = f["@id"]  # renamed from ``id`` to stop shadowing the builtin
    if f["file_format"] != "fastq":
        continue
    if f["replicate"]["biological_replicate_number"] != replicate_num:
        continue
    if "derived_from" in f:
        continue
    p = f["platform"]["uuid"]
    if platform is not None and p != platform:
        raise ValueError("Multiple sequencing platforms detected in input")
    platform = p
    if f["output_type"] == "index reads":
        bc[file_id] = f
        continue
    read_len = f["read_length"]  # renamed from ``l`` (ambiguous name)
    read_lengths.append(read_len)
    if f["paired_end"] == "1":
        r1[file_id] = f
    elif f["paired_end"] == "2":
        r2[file_id] = f
# Tolerate small read-length variation; use the low median otherwise.
if max(read_lengths) - min(read_lengths) > 4:
    raise ValueError("Inconsistent read lengths in input FASTQs")
read_length = statistics.median_low(read_lengths)
out_data = {
    "experiment": experiment,
    "replicate_num": replicate_num,
    "replicate_id": replicate_id,
    "modality": modality,
    "platform": platform,
    "read_length": read_length,
    "assembly": assembly
}
if modality == "ren":
    # R1/R2 pairs only; the mate is linked via each R1 record's "paired_with".
    out_data |= {
        "fastq": {"R1": [], "R2": []},
        "accessions": {"R1": [], "R2": []}
    }
    for rec in r1.values():
        r1_fq = urljoin(server, rec["href"])
        r1_acc = rec["accession"]
        p2 = rec["paired_with"]
        r2_fq = urljoin(server, r2[p2]["href"])
        r2_acc = r2[p2]["accession"]
        out_data["fastq"]["R1"].append(r1_fq)
        out_data["fastq"]["R2"].append(r2_fq)
        out_data["accessions"]["R1"].append(r1_acc)
        out_data["accessions"]["R2"].append(r2_acc)
else:
    # Modalities with barcode reads: each index FASTQ names its mates via
    # "index_of"; the pair may appear in either order, so try both.
    out_data |= {
        "fastq": {"R1": [], "R2": [], "BC": []},
        "accessions": {"R1": [], "R2": [], "BC": []}
    }
    for f in bc.values():
        m0, m1 = f["index_of"]
        if m0 in r1 and m1 in r2:
            r1_rec, r2_rec = r1[m0], r2[m1]
        elif m1 in r1 and m0 in r2:
            r1_rec, r2_rec = r1[m1], r2[m0]
        else:
            raise ValueError("Index FASTQ does not properly match with reads")
        out_data["fastq"]["R1"].append(urljoin(server, r1_rec["href"]))
        out_data["fastq"]["R2"].append(urljoin(server, r2_rec["href"]))
        out_data["accessions"]["R1"].append(r1_rec["accession"])
        out_data["accessions"]["R2"].append(r2_rec["accession"])
        bc_fq = urljoin(server, f["href"])
        bc_acc = f["accession"]
        out_data["fastq"]["BC"].append(bc_fq)
        out_data["accessions"]["BC"].append(bc_acc)
# Write the collected sample sheet.  json.dump returns None, so do not bind
# its result (the original assigned it to an unused ``metadata`` variable).
with open(sample_data_file, 'w') as out_f:
    json.dump(out_data, out_f, indent=4)
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
7869,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
11748,
37773,
62,
26791,
355,
304,
84,
198,
6738,
37773,
62,
26791,
13,
38659,
1330,
26923,
198,
198,
39873,
62,
7890,
62,... | 2.058855 | 1,886 |
# -*- coding: utf-8 -*-
# pylint: disable=no-name-in-module
""" Main module """
import sys
from fbs_runtime.application_context.PyQt5 import ApplicationContext, cached_property
class WatchdogAppContext(ApplicationContext):
""" FBS Watchdog App Context """
@cached_property # pylint: disable=missing-function-docstring
if __name__ == '__main__':
appctxt = WatchdogAppContext()
# exit_code = appctxt.app.exec_()
# sys.exit(exit_code)
exit_code = appctxt.run()
sys.exit(exit_code)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
3672,
12,
259,
12,
21412,
198,
198,
37811,
8774,
8265,
37227,
198,
198,
11748,
25064,
198,
6738,
277,
1443,
62,
43282,... | 2.616915 | 201 |
# Lucio 2020
# Feather M4 + Propmaker + amps + lots of neopixels
import board
import busio
from digitalio import DigitalInOut, Direction, Pull
import audioio
import audiomixer
import audiomp3
import adafruit_lis3dh
import neopixel
from adafruit_led_animation.animation.solid import Solid
from adafruit_led_animation.animation.comet import Comet
from adafruit_led_animation.animation.pulse import Pulse
from adafruit_led_animation.helper import PixelSubset
from adafruit_led_animation.group import AnimationGroup
from adafruit_led_animation.color import RED, ORANGE, WHITE
ORANGE_DIM = 0x801400 # half value version
RED_DIM = 0x800000
# ---Set Volume Max Here---
VOLUME_MULT = 0.65 # 1 = full volume, 0.1 is very quiet, 0 is muted
# ---SWITCH/BUTTON SETUP---
# mode_switch selects heal (high) vs. speed (low); its level at boot
# becomes the initial mode_state.
mode_switch = DigitalInOut(board.D9)
mode_switch.switch_to_input(pull=Pull.UP)
mode_state = mode_switch.value
trig_button = DigitalInOut(board.A4)
trig_button.switch_to_input(pull=Pull.UP)
alt_button = DigitalInOut(board.A5)
alt_button.switch_to_input(pull=Pull.UP)
# ---ACCELEROMETER SETUP---
# Set up accelerometer on I2C bus, 4G range:
i2c = busio.I2C(board.SCL, board.SDA)
int1 = DigitalInOut(board.D6)
accel = adafruit_lis3dh.LIS3DH_I2C(i2c, int1=int1)
# ---SPEAKER SETUP---
# D10 is the amplifier enable line; drive it high to unmute.
enable = DigitalInOut(board.D10)
enable.direction = Direction.OUTPUT
enable.value = True
# Set up speakers and mixer. Stereo files, where music has empty right channel, FX empty left
# Mixer voice 0 carries background music, voice 1 carries sound effects.
speaker = audioio.AudioOut(board.A0, right_channel=board.A1)
mixer = audiomixer.Mixer(channel_count=2, buffer_size=2304, sample_rate=22050)
# ---NEOPIXEL SETUP---
pixel_pin = board.D5
pixel_num = 154
pixels = neopixel.NeoPixel(
    pixel_pin, pixel_num, brightness=0.6, auto_write=False, pixel_order=neopixel.GRBW
)
# ^ change pixel_order depending on RGB vs. RGBW pixels
# ---Pixel Map---
# this is the physical order in which the strips are plugged
pixel_stripA = PixelSubset(pixels, 0, 18)  # 18 pixel strip
pixel_stripB = PixelSubset(pixels, 18, 36)  # 18 pixel strip
pixel_jewel = PixelSubset(pixels, 36, 43)  # 7 pixel jewel
pixel_ringsAll = PixelSubset(pixels, 43, 151)  # all of the rings
# or use rings individually:
# pixel_ringA = PixelSubset(pixels, 43, 59)  # 16 pixel ring
# pixel_ringB = PixelSubset(pixels, 59, 75)  # 16 pixel ring
# pixel_ringC = PixelSubset(pixels, 75, 91)  # 16 pixel ring
# pixel_ringD = PixelSubset(pixels, 91, 151)  # 60 pixel ring
# ---BPM---
# Animation periods are derived from the music tempo so lights track the beat.
BPM = 128
BEAT = 60 / BPM # quarter note beat
b16TH = BEAT / 4 # 16TH note
b64TH = BEAT / 16 # sixty-fourth
# ---Anim Setup---
# heal color mode
# Pulse 'speed' = smoothness
pulse_rings_m0 = Pulse(pixel_ringsAll, speed=0.01, color=ORANGE, period=BEAT)
pulse_jewel_m0 = Pulse(pixel_jewel, speed=0.01, color=ORANGE, period=BEAT)
comet_stripA_m0 = Comet(
    pixel_stripA, speed=b64TH, color=ORANGE, tail_length=9, bounce=False
)
comet_stripB_m0 = Comet(
    pixel_stripB, speed=b64TH, color=ORANGE, tail_length=9, bounce=False
)
# speed color mode
pulse_rings_m1 = Pulse(pixel_ringsAll, speed=0.02, color=RED, period=BEAT / 2)
pulse_jewel_m1 = Pulse(pixel_jewel, speed=0.02, color=RED, period=BEAT / 2)
comet_stripA_m1 = Comet(
    pixel_stripA, speed=b64TH, color=RED, tail_length=9, bounce=False
)
comet_stripB_m1 = Comet(
    pixel_stripB, speed=b64TH, color=RED, tail_length=9, bounce=False
)
solid_white = Solid(pixel_ringsAll, color=WHITE)
# ---Anim Modes---
vu_strip_animations_mode0 = AnimationGroup(comet_stripA_m0, comet_stripB_m0, sync=True)
vu_strip_animations_mode1 = AnimationGroup(comet_stripA_m1, comet_stripB_m1, sync=True)
# ---Audio Setup---
# Pick the background track matching the boot-time switch position.
if mode_state:
    BGM = "/lucio/bgmheal.mp3"
else:
    BGM = "/lucio/bgmspeed.mp3"
sample0 = audiomp3.MP3Decoder(open(BGM, "rb"))
FX = "/lucio/shoot.mp3"
sample1 = audiomp3.MP3Decoder(open(FX, "rb"))
speaker.play(mixer)
mixer.voice[0].play(sample0, loop=True)
mixer.voice[0].level = 0.3 * VOLUME_MULT
mixer.voice[1].level = 0.7 * VOLUME_MULT
while True:
    # Drive the animations for whichever mode is currently active.
    if mode_state:  # heal mode
        vu_strip_animations_mode0.animate()
        pulse_rings_m0.animate()
        pulse_jewel_m0.animate()
    else:  # speed mode
        vu_strip_animations_mode1.animate()
        pulse_rings_m1.animate()
        pulse_jewel_m1.animate()
    # Change modes when the physical switch disagrees with mode_state,
    # swapping the background music track accordingly.
    if mode_switch.value:
        if mode_state == 0:  # state has changed, toggle it
            BGM = "/lucio/bgmheal.mp3"
            sample0.file = open(BGM, "rb")
            mixer.voice[0].play(sample0, loop=True)
            vu_strip_animations_mode0.animate()
            pulse_rings_m0.animate()
            pulse_jewel_m0.animate()
            mode_state = 1
    else:
        if mode_state == 1:
            BGM = "/lucio/bgmspeed.mp3"
            sample0.file = open(BGM, "rb")
            mixer.voice[0].play(sample0, loop=True)
            vu_strip_animations_mode1.animate()
            pulse_rings_m1.animate()
            pulse_jewel_m1.animate()
            mode_state = 0
    # Sample the accelerometer once per pass; the X axis drives the
    # reload/ultimate gestures below.  (The original unpacked ``x`` and then
    # ignored it, re-reading ``accel.acceleration`` in every condition.)
    x, _, _ = accel.acceleration
    # Only start a new sound effect when the FX voice is idle.
    if not mixer.voice[1].playing:
        if not trig_button.value:  # trigger squeezed
            FX_sample = "/lucio/shoot.mp3"
            sample1.file = open(FX_sample, "rb")
            mixer.voice[1].play(sample1)
            # Flash white regardless of mode (the original if/else ran the
            # same call on both branches).
            solid_white.animate()
        if not alt_button.value:  # alt trigger squeezed
            FX_sample = "/lucio/alt_shoot.mp3"
            sample1.file = open(FX_sample, "rb")
            mixer.voice[1].play(sample1)
            solid_white.animate()
        if x > 8:  # reload gesture
            FX_sample = "/lucio/reload.mp3"
            sample1.file = open(FX_sample, "rb")
            mixer.voice[1].play(sample1)
            solid_white.animate()
        if x < -8:  # Ultimate gesture
            FX_sample = "/lucio/ultimate.mp3"
            sample1.file = open(FX_sample, "rb")
            mixer.voice[1].play(sample1)
            solid_white.animate()
| [
2,
7598,
952,
12131,
198,
2,
34501,
337,
19,
1343,
1041,
4426,
3110,
1343,
45796,
1343,
6041,
286,
497,
404,
14810,
198,
11748,
3096,
198,
11748,
1323,
952,
198,
6738,
4875,
952,
1330,
10231,
818,
7975,
11,
41837,
11,
21429,
198,
1174... | 2.258435 | 2,786 |
import numpy as np
import os
from time import sleep
from sense_hat import SenseHat
### Set up config variables
## User adjustable
# Random Seed
try:
    # SEED env var, when set to an integer, makes runs reproducible.
    SEED = int(os.getenv('SEED'))
except (TypeError, ValueError):  # unset (None) or non-integer value
    SEED = None
## Preset
SIZE = (8, 8)  # The Size of the SenseHAT LED matrix
PIXEL = [0, 128, 0]  # R, G, B colour of a live cell
ZERO = [0, 0, 0]  # off/black pixel for a dead cell
DELAY = 1.0  # seconds between updates
sense = SenseHat()
# https://jakevdp.github.io/blog/2013/08/07/conways-game-of-life/
def life_step(X):
    """Advance a Game of Life board one generation (toroidal edges).

    A cell is live next step iff it has exactly three live neighbours, or
    it is currently live with exactly two live neighbours.
    """
    neighbours = np.zeros(X.shape, dtype=int)
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di or dj:  # skip the cell itself
                neighbours += np.roll(np.roll(X, di, 0), dj, 1)
    return (neighbours == 3) | (X & (neighbours == 2))
def display(state):
    """Push a Game of Life board to the Sense HAT LED matrix."""
    cells = state.reshape(SIZE[0] * SIZE[1]).tolist()
    frame = []
    for cell in cells:
        frame.append(PIXEL if cell else ZERO)
    sense.set_pixels(frame)
def initialize(size, seed=None):
    """Initialize the Game of Life field.

    Parameters:
        size: (rows, cols) of the board.
        seed: integer seed for numpy's RNG, or None to seed from the OS.

    Returns:
        (X, X1): X is a fresh random boolean board (~25% live cells);
        X1 is an all-dead board the caller uses to detect a stalled game.
    """
    # The original ignored both parameters and read the globals SEED/SIZE;
    # callers pass those same globals, so honoring the parameters is
    # backward-compatible and makes the function usable standalone.
    np.random.seed(seed)
    X1 = np.zeros(size, dtype=bool)
    X = np.random.random(size) > 0.75
    return X, X1
if __name__ == "__main__":
sense.clear() # no arguments defaults to off
X, X1 = initialize(SIZE, SEED) # set up display
display(X)
reset = False
while True: # Main loop
if reset or len(sense.stick.get_events()) > 0:
reset = False
X, X1 = initialize(SIZE, SEED) # set up display
display(X)
sleep(DELAY)
X = life_step(X)
display(X)
if np.array_equal(X, X1):
reset = True
sleep(DELAY * 3)
X1 = X
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
198,
6738,
2565,
62,
5183,
1330,
24956,
40483,
198,
198,
21017,
5345,
510,
4566,
9633,
198,
2235,
11787,
28138,
198,
2,
14534,
23262,
198,
28311,
1058,
1... | 2.220698 | 802 |
import os
import requests
| [
11748,
28686,
198,
11748,
7007,
198
] | 4.333333 | 6 |
#! /usr/bin/env python
__author__ = 'zieghailo'
from time import sleep
import plotter
from sphereofinfluence import distance
if __name__ == "__main__":
input_graph() | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
796,
705,
49746,
456,
603,
78,
6,
198,
198,
6738,
640,
1330,
3993,
198,
11748,
7110,
353,
198,
198,
6738,
16558,
1659,
10745,
23079,
1330,
5253,
628,
198,
198,... | 2.868852 | 61 |
"""Convenience interface for using CodePy with Boost.Python."""
from __future__ import absolute_import
| [
37811,
3103,
574,
1240,
7071,
329,
1262,
6127,
20519,
351,
19835,
13,
37906,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
628
] | 4.038462 | 26 |
import time
import random
import numpy as np
from collections import deque
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()
from matplotlib import pyplot as plt
class DQNAgent:
    """ DQN agent

    TF1 implementation of a Deep Q-Network: an 'online' conv net that is
    trained, a periodically-synced 'target' net used for bootstrapping
    (optionally double-DQN in learn()), epsilon-greedy action selection,
    a replay memory, and TensorBoard summaries.

    NOTE(review): these methods read attributes (self.states, self.actions,
    self.session, self.saver, self.memory, self.eps, self.eps_decay,
    self.eps_min, self.step, self.copy, self.save_each, self.burnin,
    self.learn_step, self.learn_each, self.batch_size, self.double_q,
    self.gamma) that no visible __init__ sets -- confirm they are
    initialized elsewhere before use.
    """
    def build_model(self):
        """ Model builder function

        Creates placeholders, the online and target networks (three conv
        layers + two dense layers each), a Huber-loss Adam optimizer over
        the Q-values of the taken actions, and summary ops.
        """
        # Placeholders: state batch, training targets, taken actions, and a
        # scalar reward fed purely for TensorBoard logging.
        self.input = tf.placeholder(dtype=tf.float32, shape=(None, ) + self.states, name='input')
        self.q_true = tf.placeholder(dtype=tf.float32, shape=[None], name='labels')
        self.a_true = tf.placeholder(dtype=tf.int32, shape=[None], name='actions')
        self.reward = tf.placeholder(dtype=tf.float32, shape=[], name='reward')
        # Scale raw pixel values into [0, 1].
        self.input_float = tf.to_float(self.input) / 255.
        # Online network
        with tf.variable_scope('online'):
            self.conv_1 = tf.layers.conv2d(inputs=self.input_float, filters=32, kernel_size=8, strides=4, activation=tf.nn.relu)
            self.conv_2 = tf.layers.conv2d(inputs=self.conv_1, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu)
            self.conv_3 = tf.layers.conv2d(inputs=self.conv_2, filters=64, kernel_size=3, strides=1, activation=tf.nn.relu)
            self.flatten = tf.layers.flatten(inputs=self.conv_3)
            self.dense = tf.layers.dense(inputs=self.flatten, units=512, activation=tf.nn.relu)
            self.output = tf.layers.dense(inputs=self.dense, units=self.actions, name='output')
        # Target network: same topology; wrapped in stop_gradient so it is
        # only ever changed by copy_model(), never by the optimizer.
        with tf.variable_scope('target'):
            self.conv_1_target = tf.layers.conv2d(inputs=self.input_float, filters=32, kernel_size=8, strides=4, activation=tf.nn.relu)
            self.conv_2_target = tf.layers.conv2d(inputs=self.conv_1_target, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu)
            self.conv_3_target = tf.layers.conv2d(inputs=self.conv_2_target, filters=64, kernel_size=3, strides=1, activation=tf.nn.relu)
            self.flatten_target = tf.layers.flatten(inputs=self.conv_3_target)
            self.dense_target = tf.layers.dense(inputs=self.flatten_target, units=512, activation=tf.nn.relu)
            self.output_target = tf.stop_gradient(tf.layers.dense(inputs=self.dense_target, units=self.actions, name='output_target'))
        # Optimizer: Huber loss between q_true and the online net's Q-value
        # for each taken action (selected via gather_nd).
        self.action = tf.argmax(input=self.output, axis=1)
        self.q_pred = tf.gather_nd(params=self.output, indices=tf.stack([tf.range(tf.shape(self.a_true)[0]), self.a_true], axis=1))
        self.loss = tf.losses.huber_loss(labels=self.q_true, predictions=self.q_pred)
        self.train = tf.train.AdamOptimizer(learning_rate=0.00025).minimize(self.loss)
        # Summaries
        self.summaries = tf.summary.merge([
            tf.summary.scalar('reward', self.reward),
            tf.summary.scalar('loss', self.loss),
            tf.summary.scalar('max_q', tf.reduce_max(self.output))
        ])
        self.writer = tf.summary.FileWriter(logdir='./logs', graph=self.session.graph)
    def copy_model(self):
        """ Copy weights to target network """
        # NOTE(review): assigns target <- online pairwise; relies on both
        # scopes declaring variables in the same order.
        self.session.run([tf.assign(new, old) for (new, old) in zip(tf.trainable_variables('target'), tf.trainable_variables('online'))])
    def save_model(self):
        """ Saves current model to disk (checkpoint tagged with self.step) """
        self.saver.save(sess=self.session, save_path='./models/model', global_step=self.step)
    def add(self, experience):
        """ Add observation to experience (replay memory) """
        self.memory.append(experience)
    def predict(self, model, state):
        """ Prediction

        Returns Q-values for ``state`` from the 'online' or 'target' net.
        Implicitly returns None for any other ``model`` string.
        """
        if model == 'online':
            return self.session.run(fetches=self.output, feed_dict={self.input: np.array(state)})
        if model == 'target':
            return self.session.run(fetches=self.output_target, feed_dict={self.input: np.array(state)})
    def run(self, state):
        """ Perform action (epsilon-greedy) and decay epsilon. """
        if np.random.rand() < self.eps:
            # Random action
            action = np.random.randint(low=0, high=self.actions)
        else:
            # Policy action
            q = self.predict('online', np.expand_dims(state, 0))
            action = np.argmax(q)
        # Decrease eps, clamped to eps_min
        self.eps *= self.eps_decay
        self.eps = max(self.eps_min, self.eps)
        # Increment step
        self.step += 1
        return action
    def learn(self):
        """ Gradient descent

        Syncs/checkpoints on step multiples, skips while burning in or
        between learn intervals, then trains on a sampled replay batch
        (double-DQN targets when self.double_q is set).
        """
        # Sync target network
        if self.step % self.copy == 0:
            self.copy_model()
        # Checkpoint model
        if self.step % self.save_each == 0:
            self.save_model()
        # Break if burn-in
        if self.step < self.burnin:
            return
        # Break if no training
        if self.learn_step < self.learn_each:
            self.learn_step += 1
            return
        # Sample batch
        batch = random.sample(self.memory, self.batch_size)
        state, next_state, action, reward, done = map(np.array, zip(*batch))
        # Get next q values from target network
        next_q = self.predict('target', next_state)
        # Calculate discounted future reward
        if self.double_q:
            # Double DQN: online net picks the action, target net scores it.
            q = self.predict('online', next_state)
            a = np.argmax(q, axis=1)
            target_q = reward + (1. - done) * self.gamma * next_q[np.arange(0, self.batch_size), a]
        else:
            target_q = reward + (1. - done) * self.gamma * np.amax(next_q, axis=1)
        # Update model
        summary, _ = self.session.run(fetches=[self.summaries, self.train],
                                      feed_dict={self.input: state,
                                                 self.q_true: np.array(target_q),
                                                 self.a_true: np.array(action),
                                                 self.reward: np.mean(reward)})
        # Reset learn step
        self.learn_step = 0
        # Write summaries for TensorBoard
        self.writer.add_summary(summary, self.step)
11748,
640,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
390,
4188,
198,
11748,
11192,
273,
11125,
13,
5589,
265,
13,
85,
16,
355,
48700,
198,
27110,
13,
40223,
62,
85,
17,
62,
46571,
3419,
198,
27110,... | 2.14395 | 2,744 |
import subsystems
import oi
import wpilib
from wpilib.command import Command
from wpilib.drive.differentialdrive import DifferentialDrive
from wpilib.sendablechooser import SendableChooser
from wpilib.smartdashboard import SmartDashboard
from commands.drive.measure import Measure
# Dashboard control to select drive mode
modeChooser : SendableChooser = None  # populated at robot init; None until then
# Used to indicate which end of the robot is the front
isFlipped : bool = False
# Used to control whether brake mode is enabled on the motor controllers
enableBrakeMode : bool = False
# Drive mode choices (values published to the mode chooser)
kModeArcade : int = 0
kModeTank : int = 1
kModeCurvature : int = 2
kModeFixed : int = 3
kModeIndexedArcade: int = 4
kModeIndexedTank: int = 5
# Discrete throttle/rotation steps used by the "indexed" drive modes.
kThrottlesIndexed = [ 0.125, 3/16.0, 0.25, 0.375, 0.5, 0.625, 0.75, 1.0 ]
kRotationIndexed = [ 0.125, 3/16.0, 0.25, 5/16.0 ]
| [
11748,
39335,
82,
198,
11748,
267,
72,
198,
198,
11748,
266,
79,
22282,
198,
6738,
266,
79,
22282,
13,
21812,
1330,
9455,
198,
6738,
266,
79,
22282,
13,
19472,
13,
39799,
498,
19472,
1330,
20615,
498,
24825,
198,
6738,
266,
79,
22282,... | 3.02518 | 278 |
#!/usr/bin/env python3
from utils import ensure
from result import Ok, Err, Result
import urllib
import httplib2
from normalise_uri import normalise_uri
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
3384,
4487,
220,
220,
220,
1330,
4155,
198,
6738,
1255,
220,
220,
1330,
6762,
11,
41512,
11,
25414,
198,
11748,
2956,
297,
571,
198,
11748,
1841,
489,
571,
17,
198,
19... | 2.693333 | 75 |
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_pattern_2_xsd.nistschema_sv_iv_list_nmtoken_pattern_2 import NistschemaSvIvListNmtokenPattern2
# Explicit public API of this generated schema package module.
__all__ = [
    "NistschemaSvIvListNmtokenPattern2",
]
| [
6738,
5072,
13,
27530,
13,
77,
396,
62,
7890,
13,
4868,
62,
35339,
13,
77,
16762,
4233,
13,
15952,
2611,
62,
39098,
13,
77,
1023,
2395,
2611,
62,
21370,
62,
452,
62,
4868,
62,
77,
16762,
4233,
62,
33279,
62,
17,
62,
87,
21282,
1... | 2.292453 | 106 |
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
import pickle
import itertools
import math
from combi._python_toolbox.third_party import functools
from combi._python_toolbox import cute_testing
from combi._python_toolbox import math_tools
from combi._python_toolbox import cute_iter_tools
from combi._python_toolbox import nifty_collections
from combi._python_toolbox import caching
from combi._python_toolbox import sequence_tools
import combi
from combi import *
# Convenience constants for tests dealing with unbounded values.
infinity = math.inf
infinities = (infinity, -infinity)
| [
2,
15069,
3717,
12,
5539,
7431,
371,
620,
388,
13,
198,
2,
770,
1430,
318,
9387,
739,
262,
17168,
5964,
13,
198,
198,
11748,
2298,
293,
198,
11748,
340,
861,
10141,
198,
11748,
10688,
198,
198,
6738,
1974,
72,
13557,
29412,
62,
2598... | 2.752174 | 230 |
# -*- coding: utf-8 -*-
import json
import yaml
import os
import threading
import pytest
from mock import patch, sentinel, Mock
from freezegun import freeze_time
from botocore.exceptions import ClientError
from sceptre.template import Template
from sceptre.connection_manager import ConnectionManager
from sceptre.exceptions import UnsupportedTemplateFileTypeError
from sceptre.exceptions import TemplateSceptreHandlerError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
11748,
331,
43695,
198,
11748,
28686,
198,
11748,
4704,
278,
198,
198,
11748,
12972,
9288,
198,
6738,
15290,
1330,
8529,
11,
1908,
20538,
11,
44123... | 3.763158 | 114 |
import pytest
import requests_mock
from openff.bespokefit.cli.executor.list import list_cli
from openff.bespokefit.executor.services import settings
from openff.bespokefit.executor.services.coordinator.models import (
CoordinatorGETPageResponse,
)
from openff.bespokefit.executor.services.models import Link
@pytest.mark.parametrize(
"n_results, expected_message",
[(0, "No optimizations were found"), (3, "The following optimizations were found")],
)
| [
11748,
12972,
9288,
198,
11748,
7007,
62,
76,
735,
198,
198,
6738,
1280,
487,
13,
65,
9774,
2088,
11147,
13,
44506,
13,
18558,
38409,
13,
4868,
1330,
1351,
62,
44506,
198,
6738,
1280,
487,
13,
65,
9774,
2088,
11147,
13,
18558,
38409,
... | 3.134228 | 149 |
class Solution:
    """LeetCode 16: 3Sum Closest.

    Given an integer array ``nums`` and a ``target``, find three integers in
    ``nums`` whose sum is closest to ``target`` and return that sum.  Each
    input is assumed to have exactly one answer.

    Example: nums = [-1, 2, 1, -4], target = 1 -> 2  (-1 + 2 + 1 = 2).
    """

    def threeSumClosest(self, nums, target: int) -> int:
        """Sort first, then fix one element and close in with two pointers."""
        nums.sort()
        n = len(nums)
        best = float('inf')
        for i in range(n):
            # Skip duplicates of the fixed element; an identical value was
            # already fully explored on a previous iteration.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total == target:
                    return target  # cannot get closer than an exact hit
                if abs(total - target) < abs(best - target):
                    best = total
                # Move the pointer that nudges the sum toward the target.
                if total < target:
                    lo += 1
                else:
                    hi -= 1
        return best
so = Solution()
# print(so.threeSum([-1, 23, -5, 6, 77, 1, 0]))
# print(so.threeSumClosest([-1, 2, 1, -4], 1))
# Fixed: ``threeSumClosestF`` does not exist on Solution (AttributeError at
# runtime); the intended method is ``threeSumClosest``.
print(so.threeSumClosest([0, 0, 0], 1))
| [
4871,
28186,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
13328,
119,
247,
22522,
248,
31660,
10310,
103,
44293,
227,
162,
233,
105,
299,
220,
10310,
103,
46763,
112,
46763,
108,
21410,
46763,
108,
163,
119,
226,
997,
82,
... | 1.256701 | 970 |
import pdb
import pytest
from pdbr._pdbr import rich_pdb_klass
@pytest.fixture
| [
11748,
279,
9945,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
279,
67,
1671,
13557,
30094,
1671,
1330,
5527,
62,
79,
9945,
62,
74,
31172,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
628
] | 2.444444 | 36 |
from django.core.checks import Tags, Warning, register # pylint: disable=redefined-builtin
from axes.conf import settings
@register(Tags.security, Tags.caches, Tags.compatibility)
@register(Tags.security, Tags.compatibility)
@register(Tags.security, Tags.compatibility)
@register(Tags.compatibility)
| [
6738,
42625,
14208,
13,
7295,
13,
42116,
1330,
44789,
11,
15932,
11,
7881,
220,
1303,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
18780,
259,
198,
198,
6738,
34197,
13,
10414,
1330,
6460,
628,
628,
198,
198,
31,
30238,
7,
36142,
... | 3.329787 | 94 |
"""
Run spike sorting on concatenated recordings
============================================
In several experiments, several recordings are performed in sequence, for example a baseline/intervention.
In these cases, since the underlying spiking activity can be assumed to be the same (or at least very similar), the
recordings can be concatenated. This notebook shows how to concatenate the recordings before spike sorting and how to
split the sorted output based on the concatenation.
"""
import spikeinterface.extractors as se
import spikeinterface.sorters as ss
import time
##############################################################################
# When performing an experiment with multiple consecutive recordings, it can be a good idea to concatenate the single
# recordings, as this can improve the spike sorting performance and it doesn't require to track the neurons over the
# different recordings.
#
# This can be done very easily in SpikeInterface using a combination of the :code:`MultiRecordingTimeExtractor` and the
# :code:`SubSortingExtractor` objects.
#
# Let's create a toy example with 4 channels (the :code:`dumpable=True` dumps the extractors to a file, which is
# required for parallel sorting):
recording_single, _ = se.example_datasets.toy_example(duration=10, num_channels=4, dumpable=True)
##############################################################################
# Let's now assume that we have 4 recordings. In our case we will concatenate the :code:`recording_single` 4 times. We
# first need to build a list of :code:`RecordingExtractor` objects:
recordings_list = []
for i in range(4):
recordings_list.append(recording_single)
##############################################################################
# We can now use the :code:`recordings_list` to instantiate a :code:`MultiRecordingTimeExtractor`, which concatenates
# the traces in time:
multirecording = se.MultiRecordingTimeExtractor(recordings=recordings_list)
##############################################################################
# Since the :code:`MultiRecordingTimeExtractor` is a :code:`RecordingExtractor`, we can run spike sorting "normally"
multisorting = ss.run_klusta(multirecording)
##############################################################################
# The returned :code:`multisorting` object is a normal :code:`SortingExtractor`, but we now that its spike trains are
# concatenated similarly to the recording concatenation. So we have to split them back. We can do that using the `epoch`
# information in the :code:`MultiRecordingTimeExtractor`:
sortings = []
sortings = []
for epoch in multisorting.get_epoch_names():
info = multisorting.get_epoch_info(epoch)
sorting_single = se.SubSortingExtractor(multisorting, start_frame=info['start_frame'], end_frame=info['end_frame'])
sortings.append(sorting_single)
##############################################################################
# The :code:`SortingExtractor` objects in the :code:`sortings` list contain now split spike trains. The nice thing of
# this approach is that the unit_ids for the different epochs are the same unit!
| [
37811,
198,
10987,
20240,
29407,
319,
1673,
36686,
515,
18813,
198,
10052,
25609,
198,
198,
818,
1811,
10256,
11,
1811,
18813,
389,
6157,
287,
8379,
11,
329,
1672,
257,
14805,
14,
3849,
4018,
13,
198,
818,
777,
2663,
11,
1201,
262,
10... | 4.024204 | 785 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: test_platform/result_flow/ctp.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from chromite.api.gen.test_platform.result_flow import common_pb2 as test__platform_dot_result__flow_dot_common__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='test_platform/result_flow/ctp.proto',
package='test_platform.result_flow',
syntax='proto3',
serialized_options=_b('ZCgo.chromium.org/chromiumos/infra/proto/go/test_platform/result_flow'),
serialized_pb=_b('\n#test_platform/result_flow/ctp.proto\x12\x19test_platform.result_flow\x1a&test_platform/result_flow/common.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa4\x01\n\nCTPRequest\x12.\n\x03\x63tp\x18\x01 \x01(\x0b\x32!.test_platform.result_flow.Source\x12\x38\n\rtest_plan_run\x18\x02 \x01(\x0b\x32!.test_platform.result_flow.Target\x12,\n\x08\x64\x65\x61\x64line\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\">\n\x0b\x43TPResponse\x12/\n\x05state\x18\x01 \x01(\x0e\x32 .test_platform.result_flow.StateBEZCgo.chromium.org/chromiumos/infra/proto/go/test_platform/result_flowb\x06proto3')
,
dependencies=[test__platform_dot_result__flow_dot_common__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_CTPREQUEST = _descriptor.Descriptor(
name='CTPRequest',
full_name='test_platform.result_flow.CTPRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ctp', full_name='test_platform.result_flow.CTPRequest.ctp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_plan_run', full_name='test_platform.result_flow.CTPRequest.test_plan_run', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deadline', full_name='test_platform.result_flow.CTPRequest.deadline', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=304,
)
_CTPRESPONSE = _descriptor.Descriptor(
name='CTPResponse',
full_name='test_platform.result_flow.CTPResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='test_platform.result_flow.CTPResponse.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=306,
serialized_end=368,
)
_CTPREQUEST.fields_by_name['ctp'].message_type = test__platform_dot_result__flow_dot_common__pb2._SOURCE
_CTPREQUEST.fields_by_name['test_plan_run'].message_type = test__platform_dot_result__flow_dot_common__pb2._TARGET
_CTPREQUEST.fields_by_name['deadline'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CTPRESPONSE.fields_by_name['state'].enum_type = test__platform_dot_result__flow_dot_common__pb2._STATE
DESCRIPTOR.message_types_by_name['CTPRequest'] = _CTPREQUEST
DESCRIPTOR.message_types_by_name['CTPResponse'] = _CTPRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CTPRequest = _reflection.GeneratedProtocolMessageType('CTPRequest', (_message.Message,), dict(
DESCRIPTOR = _CTPREQUEST,
__module__ = 'test_platform.result_flow.ctp_pb2'
# @@protoc_insertion_point(class_scope:test_platform.result_flow.CTPRequest)
))
_sym_db.RegisterMessage(CTPRequest)
CTPResponse = _reflection.GeneratedProtocolMessageType('CTPResponse', (_message.Message,), dict(
DESCRIPTOR = _CTPRESPONSE,
__module__ = 'test_platform.result_flow.ctp_pb2'
# @@protoc_insertion_point(class_scope:test_platform.result_flow.CTPResponse)
))
_sym_db.RegisterMessage(CTPResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
1332,
62,
24254,
14,
20274,
62,
11125,
14,
310,
79,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
... | 2.532181 | 2,082 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 2.954545 | 44 |
# ******************************************************************************
# Name: Calculate Vij matrices and Adinkra Gadget values
# Author: Vadim Korotkikh
# Email: va.korotki@gmail.com
# Date: November 2016
# Version: 1.3
#
# Description: Scripts for calculating Vij matrices for each one of 36864
# unique Adinkra tetrads and scripts for calculating the Gadget values from the
# Vij matrices
#
# ******************************************************************************
# ******************************************************************************
# Begin Imports
import math
import sys
import numpy as np
import numpy.matlib
import itertools
from numpy import array
from numpy.linalg import inv
import time
# import matrix_outerprod_calc
import alpha_beta_4x4
# ******************************************************************************
# Do the final Vij calculation
def calculate_vij_matrices(main_tetrad_list):
""" Remember that the main_tetrad_ark is a list of lists,
with each list containing four tuples, with tuples being
matrix number and the matrices itself. """
vij_possibilities = []
vij_possibilities = alpha_beta_4x4.illuminator_of_elfes()
vij_sixset = []
print(" ")
print("Calculating Vij matrices")
print(" ")
vij_alphas = []
vij_betas = []
calc_check = []
vij_matrices = []
anomaly_switch = 0
debug = 0
for ti, teti in enumerate(main_tetrad_list):
if debug:
print("# ********************************")
print(" ")
print("Tetrad i: ", ti)
temp_combos = []
alpha_temp = []
beta_temp = []
vij_tempset = []
""" Store 6 Vij matrices in temp_vijmat"""
temp_vijmat = []
""" This section does a double loop over the same tetrad to calculate
the set of 6 Vij matrices for the tetrad.
So for each matrix in the tetrad its checked against all the possible others,
bypassing the duplicate calculations
"""
for i, li in enumerate(teti):
# print(li[1])
bigli = li[1]
tr_bigli = np.transpose(bigli)
for j, lj in enumerate(teti):
biglj = lj[1]
ij_temp = [i, j]
ij_temp.sort()
ir = i + 1
jr = j + 1
ijstr = str(ir) + str(jr)
if ij_temp not in temp_combos and i != j:
# print("Vij matrix i-j vals:", ij_temp)
# print("Vij matrix i-j vals:", ijstr)
temp_combos.append(ij_temp)
tr_biglj = np.transpose(biglj)
# temp_mat = np.dot(tr_bigli, biglj) - np.dot(tr_biglj, bigli)
""" Vij eq from 1601.00 (3.2) """
# temp_mat = np.matmul(tr_biglj, bigli) - np.matmul(tr_bigli, biglj)
temp_mat = np.dot(tr_bigli, biglj) - np.dot(tr_biglj, bigli)
""" Compare against the 6 possible matrix solutions """
tf_bool = 0
for xi, ijx in enumerate(vij_possibilities):
ijx_neg = np.multiply(ijx, -1)
# print(xi)
if np.array_equal(temp_mat, ijx):
tf_bool = 1
temp_vijmat.append(temp_mat)
if debug:
print("*************$$$$$$$$$$$$$$$$$$ ")
print("l-solution found:")
print(ijx)
tmint = np.int(1)
if xi < 3:
tmp_str = "alpha" + str((xi + 1))
# print(tmp_str)
vij_tempset.append([tmp_str, ijstr, tmint])
alpha_temp.append([tmp_str, ijstr, tmint])
elif xi >= 3:
tmp_str = "beta" + str((xi - 2))
vij_tempset.append([tmp_str, ijstr, tmint])
beta_temp.append([tmp_str, ijstr, tmint])
elif np.array_equal(temp_mat, ijx_neg):
tf_bool = 1
temp_vijmat.append(temp_mat)
if debug:
print("*************$$$$$$$$$$$$$$$$$$ ")
print("l-solution found:")
print(ijx_neg)
# xint = (xi + 1) * ( -1)
tmint = np.int(-1)
if xi < 3:
tmp_str = "alpha" + str((xi + 1))
# print(tmp_str)
vij_tempset.append([tmp_str, ijstr, tmint])
alpha_temp.append([tmp_str, ijstr, tmint])
elif xi >= 3:
tmp_str = "beta" + str((xi - 2))
vij_tempset.append([tmp_str, ijstr, tmint])
beta_temp.append([tmp_str, ijstr, tmint])
else:
if i != j and tf_bool == 0 and xi >= 5:
if not(np.array_equal(temp_mat, ijx)) or not np.array_equal(temp_mat, ijx_neg):
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ")
print("Anomaly found:",i,j)
print(temp_mat)
anomaly_switch = 1
tf_bool = 0
vij_matrices.append(temp_vijmat)
calc_check.append(vij_tempset)
if alpha_temp:
vij_alphas.append(alpha_temp)
elif beta_temp:
vij_betas.append(beta_temp)
beta_temp = []
alpha_temp = []
print("*************$$$$$$$$$$$$$$$$$$ ")
print("Vij Matrix Coefficients Results:")
print("")
for mvals in calc_check:
if any(x for x in mvals if x[0].startswith('alpha')) and any(x for x in mvals if x[0].startswith('beta')):
print("MIXED ALPHA_BETA ERROR")
print(mvals)
else:
print(mvals)
print("Length Vij alphas tetrads: %d" % (len(vij_alphas)))
print("length Vij beta tetrads: %d" % (len(vij_betas)))
gadget_vals = []
one_count = 0
ptre_count = 0
ntre_count = 0
zero_count = 0
if not anomaly_switch:
for fi, ijf in enumerate(calc_check):
for xj, ijx in enumerate(calc_check):
# ind_temp = [fi, xj]
# ind_temp.sort()
# x = [val]
if ijf[0][0:2] == ijx[0][0:2] and ijf[1][0:2] == ijx[1][0:2] and ijf[2][0:2] == ijx[2][0:2]:
# als = ijf[0][3] * ijx[0][3]
gadget_sum = sum([(ijf[z][2] * ijx[z][2]) for z in range(0, len(ijf))])
if gadget_sum == 2:
ptre_count += 1
elif gadget_sum == -2:
ntre_count += 1
elif gadget_sum == 6:
one_count += 1
elif gadget_sum == 0:
zero_count += 1
else:
print(ijf)
print(ijx)
print("Gadget ERROR 1:",gadget_sum, "Tetrad#:",fi,xj)
div_const = gadget_sum / 6
# print("****** Gadget calculation ******")
# print("Calc #:", calc_count)
# print(div_const)
# print("G values:", gadget_vals)
if div_const not in gadget_vals:
gadget_vals.append(div_const)
elif ijf[0][0:2] == ijx[0][0:2] and ijf[1][0:2] != ijx[1][0:2]:
gadget_sum = sum([(ijf[z][2] * ijx[z][2]) for z in [0, 5]])
if gadget_sum == 2:
ptre_count += 1
elif gadget_sum == -2:
ntre_count += 1
elif gadget_sum == 6:
one_count += 1
elif gadget_sum == 0:
zero_count += 1
else:
print("Gadget ERROR 2:",gadget_sum, "Tetrad#:",fi,xj)
div_const = gadget_sum / 6
# print("Calc #:", calc_count)
if div_const not in gadget_vals:
gadget_vals.append(div_const)
elif ijf[0][0:2] != ijx[0][0:2] and ijf[1][0:2] == ijx[1][0:2]:
# print(ijf, ijx)
gadget_sum = sum([(ijf[z][2] * ijx[z][2]) for z in [1, 4]])
if gadget_sum == 2:
ptre_count += 1
elif gadget_sum == -2:
ntre_count += 1
elif gadget_sum == 6:
one_count += 1
elif gadget_sum == 0:
zero_count += 1
else:
print("Gadget ERROR 3:",gadget_sum, "Tetrad#:",fi,xj)
div_const = gadget_sum / 6
# print("Calc #:", calc_count)
if div_const not in gadget_vals:
gadget_vals.append(div_const)
elif ijf[0][0:2] != ijx[0][0:2] and ijf[2][0:2] == ijx[2][0:2]:
gadget_sum = sum([(ijf[z][2] * ijx[z][2]) for z in [2, 3]])
if gadget_sum == 2:
ptre_count += 1
elif gadget_sum == -2:
ntre_count += 1
elif gadget_sum == 6:
one_count += 1
elif gadget_sum == 0:
zero_count += 1
else:
print("Gadget ERROR 4:",gadget_sum, "Tetrad#:",fi,xj)
div_const = gadget_sum / 6
# print("Calc #:", calc_count)
if div_const not in gadget_vals:
gadget_vals.append(div_const)
elif ijf[0][0:2] != ijx[0][0:2] and ijf[1][0:2] != ijx[1][0:2] and ijf[2][0:2] != ijx[2][0:2]:
gadget_sum = 0
zero_count += 1
div_const = gadget_sum / 6
if div_const not in gadget_vals:
gadget_vals.append(div_const)
else:
print("ERROR**********")
print(ijf)
print(ijx)
print("zero count %d " % (zero_count))
print(" 1/3 count %d " % (ptre_count))
print("-1/3 count %d " % (ntre_count))
print(" 1 count %d " % (one_count))
print(gadget_vals)
else:
pass
print("################################################")
print(" Printing final Gadget values and counts ")
print(" ")
print("zero count %d " % (zero_count))
print(" 1/3 count %d " % (ptre_count))
print("-1/3 count %d " % (ntre_count))
print(" 1 count %d " % (one_count))
print(gadget_vals)
| [
2,
41906,
17174,
46068,
1174,
198,
2,
6530,
25,
220,
220,
220,
27131,
378,
49219,
2603,
45977,
290,
1215,
676,
430,
39266,
3815,
198,
2,
6434,
25,
220,
569,
324,
320,
14769,
313,
74,
13848,
198,
2,
9570,
25,
220,
220,
46935,
13,
7... | 2.059308 | 4,131 |
# -*- coding: utf-8 -*-
"""
257. Binary Tree Paths
Given a binary tree, return all root-to-leaf paths.
Note: A leaf is a node with no children.
"""
# Definition for a binary tree node.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
28676,
13,
45755,
12200,
10644,
82,
198,
198,
15056,
257,
13934,
5509,
11,
1441,
477,
6808,
12,
1462,
12,
33201,
13532,
13,
198,
198,
6425,
25,
317,
12835,
... | 2.96875 | 64 |
from flask import Blueprint
passportBlp = Blueprint("passportBlp", __name__, url_prefix="/passport")
from .views import *
| [
6738,
42903,
1330,
39932,
198,
198,
6603,
634,
3629,
79,
796,
39932,
7203,
6603,
634,
3629,
79,
1600,
11593,
3672,
834,
11,
19016,
62,
40290,
35922,
6603,
634,
4943,
198,
198,
6738,
764,
33571,
1330,
1635,
198
] | 3.351351 | 37 |
# Generated by Django 3.2.9 on 2022-01-22 20:18
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33160,
12,
486,
12,
1828,
1160,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
"""
文件名: conf/__init__.py
配置文件
"""
from .conf import conf_args
from .font.noto import noto_font, noto_bold_font, noto_medium_font, noto_thin_font, noto_black_font, noto_regular_font
from .picture import head_pic, rank_bg_pic, logo_pic, logo_ico
from .args import p_args
from .equipment import ConfigCapture
from .sql import ConfigDatabase
from .aliyun import ConfigAliyun
from .sys_default import ConfigSystem, ConfigSecret, ConfigTkinter, ConfUser
from .matplotlib_conf import ConfigMatplotlib
| [
37811,
198,
23877,
229,
20015,
114,
28938,
235,
25,
1013,
14,
834,
15003,
834,
13,
9078,
198,
165,
227,
235,
163,
121,
106,
23877,
229,
20015,
114,
198,
37811,
198,
198,
6738,
764,
10414,
1330,
1013,
62,
22046,
198,
6738,
764,
10331,
... | 2.84 | 175 |
# Generated from PromQLLexer.g4 by ANTLR 4.9.3
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
| [
2,
2980,
515,
422,
10335,
48,
3069,
1069,
263,
13,
70,
19,
416,
3537,
14990,
49,
604,
13,
24,
13,
18,
198,
6738,
1885,
14050,
19,
1330,
1635,
198,
6738,
33245,
1330,
10903,
9399,
198,
11748,
25064,
198,
361,
25064,
13,
9641,
62,
1... | 2.697368 | 76 |
import unittest
import numpy as np
from numpy.testing import assert_allclose
import copy
import sys
sys.path.append('..')
from angler import Simulation, Optimization
from angler.structures import three_port
import autograd.numpy as npa
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
11748,
4866,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
492,
11537,
198,
6738,
3550,
1754,
1330... | 3.142857 | 91 |
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages the names of a Business."""
from contextlib import suppress
from typing import Dict, Optional
from flask_babel import _ as babel # noqa: N813
from legal_api.models import Filing
from legal_api.utils.datetime import datetime
def update_filing_court_order(filing_submission: Filing, court_order_json: Dict) -> Optional[Dict]:
"""Update the court_order info for a Filing."""
if not Filing:
return {'error': babel('Filing required before alternate names can be set.')}
filing_submission.court_order_file_number = court_order_json.get('fileNumber')
filing_submission.court_order_effect_of_order = court_order_json.get('effectOfOrder')
with suppress(IndexError, KeyError, TypeError, ValueError):
filing_submission.court_order_date = datetime.fromisoformat(court_order_json.get('orderDate'))
return None
| [
2,
15069,
10673,
33448,
22783,
286,
3517,
9309,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.364269 | 431 |
import asyncio
import json
import logging
import random
from copy import copy
from dataclasses import Field
from aiogram import Bot, Dispatcher
from aiogram.types import ParseMode
from aiothornode.types import ThorPool
from localization import BaseLocalization
from services.jobs.fetch.net_stats import NetworkStatisticsFetcher
from services.jobs.fetch.pool_price import PoolPriceFetcher
from services.lib.date_utils import DAY
from services.lib.depcont import DepContainer
from services.lib.texts import up_down_arrow
from services.lib.utils import setup_logs, load_pickle, save_pickle
from services.models.net_stats import NetworkStats
from services.models.pool_info import PoolInfoMap, parse_thor_pools
from services.notify.broadcast import Broadcaster
from tools.lib.lp_common import LpAppFramework
CACHE_NET_STATS = True
CACHE_NET_STATS_FILE = '../../tmp/net_stats.pickle'
DRY_RUN = False
if __name__ == "__main__":
# test_upd()
setup_logs(logging.INFO)
asyncio.run(main())
| [
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
4738,
198,
6738,
4866,
1330,
4866,
198,
6738,
4818,
330,
28958,
1330,
7663,
198,
198,
6738,
257,
72,
21857,
1330,
18579,
11,
3167,
8071,
2044,
198,
6738,
257,
72,
2185... | 3.15674 | 319 |
#!/usr/bin/env python3
from caproto import ChannelType
from caproto.server import PVGroup, get_pv_pair_wrapper, ioc_arg_parser, run
# Create _two_ PVs with a single pvproperty_with_rbv:
pvproperty_with_rbv = get_pv_pair_wrapper(setpoint_suffix='',
readback_suffix='_RBV')
# NOTE: _RBV is areaDetector-like naming suffix for a read-back value
if __name__ == '__main__':
ioc_options, run_options = ioc_arg_parser(
default_prefix='setpoint_rbv:',
desc='Run an IOC with two setpoint/readback pairs.')
ioc = Group(**ioc_options)
run(ioc.pvdb, **run_options)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
1451,
305,
1462,
1330,
11102,
6030,
198,
6738,
1451,
305,
1462,
13,
15388,
1330,
31392,
13247,
11,
651,
62,
79,
85,
62,
24874,
62,
48553,
11,
1312,
420,
62,
853,
62,
48610,... | 2.288321 | 274 |
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
# Cross-platform build script for JS bundles required by Python layer.
import subprocess
import os
import sys
from shutil import copyfile
if __name__ == '__main__':
in_devops = False
if len(sys.argv) == 2 and sys.argv[1] == "devops":
in_devops = True
script_path = os.path.dirname(os.path.abspath(__file__))
js_dir = os.path.join(script_path, "..", "interpret-core", "js")
# NOTE: Using shell=True can be a security hazard where there is user inputs.
# In this case, there are no user inputs.
# NOTE: Workaround for Azure DevOps.
if in_devops:
subprocess.run(["npm install"], cwd=js_dir, shell=True)
subprocess.run(["npm run build-prod"], cwd=js_dir, shell=True)
else:
subprocess.run(["npm", "install"], cwd=js_dir, shell=True)
subprocess.run(["npm", "run", "build-prod"], cwd=js_dir, shell=True)
js_bundle_src = os.path.join(js_dir, "dist", "interpret-inline.js")
js_bundle_dest = os.path.join(
script_path, "..", "interpret-core",
"interpret", "lib", "interpret-inline.js"
)
os.makedirs(os.path.dirname(js_bundle_dest), exist_ok=True)
copyfile(js_bundle_src, js_bundle_dest)
| [
2,
15069,
357,
66,
8,
13130,
5413,
10501,
198,
2,
4307,
6169,
739,
262,
17168,
3788,
5964,
198,
2,
6372,
12,
24254,
1382,
4226,
329,
26755,
36344,
2672,
416,
11361,
7679,
13,
198,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
11748,... | 2.505859 | 512 |
# Generated by Django 2.1.4 on 2019-01-25 07:17
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
13130,
12,
486,
12,
1495,
8753,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import numpy as np
import random
import os
import json
import PIL.Image as Image
import time
import copy
import sys
if __name__ == '__main__':
all_size = ["13x16", "26x32", "52x64", "104x128", "208x256"]
for size in all_size:
x = time.time()
loaded_training_labels = np.load("../DatasetBinaryStorage/" + size + "/train/labels0.npz")
loaded_training_features = np.load("../DatasetBinaryStorage/" + size + "/train/features0.npz")
loaded_validation_labels = np.load("../DatasetBinaryStorage/" + size + "/validate/labels0.npz")
loaded_validation_features = np.load("../DatasetBinaryStorage/" + size + "/validate/features0.npz")
loaded_training_features = loaded_training_features['arr_0']
loaded_training_labels = loaded_training_labels['arr_0']
loaded_validation_features = loaded_validation_features['arr_0']
loaded_validation_labels = loaded_validation_labels['arr_0']
print(size, " - ", time.time() - x)
time.sleep(5)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
350,
4146,
13,
5159,
355,
7412,
198,
11748,
640,
198,
11748,
4866,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12... | 2.524876 | 402 |
from numpy import nan
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from pymove import MoveDataFrame, stay_point_detection
from pymove.utils.constants import DATETIME, LATITUDE, LONGITUDE, TRAJ_ID
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:15', 2],
]
list_data_test = [
[39.984093, 116.319237, '2008-10-23 05:53:05', 1],
[39.984200, 116.319321, '2008-10-23 05:53:06', 1],
[39.984222, 116.319405, '2008-10-23 05:53:11', 1],
[39.984211, 116.319389, '2008-10-23 05:53:16', 1],
[39.984219, 116.319420, '2008-10-23 05:53:21', 1],
]
| [
6738,
299,
32152,
1330,
15709,
198,
6738,
19798,
292,
1330,
6060,
19778,
11,
5045,
27823,
198,
6738,
19798,
292,
13,
33407,
1330,
6818,
62,
14535,
62,
40496,
198,
198,
6738,
279,
4948,
659,
1330,
10028,
6601,
19778,
11,
2652,
62,
4122,
... | 2.097297 | 370 |
import logging
import math
import sys
import time
from collections import namedtuple
from io import BytesIO
from flask import Blueprint, Flask, current_app, make_response, render_template, request, abort
from flask_caching import Cache
from flask_cors import CORS
from zaloa import (
generate_coordinates_512,
generate_coordinates_256,
generate_coordinates_260,
generate_coordinates_516,
is_tile_valid,
process_tile,
ImageReducer,
S3TileFetcher,
HttpTileFetcher,
Tile,
)
tile_bp = Blueprint('tiles', __name__)
cache = Cache()
@tile_bp.route('/tilezen/terrain/v1/<int:tilesize>/<tileset>/<int:z>/<int:x>/<int:y>.png')
@tile_bp.route('/tilezen/terrain/v1/<tileset>/<int:z>/<int:x>/<int:y>.png')
@tile_bp.route('/health_check')
| [
11748,
18931,
198,
11748,
10688,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
42903,
1330,
39932,
11,
46947,
11,
1459,
62,
1324,
11,
787,
62,
26209,
11... | 2.547855 | 303 |
import re
import pathlib
import unittest
from net_parser.config import BaseConfigParser, ConfigDiff, IosConfigDiff, IosConfigParser
from tests import RESOURCES_DIR
VERBOSITY = 4
if __name__ == '__main__':
unittest.main() | [
11748,
302,
198,
11748,
3108,
8019,
198,
11748,
555,
715,
395,
198,
198,
6738,
2010,
62,
48610,
13,
11250,
1330,
7308,
16934,
46677,
11,
17056,
28813,
11,
314,
418,
16934,
28813,
11,
314,
418,
16934,
46677,
198,
198,
6738,
5254,
1330,
... | 2.974026 | 77 |
import glob
import pandas as pd
import argparse
from gensim.models import Word2Vec
import gensim.downloader as api
from scipy.stats import pearsonr
parser = argparse.ArgumentParser()
parser.add_argument(
'-w',
'--w2v',
action='store',
default=None,
dest='model_path',
help='File with the word2vec model'
)
parser.add_argument(
'--test_input',
dest='testFolder',
action='store',
required=True,
help='path to folder containing test files'
)
parser.add_argument(
'--outputFile',
'-o',
dest='results_path',
action='store',
required=True,
help='Path to store results'
)
args = parser.parse_args()
'''
Read Files to test for similarities
'''
print('Loading Test Datasets.')
test_files = glob.glob(args.testFolder+'*.csv')
test_dataset = []
for f in test_files:
dataset = pd.read_csv(f, header=None).values
test_dataset.append(dataset)
'''
Loading/ Training the model.
'''
# load model
print('Loading previously trained model.')
if args.model_path == "pretrained":
model = api.load("word2vec-google-news-300")
else:
model = Word2Vec.load(args.model_path).wv
'''
Testing the model.
'''
print('Testing the trained model.')
result = open(args.results_path, 'w')
for d in range(0, len(test_dataset)):
predictions = []
result.write("---------- " + str(test_files[d]) + " ----------\n")
for pair in test_dataset[d]:
if pair[0] in model and pair[1] in model:
sim = model.similarity(pair[0], pair[1])
predictions.append(sim)
result.write(str(sim) + "\n")
else:
print("Missing one of the words in the model: ", pair[0], pair[1])
predictions.append(None)
result.write("None\n")
test_removed = [ x for i, x in enumerate(test_dataset[d][:, 2]) if predictions[i]]
predictions_removed = [ x for x in predictions if x]
print("Pearson Correlation Coefficient: ", pearsonr(predictions_removed, test_removed)[0])
result.write("Pearson Correlation Coefficient: "+ str(pearsonr(predictions_removed, test_removed)[0])+"\n")
result.write("--------------------\n")
| [
11748,
15095,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1822,
29572,
198,
198,
6738,
308,
641,
320,
13,
27530,
1330,
9678,
17,
53,
721,
198,
11748,
308,
641,
320,
13,
15002,
263,
355,
40391,
198,
6738,
629,
541,
88,
13,
3424... | 2.540094 | 848 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, redefined-outer-name
"""
Convolve a TDI table that tabulates "regular" photons with a Cherenkov cone to
arrive at a Cherenkov TDI table.
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'generate_ckv_tdi_table',
'parse_args',
]
__author__ = 'P. Eller, J.L. Lanfranchi'
__license__ = '''Copyright 2017 Philipp Eller and Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from os import remove
from os.path import abspath, dirname, isdir, isfile, join
import pickle
import sys
import numpy as np
from six import string_types
if __name__ == '__main__' and __package__ is None:
RETRO_DIR = dirname(dirname(dirname(abspath(__file__))))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro.utils.ckv import convolve_table
from retro.utils.misc import expand, mkdir
# TODO: allow different directional binning in output table
# TODO: write all keys of the table that are missing from the target directory
def generate_ckv_tdi_table(
    tdi_table,
    beta,
    oversample,
    num_cone_samples,
    n_phase=None,
    outdir=None,
    mmap_src=True,
    mmap_dst=False,
):
    """
    Convolve a "regular" TDI table with a Cherenkov cone, producing (and
    saving) `ckv_tdi_table.npy`.

    Parameters
    ----------
    tdi_table : string or mapping
        If string, path to TDI table file (or directory containing a
        `tdi_table.npy` file).

    beta : float in [0, 1]
        Beta factor, i.e. velocity of the charged particle divided by the speed
        of light in vacuum: `v/c`.

    oversample : int > 0
        Sample from each directional bin (costhetadir and deltaphidir) this
        many times. Increase to obtain a more accurate average over the range
        of directions that the resulting ckv-emitter-direction can take within
        the same output (directional) bin. Note that there is no unique
        information given by sampling (more than once) in the spatial
        dimensions, so these dimensions ignore `oversample`. Therefore,
        the computational cost is `oversample**2`.

    num_cone_samples : int > 0
        Number of samples around the circumference of the Cherenkov cone.

    n_phase : float or None
        Required if `tdi_table` is an array; if `tdi_table` specifies a table
        location, then `n_phase` will be read from the `tdi_metadata.pkl`
        file.

    outdir : string or None
        If a string, use this directory to place the resulting
        `ckv_tdi_table.npy` file. This is optional if `tdi_table` specifies a
        file or directory (in which case the `outdir` will be inferred from
        this path).

    mmap_src : bool, optional
        Whether to (attempt to) memory map the source `tdi_table` (if `table`
        is a string pointing to the file/directory). Default is `True`, as
        tables can easily exceed the memory capacity of a machine.

    mmap_dst : bool, optional
        Whether to memory map the destination `ckv_tdi_table.npy` file.

    Returns
    -------
    numpy.ndarray (possibly memory-mapped)
        The Cherenkov-convolved table, same shape as the input table.

    Raises
    ------
    IOError
        If `tdi_table` is a string but neither a file nor a directory.
    ValueError
        If required metadata/paths are missing, or if `beta`/`n_phase` are
        below the Cherenkov threshold.
    """
    input_filename = None
    input_dirname = None
    if isinstance(tdi_table, string_types):
        tdi_table = expand(tdi_table)
        if isdir(tdi_table):
            input_filename = join(tdi_table, 'tdi_table.npy')
        elif isfile(tdi_table):
            input_filename = tdi_table
        else:
            raise IOError(
                '`tdi_table` is not a directory or file: "{}"'
                .format(tdi_table)
            )
        input_dirname = dirname(input_filename)

    if input_filename is None and outdir is None:
        raise ValueError(
            'You must provide an `outdir` if `tdi_table` is a python object'
            ' (i.e., not a file or directory path).'
        )

    if input_filename is None and n_phase is None:
        raise ValueError(
            'You must provide `n_phase` if `tdi_table` is a python object'
            ' (i.e., not a file or directory path).'
        )

    if n_phase is None:
        # BUGFIX: the original called the Python-2-only builtin `file(...)`,
        # which raises NameError on Python 3 even though this module targets
        # 2/3 compatibility via its __future__ imports. Use open() in a
        # context manager so the handle is also closed deterministically.
        with open(join(input_dirname, 'tdi_metadata.pkl'), 'rb') as pkl_file:
            meta = pickle.load(pkl_file)
        n_phase = meta['n_phase']

    if outdir is None:
        outdir = input_dirname
    mkdir(outdir)

    if input_filename is not None:
        tdi_table = np.load(
            input_filename,
            mmap_mode='r' if mmap_src else None,
        )

    # Cherenkov angle: cos(theta_ckv) = 1 / (n_phase * beta); > 1 means the
    # particle is below the Cherenkov threshold in this medium.
    cos_ckv = 1 / (n_phase * beta)
    if cos_ckv > 1:
        raise ValueError(
            'Particle moving at beta={} in medium with n_phase={} does not'
            ' produce Cherenkov light!'.format(beta, n_phase)
        )

    ckv_tdi_table_fpath = join(outdir, 'ckv_tdi_table.npy')
    if isfile(ckv_tdi_table_fpath):
        print(
            'WARNING! Destination file exists "{}"'
            .format(ckv_tdi_table_fpath)
        )

    if mmap_dst:
        # Allocate memory-mapped destination file up front.
        ckv_tdi_table = np.lib.format.open_memmap(
            filename=ckv_tdi_table_fpath,
            mode='w+',
            dtype=np.float32,
            shape=tdi_table.shape,
        )
    else:
        ckv_tdi_table = np.empty(shape=tdi_table.shape, dtype=np.float32)

    try:
        convolve_table(
            src=tdi_table,
            dst=ckv_tdi_table,
            cos_ckv=cos_ckv,
            num_cone_samples=num_cone_samples,
            oversample=oversample,
            costhetadir_min=-1,
            costhetadir_max=+1,
            phidir_min=-np.pi,
            phidir_max=+np.pi,
        )
    except BaseException:
        # Don't leave a half-written destination file behind; re-raise.
        del ckv_tdi_table
        if mmap_dst:
            remove(ckv_tdi_table_fpath)
        raise

    if not mmap_dst:
        np.save(ckv_tdi_table_fpath, ckv_tdi_table)

    return ckv_tdi_table
def parse_args(description=__doc__):
    """Build the command-line parser and parse ``sys.argv``.

    Returns
    -------
    argparse.Namespace
        Attributes map one-to-one onto the keyword arguments of
        ``generate_ckv_tdi_table``.
    """
    cli = ArgumentParser(description=description)

    cli.add_argument(
        '--tdi-table', required=True,
        help='''Path to TDI table or path to directory containing the file
        `tdi_table.npy`'''
    )
    cli.add_argument(
        '--beta', type=float, default=1.0,
        help='''Cherenkov emitter beta factor (v / c).'''
    )
    cli.add_argument(
        '--oversample', type=int, required=True,
        help='''Sample each output (costhetadir, deltaphidir) bin oversample^2
        times.'''
    )
    cli.add_argument(
        '--num-cone-samples', type=int, required=True,
        help='''Number of samples around the cone.'''
    )
    cli.add_argument(
        '--outdir', default=None,
        help='''Directory in which to store the resulting table; if not
        specified, output table will be stored alongside the input table'''
    )

    return cli.parse_args()
# CLI entry point: forward the parsed argparse Namespace directly as keyword
# arguments to generate_ckv_tdi_table (attribute names match parameters).
if __name__ == '__main__':
    ckv_tdi_table = generate_ckv_tdi_table(**vars(parse_args())) # pylint: disable=invalid-name
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
279,
2645,
600,
25,
15560,
28,
36460,
12,
11748,
12,
9150,
11,
2266,
18156,
12,
39605,
12,
3672,
198,
198,
37811,
... | 2.362545 | 3,081 |
#!/usr/bin/env python
"""Functions for server logging."""
import logging
from logging import handlers
import os
import socket
import time
from grr import config
from grr.lib import flags
try:
# pylint: disable=g-import-not-at-top
from grr.server.grr_response_server.local import log as local_log
# pylint: enable=g-import-not-at-top
except ImportError:
local_log = None
# Global Application Logger.
LOGGER = None
class GrrApplicationLogger(object):
  """The GRR application logger.

  These records are used for machine readable authentication logging of
  security critical events.
  """

  def GetNewEventId(self, event_time=None):
    """Return a unique Event ID string.

    Args:
      event_time: Optional timestamp in microseconds; defaults to now.

    Returns:
      A "<time_usec>:<hostname>:<pid>" string.
    """
    if event_time is None:
      # BUGFIX: `long` is a Python-2-only builtin and raises NameError on
      # Python 3; `int` is unbounded in Python 3 and behaves identically
      # (Python 2 promotes int to long automatically).
      event_time = int(time.time() * 1e6)

    return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid())

  def LogHttpAdminUIAccess(self, request, response):
    """Log an http based api call.

    Args:
      request: A WSGI request object.
      response: A WSGI response object.
    """
    # TODO(user): generate event_id elsewhere and use it for all the log
    # messages that have to do with handling corresponding request.
    event_id = self.GetNewEventId()

    api_method = response.headers.get("X-API-Method", "unknown")
    api_reason = response.headers.get("X-GRR-Reason", "none")
    log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % (
        event_id, api_method, request.user, api_reason, request.full_path,
        response.status_code)
    logging.info(log_msg)

  def LogHttpFrontendAccess(self, request, source=None, message_count=None):
    """Write a log entry for a Frontend or UI Request.

    Args:
      request: A HttpRequest protobuf.
      source: Client id of the client initiating the request. Optional.
      message_count: Number of messages received from the client. Optional.
    """
    # TODO(user): generate event_id elsewhere and use it for all the log
    # messages that have to do with handling corresponding request.
    event_id = self.GetNewEventId()

    log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % (event_id, request.source_ip,
                                                source or "<unknown>",
                                                request.method, request.url,
                                                request.user_agent,
                                                request.user,
                                                message_count or 0)
    logging.info(log_msg)
class PreLoggingMemoryHandler(handlers.BufferingHandler):
  """Buffering handler used before the logging subsystem is initialized."""

  def flush(self):
    """Trim the buffer instead of emitting it.

    Called when the buffer is full; keep only the newest ``capacity``
    records and silently drop the oldest one(s).
    """
    newest_records = self.buffer[-self.capacity:]
    self.buffer = newest_records
class RobustSysLogHandler(handlers.SysLogHandler):
  """Syslog handler that never raises when delivery fails."""

  def handleError(self, record):
    """Swallow socket errors - the syslog server might come back later."""
# Per-handler-class log levels. NOTE(review): two tiers suggest BASE is the
# default and VERBOSE is applied when verbose logging is configured — the
# consumer (SetLogLevels) is not visible in this chunk; confirm there.
BASE_LOG_LEVELS = {
    "FileHandler": logging.ERROR,
    "NTEventLogHandler": logging.CRITICAL,
    "StreamHandler": logging.ERROR,
    "RobustSysLogHandler": logging.CRITICAL,
}
VERBOSE_LOG_LEVELS = {
    "FileHandler": logging.DEBUG,
    "NTEventLogHandler": logging.INFO,
    "StreamHandler": logging.DEBUG,
    "RobustSysLogHandler": logging.INFO,
}
# Record format applied to server log output.
LOG_FORMAT = "%(levelname)s:%(asctime)s %(module)s:%(lineno)s] %(message)s"
def LogInit():
  """Install the configured log handlers and replay early buffered records."""
  logging.debug("Initializing Logging subsystem.")

  root = logging.getLogger()

  # Collect the pre-logging memory buffers *before* replacing the handlers,
  # so their contents can be replayed into the real handlers afterwards.
  buffered = []
  for existing in root.handlers:
    if existing.__class__.__name__ == "PreLoggingMemoryHandler":
      buffered.append(existing)

  # Swap in the configured handlers and apply the configured levels.
  root.handlers = list(GetLogHandlers())
  SetLogLevels()

  # Replay the early messages into the freshly installed handlers.
  for mem_handler in buffered:
    for record in mem_handler.buffer:
      root.handle(record)
def AppLogInit():
  """Initialize the Application Log.

  This log is what will be used whenever someone does a log.LOGGER call.
  These are used for more detailed application or event logs.

  Returns:
    GrrApplicationLogger object
  """
  logging.debug("Initializing Application Logger.")
  app_logger = GrrApplicationLogger()
  return app_logger
def ServerLoggingStartupInit():
  """Initialize the server logging configuration."""
  global LOGGER

  # Fall back to the stock initializers when no site-local override exists.
  if not local_log:
    LogInit()
    LOGGER = AppLogInit()
    return

  logging.debug("Using local LogInit from %s", local_log)
  local_log.LogInit()
  logging.debug("Using local AppLogInit from %s", local_log)
  LOGGER = local_log.AppLogInit()
# There is a catch 22 here: We need to start logging right away but we will only
# configure the logging system once the config is read. Therefore we set up a
# memory logger now and then when the log destination is configured we replay
# the logs into that. This ensures we do not lose any log messages during early
# program start up.
root_logger = logging.root
# Buffer up to 1000 early records; LogInit() later collects handlers of this
# class and replays their buffers into the real, configured handlers.
memory_logger = PreLoggingMemoryHandler(1000)
root_logger.addHandler(memory_logger)
memory_logger.setLevel(logging.DEBUG)
logging.debug("Starting GRR Prelogging buffer.")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
24629,
2733,
329,
4382,
18931,
526,
15931,
198,
198,
11748,
18931,
198,
6738,
18931,
1330,
32847,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
640,
198,
198,
6738,
1036,
81,
... | 2.787863 | 1,862 |
# coding: utf-8
# In[54]:
# In[56]:
from __future__ import print_function
import mxnet as mx
from mxnet import nd, gluon, autograd
from mxnet.gluon import nn
# In[57]:
import sys
from zipfile import ZipFile
import numpy as np
from matplotlib import pyplot as plt
'''load your data here'''
from sklearn.model_selection import train_test_split
# Returns images and labels corresponding for training and testing. Default mode is train.
# For retrieving test data pass mode as 'test' in function call.
# In[61]:
# Load the train/test splits. NOTE(review): `DataLoader` is defined in an
# earlier (stripped) notebook cell — confirm upstream.
d = DataLoader()
images_train, labels_train = d.load_data()
images_test, labels_test = d.load_data('test')

# Hold out 30% of the training images for validation.
X_train, X_val, y_train, y_val = train_test_split(
    images_train, labels_train, test_size=0.30, random_state=42)

batch_size = 1024

X_test = mx.nd.array(images_test)
y_test = mx.nd.array(labels_test)

dataset = mx.gluon.data.dataset.ArrayDataset(X_train, y_train)
Val_set = mx.gluon.data.dataset.ArrayDataset(X_val, y_val)
test_set = mx.gluon.data.dataset.ArrayDataset(X_test, y_test)

# BUGFIX: `shuffle` expects a bool, but the original passed the *strings*
# 'True' and 'False'. Any non-empty string is truthy, so the validation and
# test loaders were silently shuffled as well.
train_loader = mx.gluon.data.DataLoader(dataset, shuffle=True, batch_size=batch_size)
valid_loader = mx.gluon.data.DataLoader(Val_set, shuffle=False, batch_size=batch_size)
Test_loader = mx.gluon.data.DataLoader(test_set, shuffle=False, batch_size=batch_size)
# In[63]:
# In[64]:
# In[65]:
# Use GPU 0 when available, else CPU.
ctx = mx.gpu(0) if mx.test_utils.list_gpus() else mx.cpu(0)
# NOTE(review): `Model`, `transform` and `evaluate_accuracy` are not defined
# in this chunk — they appear to come from stripped notebook cells; confirm
# upstream. Also, `sys.argv[1]` raises IndexError when no mode flag is given.
if(sys.argv[1]=='--train'):
    net=Model()
    net.initialize(mx.init.Uniform(0.1), ctx=ctx)
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer='Adam',
        optimizer_params={'learning_rate': 0.001},
    )
    metric = mx.metric.Accuracy()
    loss_function = gluon.loss.SoftmaxCrossEntropyLoss()
    num_epochs = 50
    # NOTE(review): per-epoch loss below is divided by this hard-coded 60000,
    # not by the actual training-split size (70% after train_test_split) —
    # confirm whether this normalization is intended.
    number_ex=60000
    Train_loss=[]
    Val_loss=[]
    for epoch in range(num_epochs):
        sum_loss=0
        for inputs, labels in train_loader:
            #print(labels)
            inputs,labels = transform(inputs,labels)
            inputs = inputs.as_in_context(ctx)
            labels = labels.as_in_context(ctx)
            # Record the forward pass so gradients can be computed.
            with autograd.record():
                outputs = net(inputs)
                loss = loss_function(outputs, labels)
            loss.backward()
            metric.update(labels, outputs)
            sum_loss+=nd.sum(loss).asscalar()
            trainer.step(batch_size=inputs.shape[0])
        Train_loss.append(sum_loss/number_ex)
        val_acc,val_loss=evaluate_accuracy(valid_loader,net)
        Val_loss.append(val_loss)
        name, acc = metric.get()
        print('After epoch {}: Training {} ={} Validation accuracy = {}'.format(epoch + 1, name, acc,val_acc))
        metric.reset()
    # Plot the (sum-normalized) training and validation loss curves.
    plt.figure("Image")
    plt.title("Network 2 Loss vs Epoch")
    Train_loss1=[]
    for j in range(len(Train_loss)):
        Train_loss1.append(Train_loss[j]/np.sum(Train_loss))
    Val_loss1=[]
    for i in range(len(Val_loss)):
        Val_loss1.append(Val_loss[i]/np.sum(Val_loss))
    plt.plot(Train_loss1,c="red", label="Training Loss")
    plt.plot(Val_loss1,c="green", label="Validation Loss")
    plt.legend()
    file_name = "net1.params"
    net.save_parameters(file_name)
elif(sys.argv[1]=='--test'):
    net = Model()
    net.load_parameters("net1.params")
    # NOTE(review): `X` is assigned but never used.
    X=net.collect_params()
    cnt = 0
    accuracy = 0
    for data, label in Test_loader:
        data , label = transform(data,label)
        data = data.as_in_context(mx.cpu()).reshape((-1, 784))
        label = label.as_in_context(mx.cpu())
        # NOTE(review): recording gradients is unnecessary at test time.
        with autograd.record():
            output = net(data)
        acc = mx.metric.Accuracy()
        acc.update(preds=nd.argmax(output,axis=1),labels=label)
        #print("Test Accuracy : %f"%acc.get()[1])
        accuracy = accuracy + acc.get()[1]
        cnt = cnt + 1
    # Mean of per-batch accuracies (batches may be unequal sizes).
    print("Total Accuracy: ", float(accuracy/cnt))
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
4051,
5974,
198,
2,
554,
58,
3980,
5974,
628,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
220,
220,
198,
11748,
285,
87,
3262,
355,
285,
87,
198,
6738,
285,
87,
... | 2.193201 | 1,765 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from argparse import RawTextHelpFormatter
from jdcloud_cli.cement.ext.ext_argparse import expose
from jdcloud_cli.controllers.base_controller import BaseController
from jdcloud_cli.client_factory import ClientFactory
from jdcloud_cli.parameter_builder import collect_user_args, collect_user_headers
from jdcloud_cli.printer import Printer
from jdcloud_cli.skeleton import Skeleton
| [
2,
19617,
28,
40477,
23,
198,
198,
2,
15069,
2864,
28591,
5097,
2606,
35,
13,
9858,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.611684 | 291 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib import parse
from knobclient.common import utils
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.247525 | 202 |
import torch
import pydiffvg
| [
11748,
28034,
198,
11748,
279,
5173,
733,
45119,
198
] | 3.222222 | 9 |
import json
import os
from typing import Optional, Tuple
from pych_client.constants import (
BASE_URL_ENV,
CREDENTIALS_FILE,
DATABASE_ENV,
DEFAULT_BASE_URL,
DEFAULT_DATABASE,
DEFAULT_PASSWORD,
DEFAULT_USERNAME,
PASSWORD_ENV,
USERNAME_ENV,
)
from pych_client.logger import logger
from pych_client.typing import Params, Settings
# TODO: Benchmark different functions
| [
11748,
33918,
198,
11748,
28686,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
198,
198,
6738,
12972,
354,
62,
16366,
13,
9979,
1187,
1330,
357,
198,
220,
220,
220,
49688,
62,
21886,
62,
1677,
53,
11,
198,
220,
220,
220,
8740,
1961,
... | 2.439759 | 166 |
"""Telesat constellation"""
from . import satellite as stk_sat
from . import graphics
def addConstellation(sc):
"""Add Telesat constellation to the scenario."""
Re = 6371 # earth radius in km
alt_pol = 1000
sma_pol = alt_pol + Re
inc_pol = 99.5
numPlanes_pol = 3
numSatsPerPlane_pol = 12
satObjs = []
for plane in range(numPlanes_pol):
raan = 0 + plane * 63.2
trueAnomalyOffset = 0
for sat in range(numSatsPerPlane_pol):
trueAnomaly = trueAnomalyOffset + sat * 360 / numSatsPerPlane_pol
satName = 'Telesat_pol%02d%02d' % (plane, sat)
satObj = stk_sat.add(sc, satName, sma_pol, 0, inc_pol, raan, trueAnomaly)
stk_sat.graphics(satObj, graphics.Telesat)
satObjs.append(satObj)
print('.',end='')
subPlane = 2
for plane in range(subPlane):
raan = 94.8 + plane * 63.2
trueAnomalyOffset = 15
for sat in range(numSatsPerPlane_pol):
trueAnomaly = trueAnomalyOffset + sat * 360 / numSatsPerPlane_pol
satName = 'Telesat_pol%02d%02d' % (numPlanes_pol + plane, sat)
satObj = stk_sat.add(sc, satName, sma_pol, 0, inc_pol, raan, trueAnomaly)
stk_sat.graphics(satObj, graphics.Telesat)
satObjs.append(satObj)
print('.',end='')
for plane in range(1):
raan = 31.6
trueAnomalyOffset = 15
for sat in range(numSatsPerPlane_pol):
trueAnomaly = sat * 360 / numSatsPerPlane_pol
satName = 'Telesat_pol%02d%02d' % (numPlanes_pol + 5, sat)
satObj = stk_sat.add(sc, satName, sma_pol, 0, inc_pol, raan, trueAnomaly)
stk_sat.graphics(satObj, graphics.Telesat)
satObjs.append(satObj)
print('.',end='')
alt_inc = 1248
sma_inc = alt_inc + Re
inc_inc = 37.4
numPlanes_inc = 5
numSatsPerPlane_inc = 9
for plane in range(numPlanes_inc):
raan = 0 + plane * 72
trueAnomalyOffset = 0
for sat in range(numSatsPerPlane_inc):
trueAnomaly = trueAnomalyOffset + sat * 360 / numSatsPerPlane_inc
satName = 'Telesat_inc%02d%02d' % (plane, sat)
satObj = stk_sat.add(sc, satName, sma_inc, 0, inc_inc, raan, trueAnomaly)
stk_sat.graphics(satObj, graphics.Telesat)
satObjs.append(satObj)
print('.',end='')
print('\n', end='')
return satObjs
| [
37811,
33317,
274,
265,
38712,
37811,
198,
198,
6738,
764,
1330,
11210,
355,
336,
74,
62,
49720,
198,
6738,
764,
1330,
9382,
198,
198,
4299,
751,
34184,
28828,
7,
1416,
2599,
198,
220,
220,
220,
37227,
4550,
12088,
274,
265,
38712,
28... | 2.033634 | 1,219 |
from flask import Flask
from flask_mail import Mail, Message
# Flask application configured for Gmail SMTP delivery via Flask-Mail.
app =Flask(__name__)
# SECURITY(review): real-looking SMTP credentials are hard-coded below.
# Move them to environment variables / app config and rotate the password.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USERNAME'] = 'python2flask@gmail.com'
app.config['MAIL_PASSWORD'] = 'flask2python'
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
mail=Mail(app)
# BUGFIX: the original had a bare `@app.route("/")` with no function under
# it, which is a SyntaxError — the view body appears to have been stripped.
# Restore a minimal placeholder view so the module imports and runs.
@app.route("/")
def index():
    """Placeholder root view.

    TODO(review): restore the original handler; given the Flask-Mail setup
    above it presumably built a Message and called mail.send(...).
    """
    return "OK"


if __name__ == '__main__':
    app.run()
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
4529,
1330,
11099,
11,
16000,
198,
198,
1324,
796,
7414,
2093,
7,
834,
3672,
834,
8,
198,
198,
1324,
13,
11250,
17816,
5673,
4146,
62,
35009,
5959,
20520,
11639,
5796,
34788,
13,
14816,
... | 2.45679 | 162 |
import os
# NOTE(review): likely node-type tags for a filesystem-tree structure
# (root / directory / regular file) — confirm against the consuming code,
# which is not visible in this chunk.
ROOT = 0
DIR = 1
FILE = 2
| [
11748,
28686,
198,
198,
13252,
2394,
796,
657,
198,
34720,
796,
352,
198,
25664,
796,
362,
628,
198
] | 2.166667 | 18 |
# -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.caixa import BoletoCaixa
from .testutils import BoletoTestCase
# NOTE(review): `TestBancoCaixa` is not defined in this chunk — the TestCase
# class appears to have been stripped, so this line raises NameError as-is.
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoCaixa)

if __name__ == '__main__':
    unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
11748,
555,
715,
395,
201,
198,
11748,
4818,
8079,
201,
198,
201,
198,
6738,
12972,
45693,
1462,
13,
17796,
13,
6888,
844,
64,
1330,
347,
2305,
1462,
24334,
844,
... | 2.313559 | 118 |
import random
import re
from . import word_utl
# get a random sentence from an array of schemas
# get an array of schemas from a sentence
| [
11748,
4738,
201,
198,
11748,
302,
201,
198,
201,
198,
6738,
764,
1330,
1573,
62,
315,
75,
201,
198,
201,
198,
201,
198,
2,
12466,
123,
25443,
119,
35072,
141,
229,
18849,
20375,
45367,
220,
21727,
30143,
35072,
141,
229,
16142,
140,
... | 1.201613 | 124 |
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Tuple, List
import re
import time
from .Settings import Settings
from .Auxiliaries import Auxiliaries
from .POP3ResponseCodes import POP3ResponseCodes
from .SendDataToClientException import SendDataToClientException
from .AdapterThreadLockingWrapper import AdapterThreadLockingWrapper
from .adapters.AdapterBase import AdapterBase
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
18,
12,
2601,
682,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
569,
8836,
83,
3498,
15339,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
... | 3.674952 | 523 |
import numpy as np
import pytest
from analysis_lib.dlc_results_adapter import DlcResults, get_labels
from analysis_lib.behaviour.analyze_behaviour import get_region_stats, basic_behavioural_assay_algorithm
from analysis_lib.behaviour.arena_setup_adapter import ArenaSetup, Point, RectangleGeometry, Region
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
3781,
62,
8019,
13,
25404,
66,
62,
43420,
62,
324,
3429,
1330,
360,
44601,
25468,
11,
651,
62,
23912,
1424,
198,
6738,
3781,
62,
8019,
13,
20709,
37716,
13,
38200... | 3.244898 | 98 |
# -*- coding: utf-8 -*- #
from enum import IntEnum
from typing import T
class Singleton:
    """
    Singleton-pattern helper.

    NOTE(review): the three bare @staticmethod decorators below have no
    functions attached — the decorated methods appear to have been stripped
    from this chunk, so the class is a SyntaxError as written. Restore them.
    """
    @staticmethod

    @staticmethod

    @staticmethod
class Recursion:
    """
    Recursion helpers.
    """
    @staticmethod
    def find_key_for_dict(obj_data: dict, target: str):
        """
        Recurse through the dictionary until the final value for ``target``
        is reached; returns None if the key cannot be found.
        """
        # NOTE(review): `parse_obj` is not defined in this chunk — it is
        # presumably defined/imported elsewhere in the module; confirm.
        return parse_obj(obj_data, target)
class DictTemplate(object):
    """
    Dictionary-backed object template.

    NOTE(review): the class body has been stripped from this chunk. The
    original file's `DictToObject.verification` constructs instances via
    DictTemplate(val) and calls `.add(...)` on them, so a constructor and an
    `add` method exist in the full source — confirm there.
    """
class DictToObject(object):
    """
    Convert a dictionary into an object so attribute access can replace
    writing bracket subscripts.
    """
    # NOTE(review): this bare @staticmethod directly decorates
    # `verification`, yet that method takes `self` — an intervening method
    # was probably stripped from this chunk; confirm upstream.
    @staticmethod

    def verification(self, node: DictTemplate, value):
        """
        Validation routine: recursively wrap nested dict/list/tuple members
        of ``value`` in DictTemplate nodes and attach them to ``node``.
        """
        node.init_data = value
        if isinstance(value, dict):
            for key, val in value.items():
                if isinstance(val, (dict, list, tuple)):
                    val = self.verification(DictTemplate(val), val)
                node.add(key, val)
        elif isinstance(value, list):
            list_temp = []
            for val in value:
                if isinstance(val, (dict, list, tuple)):
                    val = self.verification(DictTemplate(val), val)
                list_temp.append(val)
            node.add('', list_temp)
        return node
class Switch:
    """
    Compensates for Python's lack of a native ``switch`` statement.

    Usage tutorial:
        from aestate.util.others import Switch,Case,CaseDefault
        base_symbol = lambda x: x + x
        val = 3

        Style 1:
        # Case(selector value, method to run when the case matches, and the
        # arguments that method needs once matched)
        source = Switch(Case(val)) + \
            Case(0, base_symbol, val) + \
            Case(1, base_symbol, val) + \
            Case(2, base_symbol, val) + \
            Case(3, base_symbol, val) + \
            Case(4, base_symbol, val) + \
            Case(5, base_symbol, val) + \
            CaseDefault(lambda: False)
        print(ajson.aj.parse(source, bf=True))

        Style 2:
        source = Switch(Case(val)). \
            case(0, base_symbol, val). \
            case(1, base_symbol, val). \
            case(2, base_symbol, val). \
            case(3, base_symbol, val). \
            case(4, base_symbol, val). \
            case(5, base_symbol, val). \
            end(lambda: False)
        print(ajson.aj.parse(source, bf=True))
    """

    def end(self, default_method, *args, **kwargs):
        """
        Default handler: run the first matched case's method; otherwise
        fall back to ``default_method(*args, **kwargs)``.
        """
        for k, v in self.opera.items():
            if v.flag:
                return v.method(*v.args, **v.kwargs)
        return default_method(*args, **kwargs)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
6738,
33829,
1330,
2558,
4834,
388,
198,
6738,
19720,
1330,
309,
628,
198,
198,
4871,
5573,
10565,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
45635,
... | 1.630146 | 1,579 |
from django.contrib import admin
from .models import AddTask
# Make the AddTask model manageable through the Django admin interface.
admin.site.register(AddTask)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
3060,
25714,
628,
198,
28482,
13,
15654,
13,
30238,
7,
4550,
25714,
8,
198
] | 3.407407 | 27 |
import torch
import torch.nn as nn
from torch.autograd import Variable
# CNN model (2 convolutional layers)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
628,
198,
2,
8100,
9104,
357,
17,
3063,
7679,
8,
198
] | 3.333333 | 30 |
from __future__ import unicode_literals
import os
import re
from django.utils import six
from django.utils.six.moves import range
from reviewboard.diffviewer.processors import (filter_interdiff_opcodes,
post_process_filtered_equals)
class MoveRange(object):
    """Stores information on a move range.

    This will store the start and end of the range, and all groups that
    are a part of it.
    """
    # NOTE(review): the bare @property below has no function attached — the
    # property body appears stripped from this chunk (SyntaxError as-is).
    @property
# Module-level default opcode generator class; swapped out at runtime via
# set_diff_opcode_generator_class(). NOTE(review): DiffOpcodeGenerator is
# defined elsewhere in this file (not visible in this chunk).
_generator = DiffOpcodeGenerator
def get_diff_opcode_generator_class():
    """Return the class currently registered for generating opcodes."""
    generator_cls = _generator
    return generator_cls
def set_diff_opcode_generator_class(renderer):
    """Sets the DiffOpcodeGenerator class used for generating opcodes.

    Args:
        renderer: The opcode generator class to install; must be truthy.

    Raises:
        ValueError: If ``renderer`` is falsy. (The original used ``assert``,
            which is silently stripped under ``python -O``; note this also
            changes the raised type from AssertionError to ValueError.)
    """
    if not renderer:
        raise ValueError('renderer must be a valid opcode generator class')

    globals()['_generator'] = renderer
def get_diff_opcode_generator(*args, **kwargs):
    """Instantiate and return the currently registered DiffOpcodeGenerator."""
    generator_cls = _generator
    return generator_cls(*args, **kwargs)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
42625,
14208,
13,
26791,
1330,
2237,
198,
6738,
42625,
14208,
13,
26791,
13,
19412,
13,
76,
5241,
1330,
2837,
198,
198,... | 2.848571 | 350 |
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
# Plugin-scoped logger.
logger = logger.bind(name='my_anime_list')
# MyAnimeList numeric codes for the user's list-status filter.
STATUS = {'watching': 1, 'completed': 2, 'on_hold': 3, 'dropped': 4, 'plan_to_watch': 6, 'all': 7}
# MyAnimeList numeric codes for a series' airing status.
AIRING_STATUS = {'airing': 1, 'finished': 2, 'planned': 3, 'all': 6}
# Recognised media types for the `type` config option.
ANIME_TYPE = ['all', 'tv', 'ova', 'movie', 'special', 'ona', 'music', 'unknown']
class MyAnimeList:
    """Creates entries for series and movies from a MyAnimeList list.

    Syntax:
      my_anime_list:
        username: <value>
        status:
          - <watching|completed|on_hold|dropped|plan_to_watch>
          - <watching|completed|on_hold|dropped|plan_to_watch>
          ...
        airing_status:
          - <airing|finished|planned>
          - <airing|finished|planned>
          ...
        type:
          - <series|ova...>
    """

    # Config schema: `username` is mandatory; status/airing_status/type each
    # accept either a single value or a list (via one_or_more).
    schema = {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'status': one_or_more(
                {'type': 'string', 'enum': list(STATUS.keys()), 'default': 'all'},
                unique_items=True,
            ),
            'airing_status': one_or_more(
                {'type': 'string', 'enum': list(AIRING_STATUS.keys()), 'default': 'all'},
                unique_items=True,
            ),
            'type': one_or_more(
                {'type': 'string', 'enum': list(ANIME_TYPE), 'default': 'all'}, unique_items=True
            ),
        },
        'required': ['username'],
        'additionalProperties': False,
    }

    # NOTE(review): the methods decorated by @cached below and by the
    # module-level @event on the next line have been stripped from this
    # chunk — these bare decorators are a SyntaxError as written.
    @cached('my_anime_list', persist='2 hours')

@event('plugin.register')
| [
6738,
2604,
14717,
1330,
49706,
198,
198,
6738,
7059,
1136,
1330,
13877,
198,
6738,
7059,
1136,
13,
11250,
62,
15952,
2611,
1330,
530,
62,
273,
62,
3549,
198,
6738,
7059,
1136,
13,
13000,
1330,
21617,
198,
6738,
7059,
1136,
13,
15596,
... | 2.189889 | 811 |
import abc
import enum
import logging
import time
from pynput.mouse import Controller, Button
# Module-level logger.
logger = logging.getLogger(__name__)
# NOTE(review): bare @enum.unique with no class following — the enum class
# definition appears to have been stripped from this chunk (SyntaxError).
@enum.unique
| [
11748,
450,
66,
198,
11748,
33829,
198,
11748,
18931,
198,
11748,
640,
198,
198,
6738,
279,
2047,
1996,
13,
35888,
1330,
22741,
11,
20969,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
31,
... | 3.166667 | 48 |
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from numpy import argmax
import logging
import os
import pickle
import copy
from dsrt.config.defaults import DataConfig
| [
6738,
1341,
35720,
13,
3866,
36948,
1330,
36052,
27195,
12342,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1881,
21352,
27195,
12342,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
1822,
9806,
198,
198,
11748,
18... | 3.646154 | 65 |
import json
import re
import sys
import textwrap
from collections import defaultdict, OrderedDict
from six.moves.collections_abc import Iterable
from copy import deepcopy
from itertools import product
################
# Constants.
################
# Regex fragments recognising option/argument tokens in a "simple spec".
PATTERNS = dict(
    simple = dict(
        long_opt = r'--(\w[\w\-]*)',
        short_opts = r'-(\w+)',
        short_opt = r'-(\w)',
        opt_arg = r'([A-Z][A-Z\d]*)',
        pos_arg = r'\<([\w]+)\>',
    ),
)
# Same patterns anchored to the full string (\A ... \Z) for exact matches.
PATTERNS['anchored'] = {
    k : r'\A' + v + r'\Z'
    for k, v in PATTERNS['simple'].items()
}
# Occurrence-count bounds; N_MAX stands in for "unbounded".
N_ZERO = 0
N_ONE = 1
N_MAX = 999999
# (min, max) occurrence tuples built from the bounds above.
ZERO_TUPLE = (N_ZERO, N_ZERO)
ONE_TUPLE = (N_ONE, N_ONE)
ZERO_OR_ONE_TUPLE = (N_ZERO, N_ONE)
ANY_TUPLE = (N_ZERO, N_MAX)
# Option-syntax characters.
OPT_PREFIX = '-'
UNDERSCORE = '_'
WILDCARD_OPTION = '*'
LONG_OPT_PREFIX = OPT_PREFIX + OPT_PREFIX
SHORT_OPT_PREFIX = OPT_PREFIX
OPT_SPEC_STRIP_CHARS = OPT_PREFIX + '<>'
# Token types
WHITESPACE = 'WHITESPACE'
LONG_OPT = 'LONG_OPT'
SHORT_OPT = 'SHORT_OPT'
POS_OPT = 'POS_OPT'
OPT_ARG = 'OPT_ARG'
EOF = 'EOF'
# Regex components.
PATT_END = r'(?=\s|$)'
PATT_OPT_CHAR = r'[\w\-]+'
# Token types:
# - The type.
# - Whether the RegexLexer should emit the tokens of this type.
# - The regex to match the token.
# - TODO: should create a TokenType data object.
SIMPLE_SPEC_TOKENS = (
    (WHITESPACE, False, re.compile(r'\s+')),
    (LONG_OPT, True, re.compile(r'--' + PATT_OPT_CHAR + PATT_END)),
    (SHORT_OPT, True, re.compile(r'-' + PATT_OPT_CHAR + PATT_END)),
    (POS_OPT, True, re.compile(r'\<' + PATT_OPT_CHAR + r'\>' + PATT_END)),
    (OPT_ARG, True, re.compile(r'[A-Z\d_\-]+' + PATT_END)),
)
################
# Parser.
################
class Parser(object):
    '''
    Appears to be the package's main command-line parser (note the kwargs
    accepted below). NOTE(review): the method bodies of this class — and
    the functions belonging to the bare @property / @wildcards.setter
    decorators below — have been stripped from this chunk; the class is a
    SyntaxError as written.
    '''

    # Keyword arguments accepted by the constructor (presumably validated
    # against user-supplied kwargs — confirm in the full source).
    VALID_KWARGS = {
        'opts',
        'simple_spec',
        'wildcards',
        'sections',
        'formatter_config',
        'program',
        'add_help',
    }

    @property

    @wildcards.setter
################
# Enum.
################
################
# EnumMember.
################
################
# Enum instances: user facing.
################
# NOTE(review): `Enum` here is a *custom* class defined earlier in this file
# (stripped from this chunk) — its call signature (name followed by member
# names or member dicts) differs from stdlib enum.Enum; confirm upstream.
AliasStyle = Enum('AliasStyle', 'SEPARATE', 'MERGED')
HelpTextStyle = Enum('HelpTextStyle', 'CLI', 'MAN')
OptTextStyle = Enum('OptTextStyle', 'CLI', 'MAN')
SectionName = Enum(
    'SectionName',
    dict(name = 'USAGE', label = 'Usage'),
    dict(name = 'POS', label = 'Positional arguments'),
    dict(name = 'OPT', label = 'Options'),
    dict(name = 'ALIASES', label = 'Aliases'),
    dict(name = 'ERR', label = 'Errors'),
)
################
# Enum instances: not user facing.
################
OptType = Enum('OptType', 'LONG', 'SHORT', 'POS', 'WILD')
PhraseLogicType = Enum('PhraseLogicType', 'AND', 'OR')
PhraseType = Enum('PhraseType', 'OPT', 'POS', 'PHRASE', 'WILD', 'ZONE')
ExitCode = Enum(
    'ExitCode',
    dict(name = 'SUCCESS', code = 0),
    dict(name = 'PARSE_HELP', code = 0),
    dict(name = 'PARSE_FAIL', code = 2),
)
################
# Errors.
################
class OptoPyError(Exception):
'''
'''
pass
################
# FormatterConfig.
################
class FormatterConfig(object):
'''
'''
DEFAULTS = dict(
program_name = '',
section_label_punct = ':',
after_section_label = '',
after_section = '\n',
program_summary = '',
style = HelpTextStyle.CLI,
opt_style = OptTextStyle.CLI,
alias_style = AliasStyle.SEPARATE,
)
################
# Section.
################
class Section(object):
'''
'''
@property
################
# GrammarSpecParser.
################
################
# Opt.
################
class Opt(object):
'''
'''
@property
@property
@property
@property
@property
@nargs.setter
@property
@ntimes.setter
@property
################
# ParsedOptions.
################
class ParsedOptions(object):
'''
'''
################
# ParsedOpt.
################
class ParsedOpt(object):
'''
'''
@property
@property
@property
@property
@property
################
# Phrase.
################
################
# RegexLexer.
################
################
# GenericParserMixin.
################
################
# SimpleSpecParser.
################
####
#
# To implement a parser:
#
# - Inherit from GenericParserMixin.
#
# - Define self.lexer and self.parser_functions.
#
# - Each of those functions should return some data element
# appropriate for the grammar (if the current Token matches)
# or None.
#
# Usage example:
#
# txt = '--foo FF GG -x --blort -z Z1 Z2 <q> <r> --debug'
# ssp = SimpleSpecParser(txt)
# tokens = list(ssp.parse())
#
####
################
# Token.
################
################
# Helpers.
################
################
# Temporary stuff.
################
| [
11748,
33918,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
2420,
37150,
198,
6738,
17268,
1330,
4277,
11600,
11,
14230,
1068,
35,
713,
198,
6738,
2237,
13,
76,
5241,
13,
4033,
26448,
62,
39305,
1330,
40806,
540,
198,
6738,
4866,
1330... | 2.336921 | 2,137 |
import pymongo
| [
11748,
279,
4948,
25162,
628,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220
] | 1.6 | 20 |
import os
import time
from datetime import datetime
import pandas as pd
import numpy as np
# Monte Carlo Trial Random Seeds
random_states = list(range(1, 2019, 20)) # 10 seeds
num_estimators_range = [15, 20, 25, 30, 35, 45, 50]
#num_estimators_range = [50, 100, 500, 1000]
# Discrete Wavelet Transform Types
Discrete_Meyer = ["dmey"]
Daubechies = ["db1", "db2", "db3", "db4", "db5", "db6", "db7", "db8", "db9", "db10", "db11", "db12", "db13", "db14", "db15", "db16", "db17", "db18", "db19", "db20"]
Symlets = ["sym2", "sym3", "sym4", "sym5", "sym6", "sym7", "sym8", "sym9", "sym10", "sym11", "sym12", "sym13", "sym14", "sym15", "sym16", "sym17", "sym18", "sym19", "sym20"]
Coiflet = ["coif1", "coif2", "coif3", "coif4", "coif5"]
Biorthogonal = ["bior1.1", "bior1.3", "bior1.5", "bior2.2", "bior2.4", "bior2.6", "bior2.8", "bior3.1", "bior3.3", "bior3.5", "bior3.7", "bior3.9", "bior4.4", "bior5.5", "bior6.8"]
Reverse_Biorthogonal = ["rbio1.1", "rbio1.3", "rbio1.5", "rbio1.2", "rbio1.4", "rbio1.6", "rbio1.8", "rbio3.1", "rbio3.3", "rbio3.5", "rbio3.7", "rbio3.9", "rbio4.4", "rbio5.5", "rbio6.8"]
dwt_types = Discrete_Meyer + Coiflet + Daubechies[1:4] + Symlets[1:4] + Daubechies[5:6] # DWTs used to extract features so far
dwt_types = ["db4"]
# Run Monte Carlo Trials
monte_df_cols = ["dwt_type", "random_seed", "num_estimators", "accuracy", "recall", "precision", "f1_score", "matthews_corr_coef"]
monte_df = pd.DataFrame([], columns=monte_df_cols)
for dwt in dwt_types:
print("Starting monte carlo trials for the "+dwt+" transform at "+datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
for number_estimators in num_estimators_range:
for seed in random_states:
file_name = "/home/jeffrey/repos/VSB_Power_Line_Fault_Detection/extracted_features/train_features_"+dwt+".csv"
file_name = "/home/jeffrey/repos/VSB_Power_Line_Fault_Detection/extracted_features/train_features_thresh_0.71_"+dwt+".csv"
df = load_feature_data(file_name)
features = df[["entropy", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings"]]
features = df[["entropy", "n5", "n25", "n75", "n95", "median", "mean", "std", "var", "rms", "no_zero_crossings", "no_mean_crossings", "min_height", "max_height", "mean_height", "min_width", "max_width", "mean_width", "num_detect_peak", "num_true_peaks"]]
labels = df[["fault"]]
m_accuracy, m_recall, m_precision, m_f1, mcc = classification_random_forest(features, labels, number_estimators, seed)
trial_results = pd.DataFrame([[dwt, seed, number_estimators, m_accuracy, m_recall, m_precision, m_f1, mcc]], columns=monte_df_cols)
monte_df = monte_df.append(trial_results, ignore_index=True)
monte_df.to_csv("random_forest_monte_carlo_trials.csv", sep=",")
print("Done! at "+datetime.now().strftime('%Y-%m-%d %H:%M:%S')) | [
198,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
2,
22489,
40089,
21960,
14534,
41833,
198,
25120,
62,
27219,
796,... | 2.15994 | 1,338 |
import os
import sys
from six import StringIO
from dagster.utils.indenting_printer import IndentingPrinter
class IndentingBufferPrinter(IndentingPrinter):
'''Subclass of IndentingPrinter wrapping a StringIO.'''
def read(self):
'''Get the value of the backing StringIO.'''
return self.buffer.getvalue()
| [
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
2237,
1330,
10903,
9399,
198,
198,
6738,
48924,
1706,
13,
26791,
13,
521,
36589,
62,
1050,
3849,
1330,
1423,
36589,
6836,
3849,
628,
198,
4871,
1423,
36589,
28632,
6836,
3849,
7,
5497,
36... | 3.036697 | 109 |
from pycorda import Node
from datetime import datetime
import matplotlib
from matplotlib import pyplot
import pandas as pd
import chart_studio, chart_studio.plotly as py, plotly.graph_objs as go
from sklearn import linear_model as lm
# Format for timestamp string is YYYY-MM-DD HH:MM:SS.FFF
def plot_time_series(timestamp_column, title=None):
"""Plots time series for a given sequence of timestamps
Parameters
----------
timestamp_column : iterable object
iterable of timestamp strings in the %Y-%m-%d %H:%M:%S.%f format
title : str, optional
figure title
"""
dt_list = [datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f') for timestamp in timestamp_column]
dates = matplotlib.dates.date2num(dt_list)
fig, ax = pyplot.subplots()
if title is not None:
ax.set_title(title)
ax.plot_date(dates, [0]*len(dates))
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M:%S.%f')
fig.autofmt_xdate()
def plot_ids(ids, fontsize, title=None):
"""Plots IDs as labelled equally spaced points
Parameters
----------
ids : iterable object
iterable of ID strings
fontsize : int
font size of point labels
title : str, optional
figure title
"""
sorted_ids = sorted(ids)
n = len(ids)
points = range(n)
fig, ax = pyplot.subplots()
if title is not None:
ax.set_title(title)
ax.scatter(points, [0]*n)
for i, txt in enumerate(sorted_ids):
ax.annotate(txt, (points[i], 0.001), ha='center', fontsize=fontsize)
ax.set_xlim(-0.5, min(5, n))
class Plotter(object):
"""Plotter object for plotting data obtained from a database node
tbname_ts methods will plot time series for table TBNAME. After choosing which plots
to create by calling the relevant methods, use the show method to
display the plots.
"""
def __init__(self, node):
"""
Parameters
----------
node: pycorda.Node
node used to gather data for display
"""
self.node = node
def plot_timeseries_fungible_qty(self,contract):
'''
SELECT RECORDED_TIMESTAMP,QUANTITY FROM VAULT_STATES, VAULT_FUNGIBLE_STATES
WHERE VAULT_STATES.TRANSACTION_ID = VAULT_FUNGIBLE_STATES.TRANSACTION_ID
AND VAULT_STATES.CONTRACT_STATE_CLASS_NAME = 'net.corda.finance.contracts.asset.Cash$State'
'''
vault_states = self.node.get_vault_states()
vault_states = vault_states[vault_states.CONTRACT_STATE_CLASS_NAME==contract]
vault_fungible_states = self.node.get_vault_fungible_states()
df = vault_states.merge(vault_fungible_states)[['RECORDED_TIMESTAMP','QUANTITY']]
df['RECORDED_TIMESTAMP'] = pd.to_datetime(df['RECORDED_TIMESTAMP'])
df.plot(kind='line',x='RECORDED_TIMESTAMP',y='QUANTITY',color='red')
print(df)
# def vault_states_recorded_ts(self):
# df = self.node.get_vault_states()[]
# pyplot.plot()
# def vault_states_recorded_ts(self):
# df = self.node.get_vault_states()
# plot_time_series(df['RECORDED_TIMESTAMP'].dropna(), 'Vault states recorded times')
def vault_states_status(self):
"""Plots pie chart of the relative frequencies of vault state status"""
df = self.node.get_vault_states()
df['STATE_STATUS'].value_counts().plot.pie()
def show(self):
"""Displays all plots"""
pyplot.show()
| [
6738,
12972,
66,
585,
64,
1330,
19081,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
2603,
29487,
8019,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
8262,
62,
19149,
952,
11,... | 2.528346 | 1,270 |
import os
import argparse
from omegaconf import OmegaConf
from argparse import ArgumentParser
CONFIG_PATH = 'train/configs/gpt_config.yaml'
if __name__ == "__main__":
__arg_parser = configure_arg_parser()
__args = __arg_parser.parse_args()
config = OmegaConf.load(__args.config)
preprocesser = GPTPreprocess(config.preprocess.raw_data, config.preprocess.train_data)
preprocesser.preprocess()
| [
11748,
28686,
201,
198,
11748,
1822,
29572,
201,
198,
6738,
267,
28917,
7807,
69,
1330,
19839,
18546,
201,
198,
6738,
1822,
29572,
1330,
45751,
46677,
201,
198,
201,
198,
10943,
16254,
62,
34219,
796,
705,
27432,
14,
11250,
82,
14,
70,
... | 2.60119 | 168 |
import contextlib
import os
from typing import ContextManager, Optional, Sequence
import stable_baselines3.common.logger as sb_logger
from imitation.data import types
def _build_output_formats(
folder: types.AnyPath,
format_strs: Sequence[str] = None,
) -> Sequence[sb_logger.KVWriter]:
"""Build output formats for initializing a Stable Baselines Logger.
Args:
folder: Path to directory that logs are written to.
format_strs: An list of output format strings. For details on available
output formats see `stable_baselines3.logger.make_output_format`.
"""
os.makedirs(folder, exist_ok=True)
output_formats = [sb_logger.make_output_format(f, folder) for f in format_strs]
return output_formats
def is_configured() -> bool:
"""Return True if the custom logger is active."""
return isinstance(sb_logger.Logger.CURRENT, _HierarchicalLogger)
def configure(
folder: types.AnyPath, format_strs: Optional[Sequence[str]] = None
) -> None:
"""Configure Stable Baselines logger to be `accumulate_means()`-compatible.
After this function is called, `stable_baselines3.logger.{configure,reset}()`
are replaced with stubs that raise RuntimeError.
Args:
folder: Argument from `stable_baselines3.logger.configure`.
format_strs: An list of output format strings. For details on available
output formats see `stable_baselines3.logger.make_output_format`.
"""
# Replace `stable_baselines3.logger` methods with erroring stubs to
# prevent unexpected logging state from mixed logging configuration.
sb_logger.configure = _sb_logger_configure_replacement
sb_logger.reset = _sb_logger_reset_replacement
if format_strs is None:
format_strs = ["stdout", "log", "csv"]
output_formats = _build_output_formats(folder, format_strs)
default_logger = sb_logger.Logger(folder, output_formats)
hier_logger = _HierarchicalLogger(default_logger, format_strs)
sb_logger.Logger.CURRENT = hier_logger
sb_logger.log("Logging to %s" % folder)
assert is_configured()
def record(key, val, exclude=None) -> None:
"""Alias for `stable_baselines3.logger.record`."""
sb_logger.record(key, val, exclude)
def dump(step=0) -> None:
"""Alias for `stable_baselines3.logger.dump`."""
sb_logger.dump(step)
def accumulate_means(subdir_name: types.AnyPath) -> ContextManager:
"""Temporarily redirect record() to a different logger and auto-track kvmeans.
Within this context, the original logger is swapped out for a special logger
in directory `"{current_logging_dir}/raw/{subdir_name}"`.
The special logger's `stable_baselines3.logger.record(key, val)`, in addition
to tracking its own logs, also forwards the log to the original logger's
`.record_mean()` under the key `mean/{subdir_name}/{key}`.
After the context exits, these means can be dumped as usual using
`stable_baselines3.logger.dump()` or `imitation.util.logger.dump()`.
Note that the behavior of other logging methods, `log` and `record_mean`
are unmodified and will go straight to the original logger.
This context cannot be nested.
Args:
subdir_name: A string key for building the logger, as described above.
Returns:
A context manager.
"""
assert is_configured()
hier_logger = sb_logger.Logger.CURRENT # type: _HierarchicalLogger
return hier_logger.accumulate_means(subdir_name)
| [
11748,
4732,
8019,
198,
11748,
28686,
198,
6738,
19720,
1330,
30532,
13511,
11,
32233,
11,
45835,
198,
198,
11748,
8245,
62,
12093,
20655,
18,
13,
11321,
13,
6404,
1362,
355,
264,
65,
62,
6404,
1362,
198,
198,
6738,
40260,
13,
7890,
1... | 2.848485 | 1,221 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
#socket server
import socket
import datetime
import os
import sys
mi_socket = socket.socket()
mi_socket.bind( ('localhost', 8000) )
mi_socket.listen(5)
mi_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_connect()
| [
2,
44971,
4382,
220,
198,
198,
11748,
17802,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11632,
62,
44971,
796,
17802,
13,
44971,
3419,
198,
11632,
62,
44971,
13,
21653,
7,
19203,
36750,
3256,
38055,
8,
1267,... | 2.50495 | 101 |
#-*- coding: UTF-8 -*-
"""
http://www.apple.com/DTDs/PropertyList-1.0.dtd
plistObject :
(array | data | date | dict | real | integer | string | true | false )
Collections:
array:
dict: key plistObject
Primitive types
string
data: Base-64 encoded
date: ISO 8601, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'
Numerical primitives:
true, false, real, integer
"""
from collections import OrderedDict
import xml.etree.ElementTree as ET
# escape '&', '<', '>'
from xml.sax.saxutils import unescape, escape
import datetime
import base64
import dateutil.parser
| [
2,
12,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
37811,
198,
4023,
1378,
2503,
13,
18040,
13,
785,
14,
35,
21016,
82,
14,
21746,
8053,
12,
16,
13,
15,
13,
67,
8671,
198,
489,
396,
10267,
1058,
198,
7,
18747,
930,
1366,... | 2.714976 | 207 |
import os
import pytest
from ..tools import process
EXAMPLES_DIR = "./examples"
@pytest.mark.parametrize(
"directory, command",
[
("grouped_pmdarima", ["python", "grouped_pmdarima_arima_example.py"]),
("grouped_pmdarima", ["python", "grouped_pmdarima_autoarima_example.py"]),
("grouped_pmdarima", ["python", "grouped_pmdarima_series_exploration.py"]),
("grouped_pmdarima", ["python", "grouped_pmdarima_pipeline_example.py"]),
(
"grouped_pmdarima",
["python", "grouped_pmdarima_subset_prediction_example.py"],
),
(
"grouped_pmdarima",
["python", "grouped_pmdarima_analyze_differencing_terms_and_apply.py"],
),
("grouped_prophet", ["python", "grouped_prophet_example.py"]),
("grouped_prophet", ["python", "grouped_prophet_subset_prediction_example.py"]),
],
)
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
6738,
11485,
31391,
1330,
1429,
198,
198,
6369,
2390,
6489,
1546,
62,
34720,
796,
366,
19571,
1069,
12629,
1,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,... | 2.165865 | 416 |